sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
mlflow/mlflow:mlflow/utils/env_pack.py
import shutil import subprocess import sys import tarfile import tempfile from contextlib import contextmanager from dataclasses import dataclass from pathlib import Path from typing import Generator, Literal import yaml from mlflow.artifacts import download_artifacts from mlflow.exceptions import MlflowException from mlflow.models.model import MLMODEL_FILE_NAME from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE from mlflow.utils.databricks_utils import DatabricksRuntimeVersion, get_databricks_runtime_version from mlflow.utils.environment import _REQUIREMENTS_FILE_NAME from mlflow.utils.logging_utils import eprint EnvPackType = Literal["databricks_model_serving"] @dataclass(kw_only=True) class EnvPackConfig: name: EnvPackType install_dependencies: bool = True _ARTIFACT_PATH = "_databricks" _MODEL_VERSION_TAR = "model_version.tar" _MODEL_ENVIRONMENT_TAR = "model_environment.tar" def _validate_env_pack(env_pack): """Checks if env_pack is a supported value Supported values are: - the string "databricks_model_serving" - an ``EnvPackConfig`` with ``name == 'databricks_model_serving'`` and a boolean ``install_dependencies`` field. - None """ if env_pack is None: return None if isinstance(env_pack, str): if env_pack == "databricks_model_serving": return EnvPackConfig(name="databricks_model_serving", install_dependencies=True) raise MlflowException.invalid_parameter_value( f"Invalid env_pack value: {env_pack!r}. Expected: 'databricks_model_serving'." ) if isinstance(env_pack, EnvPackConfig): if env_pack.name != "databricks_model_serving": raise MlflowException.invalid_parameter_value( f"Invalid EnvPackConfig.name: {env_pack.name!r}. " "Expected 'databricks_model_serving'." ) if not isinstance(env_pack.install_dependencies, bool): raise MlflowException.invalid_parameter_value( "EnvPackConfig.install_dependencies must be a bool." 
) return env_pack # Anything else is invalid raise MlflowException.invalid_parameter_value( "env_pack must be either None, the string 'databricks_model_serving', or an EnvPackConfig " "with a boolean 'install_dependencies' field." ) def _tar(root_path: Path, tar_path: Path) -> tarfile.TarFile: """ Package all files under root_path into a tar at tar_path, excluding __pycache__, *.pyc, and wheels_info.json. """ def exclude(tarinfo: tarfile.TarInfo): name = tarinfo.name base = Path(name).name if "__pycache__" in name or base.endswith(".pyc") or base == "wheels_info.json": return None return tarinfo # Pull in symlinks with tarfile.open(tar_path, "w", dereference=True) as tar: tar.add(root_path, arcname=".", filter=exclude) return tar @contextmanager def _get_source_artifacts( model_uri: str, local_model_path: str | None = None ) -> Generator[Path, None, None]: """ Get source artifacts and handle cleanup of downloads. Does not mutate local_model_path contents if provided. Args: model_uri: The URI of the model to package. local_model_path: Optional local path to model artifacts. Yields: Path: The path to the source artifacts directory. """ source_dir = Path(local_model_path or download_artifacts(artifact_uri=model_uri)) yield source_dir if not local_model_path: shutil.rmtree(source_dir) # TODO: Check pip requirements using uv instead. @contextmanager def pack_env_for_databricks_model_serving( model_uri: str, *, enforce_pip_requirements: bool = False, local_model_path: str | None = None, ) -> Generator[str, None, None]: """ Generate Databricks artifacts for fast deployment. Args: model_uri: The URI of the model to package. enforce_pip_requirements: Whether to enforce pip requirements installation. local_model_path: Optional local path to model artifacts. If provided, pack the local artifacts instead of downloading. Yields: str: The path to the local artifacts directory containing the model artifacts and environment. 
Example: >>> with pack_env_for_databricks_model_serving("models:/my-model/1") as artifacts_dir: ... # Use artifacts_dir here ... pass """ dbr_version = DatabricksRuntimeVersion.parse() if not dbr_version.is_client_image: raise ValueError( f"Serverless environment is required when packing environment for Databricks Model " f"Serving. Current version: {dbr_version}" ) with _get_source_artifacts(model_uri, local_model_path) as source_artifacts_dir: # Check runtime version consistency # We read the MLmodel file directly instead of using Model.to_dict() because to_dict() adds # the current runtime version via get_databricks_runtime_version(), which would prevent us # from detecting runtime version mismatches. mlmodel_path = source_artifacts_dir / MLMODEL_FILE_NAME with open(mlmodel_path) as f: model_dict = yaml.safe_load(f) if "databricks_runtime" not in model_dict: raise ValueError( "Model must have been created in a Databricks runtime environment. " "Missing 'databricks_runtime' field in MLmodel file." ) current_runtime = DatabricksRuntimeVersion.parse() model_runtime = DatabricksRuntimeVersion.parse(model_dict["databricks_runtime"]) if current_runtime.major != model_runtime.major: raise ValueError( f"Runtime version mismatch. 
Model was created with runtime " f"{model_dict['databricks_runtime']} (major version {model_runtime.major}), " f"but current runtime is {get_databricks_runtime_version()} " f"(major version {current_runtime.major})" ) # Check that _databricks directory does not exist in source if (source_artifacts_dir / _ARTIFACT_PATH).exists(): raise MlflowException( f"Source artifacts contain a '{_ARTIFACT_PATH}' directory and is not " "eligible for use with env_pack.", error_code=INVALID_PARAMETER_VALUE, ) if enforce_pip_requirements: eprint("Installing model requirements...") try: subprocess.run( [ sys.executable, "-m", "pip", "install", "-r", str(source_artifacts_dir / _REQUIREMENTS_FILE_NAME), ], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, ) except subprocess.CalledProcessError as e: eprint("Error installing requirements:") eprint(e.stdout) raise with tempfile.TemporaryDirectory() as temp_dir: # Copy source artifacts to packaged_model_dir packaged_model_dir = Path(temp_dir) / "model" shutil.copytree( source_artifacts_dir, packaged_model_dir, dirs_exist_ok=False, symlinks=False ) # Package model artifacts and env into packaged_model_dir/_databricks packaged_artifacts_dir = packaged_model_dir / _ARTIFACT_PATH packaged_artifacts_dir.mkdir(exist_ok=False) _tar(source_artifacts_dir, packaged_artifacts_dir / _MODEL_VERSION_TAR) _tar(Path(sys.prefix), packaged_artifacts_dir / _MODEL_ENVIRONMENT_TAR) yield str(packaged_model_dir)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/utils/env_pack.py", "license": "Apache License 2.0", "lines": 180, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/utils/test_env_pack.py
import subprocess import sys import tarfile import venv from pathlib import Path from unittest import mock import pytest import yaml from mlflow.exceptions import MlflowException from mlflow.utils import env_pack from mlflow.utils.databricks_utils import DatabricksRuntimeVersion from mlflow.utils.env_pack import EnvPackConfig, _validate_env_pack @pytest.fixture def mock_dbr_version(): with mock.patch.object( DatabricksRuntimeVersion, "parse", return_value=DatabricksRuntimeVersion( is_client_image=True, major=2, minor=0, is_gpu_image=False, ), ): yield def test_tar_function_path_handling(tmp_path): # Create test files root_dir = tmp_path / "root" root_dir.mkdir() (root_dir / "test.txt").write_text("test content") (root_dir / "__pycache__").mkdir() (root_dir / "__pycache__" / "test.pyc").write_text("bytecode") (root_dir / "wheels_info.json").write_text("{}") # Create tar file tar_path = tmp_path / "test.tar" env_pack._tar(root_dir, tar_path) # Verify tar contents with tarfile.open(tar_path) as tar: members = tar.getmembers() names = {m.name for m in members} assert names == {".", "./test.txt"} def test_pack_env_for_databricks_model_serving_pip_requirements(tmp_path, mock_dbr_version): """Test that pack_env_for_databricks_model_serving correctly handles pip requirements installation. 
""" # Mock download_artifacts to return a path mock_artifacts_dir = tmp_path / "artifacts" mock_artifacts_dir.mkdir() (mock_artifacts_dir / "requirements.txt").write_text("numpy==1.21.0") # Create MLmodel file with correct runtime version mlmodel_path = mock_artifacts_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.2.0", "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) # Create a mock environment directory mock_env_dir = tmp_path / "mock_env" venv.create(mock_env_dir, with_pip=True) with ( mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir), ), mock.patch("subprocess.run") as mock_run, mock.patch("sys.prefix", str(mock_env_dir)), ): # Mock subprocess.run to simulate successful pip install mock_run.return_value = mock.Mock(returncode=0) with env_pack.pack_env_for_databricks_model_serving( "models:/test-model/1", enforce_pip_requirements=True ) as artifacts_dir: # Verify artifacts directory exists and contains expected files artifacts_path = Path(artifacts_dir) assert artifacts_path.exists() assert (artifacts_path / env_pack._ARTIFACT_PATH).exists() assert (artifacts_path / env_pack._ARTIFACT_PATH / env_pack._MODEL_VERSION_TAR).exists() assert ( artifacts_path / env_pack._ARTIFACT_PATH / env_pack._MODEL_ENVIRONMENT_TAR ).exists() # Verify the environment tar contains our mock files env_tar_path = ( artifacts_path / env_pack._ARTIFACT_PATH / env_pack._MODEL_ENVIRONMENT_TAR ) with tarfile.open(env_tar_path, "r:tar") as tar: members = tar.getmembers() member_names = {m.name for m in members} # Check for pip in site-packages based on platform if sys.platform == "win32": expected_pip_path = "./Lib/site-packages/pip" else: expected_pip_path = ( f"./lib/python{sys.version_info.major}.{sys.version_info.minor}" "/site-packages/pip" ) assert expected_pip_path in member_names # Verify subprocess.run was called with correct arguments mock_run.assert_called_once() args, kwargs = 
mock_run.call_args assert args[0] == [ sys.executable, "-m", "pip", "install", "-r", str(mock_artifacts_dir / "requirements.txt"), ] assert kwargs["check"] is True assert kwargs["stdout"] == subprocess.PIPE assert kwargs["stderr"] == subprocess.STDOUT assert kwargs["text"] is True def test_pack_env_for_databricks_model_serving_pip_requirements_error(tmp_path, mock_dbr_version): # Mock download_artifacts to return a path mock_artifacts_dir = tmp_path / "artifacts" mock_artifacts_dir.mkdir() (mock_artifacts_dir / "requirements.txt").write_text("invalid-package==1.0.0") # Create MLmodel file with correct runtime version mlmodel_path = mock_artifacts_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.2.0", "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) with ( mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir), ), mock.patch("subprocess.run") as mock_run, mock.patch("mlflow.utils.env_pack.eprint") as mock_eprint, ): mock_run.return_value = mock.Mock( returncode=1, stdout="ERROR: Could not find a version that satisfies the requirement invalid-package", ) mock_run.side_effect = subprocess.CalledProcessError(1, "pip install", "Error message") with pytest.raises( subprocess.CalledProcessError, match="Command 'pip install' returned non-zero exit status 1.", ): with env_pack.pack_env_for_databricks_model_serving( "models:/test/1", enforce_pip_requirements=True ): pass # Verify error messages were printed mock_eprint.assert_any_call("Error installing requirements:") mock_eprint.assert_any_call("Error message") def test_pack_env_for_databricks_model_serving_unsupported_version(): with mock.patch.object( DatabricksRuntimeVersion, "parse", return_value=DatabricksRuntimeVersion( is_client_image=False, # Not a client image major=13, minor=0, is_gpu_image=False, ), ): with pytest.raises(ValueError, match="Serverless environment is required"): with 
env_pack.pack_env_for_databricks_model_serving("models:/test/1"): pass def test_pack_env_for_databricks_model_serving_runtime_version_check(tmp_path, monkeypatch): """Test that pack_env_for_databricks_model_serving correctly checks runtime version compatibility. """ # Mock download_artifacts to return a path mock_artifacts_dir = tmp_path / "artifacts" mock_artifacts_dir.mkdir() # Create MLmodel file with different runtime version mlmodel_path = mock_artifacts_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.3.0", # Different major version "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) # Set current runtime to client.2.0 monkeypatch.setenv("DATABRICKS_RUNTIME_VERSION", "client.2.0") with mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir) ): with pytest.raises(ValueError, match="Runtime version mismatch"): with env_pack.pack_env_for_databricks_model_serving("models:/test-model/1"): pass # Test that same major version works mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.2.1", # Same major version "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) # Create a mock environment directory mock_env_dir = tmp_path / "mock_env" mock_env_dir.mkdir() with ( mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir) ), mock.patch("sys.prefix", str(mock_env_dir)), ): with env_pack.pack_env_for_databricks_model_serving( "models:/test-model/1" ) as artifacts_dir: assert Path(artifacts_dir).exists() @pytest.mark.parametrize( "test_input", [ None, "databricks_model_serving", EnvPackConfig(name="databricks_model_serving", install_dependencies=True), EnvPackConfig(name="databricks_model_serving", install_dependencies=False), ], ) def test_validate_env_pack_with_valid_inputs(test_input): # valid string should not raise; None should be treated as no-op if test_input is None: assert _validate_env_pack(test_input) is None 
else: assert _validate_env_pack(test_input) is not None @pytest.mark.parametrize( ("test_input", "error_message"), [ (EnvPackConfig(name="other", install_dependencies=True), "Invalid EnvPackConfig.name*"), ( EnvPackConfig(name="databricks_model_serving", install_dependencies="yes"), "EnvPackConfig.install_dependencies must be a bool.", ), ({"name": "databricks_model_serving"}, "env_pack must be either None*"), ("something_else", "Invalid env_pack value*"), ], ) def test_validate_env_pack_throws_errors_on_invalid_inputs(test_input, error_message): with pytest.raises(MlflowException, match=error_message): _validate_env_pack(test_input) def test_pack_env_for_databricks_model_serving_missing_runtime_version(tmp_path, mock_dbr_version): # Mock download_artifacts to return a path mock_artifacts_dir = tmp_path / "artifacts" mock_artifacts_dir.mkdir() # Create MLmodel file without databricks_runtime field mlmodel_path = mock_artifacts_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) with mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir) ): with pytest.raises( ValueError, match="Model must have been created in a Databricks runtime environment" ): with env_pack.pack_env_for_databricks_model_serving("models:/test-model/1"): pass def test_pack_env_for_databricks_model_serving_rejects_existing_databricks_dir( tmp_path, mock_dbr_version ): # Mock download_artifacts to return a path mock_artifacts_dir = tmp_path / "artifacts" mock_artifacts_dir.mkdir() (mock_artifacts_dir / "requirements.txt").write_text("numpy==1.21.0") # Create MLmodel file with correct runtime version mlmodel_path = mock_artifacts_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.2.0", "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) # Create existing _databricks directory existing_databricks_dir = mock_artifacts_dir / env_pack._ARTIFACT_PATH 
existing_databricks_dir.mkdir() with ( mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir), ), ): # This should raise an error because _databricks directory exists in source with pytest.raises( MlflowException, match="Source artifacts contain a '_databricks' directory" ): with env_pack.pack_env_for_databricks_model_serving( "models:/test-model/1", enforce_pip_requirements=False ): pass def test_pack_env_with_local_model_path_no_mutation(tmp_path, mock_dbr_version): # Create a local directory with model artifacts local_model_dir = tmp_path / "local_model" local_model_dir.mkdir() (local_model_dir / "requirements.txt").write_text("numpy==1.21.0") (local_model_dir / "model.pkl").write_text("model data") # Create MLmodel file with correct runtime version mlmodel_path = local_model_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.2.0", "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) # Create a mock environment directory mock_env_dir = tmp_path / "mock_env" venv.create(mock_env_dir, with_pip=True) with mock.patch("sys.prefix", str(mock_env_dir)): # Call with local_model_path with env_pack.pack_env_for_databricks_model_serving( "models:/test-model/1", local_model_path=str(local_model_dir), enforce_pip_requirements=False, ) as artifacts_dir: # Verify returned directory contains expected files artifacts_path = Path(artifacts_dir) assert artifacts_path.exists() assert (artifacts_path / "requirements.txt").exists() assert (artifacts_path / "model.pkl").exists() assert (artifacts_path / "MLmodel").exists() # Verify _databricks directory exists in returned path databricks_path = artifacts_path / env_pack._ARTIFACT_PATH assert databricks_path.exists() assert (databricks_path / env_pack._MODEL_VERSION_TAR).exists() assert (databricks_path / env_pack._MODEL_ENVIRONMENT_TAR).exists() # CRITICAL: Verify original local_model_dir is NOT mutated assert not (local_model_dir / 
env_pack._ARTIFACT_PATH).exists() # Verify original files are untouched assert (local_model_dir / "requirements.txt").read_text() == "numpy==1.21.0" assert (local_model_dir / "model.pkl").read_text() == "model data" # After context exit, verify local_model_dir is still not mutated assert not (local_model_dir / env_pack._ARTIFACT_PATH).exists() def test_pack_env_with_download_cleanup(tmp_path, mock_dbr_version): # Mock download_artifacts to return a path mock_artifacts_dir = tmp_path / "downloaded_artifacts" mock_artifacts_dir.mkdir() (mock_artifacts_dir / "requirements.txt").write_text("numpy==1.21.0") # Create MLmodel file with correct runtime version mlmodel_path = mock_artifacts_dir / "MLmodel" mlmodel_path.write_text( yaml.dump( { "databricks_runtime": "client.2.0", "flavors": {"python_function": {"model_path": "model.pkl"}}, } ) ) # Create a mock environment directory mock_env_dir = tmp_path / "mock_env" venv.create(mock_env_dir, with_pip=True) with ( mock.patch( "mlflow.utils.env_pack.download_artifacts", return_value=str(mock_artifacts_dir), ), mock.patch("sys.prefix", str(mock_env_dir)), ): # Call without local_model_path to trigger download with env_pack.pack_env_for_databricks_model_serving( "models:/test-model/1", enforce_pip_requirements=False ) as artifacts_dir: # During context, downloaded artifacts should exist assert Path(artifacts_dir).exists() assert (Path(artifacts_dir) / "requirements.txt").exists() # After context exit, downloaded artifacts should be cleaned up assert not mock_artifacts_dir.exists()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/utils/test_env_pack.py", "license": "Apache License 2.0", "lines": 377, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/pyspark/optuna/study.py
import datetime import logging import tempfile import traceback from collections.abc import Callable, Iterable from dataclasses import dataclass from pathlib import Path from typing import Any import optuna import pandas as pd from optuna import exceptions, pruners, samplers, storages from optuna.study import Study from optuna.trial import FrozenTrial, TrialState from pyspark.sql import SparkSession from pyspark.sql.functions import col import mlflow from mlflow import MlflowClient from mlflow.exceptions import ExecutionException from mlflow.optuna.storage import MlflowStorage _logger = logging.getLogger(__name__) @dataclass class ResumeInfo: is_resumed: bool study_name: str | None = None existing_trials: int | None = None completed_trials: int | None = None best_value: float | None = None best_params: dict[str, Any] | None = None def is_spark_connect_mode() -> bool: """Check if the current Spark session is running in client mode.""" try: from pyspark.sql.utils import is_remote except ImportError: return False return is_remote() def _optimize_sequential( study: "optuna.Study", func: "optuna.study.study.ObjectiveFuncType", mlflow_client: MlflowClient, n_trials: int = 1, timeout: float | None = None, catch: Iterable[type[Exception]] = (), callbacks: Iterable[Callable[[Study, FrozenTrial], None]] | None = None, ) -> None: """ Run optimization sequentially. It is modified from _optimize_sequential in optuna (https://github.com/optuna/optuna/blob/e1e30e7150047e5f582b8fef1eeb65386cb1c4c1/optuna/study/_optimize.py#L121) Convert the nested call to one function and log the error messages to mlflow. 
""" i_trial = 0 time_start = datetime.datetime.now() while True: if study._stop_flag: break if i_trial >= n_trials: break i_trial += 1 if timeout is not None: elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds() if elapsed_seconds >= timeout: break state = None value_or_values = None func_err = None func_err_fail_exc_info = None trial = study.ask() try: value_or_values = func(trial) except exceptions.TrialPruned as e: state = TrialState.PRUNED func_err = e except (Exception, KeyboardInterrupt) as e: state = TrialState.FAIL func_err = e func_err_fail_exc_info = traceback.format_exc() try: frozen_trial, warning_message = optuna.study._tell._tell_with_warning( study=study, trial=trial, value_or_values=value_or_values, state=state, suppress_warning=True, ) except Exception: frozen_trial = study._storage.get_trial(trial._trial_id) warning_message = None if frozen_trial.state == TrialState.COMPLETE: _logger.info(f"Trial {trial.number} finished with parameters: {trial.params}.") elif frozen_trial.state == TrialState.PRUNED: _logger.info("Trial {} pruned. 
{}".format(frozen_trial._trial_id, str(func_err))) mlflow_client.set_terminated(frozen_trial._trial_id, status="KILLED") elif frozen_trial.state == TrialState.FAIL: error_message = None if func_err is not None: error_message = func_err_fail_exc_info elif warning_message is not None: error_message = warning_message with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir, "error_message.txt") path.write_text(error_message) # Log the file as an artifact in the active MLflow run mlflow_client.log_artifact(frozen_trial._trial_id, path) mlflow_client.set_terminated(frozen_trial._trial_id, status="FAILED") if ( frozen_trial.state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch) ): raise func_err if callbacks is not None: for callback in callbacks: callback(study, frozen_trial) class MlflowSparkStudy(Study): """A wrapper of :class:`~optuna.study.Study` to incorporate Optuna with spark via MLflow experiment. This class automatically resumes existing studies with the same name, allowing for interrupted optimization to continue from where it left off. .. 
code-block:: python :caption: Basic Usage from mlflow.optuna.storage import MlflowStorage from mlflow.pyspark.optuna.study import MlflowSparkStudy def objective(trial): x = trial.suggest_float("x", -10, 10) return (x - 2) ** 2 experiment_id = "507151065975140" study_name = "spark_mlflow_storage" storage = MlflowStorage(experiment_id=experiment_id) mlflow_study = MlflowSparkStudy(study_name, storage) mlflow_study.optimize(objective, n_trials=4) # Later, create another instance with same name to resume resumed_study = MlflowSparkStudy(study_name, storage) print(f"Resumed with {len(resumed_study.trials)} existing trials") resumed_study.optimize(objective, n_trials=4) # Continue optimization """ def __init__( self, study_name: str, storage: MlflowStorage, sampler: samplers.BaseSampler | None = None, pruner: pruners.BasePruner | None = None, mlflow_tracking_uri: str | None = None, ): self.study_name = study_name self._storage = storages.get_storage(storage) self.sampler = sampler or samplers.TPESampler() self.pruner = pruner or pruners.MedianPruner() self.spark = SparkSession.active() # check whether the SparkConnect mode self._is_spark_connect_mode = is_spark_connect_mode() self._mlflow_tracking_env = mlflow_tracking_uri or mlflow.get_tracking_uri() mlflow.set_tracking_uri(self._mlflow_tracking_env) self.mlflow_client = MlflowClient() if not isinstance(self._storage, MlflowStorage): raise ValueError( f"MlflowSparkStudy only works with `MlflowStorage`. But get {type(self._storage)}." 
) # Check if study exists and auto-resume if it does if self._storage.get_study_id_by_name_if_exists(self.study_name): # Load existing study self._study = optuna.load_study( study_name=self.study_name, sampler=self.sampler, storage=self._storage ) self._study_id = self._storage.get_study_id_from_name(self.study_name) self._is_resumed = True _logger.info( f"Resuming existing study '{self.study_name}' with {len(self._study.trials)} trials" ) else: # Create new study self._study = optuna.create_study( study_name=self.study_name, sampler=self.sampler, storage=self._storage ) self._study_id = self._storage.get_study_id_from_name(self.study_name) self._is_resumed = False _logger.info(f"Created new study '{self.study_name}'") self._directions = self._storage.get_study_directions(self._study_id) @property def is_resumed_study(self) -> bool: """Check if this study was resumed from existing data. Returns: True if the study was resumed from existing data, False if it's a new study """ return self._is_resumed @property def completed_trials_count(self) -> int: """Number of completed trials in the study. Returns: Count of trials that have completed successfully """ return len([t for t in self._study.trials if t.state == TrialState.COMPLETE]) def get_resume_info(self) -> ResumeInfo | None: """Get information about the resumed study. 
Returns: ResumeInfo dataclass containing resume information including trial counts and best results """ if not self._is_resumed: return ResumeInfo(is_resumed=False) return ResumeInfo( is_resumed=True, study_name=self.study_name, existing_trials=len(self._study.trials), completed_trials=self.completed_trials_count, best_value=self._study.best_value if self._study.trials else None, best_params=self._study.best_params if self._study.trials else None, ) def optimize( self, func: "optuna.study.study.ObjectiveFuncType", n_trials: int | None = None, timeout: float | None = None, n_jobs: int = -1, catch: Iterable[type[Exception]] = (), callbacks: Iterable[Callable[[Study, FrozenTrial], None]] | None = None, ) -> None: # Add logging for resume information if self._is_resumed and self._study.trials: _logger.info(f""" Continuing optimization with {len(self._study.trials)} existing trials. Current best value: {self._study.best_value} """) elif self._is_resumed: _logger.info("Resuming study with no previous trials") else: _logger.info("Starting optimization for new study") experiment_id = self._storage._experiment_id study_name = self.study_name mlflow_tracking_env = self._mlflow_tracking_env sampler = self.sampler def run_task_on_executor_pd(iterator): mlflow.set_tracking_uri(mlflow_tracking_env) mlflow_client = MlflowClient() storage = MlflowStorage(experiment_id=experiment_id) study = optuna.load_study(study_name=study_name, sampler=sampler, storage=storage) num_trials = sum(map(len, iterator)) error_message = None try: _optimize_sequential( study=study, func=func, mlflow_client=mlflow_client, n_trials=num_trials, timeout=timeout, catch=catch, callbacks=callbacks, ) except BaseException: error_message = traceback.format_exc() yield pd.DataFrame({"error": [error_message]}) num_tasks = n_trials if n_jobs == -1: n_jobs = num_tasks input_df = self.spark.range(start=0, end=num_tasks, step=1, numPartitions=n_jobs) trial_tag = f"optuna_trial_{study_name}_{experiment_id}" if 
self._is_spark_connect_mode: self.spark.addTag(trial_tag) else: job_group_id = self.spark.sparkContext.getLocalProperty("spark.jobGroup.id") if job_group_id is None: job_group_id = trial_tag job_group_description = f"optuna_trial_{study_name}" self.spark.sparkContext.setJobGroup( job_group_id, job_group_description, interruptOnCancel=True ) try: result_df = input_df.mapInPandas( func=run_task_on_executor_pd, schema="error string", ) except KeyboardInterrupt: if self._is_spark_connect_mode: self.spark.interruptTag(trial_tag) else: self.spark.sparkContext.cancelJobGroup(trial_tag) _logger.debug("MlflowSparkStudy optimize terminated by user.") self.mlflow_client.set_terminated(self._study_id, "KILLED") raise if "error" in result_df.columns: failed_runs = result_df.filter(col("error").isNotNull()) error_rows = failed_runs.select("error").collect() if len(error_rows) > 0: first_non_null_value = error_rows[0][0] self.mlflow_client.set_terminated(self._study_id, "KILLED") raise ExecutionException( f"Optimization run for Optuna MlflowSparkStudy failed. " f"See full error details in the failed MLflow runs. " f"Number of failed runs: {len(error_rows)}. " f"First trial failure message: {first_non_null_value}" ) self.mlflow_client.set_terminated(self._study_id)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/pyspark/optuna/study.py", "license": "Apache License 2.0", "lines": 290, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/pyspark/optuna/test_study.py
import logging import os import numpy as np import pyspark import pytest from optuna.samplers import TPESampler from packaging.version import Version import mlflow from mlflow.exceptions import ExecutionException from mlflow.pyspark.optuna.study import MlflowSparkStudy from tests.optuna.test_storage import setup_storage # noqa: F401 from tests.pyfunc.test_spark import get_spark_session _logger = logging.getLogger(__name__) def _get_spark_session_with_retry(max_tries=3): conf = pyspark.SparkConf() for attempt in range(max_tries): try: return get_spark_session(conf) except Exception as e: if attempt >= max_tries - 1: raise _logger.exception( f"Attempt {attempt} to create a SparkSession failed ({e!r}), retrying..." ) # Specify `autouse=True` to ensure that a context is created # before any tests are executed. This ensures that the Hadoop filesystem # does not create its own SparkContext without the MLeap libraries required by # other tests. @pytest.fixture(scope="module", autouse=True) def spark(): if Version(pyspark.__version__) < Version("3.1"): spark_home = ( os.environ.get("SPARK_HOME") if "SPARK_HOME" in os.environ else os.path.dirname(pyspark.__file__) ) conf_dir = os.path.join(spark_home, "conf") os.makedirs(conf_dir, exist_ok=True) with open(os.path.join(conf_dir, "spark-defaults.conf"), "w") as f: conf = """ spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" """ f.write(conf) with _get_spark_session_with_retry() as spark: yield spark def test_study_optimize_run(setup_storage): storage = setup_storage study_name = "test-study" sampler = TPESampler(seed=10) mlflow_study = MlflowSparkStudy( study_name, storage, sampler=sampler, mlflow_tracking_uri=mlflow.get_tracking_uri() ) def objective(trial): x = trial.suggest_float("x", -10, 10) return (x - 2) ** 2 mlflow_study.optimize(objective, n_trials=8, n_jobs=4) assert sorted(mlflow_study.best_params.keys()) == ["x"] 
assert len(mlflow_study.trials) == 8 np.testing.assert_allclose(mlflow_study.best_params["x"], 5.426412865334919, rtol=1e-6) def test_study_with_failed_objective(setup_storage): storage = setup_storage study_name = "test-study" sampler = TPESampler(seed=10) mlflow_study = MlflowSparkStudy( study_name, storage, sampler=sampler, mlflow_tracking_uri=mlflow.get_tracking_uri() ) def fail_objective(_): raise ValueError() with pytest.raises( ExecutionException, match="Optimization run for Optuna MlflowSparkStudy failed", ): mlflow_study.optimize(fail_objective, n_trials=4) def test_auto_resume_existing_study(setup_storage): storage = setup_storage study_name = "resume-test-study" sampler = TPESampler(seed=42) # Create first study and run some trials study1 = MlflowSparkStudy(study_name, storage, sampler=sampler) assert not study1.is_resumed_study def objective(trial): return trial.suggest_float("x", 0, 10) ** 2 study1.optimize(objective, n_trials=3, n_jobs=1) first_trial_count = len(study1.trials) first_best_value = study1.best_value # Create second study with same name - should resume study2 = MlflowSparkStudy(study_name, storage, sampler=sampler) assert study2.is_resumed_study assert len(study2.trials) == first_trial_count assert study2.best_value == first_best_value # Continue optimization study2.optimize(objective, n_trials=2, n_jobs=1) assert len(study2.trials) == first_trial_count + 2 # Assert that the resumed study generates a better (lower) objective value than the first study assert study2.best_value <= first_best_value def test_new_study_is_not_resumed(setup_storage): storage = setup_storage study_name = "new-study" study = MlflowSparkStudy(study_name, storage) assert not study.is_resumed_study assert study.completed_trials_count == 0 info = study.get_resume_info() assert not info.is_resumed def test_resume_info_method(setup_storage): storage = setup_storage study_name = "info-test-study" sampler = TPESampler(seed=123) # New study study1 = 
MlflowSparkStudy(study_name, storage, sampler=sampler) info = study1.get_resume_info() assert not info.is_resumed # Run some trials def objective(trial): return trial.suggest_float("x", 0, 1) ** 2 study1.optimize(objective, n_trials=2, n_jobs=1) # Resume study study2 = MlflowSparkStudy(study_name, storage, sampler=sampler) info = study2.get_resume_info() assert info.is_resumed assert info.study_name == study_name assert info.existing_trials == 2 assert info.completed_trials == 2 assert hasattr(info, "best_value") assert hasattr(info, "best_params") assert info.best_value is not None assert info.best_params is not None def test_completed_trials_count_property(setup_storage): storage = setup_storage study_name = "count-test-study" study = MlflowSparkStudy(study_name, storage) assert study.completed_trials_count == 0 def objective(trial): return trial.suggest_float("x", 0, 1) study.optimize(objective, n_trials=3, n_jobs=1) assert study.completed_trials_count == 3 # Resume and check count is preserved resumed_study = MlflowSparkStudy(study_name, storage) assert resumed_study.completed_trials_count == 3 def test_resume_preserves_best_results(setup_storage): storage = setup_storage study_name = "best-results-study" sampler = TPESampler(seed=456) def objective(trial): x = trial.suggest_float("x", -10, 10) return (x - 2) ** 2 # First optimization study1 = MlflowSparkStudy(study_name, storage, sampler=sampler) study1.optimize(objective, n_trials=5, n_jobs=1) original_best_value = study1.best_value original_best_params = study1.best_params.copy() # Resume and verify best results are preserved study2 = MlflowSparkStudy(study_name, storage, sampler=sampler) assert study2.best_value == original_best_value assert study2.best_params == original_best_params # Continue optimization and verify it can improve study2.optimize(objective, n_trials=5, n_jobs=1) # Best value should be the same or better (lower for minimization) assert study2.best_value <= original_best_value
{ "repo_id": "mlflow/mlflow", "file_path": "tests/pyspark/optuna/test_study.py", "license": "Apache License 2.0", "lines": 159, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/test_mlflow_version_comp.py
import os import subprocess import sys import uuid from pathlib import Path import numpy as np import sklearn from pyspark.sql import SparkSession from sklearn.linear_model import LinearRegression import mlflow from mlflow.models import Model def check_load(model_uri: str) -> None: Model.load(model_uri) model = mlflow.sklearn.load_model(model_uri) np.testing.assert_array_equal(model.predict([[1, 2]]), [3.0]) model = mlflow.pyfunc.load_model(model_uri) np.testing.assert_array_equal(model.predict([[1, 2]]), [3.0]) def check_register(model_uri: str) -> None: mv = mlflow.register_model(model_uri, "model") model = mlflow.pyfunc.load_model(f"models:/{mv.name}/{mv.version}") np.testing.assert_array_equal(model.predict([[1, 2]]), [3.0]) def check_list_artifacts_with_run_id_and_path(run_id: str, path: str) -> None: # List artifacts client = mlflow.MlflowClient() artifacts = [a.path for a in client.list_artifacts(run_id=run_id, path=path)] # Ensure both run and model artifacts are listed assert "model/MLmodel" in artifacts assert "model/test.txt" in artifacts artifacts = [a.path for a in client.list_artifacts(run_id=run_id, path=path)] assert "model/MLmodel" in artifacts assert "model/test.txt" in artifacts # Non-existing artifact path should return an empty list assert len(client.list_artifacts(run_id=run_id, path="unknown")) == 0 assert len(mlflow.artifacts.list_artifacts(run_id=run_id, artifact_path="unknown")) == 0 def check_list_artifacts_with_model_uri(model_uri: str) -> None: artifacts = [a.path for a in mlflow.artifacts.list_artifacts(artifact_uri=model_uri)] assert "model/MLmodel" in artifacts assert "model/test.txt" in artifacts def check_download_artifacts_with_run_id_and_path(run_id: str, path: str, tmp_path: Path) -> None: out_path = mlflow.artifacts.download_artifacts( run_id=run_id, artifact_path=path, dst_path=tmp_path / str(uuid.uuid4()) ) files = [f.name for f in Path(out_path).iterdir() if f.is_file()] assert "MLmodel" in files assert "test.txt" in files 
client = mlflow.MlflowClient() out_path = client.download_artifacts( run_id=run_id, path=path, dst_path=tmp_path / str(uuid.uuid4()) ) files = [f.name for f in Path(out_path).iterdir() if f.is_file()] assert "MLmodel" in files assert "test.txt" in files def check_download_artifacts_with_model_uri(model_uri: str, tmp_path: Path) -> None: out_path = mlflow.artifacts.download_artifacts( artifact_uri=model_uri, dst_path=tmp_path / str(uuid.uuid4()) ) files = [f.name for f in Path(out_path).iterdir() if f.is_file()] # Ensure both run and model artifacts are downloaded assert "MLmodel" in files assert "test.txt" in files def check_evaluate(model_uri: str) -> None: # Model evaluation eval_res = mlflow.models.evaluate( model=model_uri, data=np.array([[1, 2]]), targets=np.array([3]), model_type="regressor", ) assert "mean_squared_error" in eval_res.metrics def check_spark_udf(model_uri: str) -> None: # Spark UDF if os.name != "nt": with SparkSession.builder.getOrCreate() as spark: udf = mlflow.pyfunc.spark_udf( spark, model_uri, result_type="double", env_manager="local", ) df = spark.createDataFrame([[1, 2]], ["col1", "col2"]) # This line fails with the following error on Windows: # File ".../pyspark\python\lib\pyspark.zip\pyspark\serializers.py", line 472, in loads # return cloudpickle.loads(obj, encoding=encoding) # ModuleNotFoundError: No module named 'pandas' pred = df.select(udf("col1", "col2").alias("pred")).collect() assert [row.pred for row in pred] == [3.0] def test_mlflow_2_x_comp(tmp_path: Path) -> None: tracking_uri = (tmp_path / "tracking").as_uri() mlflow.set_tracking_uri(tracking_uri) artifact_location = (tmp_path / "artifacts").as_uri() exp_id = mlflow.create_experiment("test", artifact_location=artifact_location) mlflow.set_experiment(experiment_id=exp_id) out_file = tmp_path / "out.txt" # Log a model using MLflow 2.x py_ver = ".".join(map(str, sys.version_info[:2])) subprocess.check_call( [ "uv", "run", "--isolated", "--no-project", 
"--index-strategy=unsafe-first-match", f"--python={py_ver}", # Use mlflow 2.x "--with=mlflow<3.0", # Pin numpy and sklearn versions to ensure the model can be loaded f"--with=numpy=={np.__version__}", f"--with=scikit-learn=={sklearn.__version__}", "python", # Use the isolated mode to ignore mlflow in the repository "-I", "-c", """ import sys import mlflow from sklearn.linear_model import LinearRegression assert mlflow.__version__.startswith("2."), mlflow.__version__ fitted_model= LinearRegression().fit([[1, 2]], [3]) with mlflow.start_run() as run: mlflow.log_text("test", "model/test.txt") model_info = mlflow.sklearn.log_model(fitted_model, artifact_path="model") assert model_info.model_uri.startswith("runs:/") out = sys.argv[1] with open(out, "w") as f: f.write(run.info.run_id) """, out_file, ], ) run_id = out_file.read_text().strip() model_uri = f"runs:/{run_id}/model" check_load(model_uri=model_uri) check_register(model_uri=model_uri) check_list_artifacts_with_run_id_and_path(run_id=run_id, path="model") check_list_artifacts_with_model_uri(model_uri=model_uri) check_download_artifacts_with_run_id_and_path(run_id=run_id, path="model", tmp_path=tmp_path) check_download_artifacts_with_model_uri(model_uri=model_uri, tmp_path=tmp_path) check_evaluate(model_uri=model_uri) check_spark_udf(model_uri=model_uri) def test_mlflow_3_x_comp(tmp_path: Path) -> None: tracking_uri = (tmp_path / "tracking").as_uri() mlflow.set_tracking_uri(tracking_uri) artifact_location = (tmp_path / "artifacts").as_uri() exp_id = mlflow.create_experiment("test", artifact_location=artifact_location) mlflow.set_experiment(experiment_id=exp_id) fitted_model = LinearRegression().fit([[1, 2]], [3]) with mlflow.start_run() as run: mlflow.log_text("test", "model/test.txt") model_info = mlflow.sklearn.log_model(fitted_model, name="model") # Runs URI run_id = run.info.run_id runs_model_uri = f"runs:/{run_id}/model" check_load(model_uri=runs_model_uri) check_register(model_uri=runs_model_uri) 
check_list_artifacts_with_run_id_and_path(run_id=run_id, path="model") check_list_artifacts_with_model_uri(model_uri=runs_model_uri) check_download_artifacts_with_run_id_and_path(run_id=run_id, path="model", tmp_path=tmp_path) check_download_artifacts_with_model_uri(model_uri=runs_model_uri, tmp_path=tmp_path) check_evaluate(model_uri=runs_model_uri) check_spark_udf(model_uri=runs_model_uri) # Models URI logged_model_uri = f"models:/{model_info.model_id}" check_load(model_uri=logged_model_uri) check_register(model_uri=logged_model_uri) artifacts = [a.path for a in mlflow.artifacts.list_artifacts(artifact_uri=logged_model_uri)] assert "MLmodel" in artifacts out_path = mlflow.artifacts.download_artifacts( artifact_uri=logged_model_uri, dst_path=tmp_path / str(uuid.uuid4()) ) files = [f.name for f in Path(out_path).iterdir() if f.is_file()] assert "MLmodel" in files check_evaluate(model_uri=logged_model_uri) check_spark_udf(model_uri=logged_model_uri) def test_run_and_model_has_artifact_with_same_name(tmp_path: Path) -> None: fitted_model = LinearRegression().fit([[1, 2]], [3]) with mlflow.start_run() as run: mlflow.log_text("", artifact_file="model/MLmodel") info = mlflow.sklearn.log_model(fitted_model, name="model") client = mlflow.MlflowClient() artifacts = client.list_artifacts(run_id=run.info.run_id, path="model") mlmodel_files = [a.path for a in artifacts if a.path.endswith("MLmodel")] # Both run and model artifacts should be listed assert len(mlmodel_files) == 2 out = mlflow.artifacts.download_artifacts( run_id=run.info.run_id, artifact_path="model", dst_path=tmp_path / str(uuid.uuid4()), ) mlmodel_files = list(Path(out).rglob("MLmodel")) assert len(mlmodel_files) == 1 # The model MLmodel file should overwrite the run MLmodel file assert info.model_id in mlmodel_files[0].read_text()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/test_mlflow_version_comp.py", "license": "Apache License 2.0", "lines": 192, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/store/_unity_catalog/registry/prompt_info.py
""" Internal PromptInfo entity for Unity Catalog prompt operations. This is an implementation detail for the Unity Catalog store and should not be considered part of the public MLflow API. """ class PromptInfo: """ Internal entity for prompt information from Unity Catalog. This represents prompt metadata without version-specific details like template. This maps to the Unity Catalog PromptInfo protobuf message. Note: This is an internal implementation detail and not part of the public API. """ def __init__( self, name: str, description: str | None = None, creation_timestamp: int | None = None, tags: dict[str, str] | None = None, ): """ Construct a PromptInfo entity. Args: name: Name of the prompt. description: Description of the prompt. creation_timestamp: Timestamp when the prompt was created. tags: Prompt-level metadata as key-value pairs. """ self._name = name self._description = description self._creation_timestamp = creation_timestamp self._tags = tags or {} @property def name(self) -> str: """The name of the prompt.""" return self._name @property def description(self) -> str | None: """The description of the prompt.""" return self._description @property def creation_timestamp(self) -> int | None: """The creation timestamp of the prompt.""" return self._creation_timestamp @property def tags(self) -> dict[str, str]: """Prompt-level metadata as key-value pairs.""" return self._tags.copy() def __eq__(self, other) -> bool: if not isinstance(other, PromptInfo): return False return ( self.name == other.name and self.description == other.description and self.creation_timestamp == other.creation_timestamp and self.tags == other.tags ) def __repr__(self) -> str: return ( f"<PromptInfo: name='{self.name}', description='{self.description}', tags={self.tags}>" )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/_unity_catalog/registry/prompt_info.py", "license": "Apache License 2.0", "lines": 60, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/store/_unity_catalog/registry/test_uc_prompt_utils.py
import json from mlflow.entities.model_registry.prompt import Prompt from mlflow.entities.model_registry.prompt_version import PromptVersion from mlflow.prompt.constants import ( PROMPT_MODEL_CONFIG_TAG_KEY, PROMPT_TYPE_TAG_KEY, PROMPT_TYPE_TEXT, RESPONSE_FORMAT_TAG_KEY, ) from mlflow.protos.unity_catalog_prompt_messages_pb2 import ( Prompt as ProtoPrompt, ) from mlflow.protos.unity_catalog_prompt_messages_pb2 import ( PromptTag as ProtoPromptTag, ) from mlflow.protos.unity_catalog_prompt_messages_pb2 import ( PromptVersion as ProtoPromptVersion, ) from mlflow.protos.unity_catalog_prompt_messages_pb2 import ( PromptVersionTag as ProtoPromptVersionTag, ) from mlflow.store._unity_catalog.registry.utils import ( mlflow_prompt_to_proto, mlflow_tags_to_proto, mlflow_tags_to_proto_version_tags, proto_info_to_mlflow_prompt_info, proto_to_mlflow_prompt, proto_to_mlflow_tags, proto_version_tags_to_mlflow_tags, ) def test_proto_to_mlflow_tags(): # Test with empty tags assert proto_to_mlflow_tags([]) == {} # Test with tags proto_tags = [ ProtoPromptTag(key="key1", value="value1"), ProtoPromptTag(key="key2", value="value2"), ] expected = {"key1": "value1", "key2": "value2"} assert proto_to_mlflow_tags(proto_tags) == expected # Test with None assert proto_to_mlflow_tags(None) == {} def test_mlflow_tags_to_proto(): # Test with empty tags assert mlflow_tags_to_proto({}) == [] # Test with tags tags = {"key1": "value1", "key2": "value2"} proto_tags = mlflow_tags_to_proto(tags) assert len(proto_tags) == 2 assert all(isinstance(tag, ProtoPromptTag) for tag in proto_tags) assert {tag.key: tag.value for tag in proto_tags} == tags # Test with None assert mlflow_tags_to_proto(None) == [] def test_proto_info_to_mlflow_prompt_info(): # Create test proto info proto_info = ProtoPrompt( name="test_prompt", description="Test prompt description", tags=[ ProtoPromptTag(key="key1", value="value1"), ProtoPromptTag(key="key2", value="value2"), ], ) # Test without prompt tags prompt_info = 
proto_info_to_mlflow_prompt_info(proto_info) assert isinstance(prompt_info, Prompt) assert prompt_info.name == "test_prompt" assert prompt_info.description == "Test prompt description" assert prompt_info.tags == {"key1": "value1", "key2": "value2"} # Test with additional prompt tags prompt_tags = {"tag1": "value1", "tag2": "value2"} prompt_info = proto_info_to_mlflow_prompt_info(proto_info, prompt_tags) expected_tags = { "key1": "value1", "key2": "value2", "tag1": "value1", "tag2": "value2", } assert prompt_info.tags == expected_tags def test_proto_to_mlflow_prompt(): # Test with version tags including response_format and model_config proto_version = ProtoPromptVersion() proto_version.name = "test_prompt" proto_version.version = "1" proto_version.template = json.dumps("Hello {{name}}!") proto_version.description = "Test description" response_format = { "type": "json_schema", "json_schema": { "name": "test_schema", "schema": { "type": "object", "properties": {"name": {"type": "string"}}, }, }, } model_config = { "model_name": "databricks-meta-llama-3-1-70b-instruct", "max_tokens": 100, "temperature": 0.7, } # Add version tags including both response_format and model_config proto_version.tags.extend( [ ProtoPromptVersionTag(key="env", value="production"), ProtoPromptVersionTag(key="author", value="alice"), ProtoPromptVersionTag(key=PROMPT_TYPE_TAG_KEY, value=PROMPT_TYPE_TEXT), ProtoPromptVersionTag( key=RESPONSE_FORMAT_TAG_KEY, value=json.dumps(response_format), ), ProtoPromptVersionTag( key=PROMPT_MODEL_CONFIG_TAG_KEY, value=json.dumps(model_config), ), ] ) result = proto_to_mlflow_prompt(proto_version) # Verify response_format and model_config are extracted assert result.template == "Hello {{name}}!" 
assert result.response_format == response_format assert result.model_config == model_config # Verify user tags remain and _mlflow tags are filtered out expected_tags = {"env": "production", "author": "alice"} assert result.tags == expected_tags assert RESPONSE_FORMAT_TAG_KEY not in result.tags assert PROMPT_MODEL_CONFIG_TAG_KEY not in result.tags assert PROMPT_TYPE_TAG_KEY not in result.tags # Test with no tags proto_no_tags = ProtoPromptVersion() proto_no_tags.name = "no_tags_prompt" proto_no_tags.version = "2" proto_no_tags.template = json.dumps("Simple template") result_no_tags = proto_to_mlflow_prompt(proto_no_tags) assert result_no_tags.tags == {} assert result_no_tags.response_format is None assert result_no_tags.model_config is None def test_mlflow_prompt_to_proto(): # Create test prompt (skip timestamp for simplicity) prompt = PromptVersion( name="test_prompt", version=1, template="Hello {{name}}!", commit_message="Test prompt", tags={"key1": "value1", "key2": "value2"}, aliases=["production"], ) # Convert to proto proto_version = mlflow_prompt_to_proto(prompt) # Verify conversion assert isinstance(proto_version, ProtoPromptVersion) assert proto_version.name == "test_prompt" assert proto_version.version == "1" assert proto_version.template == "Hello {{name}}!" 
assert proto_version.description == "Test prompt" tags_dict = {tag.key: tag.value for tag in proto_version.tags} assert tags_dict == {"key1": "value1", "key2": "value2"} # Test with empty fields prompt = PromptVersion(name="test_prompt", version=1, template="Hello {{name}}!") proto_version = mlflow_prompt_to_proto(prompt) assert len(proto_version.tags) == 0 def test_proto_version_tags_to_mlflow_tags(): # Test with empty tags assert proto_version_tags_to_mlflow_tags([]) == {} # Test with version tags proto_tags = [ ProtoPromptVersionTag(key="key1", value="value1"), ProtoPromptVersionTag(key="key2", value="value2"), ] expected = {"key1": "value1", "key2": "value2"} assert proto_version_tags_to_mlflow_tags(proto_tags) == expected # Test with None assert proto_version_tags_to_mlflow_tags(None) == {} def test_mlflow_tags_to_proto_version_tags(): # Test with empty tags assert mlflow_tags_to_proto_version_tags({}) == [] # Test with tags tags = {"key1": "value1", "key2": "value2"} proto_tags = mlflow_tags_to_proto_version_tags(tags) assert len(proto_tags) == 2 assert all(isinstance(tag, ProtoPromptVersionTag) for tag in proto_tags) assert {tag.key: tag.value for tag in proto_tags} == tags # Test with None assert mlflow_tags_to_proto_version_tags(None) == []
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/_unity_catalog/registry/test_uc_prompt_utils.py", "license": "Apache License 2.0", "lines": 184, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/optimize/types.py
import multiprocessing from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Callable from mlflow.entities import Feedback, Trace from mlflow.entities.model_registry import PromptVersion from mlflow.utils.annotations import deprecated, experimental if TYPE_CHECKING: from mlflow.genai.optimize.optimizers import BasePromptOptimizer AggregationFn = Callable[[dict[str, bool | float | str | Feedback | list[Feedback]]], float] @deprecated( since="3.5.0", ) @dataclass class LLMParams: """ Parameters for configuring a LLM Model. Args: model_name: Name of the model in the format `<provider>:/<model name>` or `<provider>/<model name>`. For example, "openai:/gpt-4o", "anthropic:/claude-4", or "openai/gpt-4o". base_uri: Optional base URI for the API endpoint. If not provided, the default endpoint for the provider will be used. temperature: Optional sampling temperature for the model's outputs. Higher values (e.g., 0.8) make the output more random, while lower values (e.g., 0.2) make it more deterministic. """ model_name: str base_uri: str | None = None temperature: float | None = None @deprecated( since="3.5.0", ) @dataclass class OptimizerConfig: """ Configuration for prompt optimization. Args: num_instruction_candidates: Number of candidate instructions to generate during each optimization iteration. Higher values may lead to better results but increase optimization time. Default: 6 max_few_shot_examples: Maximum number of examples to show in few-shot demonstrations. Default: 6 num_threads: Number of threads to use for parallel optimization. Default: (number of CPU cores * 2 + 1) optimizer_llm: Optional LLM parameters for the teacher model. If not provided, the target LLM will be used as the teacher. algorithm: The optimization algorithm to use. When a string is provided, it must be one of the supported algorithms: "DSPy/MIPROv2". When a BasePromptOptimizer is provided, it will be used as the optimizer. 
Default: "DSPy/MIPROv2" verbose: Whether to show optimizer logs during optimization. Default: False autolog: Whether to enable automatic logging and prompt registration. If set to True, a MLflow run is automatically created to store optimization parameters, datasets and metrics, and the optimized prompt is registered. If set to False, the raw optimized template is returned without registration. Default: True convert_to_single_text: Whether to convert the optimized prompt to a single prompt. Default: True extract_instructions: Whether to extract instructions from the initial prompt. Default: True """ num_instruction_candidates: int = 6 max_few_shot_examples: int = 6 num_threads: int = field(default_factory=lambda: (multiprocessing.cpu_count() or 1) * 2 + 1) optimizer_llm: LLMParams | None = None algorithm: str | type["BasePromptOptimizer"] = "DSPy/MIPROv2" verbose: bool = False autolog: bool = True convert_to_single_text: bool = True extract_instructions: bool = True @experimental(version="3.5.0") @dataclass class EvaluationResultRecord: """ The output type of `eval_fn` in the :py:func:`mlflow.genai.optimize.BasePromptOptimizer.optimize()` API. Args: inputs: The inputs of the evaluation. outputs: The outputs of the prediction function. expectations: The expected outputs. score: The aggregated score of the evaluation result. None if no scorers are provided. trace: The trace of the evaluation execution. rationales: The rationales of the evaluation result. individual_scores: Individual scores from each scorer (scorer_name -> score). """ inputs: dict[str, Any] outputs: Any expectations: Any score: float | None trace: Trace rationales: dict[str, str] individual_scores: dict[str, float] = field(default_factory=dict) @experimental(version="3.5.0") @dataclass class PromptOptimizerOutput: """ An output of the :py:func:`mlflow.genai.optimize.BasePromptOptimizer.optimize()` API. Args: optimized_prompts: The optimized prompts as a dict (prompt template name -> prompt template). 
e.g., {"question": "What is the capital of {{country}}?"} initial_eval_score: The evaluation score before optimization (optional). final_eval_score: The evaluation score after optimization (optional). initial_eval_score_per_scorer: Per-scorer scores before optimization (scorer name -> score). final_eval_score_per_scorer: Per-scorer scores after optimization (scorer name -> score). """ optimized_prompts: dict[str, str] initial_eval_score: float | None = None final_eval_score: float | None = None initial_eval_score_per_scorer: dict[str, float] = field(default_factory=dict) final_eval_score_per_scorer: dict[str, float] = field(default_factory=dict) @experimental(version="3.5.0") @dataclass class PromptOptimizationResult: """ Result of the :py:func:`mlflow.genai.optimize_prompts()` API. Args: optimized_prompts: The optimized prompts. optimizer_name: The name of the optimizer. initial_eval_score: The evaluation score before optimization (optional). final_eval_score: The evaluation score after optimization (optional). initial_eval_score_per_scorer: Per-scorer scores before optimization (scorer name -> score). final_eval_score_per_scorer: Per-scorer scores after optimization (scorer name -> score). """ optimized_prompts: list[PromptVersion] optimizer_name: str initial_eval_score: float | None = None final_eval_score: float | None = None initial_eval_score_per_scorer: dict[str, float] = field(default_factory=dict) final_eval_score_per_scorer: dict[str, float] = field(default_factory=dict)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/optimize/types.py", "license": "Apache License 2.0", "lines": 130, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/optimize/util.py
from __future__ import annotations import functools from contextlib import contextmanager, nullcontext from typing import TYPE_CHECKING, Any, Callable from pydantic import BaseModel, create_model from mlflow.entities import Trace from mlflow.exceptions import MlflowException from mlflow.genai.scorers import Scorer from mlflow.genai.scorers.builtin_scorers import BuiltInScorer from mlflow.genai.scorers.validation import valid_data_for_builtin_scorers from mlflow.tracking.client import MlflowClient if TYPE_CHECKING: import pandas as pd @contextmanager def prompt_optimization_autolog( optimizer_name: str, num_prompts: int, num_training_samples: int, train_data_df: "pd.DataFrame" | None, ): """ Context manager for autologging prompt optimization runs. Args: optimizer_name: Name of the optimizer being used num_prompts: Number of prompts being optimized num_training_samples: Number of training samples train_data_df: Training data as a pandas DataFrame. If None or empty, it means zero-shot optimization. 
Yields: Tuple of (run_id, results_dict) where results_dict should be populated with PromptOptimizerOutput and list of optimized PromptVersion objects """ import mlflow.data active_run = mlflow.active_run() run_context = mlflow.start_run() if active_run is None else nullcontext(active_run) with run_context as run: client = MlflowClient() run_id = run.info.run_id mlflow.log_param("optimizer", optimizer_name) mlflow.log_param("num_prompts", num_prompts) mlflow.log_param("num_training_samples", num_training_samples) if train_data_df is not None and not train_data_df.empty: # Log training dataset as run input if it is provided dataset = mlflow.data.from_pandas( train_data_df, source="prompt_optimization_train_data" ) mlflow.log_input(dataset, context="training") results = {} yield results if "optimized_prompts" in results: for prompt in results["optimized_prompts"]: client.link_prompt_version_to_run(run_id=run_id, prompt=prompt) if "optimizer_output" in results: output = results["optimizer_output"] if output.initial_eval_score is not None: mlflow.log_metric("initial_eval_score", output.initial_eval_score) if output.final_eval_score is not None: mlflow.log_metric("final_eval_score", output.final_eval_score) if output.initial_eval_score_per_scorer: mlflow.log_metrics( { f"initial_eval_score.{scorer_name}": score for scorer_name, score in output.initial_eval_score_per_scorer.items() } ) if output.final_eval_score_per_scorer: mlflow.log_metrics( { f"final_eval_score.{scorer_name}": score for scorer_name, score in output.final_eval_score_per_scorer.items() } ) def validate_train_data( train_data: "pd.DataFrame", scorers: list[Scorer] | None, predict_fn: Callable[..., Any] | None = None, ) -> None: """ Validate that training data has required fields for prompt optimization. Args: train_data: Training data as a pandas DataFrame. scorers: Scorers to validate the training data for. Can be None for zero-shot mode. 
predict_fn: The predict function to validate the training data for. Raises: MlflowException: If any record is missing required 'inputs' field or it is empty. """ for i, record in enumerate(train_data.to_dict("records")): if "inputs" not in record or not record["inputs"]: raise MlflowException.invalid_parameter_value( f"Record {i} is missing required 'inputs' field or it is empty" ) if scorers is not None: builtin_scorers = [scorer for scorer in scorers if isinstance(scorer, BuiltInScorer)] valid_data_for_builtin_scorers(train_data, builtin_scorers, predict_fn) def infer_type_from_value(value: Any, model_name: str = "Output") -> type: """ Infer the type from the value. Only supports primitive types, lists, and dict and Pydantic models. """ if value is None: return type(None) elif isinstance(value, (bool, int, float, str)): return type(value) elif isinstance(value, list): if not value: return list[Any] element_types = {infer_type_from_value(item) for item in value} return list[functools.reduce(lambda x, y: x | y, element_types)] elif isinstance(value, dict): fields = {k: (infer_type_from_value(v, model_name=k), ...) for k, v in value.items()} return create_model(model_name, **fields) elif isinstance(value, BaseModel): return type(value) return Any def create_metric_from_scorers( scorers: list[Scorer], objective: Callable[[dict[str, Any]], float] | None = None, ) -> Callable[[Any, Any, dict[str, Any]], tuple[float, dict[str, str], dict[str, float]]]: """ Create a metric function from scorers and an optional objective function. Args: scorers: List of scorers to evaluate inputs, outputs, and expectations. objective: Optional function that aggregates scorer outputs into a single score. Takes a dict mapping scorer names to scores and returns a float. If None and all scorers return numerical or CategoricalRating values, uses default aggregation (sum for numerical, conversion for categorical). 
Returns: A callable that takes (inputs, outputs, expectations, trace) and returns a tuple of (aggregated_score, rationales, individual_scores). Raises: MlflowException: If scorers return non-numerical values and no objective is provided. """ from mlflow.entities import Feedback from mlflow.genai.judges import CategoricalRating def _convert_to_numeric(score: Any) -> float | None: """Convert a value to numeric, handling Feedback and primitive types.""" if isinstance(score, Feedback): score = score.value if score == CategoricalRating.YES: return 1.0 elif score == CategoricalRating.NO: return 0.0 elif isinstance(score, (int, float, bool)): return float(score) return None def metric( inputs: Any, outputs: Any, expectations: dict[str, Any], trace: Trace | None, ) -> tuple[float, dict[str, str], dict[str, float]]: scores = {} rationales = {} for scorer in scorers: scores[scorer.name] = scorer.run( inputs=inputs, outputs=outputs, expectations=expectations, trace=trace ) for key, score in scores.items(): if isinstance(score, Feedback): rationales[key] = score.rationale # Try to convert all scores to numeric numeric_scores = {} for name, score in scores.items(): numeric_value = _convert_to_numeric(score) if numeric_value is not None: numeric_scores[name] = numeric_value if objective is not None: return objective(scores), rationales, numeric_scores # If all scores were convertible, use sum as default aggregation if len(numeric_scores) == len(scores): # We average the scores to get the score between 0 and 1. aggregated = sum(numeric_scores.values()) / len(numeric_scores) return aggregated, rationales, numeric_scores # Otherwise, report error with actual types non_convertible = { k: type(v).__name__ for k, v in scores.items() if k not in numeric_scores } scorer_details = ", ".join([f"{k} (type: {t})" for k, t in non_convertible.items()]) raise MlflowException( f"Scorers [{scorer_details}] return non-numerical values that cannot be " "automatically aggregated. 
Please provide an `objective` function to aggregate " "these values into a single score for optimization." ) return metric
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/optimize/util.py", "license": "Apache License 2.0", "lines": 184, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/optimize/test_util.py
from typing import Any, Union import pytest from pydantic import BaseModel from mlflow.entities.assessment import Feedback from mlflow.exceptions import MlflowException from mlflow.genai.judges import CategoricalRating from mlflow.genai.optimize.util import ( create_metric_from_scorers, infer_type_from_value, validate_train_data, ) from mlflow.genai.scorers import scorer @pytest.mark.parametrize( ("input_value", "expected_type"), [ (None, type(None)), (True, bool), (42, int), (3.14, float), ("hello", str), ], ) def test_infer_primitive_types(input_value, expected_type): assert infer_type_from_value(input_value) == expected_type @pytest.mark.parametrize( ("input_list", "expected_type"), [ ([], list[Any]), ([1, 2, 3], list[int]), (["a", "b", "c"], list[str]), ([1.0, 2.0, 3.0], list[float]), ([True, False, True], list[bool]), ([1, "hello", True], list[Union[int, str, bool]]), # noqa: UP007 ([1, "hello", True], list[int | str | bool]), ([1, 2.0], list[int | float]), ([[1, 2], [3, 4]], list[list[int]]), ([["a"], ["b", "c"]], list[list[str]]), ], ) def test_infer_list_types(input_list, expected_type): assert infer_type_from_value(input_list) == expected_type @pytest.mark.parametrize( ("input_dict", "expected_fields"), [ ({"name": "John", "age": 30, "active": True}, {"name": str, "age": int, "active": bool}), ({"score": 95.5, "passed": True}, {"score": float, "passed": bool}), ], ) def test_infer_simple_dict(input_dict, expected_fields): result = infer_type_from_value(input_dict) assert isinstance(result, type) assert issubclass(result, BaseModel) for field_name, expected_type in expected_fields.items(): assert result.__annotations__[field_name] == expected_type def test_infer_nested_dict(): data = { "user": {"name": "John", "scores": [85, 90, 95]}, "settings": {"enabled": True, "theme": "dark"}, } result = infer_type_from_value(data) assert isinstance(result, type) assert issubclass(result, BaseModel) # Check nested model types user_model = result.__annotations__["user"] 
settings_model = result.__annotations__["settings"] assert issubclass(user_model, BaseModel) assert issubclass(settings_model, BaseModel) # Check nested field types assert user_model.__annotations__["name"] == str assert user_model.__annotations__["scores"] == list[int] assert settings_model.__annotations__["enabled"] == bool assert settings_model.__annotations__["theme"] == str @pytest.mark.parametrize( ("model_class", "model_data"), [ ( type("UserModel", (BaseModel,), {"__annotations__": {"name": str, "age": int}}), {"name": "John", "age": 30}, ), ( type("ProductModel", (BaseModel,), {"__annotations__": {"id": int, "price": float}}), {"id": 1, "price": 99.99}, ), ], ) def test_infer_pydantic_model(model_class, model_data): model = model_class(**model_data) result = infer_type_from_value(model) assert result == model_class @pytest.mark.parametrize( "type_to_infer", [ type("CustomClass", (), {}), type("AnotherClass", (), {"custom_attr": 42}), ], ) def test_infer_unsupported_type(type_to_infer): obj = type_to_infer() assert infer_type_from_value(obj) == Any @pytest.mark.parametrize( ("input_dict", "model_name"), [ ({"name": "John", "age": 30}, "UserData"), ({"id": 1, "value": "test"}, "TestModel"), ], ) def test_model_name_parameter(input_dict, model_name): result = infer_type_from_value(input_dict, model_name=model_name) assert result.__name__ == model_name @pytest.mark.parametrize( ("score", "expected_score"), [ (CategoricalRating.YES, 1.0), (CategoricalRating.NO, 0.0), ("yes", 1.0), ("no", 0.0), (True, 1.0), (False, 0.0), (1, 1.0), (0, 0.0), (1.0, 1.0), (0.0, 0.0), ], ) def test_create_metric_from_scorers_with_single_score(score, expected_score): @scorer(name="test_scorer") def test_scorer(inputs, outputs): return Feedback(name="test_scorer", value=score, rationale="test rationale") metric = create_metric_from_scorers([test_scorer]) result = metric({"input": "test"}, {"output": "result"}, {}, None) assert result[0] == expected_score assert result[1] == 
{"test_scorer": "test rationale"} assert result[2] == {"test_scorer": expected_score} def test_create_metric_from_scorers_with_multiple_categorical_ratings(): @scorer(name="scorer1") def scorer1(inputs, outputs): return Feedback(name="scorer1", value=CategoricalRating.YES, rationale="rationale1") @scorer(name="scorer2") def scorer2(inputs, outputs): return Feedback(name="scorer2", value=CategoricalRating.YES, rationale="rationale2") metric = create_metric_from_scorers([scorer1, scorer2]) # Should average: (1.0 + 1.0) / 2 = 1.0 result = metric({"input": "test"}, {"output": "result"}, {}, None) assert result[0] == 1.0 assert result[1] == {"scorer1": "rationale1", "scorer2": "rationale2"} assert result[2] == {"scorer1": 1.0, "scorer2": 1.0} @pytest.mark.parametrize( ("train_data", "scorers", "expected_error"), [ # Empty inputs ( [{"inputs": {}, "outputs": "result"}], [], "Record 0 is missing required 'inputs' field or it is empty", ), # Missing inputs ( [{"outputs": "result"}], [], "Record 0 is missing required 'inputs' field or it is empty", ), ], ) def test_validate_train_data_errors(train_data, scorers, expected_error): import pandas as pd with pytest.raises(MlflowException, match=expected_error): validate_train_data(pd.DataFrame(train_data), scorers, lambda **kwargs: None) @pytest.mark.parametrize( "train_data", [ # Valid with outputs [{"inputs": {"text": "hello"}, "outputs": "result"}], # Valid with expectations [{"inputs": {"text": "hello"}, "expectations": {"expected": "result"}}], # Multiple valid records [ {"inputs": {"text": "hello"}, "outputs": "result1"}, {"inputs": {"text": "world"}, "expectations": {"expected": "result2"}}, ], # Falsy but valid values: False as output [{"inputs": {"text": "hello"}, "outputs": False}], ], ) def test_validate_train_data_success(train_data): import pandas as pd validate_train_data(pd.DataFrame(train_data), [], lambda **kwargs: None)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/optimize/test_util.py", "license": "Apache License 2.0", "lines": 186, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/datasets/evaluation_dataset.py
from typing import TYPE_CHECKING, Any from mlflow.data import Dataset from mlflow.data.pyfunc_dataset_mixin import PyFuncConvertibleDatasetMixin from mlflow.entities.evaluation_dataset import EvaluationDataset as _EntityEvaluationDataset from mlflow.genai.datasets.databricks_evaluation_dataset_source import ( DatabricksEvaluationDatasetSource, ) if TYPE_CHECKING: import pandas as pd import pyspark.sql class EvaluationDataset(Dataset, PyFuncConvertibleDatasetMixin): """ The public API for evaluation datasets in MLflow's GenAI module. This class provides a unified interface for evaluation datasets, supporting both: - Standard MLflow evaluation datasets (backed by MLflow's tracking store) - Databricks managed datasets (backed by Unity Catalog tables) through the databricks-agents library """ def __init__(self, dataset): """ Initialize the wrapper with either a managed dataset or an MLflow dataset. Args: dataset: Either a Databricks managed dataset (databricks.agents.datasets.Dataset) or an MLflow EvaluationDataset entity (mlflow.entities.evaluation_dataset.EvaluationDataset). The type is determined at runtime. 
""" if isinstance(dataset, _EntityEvaluationDataset): self._databricks_dataset = None self._mlflow_dataset = dataset else: self._databricks_dataset = dataset self._mlflow_dataset = None self._df = None def __eq__(self, other): """Check equality with another dataset.""" if isinstance(other, _EntityEvaluationDataset) and self._mlflow_dataset: return self._mlflow_dataset == other if isinstance(other, EvaluationDataset): if self._mlflow_dataset and other._mlflow_dataset: return self._mlflow_dataset == other._mlflow_dataset if self._databricks_dataset and other._databricks_dataset: return self._databricks_dataset == other._databricks_dataset return False def __setattr__(self, name, value): """Allow setting internal attributes on the wrapped dataset.""" object.__setattr__(self, name, value) def __getattr__(self, name): """ Dynamic attribute delegation for simple pass-through properties. This handles attributes that don't require special logic and can be directly delegated to the underlying dataset implementation. 
""" if name.startswith("_") or name == "records": raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") if self._mlflow_dataset and hasattr(self._mlflow_dataset, name): return getattr(self._mlflow_dataset, name) elif self._databricks_dataset and hasattr(self._databricks_dataset, name): return getattr(self._databricks_dataset, name) raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") @property def digest(self) -> str | None: """String digest (hash) of the dataset provided by the caller that uniquely identifies""" if self._mlflow_dataset: return self._mlflow_dataset.digest if self._databricks_dataset.digest is None: from mlflow.data.digest_utils import compute_pandas_digest return compute_pandas_digest(self.to_df()) return self._databricks_dataset.digest @property def name(self) -> str: """The name of the dataset.""" if self._mlflow_dataset: return self._mlflow_dataset.name return self._databricks_dataset.name if self._databricks_dataset else None @property def dataset_id(self) -> str: """The unique identifier of the dataset.""" if self._mlflow_dataset: return self._mlflow_dataset.dataset_id return self._databricks_dataset.dataset_id if self._databricks_dataset else None @property def source(self): """Source information for the dataset.""" if self._mlflow_dataset: return self._mlflow_dataset.source return DatabricksEvaluationDatasetSource(table_name=self.name, dataset_id=self.dataset_id) @property def source_type(self) -> str | None: """The type of the dataset source.""" if self._mlflow_dataset: return self._mlflow_dataset.source._get_source_type() return self._databricks_dataset.source_type @property def created_time(self) -> int | str | None: """The time the dataset was created.""" if self._mlflow_dataset: return self._mlflow_dataset.created_time return self._databricks_dataset.create_time @property def create_time(self) -> int | str | None: """Alias for created_time (for backward compatibility with managed 
datasets).""" return self.created_time @property def tags(self) -> dict[str, Any] | None: """The tags for the dataset (MLflow only).""" if self._mlflow_dataset: return self._mlflow_dataset.tags raise NotImplementedError( "Tags are not available for Databricks managed datasets. " "Tags are managed through Unity Catalog. Use Unity Catalog APIs to manage dataset tags." ) @property def experiment_ids(self) -> list[str]: """The experiment IDs associated with the dataset (MLflow only).""" if self._mlflow_dataset: return self._mlflow_dataset.experiment_ids raise NotImplementedError( "Experiment associations are not available for Databricks managed datasets. " "Dataset associations are managed through Unity Catalog." ) @property def schema(self) -> str | None: """The schema of the dataset.""" if self._mlflow_dataset: return self._mlflow_dataset.schema return self._databricks_dataset.schema if self._databricks_dataset else None @property def profile(self) -> str | None: """The profile of the dataset.""" if self._mlflow_dataset: return self._mlflow_dataset.profile return self._databricks_dataset.profile if self._databricks_dataset else None def set_profile(self, profile: str) -> "EvaluationDataset": """Set the profile of the dataset.""" if self._mlflow_dataset: self._mlflow_dataset._profile = profile return self dataset = self._databricks_dataset.set_profile(profile) return EvaluationDataset(dataset) def merge_records( self, records: "list[dict[str, Any]] | pd.DataFrame | pyspark.sql.DataFrame", ) -> "EvaluationDataset": """Merge records into the dataset.""" if self._mlflow_dataset: self._mlflow_dataset.merge_records(records) return self from mlflow.genai.datasets import _databricks_profile_env with _databricks_profile_env(): dataset = self._databricks_dataset.merge_records(records) return EvaluationDataset(dataset) def delete_records(self, record_ids: list[str]) -> int: """Delete specific records from the dataset.""" if self._mlflow_dataset: return 
self._mlflow_dataset.delete_records(record_ids) raise NotImplementedError( "Deleting records is not supported for Databricks managed datasets. " "Databricks datasets are managed through Unity Catalog tables." ) def to_df(self) -> "pd.DataFrame": """Convert the dataset to a pandas DataFrame.""" if self._mlflow_dataset: return self._mlflow_dataset.to_df() if self._df is None: from mlflow.genai.datasets import _databricks_profile_env with _databricks_profile_env(): self._df = self._databricks_dataset.to_df() return self._df def has_records(self) -> bool: """Check if dataset records are loaded without triggering a load.""" if self._mlflow_dataset: return self._mlflow_dataset.has_records() return self._df is not None def to_dict(self) -> dict[str, Any]: """Convert to dictionary representation.""" if self._mlflow_dataset: return self._mlflow_dataset.to_dict() raise NotImplementedError( "Serialization to dict is not supported for Databricks managed datasets. " "Databricks datasets are persisted in Unity Catalog tables and don't " "require serialization." ) @classmethod def from_dict(cls, data: dict[str, Any]) -> "EvaluationDataset": """ Create instance from dictionary representation. Note: This creates an MLflow dataset from serialized data. Databricks managed datasets are loaded directly from Unity Catalog, not from dict. """ mlflow_dataset = _EntityEvaluationDataset.from_dict(data) return cls(mlflow_dataset) def to_proto(self): """Convert to protobuf representation.""" if self._mlflow_dataset: return self._mlflow_dataset.to_proto() raise NotImplementedError( "Protobuf serialization is not supported for Databricks managed datasets. " "Databricks datasets are persisted in Unity Catalog tables and don't " "require serialization." ) @classmethod def from_proto(cls, proto): """ Create instance from protobuf representation. Note: This creates an MLflow dataset from serialized protobuf data. Databricks managed datasets are loaded directly from Unity Catalog, not from protobuf. 
""" mlflow_dataset = _EntityEvaluationDataset.from_proto(proto) return cls(mlflow_dataset) def _to_pyfunc_dataset(self): """Support for PyFuncConvertibleDatasetMixin.""" return self.to_evaluation_dataset() def to_evaluation_dataset(self, path=None, feature_names=None): """ Converts the dataset to the legacy EvaluationDataset for model evaluation. Required for use with mlflow.evaluate(). """ from mlflow.data.evaluation_dataset import EvaluationDataset as LegacyEvaluationDataset return LegacyEvaluationDataset( data=self.to_df(), path=path, feature_names=feature_names, name=self.name, digest=self.digest, ) def _to_mlflow_entity(self): """Convert to MLflow Dataset entity for logging.""" from mlflow.entities import Dataset as DatasetEntity return DatasetEntity( name=self.name, digest=self.digest, source_type=self.source_type, source=self.source.to_json(), schema=self.schema, profile=self.profile, )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/datasets/evaluation_dataset.py", "license": "Apache License 2.0", "lines": 237, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/autogen/chat.py
import logging from typing import TYPE_CHECKING, Union from opentelemetry.sdk.trace import Span from mlflow.tracing.utils import set_span_chat_tools from mlflow.types.chat import ChatTool if TYPE_CHECKING: from autogen_core.tools import BaseTool, ToolSchema _logger = logging.getLogger(__name__) def log_tools(span: Span, tools: list[Union["BaseTool", "ToolSchema"]]): """ Log Autogen tool definitions into the passed in span. Ref: https://microsoft.github.io/autogen/stable/user-guide/core-user-guide/components/tools.html Args: span: The span to log the tools into. tools: A list of Autogen BaseTool. """ from autogen_core.tools import BaseTool try: tools = [ ChatTool( type="function", function=tool.schema if isinstance(tool, BaseTool) else tool, ) for tool in tools ] set_span_chat_tools(span, tools) except Exception: _logger.debug(f"Failed to log tools to Span {span}.", exc_info=True)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/autogen/chat.py", "license": "Apache License 2.0", "lines": 28, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/ag2/test_ag2_autolog.py
import contextlib import time from unittest.mock import patch import pytest from autogen import ConversableAgent, GroupChat, GroupChatManager, UserProxyAgent, io from openai import APIConnectionError from openai.types.chat import ChatCompletion from openai.types.chat.chat_completion import ChatCompletionMessage, Choice import mlflow from mlflow.entities.span import SpanType from mlflow.tracing.constant import SpanAttributeKey from mlflow.version import IS_TRACING_SDK_ONLY from tests.helper_functions import start_mock_openai_server from tests.tracing.helper import get_traces @pytest.fixture(scope="module", autouse=True) def mock_openai(): with start_mock_openai_server() as base_url: yield base_url @pytest.fixture def llm_config(mock_openai): return { "config_list": [ { "model": "gpt-4o-mini", "base_url": mock_openai, "api_key": "test", "max_tokens": 100, }, ] } @contextlib.contextmanager def mock_user_input(messages: list[str]): with patch.object(io.IOStream.get_default(), "input", side_effect=messages): yield def get_simple_agent(llm_config): assistant = ConversableAgent("agent", llm_config=llm_config) user_proxy = UserProxyAgent("user", code_execution_config=False) return assistant, user_proxy def test_enable_disable_autolog(llm_config): mlflow.ag2.autolog() with mock_user_input(["Hi", "exit"]): assistant, user_proxy = get_simple_agent(llm_config) assistant.initiate_chat(user_proxy, message="foo") traces = get_traces() assert len(traces) == 1 mlflow.ag2.autolog(disable=True) with mock_user_input(["Hi", "exit"]): assistant, user_proxy = get_simple_agent(llm_config) assistant.initiate_chat(user_proxy, message="foo") # No new trace should be created traces = get_traces() assert len(traces) == 1 def test_tracing_agent(llm_config, mock_litellm_cost): mlflow.ag2.autolog() with mock_user_input( ["What is the capital of Tokyo?", "How long is it take from San Francisco?", "exit"] ): assistant, user_proxy = get_simple_agent(llm_config) response = 
assistant.initiate_chat(user_proxy, message="How can I help you today?") # Check if the initiate_chat method is patched traces = get_traces() assert len(traces) == 1 assert traces[0].info.status == "OK" assert traces[0].info.execution_time_ms > 0 # 7 spans are expected: # initiate_chat # |-- user # |-- assistant -- chat_completion # |-- user # |-- assistant -- chat_completion assert len(traces[0].data.spans) == 7 session_span = traces[0].data.spans[0] assert session_span.name == "initiate_chat" assert session_span.span_type == SpanType.UNKNOWN assert session_span.inputs["message"] == "How can I help you today?" assert session_span.outputs["chat_history"] == response.chat_history user_span = traces[0].data.spans[1] assert user_span.name == "user" assert user_span.span_type == SpanType.AGENT assert user_span.parent_id == session_span.span_id assert user_span.inputs["message"] == "How can I help you today?" assert user_span.outputs["message"]["content"] == "What is the capital of Tokyo?" agent_span = traces[0].data.spans[2] assert agent_span.name == "agent" assert agent_span.span_type == SpanType.AGENT assert agent_span.parent_id == session_span.span_id assert agent_span.inputs["message"]["content"] == "What is the capital of Tokyo?" assert agent_span.outputs is not None llm_span = traces[0].data.spans[3] assert llm_span.name == "chat_completion" assert llm_span.span_type == SpanType.LLM assert llm_span.parent_id == agent_span.span_id assert llm_span.inputs["messages"][-1]["content"] == "What is the capital of Tokyo?" 
assert llm_span.outputs is not None assert llm_span.attributes["cost"] >= 0 assert llm_span.model_name == "gpt-4o-mini" user_span_2 = traces[0].data.spans[4] assert user_span_2.name == "user" assert user_span_2.parent_id == session_span.span_id agent_span_2 = traces[0].data.spans[5] assert agent_span_2.name == "agent" assert agent_span_2.parent_id == session_span.span_id llm_span_2 = traces[0].data.spans[6] assert llm_span_2.name == "chat_completion" assert llm_span_2.parent_id == agent_span_2.span_id assert llm_span_2.model_name == "gpt-4o-mini" assert llm_span.get_attribute(SpanAttributeKey.CHAT_USAGE) == { "input_tokens": 9, "output_tokens": 12, "total_tokens": 21, } assert llm_span.get_attribute(SpanAttributeKey.MESSAGE_FORMAT) == "ag2" if not IS_TRACING_SDK_ONLY: # Verify cost is calculated (9 input tokens * 1.0 + 12 output tokens * 2.0) assert llm_span.llm_cost == { "input_cost": 9.0, "output_cost": 24.0, "total_cost": 33.0, } assert llm_span_2.get_attribute(SpanAttributeKey.CHAT_USAGE) == { "input_tokens": 9, "output_tokens": 12, "total_tokens": 21, } assert llm_span_2.get_attribute(SpanAttributeKey.MESSAGE_FORMAT) == "ag2" if not IS_TRACING_SDK_ONLY: # Verify cost is calculated (9 input tokens * 1.0 + 12 output tokens * 2.0) assert llm_span_2.llm_cost == { "input_cost": 9.0, "output_cost": 24.0, "total_cost": 33.0, } assert traces[0].info.token_usage == { "input_tokens": 18, "output_tokens": 24, "total_tokens": 42, } def test_tracing_agent_with_error(): mlflow.ag2.autolog() invalid_llm_config = { "config_list": [ { "model": "gpt-4o-mini", "base_url": "invalid_url", "api_key": "invalid", } ] } assistant = ConversableAgent("agent", llm_config=invalid_llm_config) user_proxy = UserProxyAgent("user", code_execution_config=False) with mock_user_input(["What is the capital of Tokyo?", "exit"]): with pytest.raises(APIConnectionError, match="Connection error"): assistant.initiate_chat(user_proxy, message="How can I help you today?") traces = get_traces() assert 
len(traces) == 1 assert traces[0].info.status == "ERROR" assert traces[0].info.execution_time_ms > 0 assert traces[0].data.spans[0].status.status_code == "ERROR" assert traces[0].data.spans[0].status.description == "Connection error." def test_tracing_agent_multiple_chat_sessions(llm_config): mlflow.ag2.autolog() with mock_user_input(["Hi", "exit", "Hello", "exit", "Hola", "exit"]): assistant, user_proxy = get_simple_agent(llm_config) assistant.initiate_chat(user_proxy, message="foo") assistant.initiate_chat(user_proxy, message="bar") assistant.initiate_chat(user_proxy, message="baz") # Traces should be created for each chat session traces = get_traces() assert len(traces) == 3 assert traces[0].info.token_usage == { "input_tokens": 9, "output_tokens": 12, "total_tokens": 21, } def test_tracing_agent_with_function_calling(llm_config): mlflow.ag2.autolog() # Define a simple tool and register it with the assistant agent def sum(a: int, b: int) -> int: time.sleep(1) return a + b assistant = ConversableAgent( name="assistant", system_message="You are a helpful AI assistant. " "You can help with simple calculations. " "Return 'TERMINATE' when the task is done.", llm_config=llm_config, ) user_proxy = ConversableAgent( name="tool_agent", llm_config=False, is_termination_msg=lambda msg: ( msg.get("content") is not None and "TERMINATE" in msg["content"] ), human_input_mode="NEVER", ) assistant.register_for_llm(name="sum", description="A simple sum calculator")(sum) user_proxy.register_for_execution(name="sum")(sum) # Start a chat session. We mock OpenAI response to simulate function calling response. 
with patch( "autogen.oai.client.OpenAIClient.create", side_effect=[ ChatCompletion( id="chat_1", created=0, object="chat.completion", model="gpt-4o-mini", choices=[ Choice( index=1, finish_reason="stop", message=ChatCompletionMessage( role="assistant", tool_calls=[ { "id": "call_1", "function": {"arguments": '{"a": 1, "b": 1}', "name": "sum"}, "type": "function", }, ], ), ), ], ), ChatCompletion( id="chat_2", created=0, object="chat.completion", model="gpt-4o-mini", choices=[ Choice( index=2, finish_reason="stop", message=ChatCompletionMessage( role="assistant", content="The result of the calculation is 2. \n\nTERMINATE", ), ), ], ), ], ): response = user_proxy.initiate_chat(assistant, message="What is 1 + 1?") assert response.summary.startswith("The result of the calculation is 2.") traces = get_traces() assert len(traces) == 1 assert traces[0].info.status == "OK" assistant_span = traces[0].data.spans[1] assert assistant_span.span_type == SpanType.AGENT tool_agent_span = traces[0].data.spans[3] assert tool_agent_span.span_type == SpanType.AGENT tool_span = traces[0].data.spans[4] assert tool_span.span_type == SpanType.TOOL assert tool_span.parent_id == tool_agent_span.span_id assert tool_span.inputs["a"] == 1 assert tool_span.inputs["b"] == 1 assert tool_span.outputs == "2" assert tool_span.end_time_ns - tool_span.start_time_ns >= 1e9 # 1 second @pytest.fixture def tokyo_timezone(monkeypatch): # Set the timezone to Tokyo monkeypatch.setenv("TZ", "Asia/Tokyo") time.tzset() yield # Reset the timezone monkeypatch.delenv("TZ") time.tzset() def test_tracing_llm_completion_duration_timezone(llm_config, tokyo_timezone): # Test if the duration calculation for LLM completion is robust to timezone changes. 
mlflow.ag2.autolog() with mock_user_input( ["What is the capital of Tokyo?", "How long is it take from San Francisco?", "exit"] ): assistant, user_proxy = get_simple_agent(llm_config) assistant.initiate_chat(user_proxy, message="How can I help you today?") # Check if the initiate_chat method is patched traces = get_traces() span_name_to_dict = {span.name: span for span in traces[0].data.spans} llm_span = span_name_to_dict["chat_completion"] # We mock OpenAI LLM call so it should not take too long e.g. > 10 seconds. If it does, # it most likely a bug such as incorrect timezone handling. assert 0 < llm_span.end_time_ns - llm_span.start_time_ns <= 10e9 assert llm_span.model_name == "gpt-4o-mini" # Check if the start time is in reasonable range root_span = span_name_to_dict["initiate_chat"] assert 0 < llm_span.start_time_ns - root_span.start_time_ns <= 1e9 assert traces[0].info.token_usage == { "input_tokens": 18, "output_tokens": 24, "total_tokens": 42, } def test_tracing_composite_agent(llm_config): # Composite agent can call initiate_chat() or generate_reply() method of its sub-agents. # This test is to ensure that won't create a new trace for the sub-agent's method call. mlflow.ag2.autolog() agent_1 = ConversableAgent("agent_1", llm_config=llm_config) agent_2 = ConversableAgent("agent_2", llm_config=llm_config) group_chat = GroupChat( agents=[agent_1, agent_2], messages=[], max_round=3, speaker_selection_method="round_robin", ) group_chat_manager = GroupChatManager( groupchat=group_chat, llm_config=llm_config, ) agent_1.initiate_chat(group_chat_manager, message="Hello") traces = get_traces() assert len(traces) == 1 assert traces[0].info.status == "OK" spans = traces[0].data.spans # 1 for the root initiate_chat, 2 for the messages and 2 for the corresponding LLM calls. 
assert len(spans) == 5 span_names = {span.name for span in spans} assert span_names == { "initiate_chat", "agent_1", "agent_2", "chat_completion", } assert traces[0].info.token_usage == { "input_tokens": 18, "output_tokens": 24, "total_tokens": 42, } def test_autogen_logger_catch_exception(llm_config): # Error from the logger should not affect the main execution mlflow.ag2.autolog() with patch( "mlflow.tracing.provider.start_detached_span", side_effect=Exception("error") ) as mock_start_span: with mock_user_input(["Hi", "exit"]): assistant, user_proxy = get_simple_agent(llm_config) assistant.initiate_chat(user_proxy, message="foo") assert mock_start_span.call_count == 1
{ "repo_id": "mlflow/mlflow", "file_path": "tests/ag2/test_ag2_autolog.py", "license": "Apache License 2.0", "lines": 335, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/utils/truncation.py
import json from functools import lru_cache from typing import Any from mlflow.entities.trace_data import TraceData from mlflow.entities.trace_info import TraceInfo from mlflow.tracing.constant import ( TRACE_REQUEST_RESPONSE_PREVIEW_MAX_LENGTH_DBX, TRACE_REQUEST_RESPONSE_PREVIEW_MAX_LENGTH_OSS, ) from mlflow.tracking._tracking_service.utils import get_tracking_uri from mlflow.utils.uri import is_databricks_uri def set_request_response_preview(trace_info: TraceInfo, trace_data: TraceData) -> None: """ Set the request and response previews for the trace info. """ # If request/response preview is already set by users via `mlflow.update_current_trace`, # we don't override it with the truncated version. if trace_info.request_preview is None and trace_data.request is not None: trace_info.request_preview = _get_truncated_preview(trace_data.request, role="user") if trace_info.response_preview is None and trace_data.response is not None: trace_info.response_preview = _get_truncated_preview(trace_data.response, role="assistant") def _get_truncated_preview( request_or_response: str | dict[str, Any] | None, role: str ) -> str | None: if request_or_response is None: return None max_length = _get_max_length() content = None obj = None if isinstance(request_or_response, dict): obj = request_or_response request_or_response = json.dumps(request_or_response) elif isinstance(request_or_response, str): try: obj = json.loads(request_or_response) except json.JSONDecodeError: pass if obj is not None: if messages := _try_extract_messages(obj): msg = _get_last_message(messages, role=role) content = _get_text_content_from_message(msg) content = content or request_or_response if len(content) <= max_length: return content return content[: max_length - 3] + "..." 
@lru_cache(maxsize=1) def _get_max_length() -> int: tracking_uri = get_tracking_uri() return ( TRACE_REQUEST_RESPONSE_PREVIEW_MAX_LENGTH_DBX if is_databricks_uri(tracking_uri) else TRACE_REQUEST_RESPONSE_PREVIEW_MAX_LENGTH_OSS ) def _try_extract_messages(obj: dict[str, Any]) -> list[dict[str, Any]] | None: if not isinstance(obj, dict): return None # Check if the object contains messages with OpenAI ChatCompletion format if (messages := obj.get("messages")) and isinstance(messages, list): return [item for item in messages if _is_message(item)] # Check if the object contains a message in OpenAI ChatCompletion response format (choices) if ( (choices := obj.get("choices")) and isinstance(choices, list) and len(choices) > 0 and isinstance(choices[0], dict) and (msg := choices[0].get("message")) and _is_message(msg) ): return [msg] # Check if the object contains a message in OpenAI Responses API request format if (input := obj.get("input")) and isinstance(input, list): return [item for item in input if _is_message(item)] # Check if the object contains a message in OpenAI Responses API response format if (output := obj.get("output")) and isinstance(output, list): return [item for item in output if _is_message(item)] # Handle ResponsesAgent input, which contains OpenAI Responses request in 'request' key if "request" in obj: return _try_extract_messages(obj["request"]) return None def _is_message(item: Any) -> bool: return isinstance(item, dict) and "role" in item and "content" in item def _get_last_message(messages: list[dict[str, Any]], role: str) -> dict[str, Any]: """ Return last message with the given role. If the messages don't include a message with the given role, return the last one. 
""" for message in reversed(messages): if message.get("role") == role: return message return messages[-1] def _get_text_content_from_message(message: dict[str, Any]) -> str: content = message.get("content") if isinstance(content, list): # content is a list of content parts for part in content: if isinstance(part, str): return part elif isinstance(part, dict) and part.get("type") in ["text", "output_text"]: return part.get("text") elif isinstance(content, str): return content return ""
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/utils/truncation.py", "license": "Apache License 2.0", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/tracing/utils/test_truncation.py
import json from unittest.mock import patch import pytest from mlflow.entities.trace_data import TraceData from mlflow.entities.trace_info import TraceInfo from mlflow.entities.trace_location import TraceLocation from mlflow.entities.trace_state import TraceState from mlflow.tracing.utils.truncation import _get_truncated_preview, set_request_response_preview @pytest.fixture(autouse=True) def patch_max_length(): # Patch max length to 50 to make tests faster with patch("mlflow.tracing.utils.truncation._get_max_length", return_value=50): yield @pytest.mark.parametrize( ("input_str", "expected"), [ ("short string", "short string"), ("{'a': 'b'}", "{'a': 'b'}"), ("start" + "a" * 50, "start" + "a" * 42 + "..."), (None, None), ], ids=["short string", "short json", "long string", "none"], ) def test_truncate_simple_string(input_str, expected): assert _get_truncated_preview(input_str, role="user") == expected def test_truncate_long_non_message_json(): input_str = json.dumps( { "a": "b" + "a" * 30, "b": "c" + "a" * 30, } ) result = _get_truncated_preview(input_str, role="user") assert len(result) == 50 assert result.startswith('{"a": "b') _TEST_MESSAGE_HISTORY = [ {"role": "user", "content": "First"}, {"role": "assistant", "content": "Second"}, {"role": "user", "content": "Third" + "a" * 50}, {"role": "assistant", "content": "Fourth"}, ] @pytest.mark.parametrize( "input", [ # ChatCompletion API {"messages": _TEST_MESSAGE_HISTORY}, # Responses API {"input": _TEST_MESSAGE_HISTORY}, # Responses Agent {"request": {"input": _TEST_MESSAGE_HISTORY}}, ], ids=["chat_completion", "responses", "responses_agent"], ) def test_truncate_request_messages(input): input_str = json.dumps(input) assert _get_truncated_preview(input_str, role="assistant") == "Fourth" # Long content should be truncated assert _get_truncated_preview(input_str, role="user") == "Third" + "a" * 42 + "..." 
# If non-existing role is provided, return the last message assert _get_truncated_preview(input_str, role="system") == "Fourth" def test_truncate_request_choices(): input_str = json.dumps( { "choices": [ { "index": 1, "message": {"role": "assistant", "content": "First" + "a" * 50}, "finish_reason": "stop", }, ], "object": "chat.completions", } ) assert _get_truncated_preview(input_str, role="assistant").startswith("First") def test_truncate_multi_content_messages(): # If text content exists, use it assert ( _get_truncated_preview( json.dumps( {"messages": [{"role": "user", "content": [{"type": "text", "text": "a" * 60}]}]} ), role="user", ) == "a" * 47 + "..." ) # Ignore non text content assert ( _get_truncated_preview( json.dumps( { "messages": [ { "role": "user", "content": [ {"type": "text", "text": "a" * 60}, {"type": "image", "image_url": "http://example.com/image.jpg"}, ], }, ] } ), role="user", ) == "a" * 47 + "..." ) # If non-text content exists, truncate the full json as-is assert _get_truncated_preview( json.dumps( { "messages": [ { "role": "user", "content": [ { "type": "image", "image_url": "http://example.com/image.jpg" + "a" * 50, } ], }, ] } ), role="user", ).startswith('{"messages":') def test_truncate_responses_api_output(): input_str = json.dumps( { "output": [ { "type": "message", "id": "test", "role": "assistant", "content": [{"type": "output_text", "text": "a" * 60}], } ], } ) assert _get_truncated_preview(input_str, role="assistant") == "a" * 47 + "..." 
@pytest.mark.parametrize( "input_data", [ {"messages": 123, "long_data": "a" * 50}, {"messages": []}, {"input": "string"}, {"output": 123}, {"choices": {"0": "value"}}, {"request": "string"}, {"choices": [{"message": "not a dict"}]}, {"choices": [{"message": {"role": "user"}}]}, ], ) def test_truncate_invalid_messages(input_data): input_str = json.dumps(input_data) result = _get_truncated_preview(input_str, role="user") if "long_data" in input_data: assert len(result) == 50 assert result.startswith(input_str[:20]) else: assert result == input_str @pytest.mark.parametrize( ("request_data", "expected_content", "should_not_contain"), [ ( {"request": {"input": [{"role": "user", "content": "Hello"}]}}, "Hello", "request", ), ( {"request": {"tool_choice": None, "input": [{"role": "user", "content": "Weather?"}]}}, "Weather?", '"tool_choice"', ), ( {"request": {"input": [{"role": "user", "content": "Hi"}]}}, "Hi", '"request"', ), ], ids=["short_structured_json", "agent_format_with_null_fields", "responses_agent_short"], ) def test_truncate_structured_json_extracts_content( request_data, expected_content, should_not_contain ): input_str = json.dumps(request_data) result = _get_truncated_preview(input_str, role="user") assert result == expected_content assert should_not_contain not in result @pytest.mark.parametrize( ("content_value", "expected_in_result"), [ (None, '"content": null'), ("", '"content": ""'), (123, '"content": 123'), ], ids=["null_content", "empty_string_content", "numeric_content"], ) def test_truncate_invalid_content_falls_back_to_json(content_value, expected_in_result): request_data = {"input": [{"role": "user", "content": content_value}]} input_str = json.dumps(request_data) result = _get_truncated_preview(input_str, role="user") assert expected_in_result in result or result.endswith("...") def test_set_request_response_preview_skips_none_data(): trace_info = TraceInfo( trace_id="tr-test", trace_location=TraceLocation.from_experiment_id("0"), 
request_time=1000, state=TraceState.OK, ) trace_data = TraceData(spans=[], request=None, response=None) set_request_response_preview(trace_info, trace_data) assert trace_info.request_preview is None assert trace_info.response_preview is None
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/utils/test_truncation.py", "license": "Apache License 2.0", "lines": 212, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/test_genai_import_without_agent_sdk.py
from unittest.mock import patch import pytest from mlflow.genai.datasets import create_dataset, delete_dataset, get_dataset from mlflow.genai.scorers import ( delete_scorer, get_scorer, list_scorers, ) from mlflow.genai.scorers.base import Scorer # Test `mlflow.genai` namespace def test_mlflow_genai_star_import_succeeds(): import mlflow.genai # noqa: F401 def test_namespaced_import_raises_when_agents_not_installed(): # Ensure that databricks-agents methods renamespaced under mlflow.genai raise an # ImportError when the databricks-agents package is not installed. import mlflow.genai # Mock to simulate Databricks environment without databricks-agents installed with patch("mlflow.genai.datasets.is_databricks_uri", return_value=True): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): mlflow.genai.create_dataset("test_schema") with pytest.raises(ImportError, match="The `databricks-agents` package is required"): mlflow.genai.get_dataset("test_schema") with pytest.raises(ImportError, match="The `databricks-agents` package is required"): mlflow.genai.delete_dataset("test_schema") # Test `mlflow.genai.datasets` namespace def test_mlflow_genai_datasets_star_import_succeeds(): import mlflow.genai.datasets # noqa: F401 def test_create_dataset_raises_when_agents_not_installed(): # Mock to simulate Databricks environment without databricks-agents installed with patch("mlflow.genai.datasets.is_databricks_uri", return_value=True): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): create_dataset("test_dataset") def test_get_dataset_raises_when_agents_not_installed(): # Mock to simulate Databricks environment without databricks-agents installed with patch("mlflow.genai.datasets.is_databricks_uri", return_value=True): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): get_dataset("test_dataset") def test_delete_dataset_raises_when_agents_not_installed(): # Mock to simulate 
Databricks environment without databricks-agents installed with patch("mlflow.genai.datasets.is_databricks_uri", return_value=True): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): delete_dataset("test_dataset") class MockScorer(Scorer): """Mock scorer for testing purposes.""" name: str = "mock_scorer" def __call__(self, *, outputs=None, **kwargs): return {"score": 1.0} def test_list_scorers_raises_when_agents_not_installed(): with patch( "mlflow.tracking._tracking_service.utils.get_tracking_uri", return_value="databricks" ): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): list_scorers(experiment_id="test_experiment") def test_get_scorer_raises_when_agents_not_installed(): with patch( "mlflow.tracking._tracking_service.utils.get_tracking_uri", return_value="databricks" ): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): get_scorer(name="test_scorer", experiment_id="test_experiment") def test_delete_scorer_raises_when_agents_not_installed(): with patch( "mlflow.tracking._tracking_service.utils.get_tracking_uri", return_value="databricks" ): with pytest.raises(ImportError, match="The `databricks-agents` package is required"): delete_scorer(experiment_id="test_experiment", name="test_scorer")
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/test_genai_import_without_agent_sdk.py", "license": "Apache License 2.0", "lines": 65, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/utils/data_validation.py
import inspect import logging from typing import Any, Callable from mlflow.exceptions import MlflowException from mlflow.tracing.provider import trace_disabled _logger = logging.getLogger(__name__) def check_model_prediction(predict_fn: Callable[..., Any], sample_input: Any): """ Validate if the predict function executes properly with the provided input. Args: predict_fn: The predict function to be evaluated. sample_input: A sample input to the model. """ _logger.info( "Testing model prediction with the first sample in the dataset. To disable this check, " "set the MLFLOW_GENAI_EVAL_SKIP_TRACE_VALIDATION environment variable to True." ) # Wrap the function to add a decorator for disabling tracing @trace_disabled def _check(): predict_fn(**sample_input) try: _check() except Exception as e: # Check input format and raise friendly message for typical error patterns _validate_function_and_input_compatibility(predict_fn, sample_input, e) _logger.debug(f"Failed to run predict_fn with input: {sample_input}", exc_info=True) def _validate_function_and_input_compatibility( predict_fn: Callable[..., Any], sample_input: dict[str, Any], e: Exception ) -> Callable[..., Any]: """ Validate the data format in the input column against the predict_fn. The input column must contain a dictionary of field names and values. When the predict_fn is provided, the field names must match the arguments of the predict_fn. """ params = inspect.signature(predict_fn).parameters if not params: raise MlflowException.invalid_parameter_value( "`predict_fn` must accept at least one argument." ) from e # Check for *args-style parameters which aren't supported _validate_no_var_args(params, e) # Check if input keys match function parameters _validate_input_keys_match_function_params(params, sample_input.keys(), e) # For other errors, show a generic error message raise MlflowException.invalid_parameter_value( "Failed to run the prediction function specified in the `predict_fn` " f"parameter. 
Input: {sample_input}. Error: {e}\n\n" ) from e def _has_variable_positional_arguments(params: inspect.Signature) -> bool: """Check if the function has variable positional arguments.""" return any(p.kind == inspect.Parameter.VAR_POSITIONAL for p in params.values()) def _validate_no_var_args(params: inspect.Signature, e: Exception): if not any(p.kind == inspect.Parameter.VAR_POSITIONAL for p in params.values()): return """Raise an error for functions using *args which aren't supported.""" code_sample = """```python def predict_fn(param1, param2): # Invoke the original predict function with positional arguments return fn(param1, param2) data = [ { "inputs": { "param1": "value1", "param2": "value2", } } ] mlflow.genai.evaluate(predict_fn=predict_fn, data=data, ...) ``` """ raise MlflowException.invalid_parameter_value( "The `predict_fn` has dynamic positional arguments (e.g. `*args`), " "so it cannot be used as a `predict_fn`. Please wrap it into another " "function that accepts explicit keyword arguments.\n" f"Example:\n\n{code_sample}\n" ) from e def _validate_input_keys_match_function_params( params: inspect.Signature, input_keys: list[str], e: Exception, ): if _has_required_keyword_arguments(params, input_keys): return """Raise an error when input keys don't match function parameters.""" param_names = list(params.keys()) input_example = {arg: f"value{i + 1}" for i, arg in enumerate(param_names[:3])} if len(param_names) > 3: input_example["..."] = "..." code_sample = "\n".join( [ "```python", "data = [", " {", ' "inputs": {', *(f' "{k}": "{v}",' for k, v in input_example.items()), " }", " }", "]", "```", ] ) raise MlflowException.invalid_parameter_value( "The `inputs` column must be a dictionary with the parameter names of " f"the `predict_fn` as keys. It seems the specified keys do not match " f"with the `predict_fn`'s arguments. 
Correct example:\n\n{code_sample}" ) from e def _has_required_keyword_arguments(params: inspect.Signature, required_args: list[str]) -> bool: """Check if the function accepts the specified keyword arguments.""" func_args = [] for name, param in params.items(): # If the function has **kwargs, it accepts all keyword arguments if param.kind == inspect.Parameter.VAR_KEYWORD: return True func_args.append(name) # Required argument must be a subset of the function's arguments return set(required_args) <= set(func_args)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/utils/data_validation.py", "license": "Apache License 2.0", "lines": 117, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/utils/test_data_validation.py
import pytest import mlflow from mlflow.exceptions import MlflowException from mlflow.genai.utils.data_validation import check_model_prediction from tests.tracing.helper import get_traces def _extract_code_example(e: MlflowException) -> str: """Extract the code example from the exception message.""" return e.message.split("```python")[1].split("```")[0] @pytest.mark.parametrize( ("predict_fn", "sample_input"), [ # Single argument (lambda question: None, {"question": "What is the capital of France?"}), # Multiple arguments ( lambda question, context: None, { "question": "What is the capital of France?", "context": "France is a country in Europe.", }, ), # Unnamed keyword arguments (lambda **kwargs: None, {"question": "What is the capital of France?"}), # Mix of named and unnamed keyword arguments ( lambda question, **kwargs: None, { "question": "What is the capital of France?", "context": "France is a country in Europe.", }, ), # Non-string value ( lambda messages: None, { "messages": [ {"role": "user", "content": "What is the capital of France?"}, {"role": "assistant", "content": "Paris"}, ], }, ), ], ) def test_check_model_prediction(predict_fn, sample_input): check_model_prediction(predict_fn, sample_input) # No trace should be logged during the check assert len(get_traces()) == 0 traced_predict_fn = mlflow.trace(predict_fn) check_model_prediction(traced_predict_fn, sample_input) # A trace should be logged during the check assert len(get_traces()) == 0 # Running the traced function normally should pass and generate a trace traced_predict_fn(**sample_input) assert len(get_traces()) == 1 def test_check_model_prediction_class_methods(): class MyClass: def predict(self, question: str, context: str): return "response" @classmethod def predict_cls(cls, question: str, context: str): return "response" @staticmethod def predict_static(question: str, context: str): return "response" sample_input = { "question": "What is the capital of France?", "context": "France is a 
country in Europe.", } check_model_prediction(MyClass().predict, sample_input) check_model_prediction(MyClass.predict_cls, sample_input) check_model_prediction(MyClass.predict_static, sample_input) assert len(get_traces()) == 0 # Validate traced version check_model_prediction(mlflow.trace(MyClass().predict), sample_input) check_model_prediction(mlflow.trace(MyClass.predict_cls), sample_input) check_model_prediction(mlflow.trace(MyClass.predict_static), sample_input) assert len(get_traces()) == 0 def test_check_model_prediction_no_args(): def fn(): return "response" with pytest.raises(MlflowException, match=r"`predict_fn` must accept at least one argument."): check_model_prediction(fn, {"question": "What is the capital of France?"}) def test_check_model_prediction_variable_args(): """ If the function has variable positional arguments (*args), it is not supported. """ def fn(*args): return "response" with pytest.raises(MlflowException, match=r"The `predict_fn` has dynamic") as e: check_model_prediction(fn, {"question": "What is the capital of France?"}) expected_code_example = """ def predict_fn(param1, param2): # Invoke the original predict function with positional arguments return fn(param1, param2) data = [ { "inputs": { "param1": "value1", "param2": "value2", } } ] mlflow.genai.evaluate(predict_fn=predict_fn, data=data, ...) 
""" assert _extract_code_example(e.value) == expected_code_example def test_check_model_prediction_unmatched_keys(): def fn(role: str, content: str): return "response" sample_input = {"messages": [{"role": "user", "content": "What is the capital of France?"}]} with pytest.raises( MlflowException, match=r"The `inputs` column must be a dictionary with" ) as e: check_model_prediction(fn, sample_input) code_example = """ data = [ { "inputs": { "role": "value1", "content": "value2", } } ] """ assert _extract_code_example(e.value) == code_example def test_check_model_prediction_unmatched_keys_with_many_args(): def fn(param1, param2, param3, param4, param5): return "response" sample_input = {"question": "What is the capital of France?"} with pytest.raises(MlflowException, match=r"The `inputs` column must be a dictionary") as e: check_model_prediction(fn, sample_input) # The code snippet shouldn't show more than three parameters code_example = """ data = [ { "inputs": { "param1": "value1", "param2": "value2", "param3": "value3", "...": "...", } } ] """ assert _extract_code_example(e.value) == code_example def test_check_model_prediction_unmatched_keys_with_variable_kwargs(): def fn(question: str, **kwargs): return "response" sample_input = {"query": "What is the capital of France?"} with pytest.raises(MlflowException, match=r"Failed to run the prediction function"): check_model_prediction(fn, sample_input) def test_check_model_prediction_unknown_error(): def fn(question: str): raise ValueError("Unknown error") sample_input = {"question": "What is the capital of France?"} with pytest.raises(MlflowException, match=r"Failed to run the prediction function"): check_model_prediction(fn, sample_input)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/utils/test_data_validation.py", "license": "Apache License 2.0", "lines": 156, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/langgraph/sample_code/langgraph_with_autolog.py
from dataclasses import dataclass from langchain.tools import tool from langgraph.graph import END, StateGraph import mlflow mlflow.langchain.autolog() @dataclass class OverallState: name: str = "LangChain" # add whatever fields you need @tool def my_tool(): """ Called as the very first node. Side-effect: add an MLflow tag to the *current* trace. Must return a dict of state-field updates. """ mlflow.update_current_trace(tags={"order_total": "hello"}) return {"status": "done"} builder = StateGraph(dict) builder.add_node("test_tool", my_tool) # ← calls your tool builder.set_entry_point("test_tool") # start here builder.add_edge("test_tool", END) # nothing else to do graph = builder.compile() mlflow.models.set_model(graph)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/langgraph/sample_code/langgraph_with_autolog.py", "license": "Apache License 2.0", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/langchain/sample_code/workflow.py
import json import os from typing import Any, Sequence from langchain_core.language_models import LanguageModelLike from langchain_core.messages import AIMessage, ToolCall from langchain_core.outputs import ChatGeneration, ChatResult from langchain_core.runnables import RunnableConfig, RunnableLambda from langchain_core.tools import BaseTool, tool from langchain_openai import ChatOpenAI from langgraph.graph import END, StateGraph from langgraph.graph.state import CompiledStateGraph from langgraph.prebuilt import ToolNode import mlflow from mlflow.langchain.chat_agent_langgraph import ( ChatAgentState, ChatAgentToolNode, ) os.environ["OPENAI_API_KEY"] = "test" class FakeOpenAI(ChatOpenAI, extra="allow"): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._responses = iter( [ AIMessage( content="", tool_calls=[ToolCall(name="uc_tool_format", args={}, id="123")], ), AIMessage( content="", tool_calls=[ToolCall(name="lc_tool_format", args={}, id="456")], ), AIMessage(content="Successfully generated", id="789"), ] ) def _generate(self, *args, **kwargs): return ChatResult(generations=[ChatGeneration(message=next(self._responses))]) @tool def uc_tool_format() -> str: """Returns uc tool format""" return json.dumps( { "format": "SCALAR", "value": '{"content":"hi","attachments":{"a":"b"},"custom_outputs":{"c":"d"}}', "truncated": False, } ) @tool def lc_tool_format() -> dict[str, Any]: """Returns lc tool format""" nums = [1, 2] return { "content": f"Successfully generated array of 2 random ints: {nums}.", "attachments": {"key1": "attach1", "key2": "attach2"}, "custom_outputs": {"random_nums": nums}, } tools = [uc_tool_format, lc_tool_format] def create_tool_calling_agent( model: LanguageModelLike, tools: ToolNode | Sequence[BaseTool], agent_prompt: str | None = None, ) -> CompiledStateGraph: model = model.bind_tools(tools) def should_continue(state: ChatAgentState): messages = state["messages"] last_message = messages[-1] # If there are function 
calls, continue. else, end if last_message.get("tool_calls"): return "continue" else: return "end" preprocessor = RunnableLambda(lambda state: state["messages"]) model_runnable = preprocessor | model @mlflow.trace def call_model( state: ChatAgentState, config: RunnableConfig, ): response = model_runnable.invoke(state, config) return {"messages": [response]} workflow = StateGraph(ChatAgentState) workflow.add_node("agent", RunnableLambda(call_model)) workflow.add_node("tools", ChatAgentToolNode(tools)) workflow.set_entry_point("agent") workflow.add_conditional_edges( "agent", should_continue, { "continue": "tools", "end": END, }, ) workflow.add_edge("tools", "agent") return workflow.compile() mlflow.langchain.autolog() llm = FakeOpenAI() graph = create_tool_calling_agent(llm, tools) mlflow.models.set_model(graph)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/langchain/sample_code/workflow.py", "license": "Apache License 2.0", "lines": 97, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/scorers/validation.py
import importlib import logging from collections import defaultdict from typing import Any, Callable from mlflow.exceptions import MlflowException from mlflow.genai.scorers.base import AggregationFunc, Scorer from mlflow.genai.scorers.builtin_scorers import ( BuiltInScorer, MissingColumnsException, get_all_scorers, ) try: # `pandas` is not required for `mlflow-skinny`. import pandas as pd except ImportError: pass _logger = logging.getLogger(__name__) IS_DBX_AGENTS_INSTALLED = importlib.util.find_spec("databricks.agents") is not None def validate_scorers(scorers: list[Any]) -> list[Scorer]: """ Validate a list of specified scorers. Args: scorers: A list of scorers to validate. Returns: A list of valid scorers. """ if not isinstance(scorers, list): raise MlflowException.invalid_parameter_value( "The `scorers` argument must be a list of scorers. If you are unsure about which " "scorer to use, you can specify `scorers=mlflow.genai.scorers.get_all_scorers()` " "to jump start with all available built-in scorers." ) if len(scorers) == 0: return [] valid_scorers = [] legacy_metrics = [] for scorer in scorers: if isinstance(scorer, Scorer): valid_scorers.append(scorer) else: if IS_DBX_AGENTS_INSTALLED: from databricks.rag_eval.evaluation.metrics import Metric if isinstance(scorer, Metric): legacy_metrics.append(scorer) valid_scorers.append(scorer) continue # Show helpful error message for common mistakes if isinstance(scorer, list) and (scorer == get_all_scorers()): # Common mistake 1: scorers=[get_all_scorers()] if len(scorers) == 1: hint = ( "\nHint: Use `scorers=get_all_scorers()` to pass all " "builtin scorers at once." ) # Common mistake 2: scorers=[get_all_scorers(), scorer1, scorer2] elif len(scorer) > 1: hint = ( "\nHint: Use `scorers=[*get_all_scorers(), scorer1, scorer2]` to pass " "all builtin scorers at once along with your custom scorers." 
) # Common mistake 3: scorers=[RetrievalRelevance, Correctness] elif isinstance(scorer, type) and issubclass(scorer, BuiltInScorer): hint = ( "\nHint: It looks like you passed a scorer class instead of an instance. " f"Correct way to pass scorers is `scorers=[{scorer.__name__}()]`." ) else: hint = "" raise MlflowException.invalid_parameter_value( f"The `scorers` argument must be a list of scorers. The specified " f"list contains an invalid item with type: {type(scorer).__name__}." f"{hint}" ) if legacy_metrics: legacy_metric_names = [metric.name for metric in legacy_metrics] _logger.warning( f"Scorers {legacy_metric_names} are legacy metrics and will soon be deprecated " "in future releases. Please use the builtin scorers defined in `mlflow.genai.scorers` " "or custom scorers defined with the @scorer decorator instead." ) return valid_scorers def valid_data_for_builtin_scorers( data: "pd.DataFrame", builtin_scorers: list[BuiltInScorer], predict_fn: Callable[..., Any] | None = None, ) -> None: """ Validate that the required columns are present in the data for running the builtin scorers. Args: data: The data to validate. This must be a pandas DataFrame converted to the legacy evaluation set schema via `_convert_to_eval_set`. builtin_scorers: The list of builtin scorers to validate the data for. predict_fn: The predict function to validate the data for. """ input_columns = set(data.columns.tolist()) # Revert the replacement of "inputs"->"request" and "outputs"->"response" # in the upstream processing. if "request" in input_columns: input_columns.remove("request") input_columns.add("inputs") if "response" in input_columns: input_columns.remove("response") input_columns.add("outputs") if predict_fn is not None: # If the predict function is provided, the data doesn't need to # contain the "outputs" column. input_columns.add("outputs") if "trace" in input_columns: # Inputs and outputs are inferred from the trace. 
input_columns |= {"inputs", "outputs"} if predict_fn is not None: input_columns |= {"trace"} # Explode keys in the "expectations" column for easier processing. if "expectations" in input_columns: for value in data["expectations"].values: if pd.isna(value): continue if not isinstance(value, dict): raise MlflowException.invalid_parameter_value( "The 'expectations' column must be a dictionary of each expectation name " "to its value. For example, `{'expected_response': 'answer to the question'}`." ) for k in value: input_columns.add(f"expectations/{k}") # Missing column -> list of scorers that require the column. missing_col_to_scorers = defaultdict(list) for scorer in builtin_scorers: try: scorer.validate_columns(input_columns) except MissingColumnsException as e: for col in e.missing_columns: missing_col_to_scorers[col].append(scorer.name) if missing_col_to_scorers: msg = ( "The input data is missing following columns that are required by the specified " "scorers. The results will be null for those scorers." ) for col, scorers in missing_col_to_scorers.items(): if col.startswith("expectations/"): col = col.replace("expectations/", "") msg += ( f"\n - `{col}` field in `expectations` column " f"is required by [{', '.join(scorers)}]." ) else: msg += f"\n - `{col}` column is required by [{', '.join(scorers)}]." _logger.info(msg) def validate_aggregations(aggregations: list[str | AggregationFunc] | None) -> None: """ Validate that aggregations are either valid string names or callable functions. Args: aggregations: List of aggregation functions to validate. Can be strings from the standard set or callable functions. """ if not aggregations: return from mlflow.genai.scorers.aggregation import _AGGREGATE_FUNCTIONS valid_aggregation_names = set(_AGGREGATE_FUNCTIONS.keys()) for agg in aggregations: if isinstance(agg, str): if agg not in valid_aggregation_names: raise MlflowException.invalid_parameter_value( f"Invalid aggregation '{agg}'. 
Valid aggregations are: " f"{sorted(valid_aggregation_names)}" ) elif not callable(agg): raise MlflowException.invalid_parameter_value( f"Aggregation must be either a string from {sorted(valid_aggregation_names)} " f"or a callable function, got {type(agg).__name__}" )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/validation.py", "license": "Apache License 2.0", "lines": 170, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/scorers/test_validation.py
from unittest import mock import pandas as pd import pytest import mlflow from mlflow.exceptions import MlflowException from mlflow.genai.evaluation.utils import _convert_to_eval_set from mlflow.genai.scorers.base import Scorer, scorer from mlflow.genai.scorers.builtin_scorers import ( Correctness, ExpectationsGuidelines, Guidelines, RelevanceToQuery, RetrievalGroundedness, RetrievalSufficiency, get_all_scorers, ) from mlflow.genai.scorers.validation import valid_data_for_builtin_scorers, validate_scorers from tests.genai.conftest import databricks_only @pytest.fixture def mock_logger(): with mock.patch("mlflow.genai.scorers.validation._logger") as mock_logger: yield mock_logger def test_validate_scorers_valid(): @scorer def custom_scorer(inputs, outputs): return 1.0 scorers = validate_scorers( [ RelevanceToQuery(), Correctness(), Guidelines(guidelines=["Be polite", "Be kind"]), custom_scorer, ] ) assert len(scorers) == 4 assert all(isinstance(scorer, Scorer) for scorer in scorers) def test_validate_scorers_empty_list(): assert validate_scorers([]) == [] @databricks_only def test_validate_scorers_legacy_metric(): from databricks.agents.evals import metric @metric def legacy_metric_1(request, response): return 1.0 @metric def legacy_metric_2(request, response): return 1.0 with mock.patch("mlflow.genai.scorers.validation._logger") as mock_logger: scorers = validate_scorers([legacy_metric_1, legacy_metric_2]) assert len(scorers) == 2 mock_logger.warning.assert_called_once() assert "legacy_metric_1" in mock_logger.warning.call_args[0][0] def test_validate_scorers_invalid_all_scorers(): with pytest.raises(MlflowException, match="The `scorers` argument must be a list") as e: validate_scorers([1, 2, 3]) assert "an invalid item with type: int" in str(e.value) # Special case 1: List of list of all scorers with pytest.raises(MlflowException, match="The `scorers` argument must be a list") as e: validate_scorers([get_all_scorers()]) assert "an invalid item with type: list" in 
str(e.value) assert "Hint: Use `scorers=get_all_scorers()` to pass all" in str(e.value) # Special case 2: List of list of all scorers + custom scorers with pytest.raises(MlflowException, match="The `scorers` argument must be a list") as e: validate_scorers([get_all_scorers(), RelevanceToQuery(), Correctness()]) assert "an invalid item with type: list" in str(e.value) assert "Hint: Use `scorers=[*get_all_scorers(), scorer1, scorer2]` to pass all" in str(e.value) # Special case 3: List of classes (not instances) with pytest.raises(MlflowException, match="The `scorers` argument must be a list") as e: validate_scorers([RelevanceToQuery]) assert "Correct way to pass scorers is `scorers=[RelevanceToQuery()]`." in str(e.value) def test_validate_data(mock_logger, sample_rag_trace): data = pd.DataFrame( { "inputs": [{"question": "input1"}, {"question": "input2"}], "outputs": ["output1", "output2"], "trace": [sample_rag_trace, sample_rag_trace], } ) converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, builtin_scorers=[ RelevanceToQuery(), RetrievalGroundedness(), Guidelines(guidelines=["Be polite", "Be kind"]), ], ) mock_logger.info.assert_not_called() def test_validate_data_with_expectations(mock_logger, sample_rag_trace): data = pd.DataFrame( { "inputs": [{"question": "input1"}, {"question": "input2"}], "outputs": ["output1", "output2"], "trace": [sample_rag_trace, sample_rag_trace], "expectations": [ {"expected_response": "response1", "guidelines": ["Be polite", "Be kind"]}, {"expected_response": "response2", "guidelines": ["Be nice", "Be strong"]}, ], } ) converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, builtin_scorers=[ RelevanceToQuery(), RetrievalSufficiency(), # requires expected_response in expectations ExpectationsGuidelines(), # requires guidelines in expectations ], ) mock_logger.info.assert_not_called() def test_global_guidelines_do_not_require_expectations(mock_logger): 
data = pd.DataFrame( { "inputs": [{"question": "input1"}, {"question": "input2"}], "outputs": ["output1", "output2"], } ) converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, builtin_scorers=[Guidelines(guidelines=["Be polite", "Be kind"])], ) mock_logger.info.assert_not_called() @pytest.mark.parametrize( "expectations", [ {"expected_facts": [["fact1", "fact2"], ["fact3"]]}, {"expected_response": ["expectation1", "expectation2"]}, ], ) def test_validate_data_with_correctness(expectations, mock_logger): data = pd.DataFrame( { "inputs": [{"question": "input1"}, {"question": "input2"}], "outputs": ["output1", "output2"], "expectations": [expectations, expectations], } ) converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, builtin_scorers=[Correctness()], ) valid_data_for_builtin_scorers( data=pd.DataFrame({"inputs": ["input1"], "outputs": ["output1"]}), builtin_scorers=[Correctness()], ) mock_logger.info.assert_called_once() message = mock_logger.info.call_args[0][0] assert "expected_response or expected_facts" in message def test_validate_data_missing_columns(mock_logger): data = pd.DataFrame({"inputs": [{"question": "input1"}, {"question": "input2"}]}) converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, builtin_scorers=[ RelevanceToQuery(), RetrievalGroundedness(), Guidelines(guidelines=["Be polite", "Be kind"]), ], ) mock_logger.info.assert_called_once() msg = mock_logger.info.call_args[0][0] assert " - `outputs` column is required by [relevance_to_query, guidelines]." in msg assert " - `trace` column is required by [retrieval_groundedness]." in msg def test_validate_data_with_trace(mock_logger): # When a trace is provided, the inputs, outputs, and retrieved_context are # inferred from the trace. 
with mlflow.start_span() as span: span.set_inputs({"question": "What is the capital of France?"}) span.set_outputs("Paris") trace = mlflow.get_trace(span.trace_id) data = [{"trace": trace}, {"trace": trace}] converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, builtin_scorers=[ RelevanceToQuery(), RetrievalGroundedness(), Guidelines(guidelines=["Be polite", "Be kind"]), ], ) mock_logger.info.assert_not_called() def test_validate_data_with_predict_fn(mock_logger): data = pd.DataFrame({"inputs": [{"question": "input1"}, {"question": "input2"}]}) converted_date = _convert_to_eval_set(data) valid_data_for_builtin_scorers( data=converted_date, predict_fn=lambda x: x, builtin_scorers=[ # Requires "outputs" but predict_fn will provide it Guidelines(guidelines=["Be polite", "Be kind"]), # Requires "retrieved_context" but predict_fn will provide it RelevanceToQuery(), ], ) mock_logger.info.assert_not_called()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/test_validation.py", "license": "Apache License 2.0", "lines": 197, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/models/evaluation/deprecated.py
import functools import warnings from mlflow.models.evaluation import evaluate as model_evaluate @functools.wraps(model_evaluate) def evaluate(*args, **kwargs): warnings.warn( "The `mlflow.evaluate` API has been deprecated as of MLflow 3.0.0. " "Please use these new alternatives:\n\n" " - For traditional ML or deep learning models: Use `mlflow.models.evaluate`, " "which maintains full compatibility with the original `mlflow.evaluate` API.\n\n" " - For LLMs or GenAI applications: Use the new `mlflow.genai.evaluate` API, " "which offers enhanced features specifically designed for evaluating " "LLMs and GenAI applications.\n", FutureWarning, ) return model_evaluate(*args, **kwargs)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/models/evaluation/deprecated.py", "license": "Apache License 2.0", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/evaluate/test_deprecated.py
import warnings from contextlib import contextmanager from unittest.mock import patch import pandas as pd import pytest import mlflow _TEST_DATA = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) @pytest.mark.parametrize("tracking_uri", ["databricks", "http://localhost:5000"]) def test_global_evaluate_warn_in_tracking_uri(tracking_uri): with patch("mlflow.get_tracking_uri", return_value=tracking_uri): with pytest.warns(FutureWarning, match="The `mlflow.evaluate` API has been deprecated"): mlflow.evaluate( data=_TEST_DATA, model=lambda x: x["x"] * 2, extra_metrics=[mlflow.metrics.latency()], ) @contextmanager def no_future_warning(): with warnings.catch_warnings(): # Translate future warning into an exception warnings.simplefilter("error", FutureWarning) yield @pytest.mark.parametrize("tracking_uri", ["databricks", "sqlite://"]) def test_models_evaluate_does_not_warn(tracking_uri): with patch("mlflow.get_tracking_uri", return_value=tracking_uri): with no_future_warning(): mlflow.models.evaluate( data=_TEST_DATA, model=lambda x: x["x"] * 2, extra_metrics=[mlflow.metrics.mse()], )
{ "repo_id": "mlflow/mlflow", "file_path": "tests/evaluate/test_deprecated.py", "license": "Apache License 2.0", "lines": 31, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/evaluation/constant.py
class AgentEvaluationReserverKey: """ Expectation column names that are used by Agent Evaluation. Ref: https://docs.databricks.com/aws/en/generative-ai/agent-evaluation/evaluation-schema """ EXPECTED_RESPONSE = "expected_response" EXPECTED_RETRIEVED_CONTEXT = "expected_retrieved_context" EXPECTED_FACTS = "expected_facts" GUIDELINES = "guidelines" @classmethod def get_all(cls) -> set[str]: return { cls.EXPECTED_RESPONSE, cls.EXPECTED_RETRIEVED_CONTEXT, cls.EXPECTED_FACTS, cls.GUIDELINES, } # A column name for storing custom expectations dictionary in Agent Evaluation. AGENT_EVAL_CUSTOM_EXPECTATION_KEY = "custom_expected" # Input dataset column names class InputDatasetColumn: REQUEST_ID = "request_id" INPUTS = "inputs" REQUEST = "request" RESPONSE = "response" OUTPUTS = "outputs" EXPECTATIONS = "expectations" TAGS = "tags" TRACE = "trace" SOURCE = "source" # Result Dataframe column names class ResultDataFrameColumn: REQUEST_ID = "request_id" INPUTS = "inputs" OUTPUTS = "outputs" EXPECTATIONS = "expectations" TAGS = "tags" TRACE = "trace" ERROR_MESSAGE = "error_message"
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/evaluation/constant.py", "license": "Apache License 2.0", "lines": 39, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/entities/test_trace_info_v2.py
import pytest from google.protobuf.duration_pb2 import Duration from google.protobuf.timestamp_pb2 import Timestamp from mlflow.entities.trace_info_v2 import TraceInfoV2 from mlflow.entities.trace_status import TraceStatus from mlflow.protos.service_pb2 import TraceInfo as ProtoTraceInfo from mlflow.protos.service_pb2 import TraceRequestMetadata as ProtoTraceRequestMetadata from mlflow.protos.service_pb2 import TraceTag as ProtoTraceTag from mlflow.tracing.constant import ( MAX_CHARS_IN_TRACE_INFO_METADATA, MAX_CHARS_IN_TRACE_INFO_TAGS_KEY, MAX_CHARS_IN_TRACE_INFO_TAGS_VALUE, TRACE_SCHEMA_VERSION_KEY, ) @pytest.fixture def trace_info(): return TraceInfoV2( request_id="request_id", experiment_id="test_experiment", timestamp_ms=0, execution_time_ms=1, status=TraceStatus.OK, request_metadata={ "foo": "bar", "k" * 1000: "v" * 1000, }, tags={ "baz": "qux", "k" * 2000: "v" * 8000, }, assessments=[], ) @pytest.fixture def trace_info_proto(): ti_proto = ProtoTraceInfo() ti_proto.request_id = "request_id" ti_proto.experiment_id = "test_experiment" ti_proto.timestamp_ms = 0 ti_proto.execution_time_ms = 1 ti_proto.status = TraceStatus.OK.to_proto() request_metadata_1 = ti_proto.request_metadata.add() request_metadata_1.key = "foo" request_metadata_1.value = "bar" request_metadata_2 = ti_proto.request_metadata.add() request_metadata_2.key = "k" * 250 request_metadata_2.value = "v" * 250 request_metadata_3 = ti_proto.request_metadata.add() request_metadata_3.key = TRACE_SCHEMA_VERSION_KEY request_metadata_3.value = "2" tag_1 = ti_proto.tags.add() tag_1.key = "baz" tag_1.value = "qux" tag_2 = ti_proto.tags.add() tag_2.key = "k" * 250 tag_2.value = "v" * 250 return ti_proto def test_to_proto(trace_info): proto = trace_info.to_proto() assert proto.request_id == "request_id" assert proto.experiment_id == "test_experiment" assert proto.timestamp_ms == 0 assert proto.execution_time_ms == 1 assert proto.status == 1 request_metadata_1 = proto.request_metadata[0] assert 
isinstance(request_metadata_1, ProtoTraceRequestMetadata) assert request_metadata_1.key == "foo" assert request_metadata_1.value == "bar" request_metadata_2 = proto.request_metadata[1] assert isinstance(request_metadata_2, ProtoTraceRequestMetadata) assert request_metadata_2.key == "k" * MAX_CHARS_IN_TRACE_INFO_METADATA assert request_metadata_2.value == "v" * MAX_CHARS_IN_TRACE_INFO_METADATA tag_1 = proto.tags[0] assert isinstance(tag_1, ProtoTraceTag) assert tag_1.key == "baz" assert tag_1.value == "qux" tag_2 = proto.tags[1] assert isinstance(tag_2, ProtoTraceTag) assert tag_2.key == "k" * MAX_CHARS_IN_TRACE_INFO_TAGS_KEY assert tag_2.value == "v" * MAX_CHARS_IN_TRACE_INFO_TAGS_VALUE def test_to_dict(trace_info): trace_as_dict = trace_info.to_dict() assert trace_as_dict == { "request_id": "request_id", "experiment_id": "test_experiment", "timestamp_ms": 0, "execution_time_ms": 1, "status": "OK", "request_metadata": { "foo": "bar", "k" * 1000: "v" * 1000, }, "tags": { "baz": "qux", "k" * 2000: "v" * 8000, }, "assessments": [], } def test_trace_info_serialization_deserialization(trace_info_proto): # trace info proto -> TraceInfo trace_info = TraceInfoV2.from_proto(trace_info_proto) assert trace_info.request_id == "request_id" assert trace_info.experiment_id == "test_experiment" assert trace_info.timestamp_ms == 0 assert trace_info.execution_time_ms == 1 assert trace_info.status == TraceStatus.OK assert trace_info.request_metadata == { "foo": "bar", "k" * 250: "v" * 250, TRACE_SCHEMA_VERSION_KEY: "2", } assert trace_info.tags == { "baz": "qux", "k" * 250: "v" * 250, } # TraceInfo -> python native dictionary trace_info_as_dict = trace_info.to_dict() assert trace_info_as_dict == { "request_id": "request_id", "experiment_id": "test_experiment", "timestamp_ms": 0, "execution_time_ms": 1, "status": "OK", "request_metadata": { "foo": "bar", "k" * 250: "v" * 250, TRACE_SCHEMA_VERSION_KEY: "2", }, "tags": { "baz": "qux", "k" * 250: "v" * 250, }, "assessments": [], } # 
python native dictionary -> TraceInfo assert TraceInfoV2.from_dict(trace_info_as_dict) == trace_info # TraceInfo -> trace info proto assert trace_info.to_proto() == trace_info_proto def test_trace_info_v3(trace_info): v3_proto = trace_info.to_v3("request", "response").to_proto() assert v3_proto.request_preview == "request" assert v3_proto.response_preview == "response" assert v3_proto.trace_id == "request_id" assert isinstance(v3_proto.request_time, Timestamp) assert v3_proto.request_time.ToSeconds() == 0 assert isinstance(v3_proto.execution_duration, Duration) assert v3_proto.execution_duration.ToMilliseconds() == 1 assert v3_proto.state == 1 assert v3_proto.trace_metadata["foo"] == "bar" assert ( v3_proto.trace_metadata["k" * MAX_CHARS_IN_TRACE_INFO_METADATA] == "v" * MAX_CHARS_IN_TRACE_INFO_METADATA ) assert v3_proto.tags["baz"] == "qux" assert ( v3_proto.tags["k" * MAX_CHARS_IN_TRACE_INFO_TAGS_KEY] == "v" * MAX_CHARS_IN_TRACE_INFO_TAGS_VALUE )
{ "repo_id": "mlflow/mlflow", "file_path": "tests/entities/test_trace_info_v2.py", "license": "Apache License 2.0", "lines": 157, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/utils/environment.py
import logging import os from functools import lru_cache from mlflow.tracking.context.git_context import GitRunContext from mlflow.tracking.context.registry import resolve_tags from mlflow.utils.databricks_utils import is_in_databricks_notebook from mlflow.utils.git_utils import get_git_branch, get_git_commit, get_git_repo_url from mlflow.utils.mlflow_tags import ( MLFLOW_GIT_BRANCH, MLFLOW_GIT_COMMIT, MLFLOW_GIT_REPO_URL, TRACE_RESOLVE_TAGS_ALLOWLIST, ) _logger = logging.getLogger(__name__) @lru_cache(maxsize=1) def resolve_env_metadata(): """ Resolve common environment metadata to be saved in the trace info. These should not # change over time, so we resolve them only once. These will be stored in trace # metadata rather than tags, because they are immutable. """ # GitRunContext does not property work in notebook because _get_main_file() # points to the kernel launcher file, not the actual notebook file. metadata = resolve_tags(ignore=[GitRunContext]) if not is_in_databricks_notebook(): # Get Git metadata for the script or notebook. If the notebook is in a # Databricks managed Git repo, DatabricksRepoRunContext the metadata # so we don't need to run this logic. metadata.update(_resolve_git_metadata()) return {key: value for key, value in metadata.items() if key in TRACE_RESOLVE_TAGS_ALLOWLIST} def _resolve_git_metadata(): try: import git # noqa: F401 except ImportError: _logger.debug("Git python package is not installed. Skipping git metadata resolution.") return {} try: repo = os.getcwd() return { MLFLOW_GIT_COMMIT: get_git_commit(repo) or "", MLFLOW_GIT_REPO_URL: get_git_repo_url(repo) or "", MLFLOW_GIT_BRANCH: get_git_branch(repo) or "", } except Exception: _logger.debug("Failed to resolve git metadata", exc_info=True) return {}
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/utils/environment.py", "license": "Apache License 2.0", "lines": 46, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/tracing/utils/test_environment.py
from unittest import mock import pytest from mlflow.tracing.utils.environment import resolve_env_metadata from mlflow.utils.mlflow_tags import ( MLFLOW_DATABRICKS_NOTEBOOK_ID, MLFLOW_DATABRICKS_NOTEBOOK_PATH, MLFLOW_GIT_BRANCH, MLFLOW_GIT_COMMIT, MLFLOW_GIT_REPO_URL, MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE, MLFLOW_USER, ) from mlflow.version import IS_TRACING_SDK_ONLY @pytest.fixture(autouse=True) def clear_lru_cache(): resolve_env_metadata.cache_clear() def test_resolve_env_metadata(): expected_metadata = { MLFLOW_USER: mock.ANY, MLFLOW_SOURCE_NAME: mock.ANY, MLFLOW_SOURCE_TYPE: "LOCAL", } if not IS_TRACING_SDK_ONLY: expected_metadata.update( { MLFLOW_GIT_BRANCH: mock.ANY, MLFLOW_GIT_COMMIT: mock.ANY, MLFLOW_GIT_REPO_URL: mock.ANY, } ) assert resolve_env_metadata() == expected_metadata def test_resolve_env_metadata_in_databricks_notebook(): with ( mock.patch( "mlflow.tracking.context.databricks_notebook_context.databricks_utils" ) as mock_db_utils, mock.patch("mlflow.tracing.utils.environment.is_in_databricks_notebook", return_value=True), ): mock_db_utils.is_in_databricks_notebook.return_value = True mock_db_utils.get_notebook_id.return_value = "notebook_123" mock_db_utils.get_notebook_path.return_value = "/Users/bob/test.py" mock_db_utils.get_webapp_url.return_value = None mock_db_utils.get_workspace_url.return_value = None mock_db_utils.get_workspace_id.return_value = None mock_db_utils.get_workspace_info_from_dbutils.return_value = (None, None) assert resolve_env_metadata() == { MLFLOW_USER: mock.ANY, MLFLOW_SOURCE_NAME: "/Users/bob/test.py", MLFLOW_SOURCE_TYPE: "NOTEBOOK", MLFLOW_DATABRICKS_NOTEBOOK_ID: "notebook_123", MLFLOW_DATABRICKS_NOTEBOOK_PATH: "/Users/bob/test.py", }
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/utils/test_environment.py", "license": "Apache License 2.0", "lines": 53, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/entities/trace_info_v2.py
from dataclasses import asdict, dataclass, field from typing import Any from mlflow.entities._mlflow_object import _MlflowObject from mlflow.entities.assessment import Assessment from mlflow.entities.trace_info import TraceInfo from mlflow.entities.trace_location import TraceLocation from mlflow.entities.trace_status import TraceStatus from mlflow.protos.service_pb2 import TraceInfo as ProtoTraceInfo from mlflow.protos.service_pb2 import TraceRequestMetadata as ProtoTraceRequestMetadata from mlflow.protos.service_pb2 import TraceTag as ProtoTraceTag def _truncate_request_metadata(d: dict[str, Any]) -> dict[str, str]: from mlflow.tracing.constant import MAX_CHARS_IN_TRACE_INFO_METADATA return { k[:MAX_CHARS_IN_TRACE_INFO_METADATA]: str(v)[:MAX_CHARS_IN_TRACE_INFO_METADATA] for k, v in d.items() } def _truncate_tags(d: dict[str, Any]) -> dict[str, str]: from mlflow.tracing.constant import ( MAX_CHARS_IN_TRACE_INFO_TAGS_KEY, MAX_CHARS_IN_TRACE_INFO_TAGS_VALUE, ) return { k[:MAX_CHARS_IN_TRACE_INFO_TAGS_KEY]: str(v)[:MAX_CHARS_IN_TRACE_INFO_TAGS_VALUE] for k, v in d.items() } @dataclass class TraceInfoV2(_MlflowObject): """Metadata about a trace. Args: request_id: id of the trace. experiment_id: id of the experiment. timestamp_ms: start time of the trace, in milliseconds. execution_time_ms: duration of the trace, in milliseconds. status: status of the trace. request_metadata: Key-value pairs associated with the trace. Request metadata are designed for immutable values like run ID associated with the trace. tags: Tags associated with the trace. Tags are designed for mutable values like trace name, that can be updated by the users after the trace is created, unlike request_metadata. 
""" request_id: str experiment_id: str timestamp_ms: int execution_time_ms: int | None status: TraceStatus request_metadata: dict[str, str] = field(default_factory=dict) tags: dict[str, str] = field(default_factory=dict) assessments: list[Assessment] = field(default_factory=list) def __eq__(self, other): if type(other) is type(self): return self.__dict__ == other.__dict__ return False @property def trace_id(self) -> str: """Returns the trace ID of the trace info.""" return self.request_id def to_proto(self): proto = ProtoTraceInfo() proto.request_id = self.request_id proto.experiment_id = self.experiment_id proto.timestamp_ms = self.timestamp_ms # NB: Proto setter does not support nullable fields (even with 'optional' keyword), # so we substitute None with 0 for execution_time_ms. This should be not too confusing # as we only put None when starting a trace i.e. the execution time is actually 0. proto.execution_time_ms = self.execution_time_ms or 0 proto.status = self.status.to_proto() request_metadata = [] for key, value in _truncate_request_metadata(self.request_metadata).items(): attr = ProtoTraceRequestMetadata() attr.key = key attr.value = value request_metadata.append(attr) proto.request_metadata.extend(request_metadata) tags = [] for key, value in _truncate_tags(self.tags).items(): tag = ProtoTraceTag() tag.key = key tag.value = str(value) tags.append(tag) proto.tags.extend(tags) return proto @classmethod def from_proto(cls, proto, assessments=None): return cls( request_id=proto.request_id, experiment_id=proto.experiment_id, timestamp_ms=proto.timestamp_ms, execution_time_ms=proto.execution_time_ms, status=TraceStatus.from_proto(proto.status), request_metadata={attr.key: attr.value for attr in proto.request_metadata}, tags={tag.key: tag.value for tag in proto.tags}, assessments=assessments or [], ) def to_dict(self): """ Convert trace info to a dictionary for persistence. Update status field to the string value for serialization. 
""" trace_info_dict = asdict(self) trace_info_dict["status"] = self.status.value # Client request ID field is only added for internal use, and should not be # serialized for V2 TraceInfo. trace_info_dict.pop("client_request_id", None) return trace_info_dict @classmethod def from_dict(cls, trace_info_dict): """ Convert trace info dictionary to TraceInfo object. """ if "status" not in trace_info_dict: raise ValueError("status is required in trace info dictionary.") trace_info_dict["status"] = TraceStatus(trace_info_dict["status"]) return cls(**trace_info_dict) def to_v3(self, request: str | None = None, response: str | None = None) -> TraceInfo: return TraceInfo( trace_id=self.request_id, trace_location=TraceLocation.from_experiment_id(self.experiment_id), request_preview=request, response_preview=response, request_time=self.timestamp_ms, execution_duration=self.execution_time_ms, state=self.status.to_state(), trace_metadata=self.request_metadata.copy(), tags=self.tags, assessments=self.assessments, ) @classmethod def from_v3(cls, trace_info: TraceInfo) -> "TraceInfoV2": return cls( request_id=trace_info.trace_id, experiment_id=trace_info.experiment_id, timestamp_ms=trace_info.request_time, execution_time_ms=trace_info.execution_duration, status=TraceStatus.from_state(trace_info.state), request_metadata=trace_info.trace_metadata.copy(), tags=trace_info.tags, )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/entities/trace_info_v2.py", "license": "Apache License 2.0", "lines": 136, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:examples/pydanticai/tracing.py
""" This is an example for leveraging MLflow's auto tracing capabilities for Pydantic AI. Most codes are from https://ai.pydantic.dev/examples/bank-support/. """ import mlflow import mlflow.pydantic_ai mlflow.set_tracking_uri("http://localhost:5000") mlflow.set_experiment("Pydantic AI Example") mlflow.pydantic_ai.autolog(disable=False) from dataclasses import dataclass from pydantic import BaseModel, Field from pydantic_ai import Agent, RunContext class DatabaseConn: """This is a fake database for example purposes. In reality, you'd be connecting to an external database (e.g. PostgreSQL) to get information about customers. """ @classmethod async def customer_name(cls, *, id: int) -> str | None: if id == 123: return "John" @classmethod async def customer_balance(cls, *, id: int, include_pending: bool) -> float: if id == 123 and include_pending: return 123.45 else: raise ValueError("Customer not found") @dataclass class SupportDependencies: customer_id: int db: DatabaseConn class SupportOutput(BaseModel): support_advice: str = Field(description="Advice returned to the customer") block_card: bool = Field(description="Whether to block their card or not") risk: int = Field(description="Risk level of query", ge=0, le=10) support_agent = Agent( "openai:gpt-4o", deps_type=SupportDependencies, output_type=SupportOutput, system_prompt=( "You are a support agent in our bank, give the " "customer support and judge the risk level of their query. " "Reply using the customer's name." 
), instrument=True, ) @support_agent.system_prompt async def add_customer_name(ctx: RunContext[SupportDependencies]) -> str: customer_name = await ctx.deps.db.customer_name(id=ctx.deps.customer_id) return f"The customer's name is {customer_name!r}" @support_agent.tool async def customer_balance(ctx: RunContext[SupportDependencies], include_pending: bool) -> str: """Returns the customer's current account balance.""" balance = await ctx.deps.db.customer_balance( id=ctx.deps.customer_id, include_pending=include_pending, ) return f"${balance:.2f}" if __name__ == "__main__": deps = SupportDependencies(customer_id=123, db=DatabaseConn()) result = support_agent.run_sync("What is my balance?", deps=deps) print(result.output) result = support_agent.run_sync("I just lost my card!", deps=deps) print(result.output)
{ "repo_id": "mlflow/mlflow", "file_path": "examples/pydanticai/tracing.py", "license": "Apache License 2.0", "lines": 64, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/pydantic_ai/autolog.py
import contextvars import inspect import logging from contextlib import asynccontextmanager from dataclasses import asdict, is_dataclass from typing import Any import mlflow from mlflow.entities import SpanType from mlflow.entities.span import LiveSpan from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey from mlflow.tracing.provider import with_active_span from mlflow.utils.autologging_utils.config import AutoLoggingConfig _logger = logging.getLogger(__name__) # Context variable to track when we're inside run_stream_sync to prevent # double span creation (run_stream_sync internally calls run_stream) _in_sync_stream_context: contextvars.ContextVar[bool] = contextvars.ContextVar( "_in_sync_stream_context", default=False ) _SAFE_PRIMITIVE_TYPES = (str, int, float, bool) def _is_safe_for_serialization(value: Any) -> bool: if value is None: return False if isinstance(value, _SAFE_PRIMITIVE_TYPES): return True if isinstance(value, dict): return all(_is_safe_for_serialization(v) for v in value.values()) if isinstance(value, (list, tuple)): return all(_is_safe_for_serialization(v) for v in value) if is_dataclass(value) and not isinstance(value, type): return True if isinstance(value, type): return True return False def _safe_get_attribute(instance: Any, key: str) -> Any: try: value = getattr(instance, key, None) if value is None: return None if isinstance(value, type): return value.__name__ if _is_safe_for_serialization(value): return value return None except Exception: return None def _extract_safe_attributes(instance: Any) -> dict[str, Any]: """Extract all public attributes that are safe for serialization. Skips attributes starting with underscore to avoid capturing internal references (e.g., httpx clients) that can interfere with async cleanup. 
""" attrs = {} for key in dir(instance): if key.startswith("_"): continue value = getattr(instance, key, None) # Skip methods/functions, but keep types (e.g., output_type=str) if callable(value) and not isinstance(value, type): continue safe_value = _safe_get_attribute(instance, key) if safe_value is not None: attrs[key] = safe_value return attrs def _set_span_attributes(span: LiveSpan, instance): # 1) MCPServer attributes try: from pydantic_ai.mcp import MCPServer if isinstance(instance, MCPServer): mcp_attrs = _get_mcp_server_attributes(instance) span.set_attributes({k: v for k, v in mcp_attrs.items() if v is not None}) except Exception as e: _logger.warning("Failed saving MCPServer attributes: %s", e) # 2) Agent attributes try: from pydantic_ai import Agent if isinstance(instance, Agent): agent_attrs = _get_agent_attributes(instance) span.set_attributes({k: v for k, v in agent_attrs.items() if v is not None}) except Exception as e: _logger.warning("Failed saving Agent attributes: %s", e) # 3) InstrumentedModel attributes try: from pydantic_ai.models.instrumented import InstrumentedModel if isinstance(instance, InstrumentedModel): model_attrs = _get_model_attributes(instance) span.set_attributes({k: v for k, v in model_attrs.items() if v is not None}) if model_name := getattr(instance, "model_name", None): span.set_attribute(SpanAttributeKey.MODEL, model_name) except Exception as e: _logger.warning("Failed saving InstrumentedModel attributes: %s", e) # 4) Tool attributes try: from pydantic_ai import Tool if isinstance(instance, Tool): tool_attrs = _get_tool_attributes(instance) span.set_attributes({k: v for k, v in tool_attrs.items() if v is not None}) except Exception as e: _logger.warning("Failed saving Tool attributes: %s", e) def patched_agent_init(original, self, *args, **kwargs): cfg = AutoLoggingConfig.init(flavor_name=mlflow.pydantic_ai.FLAVOR_NAME) if cfg.log_traces and kwargs.get("instrument") is None: kwargs["instrument"] = True return original(self, 
*args, **kwargs) async def patched_async_class_call(original, self, *args, **kwargs): cfg = AutoLoggingConfig.init(flavor_name=mlflow.pydantic_ai.FLAVOR_NAME) if not cfg.log_traces: return await original(self, *args, **kwargs) fullname = f"{self.__class__.__name__}.{original.__name__}" span_type = _get_span_type(self) with mlflow.start_span(name=fullname, span_type=span_type) as span: inputs = _construct_full_inputs(original, self, *args, **kwargs) span.set_inputs(inputs) _set_span_attributes(span, self) result = await original(self, *args, **kwargs) outputs = _serialize_output(result) span.set_outputs(outputs) if usage_dict := _parse_usage(result): span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage_dict) return result def patched_class_call(original, self, *args, **kwargs): cfg = AutoLoggingConfig.init(flavor_name=mlflow.pydantic_ai.FLAVOR_NAME) if not cfg.log_traces: return original(self, *args, **kwargs) fullname = f"{self.__class__.__name__}.{original.__name__}" span_type = _get_span_type(self) with mlflow.start_span(name=fullname, span_type=span_type) as span: inputs = _construct_full_inputs(original, self, *args, **kwargs) span.set_inputs(inputs) _set_span_attributes(span, self) result = original(self, *args, **kwargs) outputs = _serialize_output(result) span.set_outputs(outputs) if usage_dict := _parse_usage(result): span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage_dict) return result def patched_async_stream_call(original, self, *args, **kwargs): @asynccontextmanager async def _wrapper(): cfg = AutoLoggingConfig.init(flavor_name=mlflow.pydantic_ai.FLAVOR_NAME) if not cfg.log_traces: async with original(self, *args, **kwargs) as result: yield result return # Skip span creation ONLY for Agent.run_stream when inside run_stream_sync. # Agent.run_stream_sync already creates a root span, so we don't need another # Agent.run_stream span. But we DO want InstrumentedModel spans (LLM calls). 
# The async context manager for Agent.run_stream won't properly exit when # called from run_stream_sync (pydantic_ai's implementation uses a generator # that pauses), so we skip it to avoid orphaned spans. from pydantic_ai import Agent if _in_sync_stream_context.get() and isinstance(self, Agent): async with original(self, *args, **kwargs) as result: yield result return fullname = f"{self.__class__.__name__}.{original.__name__}" span_type = _get_span_type(self) with mlflow.start_span(name=fullname, span_type=span_type) as span: inputs = _construct_full_inputs(original, self, *args, **kwargs) span.set_inputs(inputs) _set_span_attributes(span, self) async with original(self, *args, **kwargs) as stream_result: try: yield stream_result finally: # After the stream is consumed, get the final result try: outputs = _serialize_output(stream_result) span.set_outputs(outputs) if usage_dict := _parse_usage(stream_result): span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage_dict) except Exception as e: _logger.debug(f"Failed to set streaming outputs: {e}") return _wrapper() # Wrapper that captures span outputs after stream is consumed. # This is necessary because run_stream_sync is NOT a context manager # (unlike run_stream which is @asynccontextmanager). We must intercept # iterator completion to know when streaming finishes. class _StreamedRunResultSyncWrapper: def __init__(self, result, span): self._result = result self._span = span self._finalized = False def _use_span_context(self): return with_active_span(self._span) def _finalize(self): if self._finalized: return self._finalized = True # End child spans that haven't been ended yet. # This is necessary because pydantic_ai's run_stream_sync uses an async generator # that pauses mid-execution, causing async context managers (and their spans) to # never properly exit. We manually end these spans before ending the root span. 
self._end_unfinished_child_spans() try: self._span.set_outputs(_serialize_output(self._result)) if usage_dict := _parse_usage(self._result): self._span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage_dict) except Exception as e: _logger.debug(f"Failed to set streaming outputs: {e}") finally: self._span.end() def _end_unfinished_child_spans(self): from mlflow.tracing.trace_manager import InMemoryTraceManager manager = InMemoryTraceManager.get_instance() if manager is None: return trace_id = self._span.request_id root_span_id = self._span.span_id with manager.get_trace(trace_id) as trace: if not trace: return # Find and end all unfinished child spans (direct children of our root span) for span_id, span in trace.span_dict.items(): if span_id == root_span_id: continue # Only end spans that are direct children of our root span if span.parent_id == root_span_id and span._span.end_time is None: try: span.end() except Exception as e: _logger.debug(f"Failed to end child span {span.name}: {e}") def _wrap_iterator(self, iterator_func, **kwargs): with self._use_span_context(): try: yield from iterator_func(**kwargs) finally: self._finalize() def stream_text(self, **kwargs): return self._wrap_iterator(self._result.stream_text, **kwargs) def stream_output(self, **kwargs): return self._wrap_iterator(self._result.stream_output, **kwargs) def stream_responses(self, **kwargs): return self._wrap_iterator(self._result.stream_responses, **kwargs) def get_output(self, **kwargs): with self._use_span_context(): try: return self._result.get_output(**kwargs) finally: self._finalize() def __getattr__(self, name): return getattr(self._result, name) def patched_sync_stream_call(original, self, *args, **kwargs): cfg = AutoLoggingConfig.init(flavor_name=mlflow.pydantic_ai.FLAVOR_NAME) if not cfg.log_traces: return original(self, *args, **kwargs) fullname = f"{self.__class__.__name__}.{original.__name__}" span_type = _get_span_type(self) # Use start_span_no_context (not `with start_span()`) 
because the span must remain # open after this function returns. The span ends later when the user finishes # iterating through the stream (handled by _StreamedRunResultSyncWrapper._finalize). span = mlflow.start_span_no_context(name=fullname, span_type=span_type) span.set_inputs(_construct_full_inputs(original, self, *args, **kwargs)) _set_span_attributes(span, self) try: # Use use_span to set this span as the active context so child spans # (e.g., LLM calls via InstrumentedModel) are properly parented. # end_on_exit=False ensures we control when the span ends (in _finalize). # Also set _in_sync_stream_context to prevent patched_async_stream_call # from creating another Agent.run_stream span (it would never end due to # pydantic_ai's async generator implementation). token = _in_sync_stream_context.set(True) try: with with_active_span(span): result = original(self, *args, **kwargs) finally: _in_sync_stream_context.reset(token) return _StreamedRunResultSyncWrapper(result, span) except Exception: span.end(status="ERROR") raise def _get_span_type(instance) -> str: try: from pydantic_ai import Agent, Tool from pydantic_ai.mcp import MCPServer from pydantic_ai.models.instrumented import InstrumentedModel except ImportError: return SpanType.UNKNOWN if isinstance(instance, InstrumentedModel): return SpanType.LLM if isinstance(instance, Agent): return SpanType.AGENT if isinstance(instance, Tool): return SpanType.TOOL if isinstance(instance, MCPServer): return SpanType.TOOL try: from pydantic_ai._tool_manager import ToolManager if isinstance(instance, ToolManager): return SpanType.TOOL except ImportError: pass return SpanType.UNKNOWN def _construct_full_inputs(func, *args, **kwargs) -> dict[str, Any]: try: sig = inspect.signature(func) bound = sig.bind_partial(*args, **kwargs).arguments bound.pop("self", None) bound.pop("deps", None) return { k: (v.__dict__ if hasattr(v, "__dict__") else v) for k, v in bound.items() if v is not None } except (ValueError, TypeError): return 
kwargs def _serialize_output(result: Any) -> Any: if result is None: return None if hasattr(result, "new_messages") and callable(result.new_messages): try: new_messages = result.new_messages() serialized_messages = [asdict(msg) for msg in new_messages] try: serialized_result = asdict(result) except Exception: # We can't use asdict for StreamedRunResult because its async generator serialized_result = dict(result.__dict__) if hasattr(result, "__dict__") else {} serialized_result["_new_messages_serialized"] = serialized_messages return serialized_result except Exception as e: _logger.debug(f"Failed to serialize new_messages: {e}") return result.__dict__ if hasattr(result, "__dict__") else result def _get_agent_attributes(instance): attrs = {SpanAttributeKey.MESSAGE_FORMAT: "pydantic_ai"} attrs.update(_extract_safe_attributes(instance)) if hasattr(instance, "tools"): try: if tools_value := _parse_tools(instance.tools): attrs["tools"] = tools_value except Exception: pass return attrs def _get_model_attributes(instance): attrs = {SpanAttributeKey.MESSAGE_FORMAT: "pydantic_ai"} attrs.update(_extract_safe_attributes(instance)) return attrs def _get_tool_attributes(instance): return _extract_safe_attributes(instance) def _get_mcp_server_attributes(instance): attrs = _extract_safe_attributes(instance) if hasattr(instance, "tools"): try: if tools_value := _parse_tools(instance.tools): attrs["tools"] = tools_value except Exception: pass return attrs def _parse_tools(tools): return [ {"type": "function", "function": data} for tool in tools if (data := tool.model_dumps(exclude_none=True)) ] def _parse_usage(result: Any) -> dict[str, int] | None: try: if isinstance(result, tuple) and len(result) == 2: usage = result[1] else: usage_attr = getattr(result, "usage", None) if usage_attr is None: return None # Handle both property (RunResult) and method (StreamedRunResult) # StreamedRunResult has .usage() as a method usage = usage_attr() if callable(usage_attr) else usage_attr if usage 
is None: return None # input_tokens/output_tokens are the current field names; request_tokens/ # response_tokens are deprecated aliases kept for backward compatibility. input_tokens = getattr(usage, "input_tokens", None) if input_tokens is None: input_tokens = getattr(usage, "request_tokens", 0) output_tokens = getattr(usage, "output_tokens", None) if output_tokens is None: output_tokens = getattr(usage, "response_tokens", 0) total_tokens = getattr(usage, "total_tokens") if total_tokens is None: total_tokens = input_tokens + output_tokens return { TokenUsageKey.INPUT_TOKENS: input_tokens, TokenUsageKey.OUTPUT_TOKENS: output_tokens, TokenUsageKey.TOTAL_TOKENS: total_tokens, } except Exception as e: _logger.debug(f"Failed to parse token usage from output: {e}") return None
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/pydantic_ai/autolog.py", "license": "Apache License 2.0", "lines": 384, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/pydantic_ai/test_pydanticai_fluent_tracing.py
import importlib.metadata from contextlib import asynccontextmanager from unittest.mock import patch import pytest from packaging.version import Version from pydantic_ai import Agent, RunContext from pydantic_ai.messages import ModelResponse, TextPart, ToolCallPart from pydantic_ai.models.instrumented import InstrumentedModel from pydantic_ai.usage import Usage import mlflow import mlflow.pydantic_ai # ensure the integration module is importable from mlflow.entities import SpanType from mlflow.tracing.constant import SpanAttributeKey from tests.tracing.helper import get_traces _FINAL_ANSWER_WITHOUT_TOOL = "Paris" _FINAL_ANSWER_WITH_TOOL = "winner" PYDANTIC_AI_VERSION = Version(importlib.metadata.version("pydantic_ai")) # Usage was deprecated in favor of RequestUsage in 0.7.3 IS_USAGE_DEPRECATED = PYDANTIC_AI_VERSION >= Version("0.7.3") # run_stream_sync was added in pydantic-ai 1.10.0 HAS_RUN_STREAM_SYNC = hasattr(Agent, "run_stream_sync") # Streaming tests require pydantic-ai >= 1.0.0 due to API changes HAS_STABLE_STREAMING_API = PYDANTIC_AI_VERSION >= Version("1.0.0") # In pydantic-ai >= 1.63.0, _agent_graph calls execute_tool_call directly instead of handle_call. # _tool_manager module doesn't exist in older versions (e.g. 0.2.x). 
try: from pydantic_ai._tool_manager import ToolManager as _ToolManager TOOL_MANAGER_SPAN_NAME = ( "ToolManager.execute_tool_call" if hasattr(_ToolManager, "execute_tool_call") else "ToolManager.handle_call" ) except ImportError: TOOL_MANAGER_SPAN_NAME = "ToolManager.handle_call" def _make_dummy_response_without_tool(): if IS_USAGE_DEPRECATED: from pydantic_ai.usage import RequestUsage parts = [TextPart(content=_FINAL_ANSWER_WITHOUT_TOOL)] resp = ModelResponse(parts=parts) if IS_USAGE_DEPRECATED: usage = RequestUsage(input_tokens=1, output_tokens=1) else: usage = Usage(requests=1, request_tokens=1, response_tokens=1, total_tokens=2) if PYDANTIC_AI_VERSION >= Version("0.2.0"): return ModelResponse(parts=parts, usage=usage) else: return resp, usage def _make_dummy_response_with_tool(): if IS_USAGE_DEPRECATED: from pydantic_ai.usage import RequestUsage call_parts = [ToolCallPart(tool_name="roulette_wheel", args={"square": 18})] final_parts = [TextPart(content=_FINAL_ANSWER_WITH_TOOL)] if IS_USAGE_DEPRECATED: usage_call = RequestUsage(input_tokens=10, output_tokens=20) usage_final = RequestUsage(input_tokens=100, output_tokens=200) else: usage_call = Usage(requests=0, request_tokens=10, response_tokens=20, total_tokens=30) usage_final = Usage(requests=1, request_tokens=100, response_tokens=200, total_tokens=300) if PYDANTIC_AI_VERSION >= Version("0.2.0"): call_resp = ModelResponse(parts=call_parts, usage=usage_call) final_resp = ModelResponse(parts=final_parts, usage=usage_final) yield call_resp yield final_resp else: call_resp = ModelResponse(parts=call_parts) final_resp = ModelResponse(parts=final_parts) yield call_resp, usage_call yield final_resp, usage_final def _make_streaming_response_without_tool(input_tokens=10, output_tokens=5): if IS_USAGE_DEPRECATED: from pydantic_ai.usage import RequestUsage usage = RequestUsage(input_tokens=input_tokens, output_tokens=output_tokens) else: usage = Usage( requests=1, request_tokens=input_tokens, 
response_tokens=output_tokens, total_tokens=input_tokens + output_tokens, ) return ModelResponse(parts=[TextPart(content=_FINAL_ANSWER_WITHOUT_TOOL)], usage=usage), usage def _make_streaming_response_with_tool(): if IS_USAGE_DEPRECATED: from pydantic_ai.usage import RequestUsage usage_call = RequestUsage(input_tokens=10, output_tokens=20) usage_final = RequestUsage(input_tokens=100, output_tokens=200) else: usage_call = Usage(requests=0, request_tokens=10, response_tokens=20, total_tokens=30) usage_final = Usage(requests=1, request_tokens=100, response_tokens=200, total_tokens=300) call_resp = ModelResponse( parts=[ToolCallPart(tool_name="roulette_wheel", args={"square": 18})], usage=usage_call, ) final_resp = ModelResponse( parts=[TextPart(content=_FINAL_ANSWER_WITH_TOOL)], usage=usage_final, ) return [call_resp, final_resp] class MockStreamedResponse: def __init__(self, response, usage): self._response = response self._usage = usage self.model_name = "openai:gpt-4o" self.timestamp = None def usage(self): return self._usage def get(self): return self._response async def __aiter__(self): for part in self._response.parts: if hasattr(part, "content"): for char in part.content: yield char else: yield "" @pytest.fixture(autouse=True) def clear_autolog_state(): from mlflow.utils.autologging_utils import AUTOLOGGING_INTEGRATIONS for key in AUTOLOGGING_INTEGRATIONS.keys(): AUTOLOGGING_INTEGRATIONS[key].clear() mlflow.utils.import_hooks._post_import_hooks = {} @pytest.fixture def simple_agent(): return Agent( "openai:gpt-4o", system_prompt="Tell me the capital of {{input}}.", instrument=True, ) @pytest.fixture def agent_with_tool(): roulette_agent = Agent( "openai:gpt-4o", system_prompt=( "Use the roulette_wheel function to see if the " "customer has won based on the number they provide." 
), instrument=True, deps_type=int, output_type=str, ) @roulette_agent.tool async def roulette_wheel(ctx: RunContext[int], square: int) -> str: """check if the square is a winner""" return "winner" if square == ctx.deps else "loser" return roulette_agent def test_agent_run_sync_enable_fluent_disable_autolog(simple_agent): dummy = _make_dummy_response_without_tool() async def request(self, *args, **kwargs): return dummy with patch.object(InstrumentedModel, "request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = simple_agent.run_sync("France") assert result.output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert spans[0].name == "Agent.run_sync" assert spans[0].span_type == SpanType.AGENT assert spans[1].name == "Agent.run" assert spans[1].span_type == SpanType.AGENT span2 = spans[2] assert span2.name == "InstrumentedModel.request" assert span2.span_type == SpanType.LLM assert span2.parent_id == spans[1].span_id with patch.object(InstrumentedModel, "request", new=request): mlflow.pydantic_ai.autolog(disable=True) simple_agent.run_sync("France") assert len(get_traces()) == 1 @pytest.mark.asyncio async def test_agent_run_enable_fluent_disable_autolog(simple_agent): dummy = _make_dummy_response_without_tool() async def request(self, *args, **kwargs): return dummy with patch.object(InstrumentedModel, "request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = await simple_agent.run("France") assert result.output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert spans[0].name == "Agent.run" assert spans[0].span_type == SpanType.AGENT span1 = spans[1] assert span1.name == "InstrumentedModel.request" assert span1.span_type == SpanType.LLM assert span1.parent_id == spans[0].span_id def test_agent_run_sync_enable_disable_fluent_autolog_with_tool(agent_with_tool): sequence = _make_dummy_response_with_tool() async 
def request(self, *args, **kwargs): return next(sequence) with patch.object(InstrumentedModel, "request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = agent_with_tool.run_sync("Put my money on square eighteen", deps=18) assert result.output == _FINAL_ANSWER_WITH_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert len(spans) == 5 assert spans[0].name == "Agent.run_sync" assert spans[0].span_type == SpanType.AGENT assert spans[1].name == "Agent.run" assert spans[1].span_type == SpanType.AGENT span2 = spans[2] assert span2.name == "InstrumentedModel.request" assert span2.span_type == SpanType.LLM assert span2.parent_id == spans[1].span_id span3 = spans[3] assert span3.span_type == SpanType.TOOL assert span3.parent_id == spans[1].span_id span4 = spans[4] assert span4.name == "InstrumentedModel.request" assert span4.span_type == SpanType.LLM assert span4.parent_id == spans[1].span_id @pytest.mark.asyncio async def test_agent_run_enable_disable_fluent_autolog_with_tool(agent_with_tool): sequence = _make_dummy_response_with_tool() async def request(self, *args, **kwargs): return next(sequence) with patch.object(InstrumentedModel, "request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = await agent_with_tool.run("Put my money on square eighteen", deps=18) assert result.output == _FINAL_ANSWER_WITH_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert len(spans) == 4 assert spans[0].name == "Agent.run" assert spans[0].span_type == SpanType.AGENT span1 = spans[1] assert span1.name == "InstrumentedModel.request" assert span1.span_type == SpanType.LLM assert span1.parent_id == spans[0].span_id span2 = spans[2] assert span2.span_type == SpanType.TOOL assert span2.parent_id == spans[0].span_id span3 = spans[3] assert span3.name == "InstrumentedModel.request" assert span3.span_type == SpanType.LLM assert span3.parent_id == spans[0].span_id @pytest.mark.skipif( not 
HAS_STABLE_STREAMING_API, reason="Streaming API stabilized in pydantic-ai 1.0.0" ) @pytest.mark.asyncio async def test_agent_run_stream_creates_trace(simple_agent): response, usage = _make_streaming_response_without_tool(input_tokens=10, output_tokens=5) @asynccontextmanager async def request_stream(self, *args, **kwargs): yield MockStreamedResponse(response, usage) with patch.object(InstrumentedModel, "request_stream", new=request_stream): mlflow.pydantic_ai.autolog(log_traces=True) async with simple_agent.run_stream("France") as result: output = await result.get_output() assert output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert len(spans) == 2 assert spans[0].name == "Agent.run_stream" assert spans[0].span_type == SpanType.AGENT assert spans[1].name == "InstrumentedModel.request_stream" assert spans[1].span_type == SpanType.LLM assert spans[1].parent_id == spans[0].span_id usage_attr = spans[0].attributes.get(SpanAttributeKey.CHAT_USAGE) assert usage_attr is not None assert usage_attr.get("input_tokens") == 10 assert usage_attr.get("output_tokens") == 5 assert usage_attr.get("total_tokens") == 15 @pytest.mark.skipif( not HAS_STABLE_STREAMING_API, reason="Streaming API stabilized in pydantic-ai 1.0.0" ) @pytest.mark.skipif(not HAS_RUN_STREAM_SYNC, reason="run_stream_sync added in pydantic-ai 1.10.0") def test_agent_run_stream_sync_creates_trace(simple_agent): response, usage = _make_streaming_response_without_tool(input_tokens=10, output_tokens=5) @asynccontextmanager async def request_stream(self, *args, **kwargs): yield MockStreamedResponse(response, usage) with patch.object(InstrumentedModel, "request_stream", new=request_stream): mlflow.pydantic_ai.autolog(log_traces=True) result = simple_agent.run_stream_sync("France") output = "" for text in result.stream_text(): output += text assert output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = 
traces[0].data.spans assert len(spans) == 2 assert spans[0].name == "Agent.run_stream_sync" assert spans[0].span_type == SpanType.AGENT assert spans[0].inputs is not None assert "user_prompt" in spans[0].inputs assert spans[0].outputs is not None assert spans[1].name == "InstrumentedModel.request_stream" assert spans[1].span_type == SpanType.LLM assert spans[1].parent_id == spans[0].span_id usage_attr = spans[0].attributes.get(SpanAttributeKey.CHAT_USAGE) assert usage_attr is not None assert usage_attr.get("input_tokens") == 10 assert usage_attr.get("output_tokens") == 5 assert usage_attr.get("total_tokens") == 15 @pytest.mark.skipif( not HAS_STABLE_STREAMING_API, reason="Streaming API stabilized in pydantic-ai 1.0.0" ) @pytest.mark.asyncio async def test_agent_run_stream_with_tool(agent_with_tool): sequence = _make_streaming_response_with_tool() @asynccontextmanager async def request_stream(self, *args, **kwargs): if sequence: resp = sequence.pop(0) yield MockStreamedResponse(resp, resp.usage) else: resp = sequence[-1] yield MockStreamedResponse(resp, resp.usage) with patch.object(InstrumentedModel, "request_stream", new=request_stream): mlflow.pydantic_ai.autolog(log_traces=True) async with agent_with_tool.run_stream("Put my money on square eighteen", deps=18) as result: output = await result.get_output() assert output == _FINAL_ANSWER_WITH_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert len(spans) == 4 assert spans[0].name == "Agent.run_stream" assert spans[0].span_type == SpanType.AGENT assert spans[1].name == "InstrumentedModel.request_stream" assert spans[1].span_type == SpanType.LLM assert spans[1].parent_id == spans[0].span_id assert spans[2].span_type == SpanType.TOOL assert spans[2].name == TOOL_MANAGER_SPAN_NAME assert spans[2].parent_id == spans[0].span_id assert spans[3].name == "InstrumentedModel.request_stream" assert spans[3].span_type == SpanType.LLM assert spans[3].parent_id == spans[0].span_id 
@pytest.mark.skipif(
    not HAS_STABLE_STREAMING_API, reason="Streaming API stabilized in pydantic-ai 1.0.0"
)
@pytest.mark.skipif(not HAS_RUN_STREAM_SYNC, reason="run_stream_sync added in pydantic-ai 1.10.0")
def test_agent_run_stream_sync_with_tool(agent_with_tool):
    """Verify that a synchronous streamed run involving a tool call produces one trace
    with the expected span hierarchy: agent root span -> LLM request (tool call) ->
    tool-manager span -> LLM request (final answer).
    """
    # Two-element script: first streamed response asks for the tool, second gives
    # the final answer. The mock pops them in order.
    sequence = _make_streaming_response_with_tool()

    @asynccontextmanager
    async def request_stream(self, *args, **kwargs):
        # Replay the scripted responses; once exhausted, keep yielding the last one.
        # NOTE(review): the `else` branch indexes sequence[-1] on an empty list and
        # would raise IndexError if ever reached — presumably unreachable in this
        # test because exactly two requests are made; confirm if reused elsewhere.
        if sequence:
            resp = sequence.pop(0)
            yield MockStreamedResponse(resp, resp.usage)
        else:
            resp = sequence[-1]
            yield MockStreamedResponse(resp, resp.usage)

    # Patch the model's streaming entry point so no real network call happens,
    # then enable MLflow autologging and drive the sync streaming API.
    with patch.object(InstrumentedModel, "request_stream", new=request_stream):
        mlflow.pydantic_ai.autolog(log_traces=True)
        result = agent_with_tool.run_stream_sync("Put my money on square eighteen", deps=18)
        output = ""
        for text in result.stream_text():
            output += text
    assert output == _FINAL_ANSWER_WITH_TOOL

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    # Root agent span + 2 LLM spans + 1 tool-manager span.
    assert len(spans) == 4
    assert spans[0].name == "Agent.run_stream_sync"
    assert spans[0].span_type == SpanType.AGENT
    # The root span must capture the user prompt as an input.
    assert spans[0].inputs is not None
    assert "user_prompt" in spans[0].inputs
    # First LLM call (the one that requests the tool) is a child of the root.
    assert spans[1].name == "InstrumentedModel.request_stream"
    assert spans[1].span_type == SpanType.LLM
    assert spans[1].parent_id == spans[0].span_id
    # Tool execution is recorded under the shared tool-manager span name.
    assert spans[2].span_type == SpanType.TOOL
    assert spans[2].name == TOOL_MANAGER_SPAN_NAME
    assert spans[2].parent_id == spans[0].span_id
    # Second LLM call produces the final answer; also a direct child of the root.
    assert spans[3].name == "InstrumentedModel.request_stream"
    assert spans[3].span_type == SpanType.LLM
    assert spans[3].parent_id == spans[0].span_id
{ "repo_id": "mlflow/mlflow", "file_path": "tests/pydantic_ai/test_pydanticai_fluent_tracing.py", "license": "Apache License 2.0", "lines": 371, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/pydantic_ai/test_pydanticai_mcp_tracing.py
from unittest.mock import patch

import pytest
from pydantic_ai.mcp import MCPServerStdio

import mlflow
from mlflow.entities.trace import SpanType

from tests.tracing.helper import get_traces


@pytest.mark.asyncio
async def test_mcp_server_list_tools_autolog():
    """MCPServer.list_tools should emit exactly one TOOL span when autologging is on,
    and emit nothing once autologging is disabled."""
    tools_list = [
        {"name": "tool1", "description": "Tool 1 description"},
        {"name": "tool2", "description": "Tool 2 description"},
    ]

    # Stub the MCP transport: return the canned tool list without spawning a process.
    async def list_tools(self, *args, **kwargs):
        return tools_list

    with patch("pydantic_ai.mcp.MCPServer.list_tools", new=list_tools):
        mlflow.pydantic_ai.autolog(log_traces=True)
        # The "deno ... mcp-run-python" command is never executed because
        # list_tools is patched; it only needs to be a syntactically valid server.
        server = MCPServerStdio(
            "deno",
            args=[
                "run",
                "-N",
                "-R=node_modules",
                "-W=node_modules",
                "--node-modules-dir=auto",
                "jsr:@pydantic/mcp-run-python",
                "stdio",
            ],
        )
        result = await server.list_tools()
        assert result == tools_list

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert len(spans) == 1
    span = spans[0]
    # The span is named after the concrete subclass, not the patched base class.
    assert span.name == "MCPServerStdio.list_tools"
    assert span.span_type == SpanType.TOOL
    # The full tool list is recorded as the span's outputs.
    outputs = span.outputs
    assert len(outputs) == 2
    assert outputs == tools_list

    # After disabling autologging, a second call must not add a new trace.
    with patch("pydantic_ai.mcp.MCPServer.list_tools", new=list_tools):
        mlflow.pydantic_ai.autolog(disable=True)
        await server.list_tools()
    assert len(get_traces()) == 1


@pytest.mark.asyncio
async def test_mcp_server_call_tool_autolog():
    """MCPServer.call_tool should emit one TOOL span that captures the tool name,
    arguments, and result; disabling autologging must suppress further traces."""
    tool_name = "calculator"
    tool_args = {"operation": "add", "a": 5, "b": 7}
    tool_result = {"result": 12}

    # Stub the transport and simultaneously verify the arguments that reach it.
    async def call_tool(self, name, args, *remaining_args, **kwargs):
        assert name == tool_name
        assert args == tool_args
        return tool_result

    with patch("pydantic_ai.mcp.MCPServer.call_tool", new=call_tool):
        mlflow.pydantic_ai.autolog(log_traces=True)
        # Never actually launched; call_tool is patched above.
        server = MCPServerStdio(
            "deno",
            args=[
                "run",
                "-N",
                "-R=node_modules",
                "-W=node_modules",
                "--node-modules-dir=auto",
                "jsr:@pydantic/mcp-run-python",
                "stdio",
            ],
        )
        result = await server.call_tool(tool_name, tool_args)
        assert result == tool_result

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert len(spans) == 1
    call_tool_span = spans[0]
    assert call_tool_span is not None
    assert call_tool_span.name == "MCPServerStdio.call_tool"
    assert call_tool_span.span_type == SpanType.TOOL
    # Inputs must record both the tool name and its argument mapping.
    inputs = call_tool_span.inputs
    assert len(inputs) == 2
    assert inputs["name"] == tool_name
    assert inputs["args"] == tool_args
    # Outputs must be exactly the value the tool returned.
    outputs = call_tool_span.outputs
    assert len(outputs) == 1
    assert outputs == tool_result

    # With autologging disabled, the call succeeds but no new trace is recorded.
    with patch("pydantic_ai.mcp.MCPServer.call_tool", new=call_tool):
        mlflow.pydantic_ai.autolog(disable=True)
        await server.call_tool(tool_name, tool_args)
    assert len(get_traces()) == 1
{ "repo_id": "mlflow/mlflow", "file_path": "tests/pydantic_ai/test_pydanticai_mcp_tracing.py", "license": "Apache License 2.0", "lines": 88, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/pydantic_ai/test_pydanticai_tracing.py
import importlib.metadata from unittest.mock import patch import pytest from packaging.version import Version from pydantic_ai import Agent, RunContext from pydantic_ai.messages import ModelResponse, TextPart, ToolCallPart from pydantic_ai.usage import Usage import mlflow import mlflow.pydantic_ai # ensure the integration module is importable from mlflow.entities import SpanType from mlflow.pydantic_ai.autolog import ( _get_agent_attributes, _get_mcp_server_attributes, _get_model_attributes, _get_tool_attributes, ) from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey from mlflow.version import IS_TRACING_SDK_ONLY from tests.tracing.helper import get_traces PYDANTIC_AI_VERSION = Version(importlib.metadata.version("pydantic_ai")) # Usage was deprecated in favor of RequestUsage in 0.7.3 IS_USAGE_DEPRECATED = PYDANTIC_AI_VERSION >= Version("0.7.3") _FINAL_ANSWER_WITHOUT_TOOL = "Paris" _FINAL_ANSWER_WITH_TOOL = "winner" def _make_dummy_response_without_tool(): # Usage was deprecated in favor of RequestUsage in 0.7.3 if IS_USAGE_DEPRECATED: from pydantic_ai.usage import RequestUsage parts = [TextPart(content=_FINAL_ANSWER_WITHOUT_TOOL)] if IS_USAGE_DEPRECATED: usage = RequestUsage(input_tokens=1, output_tokens=1) else: usage = Usage(requests=1, request_tokens=1, response_tokens=1, total_tokens=2) if PYDANTIC_AI_VERSION >= Version("0.2.0"): return ModelResponse(parts=parts, usage=usage) else: resp = ModelResponse(parts=parts) return resp, usage def _make_dummy_response_with_tool(): # Usage was deprecated in favor of RequestUsage in 0.7.3 if IS_USAGE_DEPRECATED: from pydantic_ai.usage import RequestUsage call_parts = [ToolCallPart(tool_name="roulette_wheel", args={"square": 18})] final_parts = [TextPart(content=_FINAL_ANSWER_WITH_TOOL)] if IS_USAGE_DEPRECATED: usage_call = RequestUsage(input_tokens=10, output_tokens=20) usage_final = RequestUsage(input_tokens=100, output_tokens=200) else: usage_call = Usage(requests=0, request_tokens=10, response_tokens=20, 
total_tokens=30) usage_final = Usage(requests=1, request_tokens=100, response_tokens=200, total_tokens=300) if PYDANTIC_AI_VERSION >= Version("0.2.0"): call_resp = ModelResponse(parts=call_parts, usage=usage_call) final_resp = ModelResponse(parts=final_parts, usage=usage_final) sequence = [ call_resp, final_resp, ] return sequence, final_resp else: call_resp = ModelResponse(parts=call_parts) final_resp = ModelResponse(parts=final_parts) sequence = [ (call_resp, usage_call), (final_resp, usage_final), ] return sequence, (final_resp, usage_final) @pytest.fixture(autouse=True) def clear_autolog_state(): from mlflow.utils.autologging_utils import AUTOLOGGING_INTEGRATIONS for key in AUTOLOGGING_INTEGRATIONS.keys(): AUTOLOGGING_INTEGRATIONS[key].clear() mlflow.utils.import_hooks._post_import_hooks = {} @pytest.fixture def simple_agent(): return Agent( "openai:gpt-4o", system_prompt="Tell me the capital of {{input}}.", instrument=True, ) @pytest.fixture def agent_with_tool(): roulette_agent = Agent( "openai:gpt-4o", system_prompt=( "Use the roulette_wheel function to see if the " "customer has won based on the number they provide." 
), instrument=True, deps_type=int, output_type=str, ) @roulette_agent.tool async def roulette_wheel(ctx: RunContext[int], square: int) -> str: """check if the square is a winner""" return "winner" if square == ctx.deps else "loser" return roulette_agent def test_agent_run_sync_enable_disable_autolog(simple_agent, mock_litellm_cost): dummy = _make_dummy_response_without_tool() async def request(self, *args, **kwargs): return dummy with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = simple_agent.run_sync("France") assert result.output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert spans[0].name == "Agent.run_sync" assert spans[0].span_type == SpanType.AGENT assert spans[0].get_attribute(SpanAttributeKey.MESSAGE_FORMAT) == "pydantic_ai" outputs_0 = spans[0].get_attribute(SpanAttributeKey.OUTPUTS) assert outputs_0 is not None assert "_new_messages_serialized" in outputs_0 assert len(outputs_0["_new_messages_serialized"]) > 0 assert spans[1].name == "Agent.run" assert spans[1].span_type == SpanType.AGENT assert spans[1].get_attribute(SpanAttributeKey.MESSAGE_FORMAT) == "pydantic_ai" outputs_1 = spans[1].get_attribute(SpanAttributeKey.OUTPUTS) assert outputs_1 is not None assert "_new_messages_serialized" in outputs_1 assert len(outputs_1["_new_messages_serialized"]) > 0 span2 = spans[2] assert span2.name == "InstrumentedModel.request" assert span2.span_type == SpanType.LLM assert span2.parent_id == spans[1].span_id assert span2.get_attribute(SpanAttributeKey.MESSAGE_FORMAT) == "pydantic_ai" assert span2.get_attribute(SpanAttributeKey.CHAT_USAGE) == { TokenUsageKey.INPUT_TOKENS: 1, TokenUsageKey.OUTPUT_TOKENS: 1, TokenUsageKey.TOTAL_TOKENS: 2, } assert span2.model_name == "gpt-4o" if not IS_TRACING_SDK_ONLY: # Verify cost is calculated (1 input token * 1.0 + 1 output token * 2.0) assert span2.llm_cost == { "input_cost": 
1.0, "output_cost": 2.0, "total_cost": 3.0, } assert traces[0].info.token_usage == { "input_tokens": 1, "output_tokens": 1, "total_tokens": 2, } with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(disable=True) simple_agent.run_sync("France") assert len(get_traces()) == 1 @pytest.mark.asyncio async def test_agent_run_enable_disable_autolog(simple_agent, mock_litellm_cost): dummy = _make_dummy_response_without_tool() async def request(self, *args, **kwargs): return dummy with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = await simple_agent.run("France") assert result.output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert spans[0].name == "Agent.run" assert spans[0].span_type == SpanType.AGENT span1 = spans[1] assert span1.name == "InstrumentedModel.request" assert span1.span_type == SpanType.LLM assert span1.parent_id == spans[0].span_id assert span1.get_attribute(SpanAttributeKey.CHAT_USAGE) == { TokenUsageKey.INPUT_TOKENS: 1, TokenUsageKey.OUTPUT_TOKENS: 1, TokenUsageKey.TOTAL_TOKENS: 2, } assert span1.model_name == "gpt-4o" if not IS_TRACING_SDK_ONLY: assert span1.llm_cost == { "input_cost": 1.0, "output_cost": 2.0, "total_cost": 3.0, } assert traces[0].info.token_usage == { "input_tokens": 1, "output_tokens": 1, "total_tokens": 2, } def test_agent_run_sync_enable_disable_autolog_with_tool(agent_with_tool, mock_litellm_cost): sequence, resp = _make_dummy_response_with_tool() async def request(self, *args, **kwargs): if sequence: return sequence.pop(0) return resp with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = agent_with_tool.run_sync("Put my money on square eighteen", deps=18) assert result.output == _FINAL_ANSWER_WITH_TOOL traces = get_traces() assert len(traces) 
== 1 spans = traces[0].data.spans assert len(spans) == 5 assert spans[0].name == "Agent.run_sync" assert spans[0].span_type == SpanType.AGENT assert spans[1].name == "Agent.run" assert spans[1].span_type == SpanType.AGENT span2 = spans[2] assert span2.name == "InstrumentedModel.request" assert span2.span_type == SpanType.LLM assert span2.parent_id == spans[1].span_id assert span2.model_name == "gpt-4o" if not IS_TRACING_SDK_ONLY: assert span2.llm_cost == { "input_cost": 10.0, "output_cost": 40.0, "total_cost": 50.0, } span3 = spans[3] assert span3.span_type == SpanType.TOOL assert span3.parent_id == spans[1].span_id span4 = spans[4] assert span4.name == "InstrumentedModel.request" assert span4.span_type == SpanType.LLM assert span4.parent_id == spans[1].span_id assert span4.model_name == "gpt-4o" if not IS_TRACING_SDK_ONLY: assert span4.llm_cost == { "input_cost": 100.0, "output_cost": 400.0, "total_cost": 500.0, } assert span2.get_attribute(SpanAttributeKey.CHAT_USAGE) == { TokenUsageKey.INPUT_TOKENS: 10, TokenUsageKey.OUTPUT_TOKENS: 20, TokenUsageKey.TOTAL_TOKENS: 30, } assert span4.get_attribute(SpanAttributeKey.CHAT_USAGE) == { TokenUsageKey.INPUT_TOKENS: 100, TokenUsageKey.OUTPUT_TOKENS: 200, TokenUsageKey.TOTAL_TOKENS: 300, } assert traces[0].info.token_usage == { "input_tokens": 110, "output_tokens": 220, "total_tokens": 330, } @pytest.mark.asyncio async def test_agent_run_enable_disable_autolog_with_tool(agent_with_tool, mock_litellm_cost): sequence, resp = _make_dummy_response_with_tool() async def request(self, *args, **kwargs): if sequence: return sequence.pop(0) return resp with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) result = await agent_with_tool.run("Put my money on square eighteen", deps=18) assert result.output == _FINAL_ANSWER_WITH_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans assert len(spans) == 4 assert spans[0].name == 
"Agent.run" assert spans[0].span_type == SpanType.AGENT span1 = spans[1] assert span1.name == "InstrumentedModel.request" assert span1.span_type == SpanType.LLM assert span1.parent_id == spans[0].span_id assert span1.model_name == "gpt-4o" span2 = spans[2] assert span2.span_type == SpanType.TOOL assert span2.parent_id == spans[0].span_id span3 = spans[3] assert span3.name == "InstrumentedModel.request" assert span3.span_type == SpanType.LLM assert span3.parent_id == spans[0].span_id assert span3.model_name == "gpt-4o" assert span1.get_attribute(SpanAttributeKey.CHAT_USAGE) == { TokenUsageKey.INPUT_TOKENS: 10, TokenUsageKey.OUTPUT_TOKENS: 20, TokenUsageKey.TOTAL_TOKENS: 30, } if not IS_TRACING_SDK_ONLY: assert span1.llm_cost == { "input_cost": 10.0, "output_cost": 40.0, "total_cost": 50.0, } assert span3.get_attribute(SpanAttributeKey.CHAT_USAGE) == { TokenUsageKey.INPUT_TOKENS: 100, TokenUsageKey.OUTPUT_TOKENS: 200, TokenUsageKey.TOTAL_TOKENS: 300, } if not IS_TRACING_SDK_ONLY: assert span3.llm_cost == { "input_cost": 100.0, "output_cost": 400.0, "total_cost": 500.0, } assert traces[0].info.token_usage == { "input_tokens": 110, "output_tokens": 220, "total_tokens": 330, } def test_agent_run_sync_failure(simple_agent): with patch( "pydantic_ai.models.instrumented.InstrumentedModel.request", side_effect=ValueError("test error"), ): mlflow.pydantic_ai.autolog(log_traces=True) with pytest.raises(ValueError, match="test error"): simple_agent.run_sync("France") traces = get_traces() assert len(traces) == 1 assert traces[0].info.status == "ERROR" spans = traces[0].data.spans assert len(spans) == 3 assert spans[0].name == "Agent.run_sync" assert spans[0].span_type == SpanType.AGENT assert spans[1].name == "Agent.run" assert spans[1].span_type == SpanType.AGENT assert spans[2].name.startswith("InstrumentedModel.") assert spans[2].span_type == SpanType.LLM with patch( "pydantic_ai.models.instrumented.InstrumentedModel.request", side_effect=ValueError("test error"), ): 
mlflow.pydantic_ai.autolog(disable=True) with pytest.raises(ValueError, match="test error"): simple_agent.run_sync("France") traces = get_traces() assert len(traces) == 1 class _MockUnsafeClient: _state = "open" def __del__(self): if self._state == "open": pass @pytest.mark.parametrize( ("getter_func", "mock_attrs", "expected_attrs", "excluded_attrs"), [ ( _get_agent_attributes, {"name": "test-agent", "system_prompt": "helpful", "retries": 3, "output_type": str}, {"name": "test-agent", "system_prompt": "helpful", "retries": 3, "output_type": "str"}, ["_client", "provider", "_internal_state"], ), ( _get_model_attributes, {"model_name": "gpt-4", "name": "test-model"}, {"model_name": "gpt-4", "name": "test-model"}, ["client", "_client", "provider", "api_key", "callbacks"], ), ( _get_tool_attributes, {"name": "my_tool", "description": "helpful", "max_retries": 2}, {"name": "my_tool", "description": "helpful", "max_retries": 2}, ["_internal", "func"], ), ( _get_mcp_server_attributes, {"name": "my_server", "url": "http://localhost:8080"}, {"name": "my_server", "url": "http://localhost:8080"}, ["_client", "_session", "_internal"], ), ], ) def test_attribute_getter_excludes_private_attrs( getter_func, mock_attrs, expected_attrs, excluded_attrs ): class MockInstance: pass instance = MockInstance() for key, value in mock_attrs.items(): setattr(instance, key, value) for key in excluded_attrs: setattr(instance, key, _MockUnsafeClient()) attrs = getter_func(instance) for key, value in expected_attrs.items(): assert attrs[key] == value for key in excluded_attrs: assert key not in attrs def test_autolog_auto_enables_instrument(): mlflow.pydantic_ai.autolog(log_traces=True) agent = Agent("openai:gpt-4o", system_prompt="Test") assert agent.instrument is not None # Verify the user can still explicitly set instrument=False agent_no_instrument = Agent("openai:gpt-4o", system_prompt="Test", instrument=False) assert agent_no_instrument.instrument is None or 
agent_no_instrument.instrument is False def test_autolog_auto_instrument_captures_llm_spans(mock_litellm_cost): dummy = _make_dummy_response_without_tool() async def request(self, *args, **kwargs): return dummy # Mock must be set BEFORE autolog so autolog patches the mock with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) # Create agent WITHOUT explicitly setting instrument=True agent = Agent("openai:gpt-4o", system_prompt="Tell me the capital of {{input}}.") result = agent.run_sync("France") assert result.output == _FINAL_ANSWER_WITHOUT_TOOL traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans # Should have Agent.run_sync > Agent.run > InstrumentedModel.request span_names = [s.name for s in spans] assert "InstrumentedModel.request" in span_names llm_span = next(s for s in spans if s.name == "InstrumentedModel.request") assert llm_span.span_type == SpanType.LLM def test_autolog_does_not_capture_client_references(simple_agent): dummy = _make_dummy_response_without_tool() async def request(self, *args, **kwargs): return dummy with patch("pydantic_ai.models.instrumented.InstrumentedModel.request", new=request): mlflow.pydantic_ai.autolog(log_traces=True) simple_agent.run_sync("France") traces = get_traces() assert len(traces) == 1 spans = traces[0].data.spans for span in spans: attrs = span.attributes or {} for key in attrs: assert "client" not in key.lower() or key == "openai_client" assert "provider" not in key.lower() assert "_state" not in key.lower() assert "httpx" not in key.lower()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/pydantic_ai/test_pydanticai_tracing.py", "license": "Apache License 2.0", "lines": 418, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/processor/mlflow_v3.py
import logging

from opentelemetry.sdk.trace import Span as OTelSpan
from opentelemetry.sdk.trace.export import SpanExporter

from mlflow.entities.trace_info import TraceInfo
from mlflow.entities.trace_location import TraceLocation
from mlflow.entities.trace_state import TraceState
from mlflow.tracing.processor.base_mlflow import BaseMlflowSpanProcessor
from mlflow.tracing.utils import generate_trace_id_v3, get_experiment_id_for_trace

_logger = logging.getLogger(__name__)


class MlflowV3SpanProcessor(BaseMlflowSpanProcessor):
    """
    Defines custom hooks to be executed when a span is started or ended (before exporting).

    This processor is used for exporting traces to the MLflow Tracking Server using the
    V3 trace schema and API.

    The constructor is inherited unchanged from :class:`BaseMlflowSpanProcessor`
    (``span_exporter``, ``export_metrics``); the previous redundant ``__init__``
    override that merely forwarded both arguments has been removed.
    """

    def _start_trace(self, root_span: OTelSpan) -> TraceInfo:
        """
        Create a new TraceInfo object and register it with the in-memory trace manager.

        Called from ``on_start`` of the base class when the *root* span of a trace
        begins.

        Args:
            root_span: The OpenTelemetry span that starts the trace.

        Returns:
            The newly created ``TraceInfo`` for this trace.
        """
        experiment_id = get_experiment_id_for_trace(root_span)
        if experiment_id is None:
            # Not fatal here: the trace is still registered locally, but the backend
            # export may drop it without a destination experiment.
            _logger.debug(
                "Experiment ID is not set for trace. It may not be exported to MLflow backend."
            )

        trace_info = TraceInfo(
            trace_id=generate_trace_id_v3(root_span),
            trace_location=TraceLocation.from_experiment_id(experiment_id),
            # OTel start_time is in nanoseconds; TraceInfo expects milliseconds.
            request_time=root_span.start_time // 1_000_000,
            execution_duration=None,
            state=TraceState.IN_PROGRESS,
            trace_metadata=self._get_basic_trace_metadata(),
            tags=self._get_basic_trace_tags(root_span),
        )
        # Key the registration by the raw OTel trace id so child spans can find it.
        self._trace_manager.register_trace(root_span.context.trace_id, trace_info)

        return trace_info
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/processor/mlflow_v3.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/tracing/processor/test_mlflow_v3_processor.py
"""Tests for MlflowV3SpanProcessor's on_start / on_end hooks."""

import json
from unittest import mock

import mlflow.tracking.context.default_context
from mlflow.entities.span import LiveSpan
from mlflow.entities.trace_status import TraceStatus
from mlflow.environment_variables import MLFLOW_TRACKING_USERNAME
from mlflow.tracing.constant import (
    SpanAttributeKey,
    TraceMetadataKey,
)
from mlflow.tracing.processor.mlflow_v3 import MlflowV3SpanProcessor
from mlflow.tracing.trace_manager import InMemoryTraceManager
from mlflow.tracing.utils import encode_trace_id

from tests.tracing.helper import (
    create_mock_otel_span,
    create_test_trace_info,
    skip_when_testing_trace_sdk,
)


def test_on_start(monkeypatch):
    """A root span registers a new trace; a child span reuses its request_id."""
    monkeypatch.setattr(mlflow.tracking.context.default_context, "_get_source_name", lambda: "test")
    monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, "bob")

    # Root span should create a new trace on start
    trace_id = 12345
    span = create_mock_otel_span(trace_id=trace_id, span_id=1, parent_id=None, start_time=5_000_000)
    processor = MlflowV3SpanProcessor(span_exporter=mock.MagicMock(), export_metrics=False)
    processor.on_start(span)

    # V3 processor uses encoded Otel trace_id as request_id
    request_id = "tr-" + encode_trace_id(trace_id)
    assert len(request_id) == 35  # 3 for "tr-" prefix + 32 for encoded trace_id
    assert span.attributes.get(SpanAttributeKey.REQUEST_ID) == json.dumps(request_id)
    assert request_id in InMemoryTraceManager.get_instance()._traces

    # Child span should not create a new trace
    child_span = create_mock_otel_span(
        trace_id=trace_id, span_id=2, parent_id=1, start_time=8_000_000
    )
    processor.on_start(child_span)
    assert child_span.attributes.get(SpanAttributeKey.REQUEST_ID) == json.dumps(request_id)


@skip_when_testing_trace_sdk
def test_on_start_during_model_evaluation():
    """During evaluation, the request_id from the prediction context is reused."""
    from mlflow.pyfunc.context import Context, set_prediction_context

    trace_id = 12345
    request_id = "tr-" + encode_trace_id(trace_id)

    # Root span should create a new trace on start
    span = create_mock_otel_span(trace_id=trace_id, span_id=1)
    processor = MlflowV3SpanProcessor(span_exporter=mock.MagicMock(), export_metrics=False)

    with set_prediction_context(Context(request_id=request_id, is_evaluate=True)):
        processor.on_start(span)

    assert span.attributes.get(SpanAttributeKey.REQUEST_ID) == json.dumps(request_id)


@skip_when_testing_trace_sdk
def test_on_start_during_run(monkeypatch):
    """An active run's experiment wins over the environment-selected experiment."""
    monkeypatch.setattr(mlflow.tracking.context.default_context, "_get_source_name", lambda: "test")
    monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, "bob")

    span = create_mock_otel_span(trace_id=12345, span_id=1, parent_id=None, start_time=5_000_000)

    env_experiment_name = "env_experiment_id"
    run_experiment_name = "run_experiment_id"
    mlflow.create_experiment(env_experiment_name)
    run_experiment_id = mlflow.create_experiment(run_experiment_name)
    # The "env" experiment is active, but the trace should attach to the run's experiment.
    mlflow.set_experiment(experiment_name=env_experiment_name)

    processor = MlflowV3SpanProcessor(span_exporter=mock.MagicMock(), export_metrics=False)
    with mlflow.start_run(experiment_id=run_experiment_id) as run:
        processor.on_start(span)

        trace_id = "tr-" + encode_trace_id(span.context.trace_id)
        trace = InMemoryTraceManager.get_instance()._traces[trace_id]
        assert trace.info.experiment_id == run_experiment_id
        assert trace.info.request_metadata[TraceMetadataKey.SOURCE_RUN] == run.info.run_id


def test_incremental_span_name_no_deduplication():
    """Duplicate span names are kept as-is (no "_1"/"_2" suffixing) and order is preserved."""
    InMemoryTraceManager.reset()
    trace_manager = InMemoryTraceManager.get_instance()

    trace_id = 12345
    request_id = "tr-" + encode_trace_id(trace_id)
    processor = MlflowV3SpanProcessor(span_exporter=mock.MagicMock(), export_metrics=False)

    # Helper to create and register a span
    def create_and_register(name, span_id, parent_id=1):
        span = create_mock_otel_span(
            name=name,
            trace_id=trace_id,
            span_id=span_id,
            parent_id=parent_id,
            start_time=span_id * 1_000_000,
            end_time=(span_id + 1) * 1_000_000,
        )
        processor.on_start(span)
        live_span = LiveSpan(span, request_id)
        trace_manager.register_span(live_span)
        processor.on_end(span)
        return span

    # Create root and 4 child spans: 3 "process" and 2 "query"
    create_and_register("process", 1, parent_id=None)
    create_and_register("process", 2)
    create_and_register("query", 3)
    create_and_register("process", 4)
    create_and_register("query", 5)

    with trace_manager.get_trace(request_id) as trace:
        names = [s.name for s in trace.span_dict.values() if s.name == "process"]
        assert len(names) == 3

    with trace_manager.get_trace(request_id) as trace:
        names = [s.name for s in trace.span_dict.values() if s.name == "query"]
        assert len(names) == 2

    with trace_manager.get_trace(request_id) as trace:
        spans_sorted_by_creation = sorted(trace.span_dict.values(), key=lambda s: s.start_time_ns)
        final_names = [s.name for s in spans_sorted_by_creation]
        assert final_names == ["process", "process", "query", "process", "query"]


def test_on_end():
    """on_end exports the span and folds status/duration back into the trace info."""
    trace_info = create_test_trace_info("request_id", 0)
    trace_manager = InMemoryTraceManager.get_instance()
    trace_manager.register_trace("trace_id", trace_info)

    otel_span = create_mock_otel_span(
        name="foo",
        trace_id="trace_id",
        span_id=1,
        parent_id=None,
        start_time=5_000_000,
        end_time=9_000_000,
    )
    span = LiveSpan(otel_span, "request_id")
    span.set_status("OK")
    span.set_inputs({"input1": "very long input" * 100})
    span.set_outputs({"output": "very long output" * 100})

    mock_exporter = mock.MagicMock()
    # NOTE(review): mock_client appears unused below — presumably left over from an
    # older client-based export path; confirm and consider removing.
    mock_client = mock.MagicMock()
    mock_client._start_tracked_trace.side_effect = Exception("error")
    processor = MlflowV3SpanProcessor(span_exporter=mock_exporter, export_metrics=False)

    processor.on_end(otel_span)

    mock_exporter.export.assert_called_once_with((otel_span,))

    # Child spans should be exported
    mock_exporter.reset_mock()
    child_span = create_mock_otel_span(trace_id="trace_id", span_id=2, parent_id=1)
    # Set the REQUEST_ID attribute so the processor can find the trace
    child_span.set_attribute(SpanAttributeKey.REQUEST_ID, json.dumps("request_id"))
    processor.on_end(child_span)
    mock_exporter.export.assert_called_once_with((child_span,))

    # Trace info should be updated according to the span attributes
    manager_trace = trace_manager.pop_trace("trace_id")
    trace_info = manager_trace.trace.info
    assert trace_info.status == TraceStatus.OK
    assert trace_info.execution_time_ms == 4  # 9_000_000ns - 5_000_000ns = 4ms
    assert trace_info.tags == {}
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/processor/test_mlflow_v3_processor.py", "license": "Apache License 2.0", "lines": 137, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:examples/smolagents/tracing.py
""" This is an example for leveraging MLflow's auto tracing capabilities for Smolagents. For more information about MLflow Tracing, see: https://mlflow.org/docs/latest/llms/tracing/index.html """ from smolagents import CodeAgent, LiteLLMModel import mlflow # Turn on auto tracing for Smolagents by calling mlflow.smolagents.autolog() mlflow.smolagents.autolog() model = LiteLLMModel(model_id="openai/gpt-4o-mini", api_key="API_KEY") agent = CodeAgent(tools=[], model=model, add_base_tools=True) result = agent.run( "Could you give me the 118th number in the Fibonacci sequence?", )
{ "repo_id": "mlflow/mlflow", "file_path": "examples/smolagents/tracing.py", "license": "Apache License 2.0", "lines": 13, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/smolagents/autolog.py
import inspect
import logging
from typing import Any

import mlflow
from mlflow.entities import SpanType
from mlflow.entities.span import LiveSpan
from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey
from mlflow.utils.autologging_utils.config import AutoLoggingConfig

_logger = logging.getLogger(__name__)


def patched_class_call(original, self, *args, **kwargs):
    """Patch applied to smolagents class methods to trace their invocations.

    When tracing is enabled, wraps the call in an MLflow span, recording inputs,
    instance-derived attributes, token usage, and outputs. When tracing is
    disabled, the wrapped method is still executed so patching stays transparent.

    Args:
        original: The unpatched method being wrapped.
        self: The smolagents instance the method is bound to.

    Returns:
        Whatever ``original`` returns.
    """
    try:
        config = AutoLoggingConfig.init(flavor_name=mlflow.smolagents.FLAVOR_NAME)

        if config.log_traces:
            fullname = f"{self.__class__.__name__}.{original.__name__}"
            span_type = _get_span_type(self)
            with mlflow.start_span(name=fullname, span_type=span_type) as span:
                inputs = _construct_full_inputs(original, self, *args, **kwargs)
                span.set_inputs(inputs)
                _set_span_attributes(span=span, instance=self)
                result = original(self, *args, **kwargs)
                # Need to convert the response of smolagents API for better visualization
                outputs = result.__dict__ if hasattr(result, "__dict__") else result
                if token_usage := _parse_usage(outputs):
                    span.set_attribute(SpanAttributeKey.CHAT_USAGE, token_usage)
                span.set_outputs(outputs)
                return result

        # BUGFIX: previously, when log_traces was False, the original method was
        # never invoked and the patched call silently returned None.
        return original(self, *args, **kwargs)
    except Exception:
        _logger.error("the error occurred while patching")
        # Bare raise preserves the original traceback (raise e would truncate it).
        raise


def _get_span_type(instance) -> str:
    """Map a smolagents instance to the MLflow span type for its trace span."""
    from smolagents import CodeAgent, MultiStepAgent, Tool, ToolCallingAgent, models

    if isinstance(instance, (MultiStepAgent, CodeAgent, ToolCallingAgent)):
        return SpanType.AGENT
    elif isinstance(instance, Tool):
        return SpanType.TOOL
    elif isinstance(instance, models.Model):
        return SpanType.CHAT_MODEL
    return SpanType.UNKNOWN


def _construct_full_inputs(func, *args, **kwargs):
    """Bind call arguments to ``func``'s signature and return them as a dict.

    ``self`` is dropped, ``None`` values are filtered out, and objects with a
    ``__dict__`` are shallow-converted to plain dicts so they serialize cleanly.
    """
    signature = inspect.signature(func)
    # This does not create copy. So values should not be mutated directly
    arguments = signature.bind_partial(*args, **kwargs).arguments

    if "self" in arguments:
        arguments.pop("self")

    # Avoid non serializable objects and circular references
    return {
        k: v.__dict__ if hasattr(v, "__dict__") else v
        for k, v in arguments.items()
        if v is not None
    }


def _set_span_attributes(span: LiveSpan, instance):
    """Attach agent/tool/model metadata from ``instance`` to ``span``.

    Best-effort: extraction failures are logged and never propagate.
    """
    # Smolagents is available only python >= 3.10, so importing libraries inside methods.
    try:
        from smolagents import CodeAgent, MultiStepAgent, Tool, ToolCallingAgent, models

        if isinstance(instance, (MultiStepAgent, CodeAgent, ToolCallingAgent)):
            attributes = _get_agent_attributes(instance)
        elif isinstance(instance, Tool):
            attributes = _get_tool_attributes(instance)
        elif issubclass(type(instance), models.Model):
            attributes = _get_model_attributes(instance)
        else:
            return

        for key, value in attributes.items():
            if value is not None:
                span.set_attribute(key, str(value) if isinstance(value, list) else value)
    except Exception as e:
        # logging.Logger.warn is deprecated; use warning.
        _logger.warning("An exception happens when saving span attributes. Exception: %s", e)


def _get_agent_attributes(instance):
    """Collect an agent's instance attributes, expanding the tool list."""
    agent = {}
    for key, value in instance.__dict__.items():
        if key == "tools":
            value = _parse_tools(value)
        if value is None:
            continue
        agent[key] = str(value)

    return agent


def _inner_get_tool_attributes(tool_dict):
    """Render a tool's name/description as an OpenAI-style function descriptor."""
    res = {}
    if hasattr(tool_dict, "name") and tool_dict.name is not None:
        res["name"] = tool_dict.name
    if hasattr(tool_dict, "description") and tool_dict.description is not None:
        res["description"] = tool_dict.description
    result = {}
    if res:
        result["type"] = "function"
        result["function"] = res
    return result


def _get_tool_attributes(instance):
    """Extract span attributes for a Tool instance.

    BUGFIX: pass the instance itself rather than ``instance.__dict__`` — the
    helper reads ``name``/``description`` via attribute access, and ``hasattr``
    on a plain dict is always False, so the attributes were never captured.
    """
    return _inner_get_tool_attributes(instance)


def _parse_tools(tools):
    """Convert each tool in ``tools`` to its function-descriptor form."""
    return [_inner_get_tool_attributes(tool) for tool in tools]


def _get_model_attributes(instance):
    """Collect a model's instance attributes, omitting secrets (api_key)."""
    model = {SpanAttributeKey.MESSAGE_FORMAT: "smolagents"}
    for key, value in instance.__dict__.items():
        if value is None or key == "api_key":
            continue
        model[key] = str(value)
    return model


def _parse_usage(output: Any) -> dict[str, int] | None:
    """Pull token usage from a model response, or None if unavailable."""
    try:
        # Newer smolagents responses wrap the raw provider payload under "raw".
        if isinstance(output, dict) and "raw" in output:
            output = output["raw"]
        if usage := getattr(output, "usage", None):
            return {
                TokenUsageKey.INPUT_TOKENS: usage.prompt_tokens,
                TokenUsageKey.OUTPUT_TOKENS: usage.completion_tokens,
                TokenUsageKey.TOTAL_TOKENS: usage.total_tokens,
            }
    except Exception as e:
        _logger.debug(f"Failed to parse token usage from output: {e}")
    return None
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/smolagents/autolog.py", "license": "Apache License 2.0", "lines": 116, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/smolagents/test_smolagents_autolog.py
"""Tests for MLflow's smolagents autologging integration."""

from types import SimpleNamespace
from unittest.mock import patch

import pytest
import smolagents
from packaging.version import Version

import mlflow
from mlflow.entities.span import SpanType
from mlflow.tracing.constant import SpanAttributeKey

from tests.tracing.helper import get_traces

_DUMMY_INPUT = "Explain quantum mechanics in simple terms."

_SMOLAGENTS_VERSION_NEW = Version(smolagents.__version__) >= Version("1.15.0")

# smolagents >= 1.15 renamed the model entry point from __call__ to generate.
MOCK_INFERENCE_CLIENT_MODEL_METHOD = "generate" if _SMOLAGENTS_VERSION_NEW else "__call__"


def clear_autolog_state():
    """Reset autologging configs and post-import hooks between tests."""
    from mlflow.utils.autologging_utils import AUTOLOGGING_INTEGRATIONS

    for key in AUTOLOGGING_INTEGRATIONS.keys():
        AUTOLOGGING_INTEGRATIONS[key].clear()
    mlflow.utils.import_hooks._post_import_hooks = {}


def test_run_autolog():
    """A successful agent run produces one trace with agent/model spans and token usage."""
    from smolagents import ChatMessage, CodeAgent, InferenceClientModel

    _DUMMY_OUTPUT = ChatMessage(
        role="user",
        content='[{"type": "text", "text": "Explain quantum mechanics in simple terms."}]',
    )
    _DUMMY_OUTPUT.raw = SimpleNamespace(
        usage=SimpleNamespace(
            prompt_tokens=10,
            completion_tokens=18,
            total_tokens=28,
        )
    )

    clear_autolog_state()
    agent = CodeAgent(
        tools=[],
        model=InferenceClientModel(model_id="gpt-3.5-turbo", token="test_id"),
        max_steps=2,
    )
    with patch(
        f"smolagents.InferenceClientModel.{MOCK_INFERENCE_CLIENT_MODEL_METHOD}",
        return_value=_DUMMY_OUTPUT,
    ):
        mlflow.smolagents.autolog()
        agent.run(_DUMMY_INPUT)

    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    if _SMOLAGENTS_VERSION_NEW:
        # TODO: support this once the new version is stable
        assert len(traces[0].data.spans) > 0
    else:
        assert len(traces[0].data.spans) == 6
        # CodeAgent
        span_0 = traces[0].data.spans[0]
        assert span_0.name == "CodeAgent.run"
        assert span_0.span_type == SpanType.AGENT
        assert span_0.parent_id is None
        assert span_0.inputs == {"task": _DUMMY_INPUT}
        assert span_0.outputs == {
            "_value": '[{"type": "text", "text": "Explain quantum mechanics in simple terms."}]'
        }
        # CodeAgent
        span_1 = traces[0].data.spans[1]
        assert span_1.name == "CodeAgent.step"
        assert span_1.span_type == SpanType.AGENT
        assert span_1.parent_id == span_0.span_id
        assert span_1.inputs["memory_step"]["step_number"] == 1
        assert span_1.outputs is None
        # InferenceClientModel
        span_2 = traces[0].data.spans[2]
        assert span_2.name == "InferenceClientModel.call_original"
        assert span_2.span_type == SpanType.CHAT_MODEL
        assert span_2.parent_id == span_1.span_id
        assert span_2.inputs is not None
        assert span_2.outputs is not None
        # CodeAgent
        span_3 = traces[0].data.spans[3]
        assert span_3.name == "CodeAgent.step"
        assert span_3.span_type == SpanType.AGENT
        assert span_3.parent_id == span_0.span_id
        assert span_3.inputs is not None
        assert span_3.outputs is None
        # InferenceClientModel
        span_4 = traces[0].data.spans[4]
        assert span_4.name == "InferenceClientModel.call_original"
        assert span_4.span_type == SpanType.CHAT_MODEL
        assert span_4.parent_id == span_3.span_id
        assert span_4.inputs is not None
        assert span_4.outputs is not None
        # InferenceClientModel
        span_5 = traces[0].data.spans[5]
        assert span_5.name == "InferenceClientModel.call_original"
        assert span_5.span_type == SpanType.CHAT_MODEL
        assert span_5.parent_id == span_0.span_id
        assert span_5.inputs is not None
        assert span_5.outputs is not None

        # Each model call reports the mocked usage (10/18/28).
        assert span_2.get_attribute(SpanAttributeKey.CHAT_USAGE) == {
            "input_tokens": 10,
            "output_tokens": 18,
            "total_tokens": 28,
        }
        assert span_4.get_attribute(SpanAttributeKey.CHAT_USAGE) == {
            "input_tokens": 10,
            "output_tokens": 18,
            "total_tokens": 28,
        }
        assert span_5.get_attribute(SpanAttributeKey.CHAT_USAGE) == {
            "input_tokens": 10,
            "output_tokens": 18,
            "total_tokens": 28,
        }
        # Trace-level usage is the sum over the three model-call spans.
        assert traces[0].info.token_usage == {
            "input_tokens": 30,
            "output_tokens": 54,
            "total_tokens": 84,
        }

    clear_autolog_state()


def test_run_failure():
    """A failing model call yields an ERROR trace whose spans are still recorded."""
    from smolagents import CodeAgent, InferenceClientModel

    clear_autolog_state()
    mlflow.smolagents.autolog()
    agent = CodeAgent(
        tools=[],
        model=InferenceClientModel(model_id="gpt-3.5-turbo", token="test_id"),
        max_steps=1,
    )
    with patch(
        f"smolagents.InferenceClientModel.{MOCK_INFERENCE_CLIENT_MODEL_METHOD}",
        side_effect=Exception("error"),
    ):
        with pytest.raises(Exception, match="error"):
            agent.run(_DUMMY_INPUT)

    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "ERROR"
    if _SMOLAGENTS_VERSION_NEW:
        assert len(traces[0].data.spans) > 0
    else:
        assert len(traces[0].data.spans) == 2
        # CodeAgent
        span_0 = traces[0].data.spans[0]
        assert span_0.name == "CodeAgent.run"
        assert span_0.span_type == SpanType.AGENT
        assert span_0.parent_id is None
        assert span_0.inputs == {"task": _DUMMY_INPUT}
        assert span_0.outputs is None
        # CodeAgent
        span_1 = traces[0].data.spans[1]
        assert span_1.name == "CodeAgent.step"
        assert span_1.span_type == SpanType.AGENT
        assert span_1.parent_id == span_0.span_id
        assert span_1.inputs is not None
        assert span_1.outputs is None

    clear_autolog_state()


def test_tool_autolog():
    """An agent configured with a tool still traces agent and model spans."""
    from smolagents import ChatMessage, CodeAgent, DuckDuckGoSearchTool, InferenceClientModel

    _DUMMY_OUTPUT = ChatMessage(
        role="user",
        content='[{"type": "text", "text": "Explain quantum mechanics in simple terms."}]',
    )
    _DUMMY_OUTPUT.raw = SimpleNamespace(
        usage=SimpleNamespace(
            prompt_tokens=10,
            completion_tokens=18,
            total_tokens=28,
        )
    )

    clear_autolog_state()
    agent = CodeAgent(
        tools=[
            DuckDuckGoSearchTool(),
        ],
        model=InferenceClientModel(model_id="gpt-3.5-turbo", token="test_id"),
        max_steps=1,
    )
    with patch(
        f"smolagents.InferenceClientModel.{MOCK_INFERENCE_CLIENT_MODEL_METHOD}",
        return_value=_DUMMY_OUTPUT,
    ):
        mlflow.smolagents.autolog()
        agent.run(_DUMMY_INPUT)

    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.status == "OK"
    if _SMOLAGENTS_VERSION_NEW:
        assert len(traces[0].data.spans) > 0
    else:
        assert len(traces[0].data.spans) == 4
        # CodeAgent
        span_0 = traces[0].data.spans[0]
        assert span_0.name == "CodeAgent.run"
        assert span_0.span_type == SpanType.AGENT
        assert span_0.parent_id is None
        assert span_0.inputs is not None
        assert span_0.outputs is not None
        # CodeAgent
        span_1 = traces[0].data.spans[1]
        assert span_1.name == "CodeAgent.step"
        assert span_1.span_type == SpanType.AGENT
        assert span_1.parent_id == span_0.span_id
        assert span_1.inputs is not None
        assert span_1.outputs is None
        # InferenceClientModel
        span_2 = traces[0].data.spans[2]
        assert span_2.name == "InferenceClientModel.call_original"
        assert span_2.span_type == SpanType.CHAT_MODEL
        assert span_2.parent_id == span_1.span_id
        assert span_2.inputs is not None
        assert span_2.outputs is not None
        # InferenceClientModel
        span_3 = traces[0].data.spans[3]
        assert span_3.name == "InferenceClientModel.call_original"
        assert span_3.span_type == SpanType.CHAT_MODEL
        assert span_3.parent_id == span_0.span_id
        assert span_3.inputs is not None
        assert span_3.outputs is not None

        assert span_2.get_attribute(SpanAttributeKey.CHAT_USAGE) == {
            "input_tokens": 10,
            "output_tokens": 18,
            "total_tokens": 28,
        }
        assert span_3.get_attribute(SpanAttributeKey.CHAT_USAGE) == {
            "input_tokens": 10,
            "output_tokens": 18,
            "total_tokens": 28,
        }
        # Trace-level usage is the sum over the two model-call spans.
        assert traces[0].info.token_usage == {
            "input_tokens": 20,
            "output_tokens": 36,
            "total_tokens": 56,
        }

    clear_autolog_state()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/smolagents/test_smolagents_autolog.py", "license": "Apache License 2.0", "lines": 230, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/models/evaluation/calibration_curve.py
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from sklearn.calibration import CalibrationDisplay, calibration_curve


def make_multi_class_calibration_plot(
    n_classes, y_true, y_probs, calibration_config, label_list
) -> Figure:
    """Generate one calibration plot for all classes of a multi-class classifier

    Args:
        n_classes (int): number of classes in multi-class prediction
        y_true (array-like, shape (n_samples)): Ground truth (correct) target values.
        y_probs (array-like, shape (n_samples, n_classes)): Prediction probabilities for
            each class returned by a classifier.
        calibration_config (dict[str, Union[str, int]]): Additional parameters for
            sklearn.CalibrationDisplay
        label_list (array-like): list of label names for each class

    Returns:
        Figure: Multi-Class Calibration Plot
    """
    fig, ax = plt.subplots()

    # add calibration line for each class (one-vs-rest)
    for _class in range(n_classes):
        # sklearn returns (prob_true, prob_pred) in that order.
        prob_true, prob_pred = calibration_curve(
            y_true=[v == _class for v in y_true],
            y_prob=y_probs[:, _class],
            n_bins=calibration_config.get("calibration_n_bins", 10),
        )
        # BUGFIX: plot mean predicted probability on x and fraction of positives
        # on y, matching the axis labels below and CalibrationDisplay's
        # convention. Previously the two arrays were swapped.
        plt.plot(
            prob_pred,
            prob_true,
            marker="o",
            markersize=3,
            label=f"Class {label_list[_class]}",
        )

    # plot perfect calibration line
    plt.plot([0, 1], [0, 1], linestyle="dotted", label="Perfect Calibration", color="black")

    # add legend to plot
    plt.legend(
        loc="upper center",
        bbox_to_anchor=(0.5, -0.25),
        ncol=n_classes // 3,
    )

    # set figure title
    ax.set_title(
        f"{calibration_config.get('calibration_classifier_name', 'Classifier')} Calibration",
        fontsize=12,
    )

    # set figure axis labels
    ax.set_xlabel("Mean Predicted Probability", fontsize=10)
    ax.set_ylabel("Fraction of True Positives", fontsize=10)

    return fig


def plot_calibration_curve(y_true, y_probs, pos_label, calibration_config, label_list) -> Figure:
    """Generate a calibration curve for a trained classifier

    Args:
        y_true (array-like, shape (n_samples)): Ground truth (correct) target values.
        y_probs (array-like, shape (n_samples, n_classes)): Prediction probabilities for
            each class returned by a classifier.
        pos_label (str): Label for the positive class.
        calibration_config (dict[str, Union[str, int]]): Additional parameters for
            sklearn.CalibrationDisplay
        label_list: List of class names

    Raises:
        TypeError: if calibration_config["calibration_classifier_name"] is not str
        TypeError: if calibration_config["calibration_n_bins"] is not int

    Returns:
        Figure: Calibration Curve for Evaluated Classifier
    """
    # get number of classes
    n_classes = y_probs.shape[-1]

    # check that types are appropriate
    if calibration_config:
        # check that name provided for classifier is of proper type
        if not isinstance(calibration_config.get("calibration_classifier_name", ""), str):
            raise TypeError("calibration_classifier_name should be of type string")

        # check that name provided for calibration n_bins is of proper type
        if not isinstance(calibration_config.get("calibration_n_bins", 10), int):
            raise TypeError("calibration_n_bins should be of type int")

    # if we are evaluating a binary classifier assume positive class is at column index 1
    if n_classes == 2:
        return CalibrationDisplay.from_predictions(
            y_true,
            y_prob=y_probs[:, 1],
            pos_label=pos_label,
            name=calibration_config.get("calibration_classifier_name", None),  # type: ignore
            n_bins=calibration_config.get("calibration_n_bins", 10),  # type: ignore
        ).figure_

    # evaluating a multi-class classifier, create a calibration curve for each class
    return make_multi_class_calibration_plot(
        n_classes, y_true, y_probs, calibration_config, label_list
    )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/models/evaluation/calibration_curve.py", "license": "Apache License 2.0", "lines": 91, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/tracing/client.py
import json import logging import time from collections import defaultdict from concurrent.futures import ThreadPoolExecutor from contextlib import nullcontext from typing import Sequence import mlflow from mlflow.entities.assessment import Assessment from mlflow.entities.model_registry import PromptVersion from mlflow.entities.span import NO_OP_SPAN_TRACE_ID, Span from mlflow.entities.trace import Trace from mlflow.entities.trace_data import TraceData from mlflow.entities.trace_info import TraceInfo from mlflow.entities.trace_location import UCSchemaLocation, UnityCatalog from mlflow.environment_variables import ( _MLFLOW_SEARCH_TRACES_MAX_BATCH_SIZE, MLFLOW_SEARCH_TRACES_MAX_THREADS, MLFLOW_TRACING_SQL_WAREHOUSE_ID, ) from mlflow.exceptions import ( MlflowException, MlflowNotImplementedException, MlflowTraceDataCorrupted, MlflowTraceDataException, MlflowTraceDataNotFound, ) from mlflow.protos.databricks_pb2 import ( BAD_REQUEST, INVALID_PARAMETER_VALUE, NOT_FOUND, RESOURCE_DOES_NOT_EXIST, ) from mlflow.store.artifact.artifact_repository_registry import get_artifact_repository from mlflow.store.entities.paged_list import PagedList from mlflow.store.tracking import SEARCH_TRACES_DEFAULT_MAX_RESULTS from mlflow.telemetry.events import LogAssessmentEvent, StartTraceEvent from mlflow.telemetry.track import record_usage_event from mlflow.tracing.constant import ( GET_TRACE_V4_RETRY_TIMEOUT_SECONDS, SpansLocation, TraceMetadataKey, TraceTagKey, ) from mlflow.tracing.trace_manager import InMemoryTraceManager from mlflow.tracing.utils import TraceJSONEncoder, exclude_immutable_tags, parse_trace_id_v4 from mlflow.tracing.utils.artifact_utils import get_artifact_uri_for_trace from mlflow.tracking._tracking_service.utils import _get_store, _resolve_tracking_uri from mlflow.utils import is_uuid from mlflow.utils.mlflow_tags import IMMUTABLE_TAGS from mlflow.utils.uri import add_databricks_profile_info_to_artifact_uri, is_databricks_uri _logger = logging.getLogger(__name__) 
class TracingClient: """ Client of an MLflow Tracking Server that creates and manages experiments and runs. """ def __init__(self, tracking_uri: str | None = None): """ Args: tracking_uri: Address of local or remote tracking server. """ self.tracking_uri = _resolve_tracking_uri(tracking_uri) # NB: Fetch the tracking store (`self.store`) upon client initialization to ensure that # the tracking URI is valid and the store can be properly resolved. We define `store` as a # property method to ensure that the client is serializable, even if the store is not # self.store self.store @property def store(self): return _get_store(self.tracking_uri) @record_usage_event(StartTraceEvent) def start_trace(self, trace_info: TraceInfo) -> TraceInfo: """ Create a new trace in the backend. Args: trace_info: The TraceInfo object to record in the backend. Returns: The returned TraceInfoV3 object from the backend. """ return self.store.start_trace(trace_info=trace_info) def log_spans(self, location: str, spans: list[Span]) -> list[Span]: """ Log spans to the backend. Args: location: The location to log spans to. It should either be an experiment ID or a Unity Catalog table name. spans: List of Span objects to log. Returns: List of logged Span objects from the backend. """ return self.store.log_spans( location=location, spans=spans, tracking_uri=self.tracking_uri if is_databricks_uri(self.tracking_uri) else None, ) def delete_traces( self, experiment_id: str, max_timestamp_millis: int | None = None, max_traces: int | None = None, trace_ids: list[str] | None = None, ) -> int: return self.store.delete_traces( experiment_id=experiment_id, max_timestamp_millis=max_timestamp_millis, max_traces=max_traces, trace_ids=trace_ids, ) def get_trace_info(self, trace_id: str) -> TraceInfo: """ Get the trace info matching the ``trace_id``. Args: trace_id: String id of the trace to fetch. Returns: TraceInfo object, of type ``mlflow.entities.trace_info.TraceInfo``. 
""" with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace: if trace is not None: return trace.info return self.store.get_trace_info(trace_id) def get_trace(self, trace_id: str) -> Trace: """ Get the trace matching the ``trace_id``. Args: trace_id: String id of the trace to fetch. Returns: The fetched Trace object, of type ``mlflow.entities.Trace``. """ location, _ = parse_trace_id_v4(trace_id) if location is not None: start_time = time.time() attempt = 0 while time.time() - start_time < GET_TRACE_V4_RETRY_TIMEOUT_SECONDS: # For a V4 trace, load spans from the v4 BatchGetTraces endpoint. # BatchGetTraces returns an empty list if the trace is not found, which will be # retried up to GET_TRACE_V4_RETRY_TIMEOUT_SECONDS seconds. if traces := self.store.batch_get_traces([trace_id], location): return traces[0] attempt += 1 interval = 2**attempt _logger.debug( f"Trace not found, retrying in {interval} seconds (attempt {attempt})" ) time.sleep(interval) raise MlflowException( message=f"Trace with ID {trace_id} is not found.", error_code=NOT_FOUND, ) else: try: trace_info = self.get_trace_info(trace_id) # if the trace is stored in the tracking store, load spans from the tracking store # otherwise, load spans from the artifact repository if trace_info.tags.get(TraceTagKey.SPANS_LOCATION) == SpansLocation.TRACKING_STORE: try: return self.store.get_trace(trace_id) except MlflowNotImplementedException: pass if traces := self.store.batch_get_traces([trace_info.trace_id]): return traces[0] else: raise MlflowException( f"Trace with ID {trace_id} is not found.", error_code=NOT_FOUND, ) else: trace_data = self._download_trace_data(trace_info) except MlflowTraceDataNotFound: raise MlflowException( message=( f"Trace with ID {trace_id} cannot be loaded because it is missing span " "data. Please try creating or loading another trace." 
), error_code=BAD_REQUEST, ) from None # Ensure the original spammy exception is not included in the traceback except MlflowTraceDataCorrupted: raise MlflowException( message=( f"Trace with ID {trace_id} cannot be loaded because its span data" " is corrupted. Please try creating or loading another trace." ), error_code=BAD_REQUEST, ) from None # Ensure the original spammy exception is not included in the traceback return Trace(trace_info, trace_data) def get_online_trace_details( self, trace_id: str, source_inference_table: str, source_databricks_request_id: str, ) -> str: return self.store.get_online_trace_details( trace_id=trace_id, source_inference_table=source_inference_table, source_databricks_request_id=source_databricks_request_id, ) def _search_traces( self, experiment_ids: list[str] | None = None, filter_string: str | None = None, max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS, order_by: list[str] | None = None, page_token: str | None = None, model_id: str | None = None, locations: list[str] | None = None, ): return self.store.search_traces( experiment_ids=experiment_ids, filter_string=filter_string, max_results=max_results, order_by=order_by, page_token=page_token, model_id=model_id, locations=locations, ) def search_traces( self, experiment_ids: list[str] | None = None, filter_string: str | None = None, max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS, order_by: list[str] | None = None, page_token: str | None = None, run_id: str | None = None, include_spans: bool = True, model_id: str | None = None, locations: list[str] | None = None, ) -> PagedList[Trace]: """ Return traces that match the given list of search expressions within the experiments. Args: experiment_ids: List of experiment ids to scope the search. Deprecated, use `locations` instead. filter_string: A search filter string. max_results: Maximum number of traces desired. order_by: List of order_by clauses. page_token: Token specifying the next page of results. 
It should be obtained from a ``search_traces`` call. run_id: A run id to scope the search. When a trace is created under an active run, it will be associated with the run and you can filter on the run id to retrieve the trace. include_spans: If ``True``, include spans in the returned traces. Otherwise, only the trace metadata is returned, e.g., trace ID, start time, end time, etc, without any spans. model_id: If specified, return traces associated with the model ID. locations: A list of locations to search over. To search over experiments, provide a list of experiment IDs. To search over UC tables on databricks, provide a list of locations in the format `<catalog_name>.<schema_name>[.<table_prefix>]`. Returns: A :py:class:`PagedList <mlflow.store.entities.PagedList>` of :py:class:`Trace <mlflow.entities.Trace>` objects that satisfy the search expressions. If the underlying tracking store supports pagination, the token for the next page may be obtained via the ``token`` attribute of the returned object; however, some store implementations may not support pagination and thus the returned token would not be meaningful in such cases. """ if model_id is not None: if filter_string: raise MlflowException( message=( "Cannot specify both `model_id` or `filter_string` in the search_traces " "call." ), error_code=INVALID_PARAMETER_VALUE, ) # if sql_warehouse_id is not set then we convert model_id to filter_string, # because `_search_unified_traces` requires sql warehouse id existing. if MLFLOW_TRACING_SQL_WAREHOUSE_ID.get() is None: filter_string = f"request_metadata.`mlflow.modelId` = '{model_id}'" model_id = None if run_id: run = self.store.get_run(run_id) if run.info.experiment_id not in locations: raise MlflowException( f"Run {run_id} belongs to experiment {run.info.experiment_id}, which is not " f"in the list of locations provided: {locations}. 
Please include " f"experiment {run.info.experiment_id} in the `locations` parameter to " "search for traces from this run.", error_code=INVALID_PARAMETER_VALUE, ) additional_filter = f"attribute.run_id = '{run_id}'" if filter_string: if TraceMetadataKey.SOURCE_RUN in filter_string: raise MlflowException( "You cannot filter by run_id when it is already part of the filter string." f"Please remove the {TraceMetadataKey.SOURCE_RUN} filter from the filter " "string and try again.", error_code=INVALID_PARAMETER_VALUE, ) filter_string += f" AND {additional_filter}" else: filter_string = additional_filter traces = [] next_max_results = max_results next_token = page_token max_workers = MLFLOW_SEARCH_TRACES_MAX_THREADS.get() executor = ( ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="MlflowTracingSearch") if include_spans else nullcontext() ) with executor: while len(traces) < max_results: trace_infos, next_token = self._search_traces( experiment_ids=experiment_ids, filter_string=filter_string, max_results=next_max_results, order_by=order_by, page_token=next_token, model_id=model_id, locations=locations, ) if include_spans: trace_infos_by_location = self._group_trace_infos_by_location(trace_infos) for ( location, location_trace_infos, ) in trace_infos_by_location.items(): if location == SpansLocation.ARTIFACT_REPO: # download traces from artifact repository if spans are # stored in the artifact repository traces.extend( trace for trace in executor.map( self._download_spans_from_artifact_repo, location_trace_infos, ) if trace ) else: # Get full traces with BatchGetTraces, all traces in a single call # must be located in the same table. 
trace_ids = [t.trace_id for t in location_trace_infos] traces.extend( self._download_spans_from_batch_get_traces( trace_ids, location, executor ) ) else: traces.extend(Trace(t, TraceData(spans=[])) for t in trace_infos) if not next_token: break next_max_results = max_results - len(traces) return PagedList(traces, next_token) def _download_spans_from_batch_get_traces( self, trace_ids: list[str], location: str, executor: ThreadPoolExecutor ) -> list[Trace]: """ Fetch full traces including spans from the BatchGetTrace v4 endpoint. BatchGetTrace endpoint only support up to 10 traces in a single call. """ traces = [] def _fetch_minibatch(ids: list[str]) -> list[Trace]: return self.store.batch_get_traces(ids, location) or [] batch_size = _MLFLOW_SEARCH_TRACES_MAX_BATCH_SIZE.get() batches = [trace_ids[i : i + batch_size] for i in range(0, len(trace_ids), batch_size)] for minibatch_traces in executor.map(_fetch_minibatch, batches): traces.extend(minibatch_traces) return traces def _download_spans_from_artifact_repo(self, trace_info: TraceInfo) -> Trace | None: """ Download trace data for the given trace_info and returns a Trace object. If the download fails (e.g., the trace data is missing or corrupted), returns None. This is used for traces logged via v3 endpoint, where spans are stored in artifact store. 
""" is_online_trace = is_uuid(trace_info.trace_id) is_databricks = is_databricks_uri(self.tracking_uri) # For online traces in Databricks, we need to get trace data from a different endpoint try: if is_databricks and is_online_trace: # For online traces, get data from the online API trace_data = self.get_online_trace_details( trace_id=trace_info.trace_id, source_inference_table=trace_info.request_metadata.get("mlflow.sourceTable"), source_databricks_request_id=trace_info.request_metadata.get( "mlflow.databricksRequestId" ), ) trace_data = TraceData.from_dict(json.loads(trace_data)) else: # For offline traces, download data from artifact storage trace_data = self._download_trace_data(trace_info) except MlflowTraceDataException as e: _logger.warning( ( f"Failed to download trace data for trace {trace_info.trace_id!r} " f"with {e.ctx}. For full traceback, set logging level to DEBUG." ), exc_info=_logger.isEnabledFor(logging.DEBUG), ) return None else: return Trace(trace_info, trace_data) def _group_trace_infos_by_location( self, trace_infos: list[TraceInfo] ) -> dict[str, list[TraceInfo]]: """ Group the trace infos based on where the trace data is stored. Returns: A dictionary mapping location to a list of trace infos. """ trace_infos_by_location = defaultdict(list) for trace_info in trace_infos: if uc_schema := trace_info.trace_location.uc_schema: location = f"{uc_schema.catalog_name}.{uc_schema.schema_name}" trace_infos_by_location[location].append(trace_info) elif uc_tp := trace_info.trace_location.uc_table_prefix: location = f"{uc_tp.catalog_name}.{uc_tp.schema_name}.{uc_tp.table_prefix}" trace_infos_by_location[location].append(trace_info) elif trace_info.trace_location.mlflow_experiment: # New traces in SQL store store spans in the tracking store, while for old traces or # traces with File store, spans are stored in artifact repository. 
if trace_info.tags.get(TraceTagKey.SPANS_LOCATION) == SpansLocation.TRACKING_STORE: # location is not used for traces with mlflow experiment location in tracking # store, so we use None as the location trace_infos_by_location[None].append(trace_info) else: trace_infos_by_location[SpansLocation.ARTIFACT_REPO].append(trace_info) else: _logger.warning(f"Unsupported location: {trace_info.trace_location}. Skipping.") return trace_infos_by_location def calculate_trace_filter_correlation( self, experiment_ids: list[str], filter_string1: str, filter_string2: str, base_filter: str | None = None, ): """ Calculate the correlation (NPMI) between two trace filter conditions. This method computes the Normalized Pointwise Mutual Information (NPMI) between traces matching two different filter conditions, which measures how much more (or less) likely traces are to satisfy both conditions compared to if the conditions were independent. Args: experiment_ids: List of experiment IDs to search within. filter_string1: First filter condition (e.g., "span.type = 'LLM'"). filter_string2: Second filter condition (e.g., "feedback.quality > 0.8"). base_filter: Optional base filter that both filter1 and filter2 are tested on top of (e.g., 'request_time > ... and request_time < ...' for time windows). Returns: TraceFilterCorrelationResult containing: - npmi: NPMI score from -1 (never co-occur) to 1 (always co-occur) - npmi_smoothed: Smoothed NPMI value with Jeffreys prior for robustness - filter1_count: Number of traces matching filter_string1 - filter2_count: Number of traces matching filter_string2 - joint_count: Number of traces matching both filters - total_count: Total number of traces in the experiments .. 
code-block:: python from mlflow.tracing.client import TracingClient client = TracingClient() result = client.calculate_trace_filter_correlation( experiment_ids=["123"], filter_string1="span.type = 'LLM'", filter_string2="feedback.quality > 0.8", ) print(f"NPMI: {result.npmi:.3f}") # Output: NPMI: 0.456 """ return self.store.calculate_trace_filter_correlation( experiment_ids=experiment_ids, filter_string1=filter_string1, filter_string2=filter_string2, base_filter=base_filter, ) def set_trace_tags(self, trace_id: str, tags: dict[str, str]): """ Set tags on the trace with the given trace_id. Args: trace_id: The ID of the trace. tags: A dictionary of key-value pairs. """ tags = exclude_immutable_tags(tags) for k, v in tags.items(): self.set_trace_tag(trace_id, k, v) def set_trace_tag(self, trace_id: str, key: str, value: str): """ Set a tag on the trace with the given trace ID. Args: trace_id: The ID of the trace to set the tag on. key: The string key of the tag. Must be at most 250 characters long, otherwise it will be truncated when stored. value: The string value of the tag. Must be at most 250 characters long, otherwise it will be truncated when stored. """ if not isinstance(value, str): _logger.warning( "Received non-string value for trace tag. Please note that non-string tag values" "will automatically be stringified when the trace is logged." ) # Trying to set the tag on the active trace first with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace: if trace: trace.info.tags[key] = str(value) return if key in IMMUTABLE_TAGS: _logger.warning(f"Tag '{key}' is immutable and cannot be set on a trace.") else: self.store.set_trace_tag(trace_id, key, str(value)) def delete_trace_tag(self, trace_id: str, key: str): """ Delete a tag on the trace with the given trace ID. Args: trace_id: The ID of the trace to delete the tag from. key: The string key of the tag. Must be at most 250 characters long, otherwise it will be truncated when stored. 
""" # Trying to delete the tag on the active trace first with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace: if trace: if key in trace.info.tags: trace.info.tags.pop(key) return else: raise MlflowException( f"Tag with key {key} not found in trace with ID {trace_id}.", error_code=RESOURCE_DOES_NOT_EXIST, ) if key in IMMUTABLE_TAGS: _logger.warning(f"Tag '{key}' is immutable and cannot be deleted on a trace.") else: self.store.delete_trace_tag(trace_id, key) def get_assessment(self, trace_id: str, assessment_id: str) -> Assessment: """ Get an assessment entity from the backend store. Args: trace_id: The ID of the trace. assessment_id: The ID of the assessment to get. Returns: The Assessment object. """ return self.store.get_assessment(trace_id, assessment_id) @record_usage_event(LogAssessmentEvent) def log_assessment(self, trace_id: str, assessment: Assessment) -> Assessment: """ Log an assessment to a trace. Args: trace_id: The ID of the trace. assessment: The assessment object to log. Returns: The logged Assessment object. """ assessment.trace_id = trace_id if trace_id is None or trace_id == NO_OP_SPAN_TRACE_ID: _logger.debug( "Skipping assessment logging for NO_OP_SPAN_TRACE_ID. This is expected when " "tracing is disabled." ) return assessment # If the trace is the active trace, add the assessment to it in-memory if trace_id == mlflow.get_active_trace_id(): with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace: if trace is None: _logger.debug( f"Trace {trace_id} is active but not found in the in-memory buffer. " "Something is wrong with trace handling. Skipping assessment logging." ) trace.info.assessments.append(assessment) return assessment return self.store.create_assessment(assessment) def update_assessment( self, trace_id: str, assessment_id: str, assessment: Assessment, ): """ Update an existing assessment entity in the backend store. Args: trace_id: The ID of the trace. 
assessment_id: The ID of the feedback assessment to update. assessment: The updated assessment. """ return self.store.update_assessment( trace_id=trace_id, assessment_id=assessment_id, name=assessment.name, expectation=assessment.expectation, feedback=assessment.feedback, rationale=assessment.rationale, metadata=assessment.metadata, ) def delete_assessment(self, trace_id: str, assessment_id: str): """ Delete an assessment associated with a trace. Args: trace_id: The ID of the trace. assessment_id: The ID of the assessment to delete. """ self.store.delete_assessment(trace_id=trace_id, assessment_id=assessment_id) def _get_artifact_repo_for_trace(self, trace_info: TraceInfo): artifact_uri = get_artifact_uri_for_trace(trace_info) artifact_uri = add_databricks_profile_info_to_artifact_uri(artifact_uri, self.tracking_uri) return get_artifact_repository(artifact_uri) def _download_trace_data(self, trace_info: TraceInfo) -> TraceData: """ Download trace data from artifact repository. Args: trace_info: Either a TraceInfo or TraceInfoV3 object containing trace metadata. Returns: TraceData object representing the downloaded trace data. """ artifact_repo = self._get_artifact_repo_for_trace(trace_info) return TraceData.from_dict(artifact_repo.download_trace_data()) def _upload_trace_data(self, trace_info: TraceInfo, trace_data: TraceData) -> None: artifact_repo = self._get_artifact_repo_for_trace(trace_info) trace_data_json = json.dumps(trace_data.to_dict(), cls=TraceJSONEncoder, ensure_ascii=False) return artifact_repo.upload_trace_data(trace_data_json) def link_prompt_versions_to_trace( self, trace_id: str, prompts: Sequence[PromptVersion] ) -> None: """ Link multiple prompt versions to a trace. Args: trace_id: The ID of the trace to link prompts to. prompts: List of PromptVersion objects to link to the trace. 
""" from mlflow.tracking._model_registry.utils import _get_store as _get_model_registry_store registry_store = _get_model_registry_store() registry_store.link_prompts_to_trace(prompt_versions=prompts, trace_id=trace_id) def _set_experiment_trace_location( self, location: UCSchemaLocation, experiment_id: str, sql_warehouse_id: str | None = None, ) -> UCSchemaLocation: if is_databricks_uri(self.tracking_uri): return self.store.set_experiment_trace_location( experiment_id=str(experiment_id), location=location, sql_warehouse_id=sql_warehouse_id, ) raise MlflowException( "Setting storage location is not supported on non-Databricks backends." ) def _get_trace_location(self, telemetry_profile_id: str) -> UnityCatalog: if is_databricks_uri(self.tracking_uri) and hasattr(self.store, "get_trace_location"): return self.store.get_trace_location(telemetry_profile_id) raise MlflowException("Getting trace location by ID is not supported on this backend.") def _create_or_get_trace_location( self, location: UnityCatalog, sql_warehouse_id: str | None = None ) -> UnityCatalog: if is_databricks_uri(self.tracking_uri) and hasattr( self.store, "create_or_get_trace_location" ): return self.store.create_or_get_trace_location(location, sql_warehouse_id) raise MlflowException("Creating trace location is not supported on this backend.") def _link_trace_location(self, experiment_id: str, location: UnityCatalog) -> None: if is_databricks_uri(self.tracking_uri) and hasattr(self.store, "link_trace_location"): self.store.link_trace_location(experiment_id, location) return raise MlflowException("Linking trace location is not supported on this backend.") def _unset_experiment_trace_location( self, experiment_id: str, location: UCSchemaLocation | UnityCatalog ) -> None: if is_databricks_uri(self.tracking_uri): self.store.unset_experiment_trace_location(str(experiment_id), location) else: raise MlflowException( "Clearing storage location is not supported on non-Databricks backends." )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/client.py", "license": "Apache License 2.0", "lines": 660, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/utils/yaml_utils.py
import codecs
import os
import shutil
import tempfile

import yaml

from mlflow.utils.file_utils import ENCODING, exists, get_parent_dir

try:
    # Prefer the C-accelerated LibYAML bindings when available; they are
    # drop-in compatible with the pure-Python SafeDumper/SafeLoader.
    from yaml import CSafeDumper as YamlSafeDumper
    from yaml import CSafeLoader as YamlSafeLoader
except ImportError:
    from yaml import SafeDumper as YamlSafeDumper
    from yaml import SafeLoader as YamlSafeLoader

from mlflow.exceptions import MissingConfigException


def write_yaml(root, file_name, data, overwrite=False, sort_keys=True, ensure_yaml_extension=True):
    """Write dictionary data in yaml format.

    Args:
        root: Directory name.
        file_name: Desired file name.
        data: Data to be dumped as yaml format.
        overwrite: If True, will overwrite existing files.
        sort_keys: Whether to sort the keys when writing the yaml file.
        ensure_yaml_extension: If True, will automatically add .yaml extension if not given.

    Raises:
        MissingConfigException: If ``root`` does not exist.
        Exception: If the target file already exists and ``overwrite`` is False.
    """
    if not exists(root):
        raise MissingConfigException(f"Parent directory '{root}' does not exist.")

    file_path = os.path.join(root, file_name)
    yaml_file_name = file_path
    if ensure_yaml_extension and not file_path.endswith(".yaml"):
        yaml_file_name = file_path + ".yaml"

    if exists(yaml_file_name) and not overwrite:
        # BUGFIX: the original message was missing the closing quote after
        # {yaml_file_name}, producing a malformed error string.
        raise Exception(f"Yaml file '{file_path}' exists as '{yaml_file_name}'")

    with codecs.open(yaml_file_name, mode="w", encoding=ENCODING) as yaml_file:
        yaml.dump(
            data,
            yaml_file,
            default_flow_style=False,
            allow_unicode=True,
            sort_keys=sort_keys,
            Dumper=YamlSafeDumper,
        )


def overwrite_yaml(root, file_name, data, ensure_yaml_extension=True):
    """Safely overwrite a preexisting yaml file.

    Ensures that file contents are not deleted or corrupted if the write fails.
    This is achieved by writing contents to a temporary file and moving the
    temporary file to replace the preexisting file, rather than opening the
    preexisting file for a direct write.

    Args:
        root: Directory name.
        file_name: File name.
        data: The data to write, represented as a dictionary.
        ensure_yaml_extension: If True, will automatically add .yaml extension if not given.
    """
    tmp_file_path = None
    original_file_path = os.path.join(root, file_name)
    # Capture the original permissions before the atomic replace; mkstemp
    # creates files with restrictive 0600 permissions which we must not keep.
    original_file_mode = os.stat(original_file_path).st_mode
    try:
        tmp_file_fd, tmp_file_path = tempfile.mkstemp(suffix="file.yaml")
        os.close(tmp_file_fd)
        write_yaml(
            root=get_parent_dir(tmp_file_path),
            file_name=os.path.basename(tmp_file_path),
            data=data,
            overwrite=True,
            sort_keys=True,
            ensure_yaml_extension=ensure_yaml_extension,
        )
        shutil.move(tmp_file_path, original_file_path)
        # restores original file permissions, see
        # https://docs.python.org/3/library/tempfile.html#tempfile.mkstemp
        os.chmod(original_file_path, original_file_mode)
    finally:
        # Clean up the temporary file if the move never happened (write failed).
        if tmp_file_path is not None and os.path.exists(tmp_file_path):
            os.remove(tmp_file_path)


def read_yaml(root, file_name):
    """Read data from yaml file and return as dictionary.

    Args:
        root: Directory name.
        file_name: File name. Expects to have '.yaml' extension.

    Returns:
        Data in yaml file as dictionary.

    Raises:
        MissingConfigException: If ``root`` or the yaml file does not exist.
    """
    if not exists(root):
        raise MissingConfigException(
            f"Cannot read '{file_name}'. Parent dir '{root}' does not exist."
        )

    file_path = os.path.join(root, file_name)
    if not exists(file_path):
        raise MissingConfigException(f"Yaml file '{file_path}' does not exist.")

    with codecs.open(file_path, mode="r", encoding=ENCODING) as yaml_file:
        return yaml.load(yaml_file, Loader=YamlSafeLoader)


class safe_edit_yaml:
    """Context manager that applies ``edit_func`` to a yaml file on entry and
    restores the original contents on exit.

    NOTE(review): ``edit_func`` receives a shallow copy of the parsed dict, so
    in-place mutation of nested values would leak into the restored snapshot —
    presumably callers only add/replace top-level keys; verify against usage.
    """

    def __init__(self, root, file_name, edit_func):
        self._root = root
        self._file_name = file_name
        self._edit_func = edit_func
        # Snapshot of the file's contents, re-written on __exit__.
        self._original = read_yaml(root, file_name)

    def __enter__(self):
        new_dict = self._edit_func(self._original.copy())
        write_yaml(self._root, self._file_name, new_dict, overwrite=True)

    def __exit__(self, *args):
        write_yaml(self._root, self._file_name, self._original, overwrite=True)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/utils/yaml_utils.py", "license": "Apache License 2.0", "lines": 99, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/utils/test_yaml_utils.py
import codecs import os from mlflow.utils.yaml_utils import ( read_yaml, safe_edit_yaml, write_yaml, ) from tests.helper_functions import random_file, random_int def test_yaml_read_and_write(tmp_path): temp_dir = str(tmp_path) yaml_file = random_file("yaml") long_value = 1 data = { "a": random_int(), "B": random_int(), "text_value": "中文", "long_value": long_value, "int_value": 32, "text_value_2": "hi", } write_yaml(temp_dir, yaml_file, data) read_data = read_yaml(temp_dir, yaml_file) assert data == read_data yaml_path = os.path.join(temp_dir, yaml_file) with codecs.open(yaml_path, encoding="utf-8") as handle: contents = handle.read() assert "!!python" not in contents # Check that UTF-8 strings are written properly to the file (rather than as ASCII # representations of their byte sequences). assert "中文" in contents def edit_func(old_dict): old_dict["more_text"] = "西班牙语" return old_dict assert "more_text" not in read_yaml(temp_dir, yaml_file) with safe_edit_yaml(temp_dir, yaml_file, edit_func): edited_dict = read_yaml(temp_dir, yaml_file) assert "more_text" in edited_dict assert edited_dict["more_text"] == "西班牙语" assert "more_text" not in read_yaml(temp_dir, yaml_file) def test_yaml_write_sorting(tmp_path): temp_dir = str(tmp_path) data = { "a": 1, "c": 2, "b": 3, } sorted_yaml_file = random_file("yaml") write_yaml(temp_dir, sorted_yaml_file, data, sort_keys=True) expected_sorted = """a: 1 b: 3 c: 2 """ with open(os.path.join(temp_dir, sorted_yaml_file)) as f: actual_sorted = f.read() assert actual_sorted == expected_sorted unsorted_yaml_file = random_file("yaml") write_yaml(temp_dir, unsorted_yaml_file, data, sort_keys=False) expected_unsorted = """a: 1 c: 2 b: 3 """ with open(os.path.join(temp_dir, unsorted_yaml_file)) as f: actual_unsorted = f.read() assert actual_unsorted == expected_unsorted
{ "repo_id": "mlflow/mlflow", "file_path": "tests/utils/test_yaml_utils.py", "license": "Apache License 2.0", "lines": 64, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:src/mcp/server/mcpserver/context.py
from __future__ import annotations from collections.abc import Iterable from typing import TYPE_CHECKING, Any, Generic, Literal from pydantic import AnyUrl, BaseModel from mcp.server.context import LifespanContextT, RequestT, ServerRequestContext from mcp.server.elicitation import ( ElicitationResult, ElicitSchemaModelT, UrlElicitationResult, elicit_url, elicit_with_validation, ) from mcp.server.lowlevel.helper_types import ReadResourceContents if TYPE_CHECKING: from mcp.server.mcpserver.server import MCPServer class Context(BaseModel, Generic[LifespanContextT, RequestT]): """Context object providing access to MCP capabilities. This provides a cleaner interface to MCP's RequestContext functionality. It gets injected into tool and resource functions that request it via type hints. To use context in a tool function, add a parameter with the Context type annotation: ```python @server.tool() async def my_tool(x: int, ctx: Context) -> str: # Log messages to the client await ctx.info(f"Processing {x}") await ctx.debug("Debug info") await ctx.warning("Warning message") await ctx.error("Error message") # Report progress await ctx.report_progress(50, 100) # Access resources data = await ctx.read_resource("resource://data") # Get request info request_id = ctx.request_id client_id = ctx.client_id return str(x) ``` The context parameter name can be anything as long as it's annotated with Context. The context is optional - tools that don't need it can omit the parameter. """ _request_context: ServerRequestContext[LifespanContextT, RequestT] | None _mcp_server: MCPServer | None # TODO(maxisbey): Consider making request_context/mcp_server required, or refactor Context entirely. def __init__( self, *, request_context: ServerRequestContext[LifespanContextT, RequestT] | None = None, mcp_server: MCPServer | None = None, # TODO(Marcelo): We should drop this kwargs parameter. 
**kwargs: Any, ): super().__init__(**kwargs) self._request_context = request_context self._mcp_server = mcp_server @property def mcp_server(self) -> MCPServer: """Access to the MCPServer instance.""" if self._mcp_server is None: # pragma: no cover raise ValueError("Context is not available outside of a request") return self._mcp_server # pragma: no cover @property def request_context(self) -> ServerRequestContext[LifespanContextT, RequestT]: """Access to the underlying request context.""" if self._request_context is None: # pragma: no cover raise ValueError("Context is not available outside of a request") return self._request_context async def report_progress(self, progress: float, total: float | None = None, message: str | None = None) -> None: """Report progress for the current operation. Args: progress: Current progress value (e.g., 24) total: Optional total value (e.g., 100) message: Optional message (e.g., "Starting render...") """ progress_token = self.request_context.meta.get("progress_token") if self.request_context.meta else None if progress_token is None: # pragma: no cover return await self.request_context.session.send_progress_notification( progress_token=progress_token, progress=progress, total=total, message=message, related_request_id=self.request_id, ) async def read_resource(self, uri: str | AnyUrl) -> Iterable[ReadResourceContents]: """Read a resource by URI. Args: uri: Resource URI to read Returns: The resource content as either text or bytes """ assert self._mcp_server is not None, "Context is not available outside of a request" return await self._mcp_server.read_resource(uri, self) async def elicit( self, message: str, schema: type[ElicitSchemaModelT], ) -> ElicitationResult[ElicitSchemaModelT]: """Elicit information from the client/user. This method can be used to interactively ask for additional information from the client within a tool's execution. 
The client might display the message to the user and collect a response according to the provided schema. If the client is an agent, it might decide how to handle the elicitation -- either by asking the user or automatically generating a response. Args: message: Message to present to the user schema: A Pydantic model class defining the expected response structure. According to the specification, only primitive types are allowed. Returns: An ElicitationResult containing the action taken and the data if accepted Note: Check the result.action to determine if the user accepted, declined, or cancelled. The result.data will only be populated if action is "accept" and validation succeeded. """ return await elicit_with_validation( session=self.request_context.session, message=message, schema=schema, related_request_id=self.request_id, ) async def elicit_url( self, message: str, url: str, elicitation_id: str, ) -> UrlElicitationResult: """Request URL mode elicitation from the client. This directs the user to an external URL for out-of-band interactions that must not pass through the MCP client. Use this for: - Collecting sensitive credentials (API keys, passwords) - OAuth authorization flows with third-party services - Payment and subscription flows - Any interaction where data should not pass through the LLM context The response indicates whether the user consented to navigate to the URL. The actual interaction happens out-of-band. When the elicitation completes, call `ctx.session.send_elicit_complete(elicitation_id)` to notify the client. 
Args: message: Human-readable explanation of why the interaction is needed url: The URL the user should navigate to elicitation_id: Unique identifier for tracking this elicitation Returns: UrlElicitationResult indicating accept, decline, or cancel """ return await elicit_url( session=self.request_context.session, message=message, url=url, elicitation_id=elicitation_id, related_request_id=self.request_id, ) async def log( self, level: Literal["debug", "info", "warning", "error"], message: str, *, logger_name: str | None = None, extra: dict[str, Any] | None = None, ) -> None: """Send a log message to the client. Args: level: Log level (debug, info, warning, error) message: Log message logger_name: Optional logger name extra: Optional dictionary with additional structured data to include """ if extra: log_data = {"message": message, **extra} else: log_data = message await self.request_context.session.send_log_message( level=level, data=log_data, logger=logger_name, related_request_id=self.request_id, ) @property def client_id(self) -> str | None: """Get the client ID if available.""" return self.request_context.meta.get("client_id") if self.request_context.meta else None # pragma: no cover @property def request_id(self) -> str: """Get the unique ID for this request.""" return str(self.request_context.request_id) @property def session(self): """Access to the underlying session for advanced usage.""" return self.request_context.session async def close_sse_stream(self) -> None: """Close the SSE stream to trigger client reconnection. This method closes the HTTP connection for the current request, triggering client reconnection. Events continue to be stored in the event store and will be replayed when the client reconnects with Last-Event-ID. Use this to implement polling behavior during long-running operations - the client will reconnect after the retry interval specified in the priming event. Note: This is a no-op if not using StreamableHTTP transport with event_store. 
The callback is only available when event_store is configured. """ if self._request_context and self._request_context.close_sse_stream: # pragma: no cover await self._request_context.close_sse_stream() async def close_standalone_sse_stream(self) -> None: """Close the standalone GET SSE stream to trigger client reconnection. This method closes the HTTP connection for the standalone GET stream used for unsolicited server-to-client notifications. The client SHOULD reconnect with Last-Event-ID to resume receiving notifications. Note: This is a no-op if not using StreamableHTTP transport with event_store. Currently, client reconnection for standalone GET streams is NOT implemented - this is a known gap. """ if self._request_context and self._request_context.close_standalone_sse_stream: # pragma: no cover await self._request_context.close_standalone_sse_stream() # Convenience methods for common log levels async def debug(self, message: str, *, logger_name: str | None = None, extra: dict[str, Any] | None = None) -> None: """Send a debug log message.""" await self.log("debug", message, logger_name=logger_name, extra=extra) async def info(self, message: str, *, logger_name: str | None = None, extra: dict[str, Any] | None = None) -> None: """Send an info log message.""" await self.log("info", message, logger_name=logger_name, extra=extra) async def warning( self, message: str, *, logger_name: str | None = None, extra: dict[str, Any] | None = None ) -> None: """Send a warning log message.""" await self.log("warning", message, logger_name=logger_name, extra=extra) async def error(self, message: str, *, logger_name: str | None = None, extra: dict[str, Any] | None = None) -> None: """Send an error log message.""" await self.log("error", message, logger_name=logger_name, extra=extra)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/mcpserver/context.py", "license": "MIT License", "lines": 226, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:tests/server/mcpserver/tools/test_base.py
from mcp.server.mcpserver import Context from mcp.server.mcpserver.tools.base import Tool def test_context_detected_in_union_annotation(): def my_tool(x: int, ctx: Context | None) -> str: raise NotImplementedError tool = Tool.from_function(my_tool) assert tool.context_kwarg == "ctx"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/server/mcpserver/tools/test_base.py", "license": "MIT License", "lines": 7, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/server/auth/test_routes.py
import pytest from pydantic import AnyHttpUrl from mcp.server.auth.routes import validate_issuer_url def test_validate_issuer_url_https_allowed(): validate_issuer_url(AnyHttpUrl("https://example.com/path")) def test_validate_issuer_url_http_localhost_allowed(): validate_issuer_url(AnyHttpUrl("http://localhost:8080/path")) def test_validate_issuer_url_http_127_0_0_1_allowed(): validate_issuer_url(AnyHttpUrl("http://127.0.0.1:8080/path")) def test_validate_issuer_url_http_ipv6_loopback_allowed(): validate_issuer_url(AnyHttpUrl("http://[::1]:8080/path")) def test_validate_issuer_url_http_non_loopback_rejected(): with pytest.raises(ValueError, match="Issuer URL must be HTTPS"): validate_issuer_url(AnyHttpUrl("http://evil.com/path")) def test_validate_issuer_url_http_127_prefix_domain_rejected(): """A domain like 127.0.0.1.evil.com is not loopback.""" with pytest.raises(ValueError, match="Issuer URL must be HTTPS"): validate_issuer_url(AnyHttpUrl("http://127.0.0.1.evil.com/path")) def test_validate_issuer_url_http_127_prefix_subdomain_rejected(): """A domain like 127.0.0.1something.example.com is not loopback.""" with pytest.raises(ValueError, match="Issuer URL must be HTTPS"): validate_issuer_url(AnyHttpUrl("http://127.0.0.1something.example.com/path")) def test_validate_issuer_url_fragment_rejected(): with pytest.raises(ValueError, match="fragment"): validate_issuer_url(AnyHttpUrl("https://example.com/path#frag")) def test_validate_issuer_url_query_rejected(): with pytest.raises(ValueError, match="query"): validate_issuer_url(AnyHttpUrl("https://example.com/path?q=1"))
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/server/auth/test_routes.py", "license": "MIT License", "lines": 28, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:src/mcp/client/context.py
"""Request context for MCP client handlers.""" from mcp.client.session import ClientSession from mcp.shared._context import RequestContext ClientRequestContext = RequestContext[ClientSession] """Context for handling incoming requests in a client session. This context is passed to client-side callbacks (sampling, elicitation, list_roots) when the server sends requests to the client. Attributes: request_id: The unique identifier for this request. meta: Optional metadata associated with the request. session: The client session handling this request. """
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/client/context.py", "license": "MIT License", "lines": 12, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/server/context.py
from __future__ import annotations from dataclasses import dataclass from typing import Any, Generic from typing_extensions import TypeVar from mcp.server.experimental.request_context import Experimental from mcp.server.session import ServerSession from mcp.shared._context import RequestContext from mcp.shared.message import CloseSSEStreamCallback LifespanContextT = TypeVar("LifespanContextT", default=dict[str, Any]) RequestT = TypeVar("RequestT", default=Any) @dataclass(kw_only=True) class ServerRequestContext(RequestContext[ServerSession], Generic[LifespanContextT, RequestT]): lifespan_context: LifespanContextT experimental: Experimental request: RequestT | None = None close_sse_stream: CloseSSEStreamCallback | None = None close_standalone_sse_stream: CloseSSEStreamCallback | None = None
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/context.py", "license": "MIT License", "lines": 17, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:src/mcp/client/_transport.py
"""Transport protocol for MCP clients.""" from __future__ import annotations from contextlib import AbstractAsyncContextManager from typing import Protocol from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from mcp.shared.message import SessionMessage TransportStreams = tuple[MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage]] class Transport(AbstractAsyncContextManager[TransportStreams], Protocol): """Protocol for MCP transports. A transport is an async context manager that yields read and write streams for bidirectional communication with an MCP server. """
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/client/_transport.py", "license": "MIT License", "lines": 12, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/server/mcpserver/exceptions.py
"""Custom exceptions for MCPServer.""" class MCPServerError(Exception): """Base error for MCPServer.""" class ValidationError(MCPServerError): """Error in validating parameters or return values.""" class ResourceError(MCPServerError): """Error in resource operations.""" class ToolError(MCPServerError): """Error in tool operations.""" class InvalidSignature(Exception): """Invalid signature for use with MCPServer."""
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/mcpserver/exceptions.py", "license": "MIT License", "lines": 11, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:.github/actions/conformance/client.py
"""MCP unified conformance test client. This client is designed to work with the @modelcontextprotocol/conformance npm package. It handles all conformance test scenarios via environment variables and CLI arguments. Contract: - MCP_CONFORMANCE_SCENARIO env var -> scenario name - MCP_CONFORMANCE_CONTEXT env var -> optional JSON (for client-credentials scenarios) - Server URL as last CLI argument (sys.argv[1]) - Must exit 0 within 30 seconds Scenarios: initialize - Connect, initialize, list tools, close tools_call - Connect, call add_numbers(a=5, b=3), close sse-retry - Connect, call test_reconnection, close elicitation-sep1034-client-defaults - Elicitation with default accept callback auth/client-credentials-jwt - Client credentials with private_key_jwt auth/client-credentials-basic - Client credentials with client_secret_basic auth/* - Authorization code flow (default for auth scenarios) """ import asyncio import json import logging import os import sys from collections.abc import Callable, Coroutine from typing import Any, cast from urllib.parse import parse_qs, urlparse import httpx from pydantic import AnyUrl from mcp import ClientSession, types from mcp.client.auth import OAuthClientProvider, TokenStorage from mcp.client.auth.extensions.client_credentials import ( ClientCredentialsOAuthProvider, PrivateKeyJWTOAuthProvider, SignedJWTParameters, ) from mcp.client.context import ClientRequestContext from mcp.client.streamable_http import streamable_http_client from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken # Set up logging to stderr (stdout is for conformance test output) logging.basicConfig( level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", stream=sys.stderr, ) logger = logging.getLogger(__name__) # Type for async scenario handler functions ScenarioHandler = Callable[[str], Coroutine[Any, None, None]] # Registry of scenario handlers HANDLERS: dict[str, ScenarioHandler] = {} def 
register(name: str) -> Callable[[ScenarioHandler], ScenarioHandler]: """Register a scenario handler.""" def decorator(fn: ScenarioHandler) -> ScenarioHandler: HANDLERS[name] = fn return fn return decorator def get_conformance_context() -> dict[str, Any]: """Load conformance test context from MCP_CONFORMANCE_CONTEXT environment variable.""" context_json = os.environ.get("MCP_CONFORMANCE_CONTEXT") if not context_json: raise RuntimeError( "MCP_CONFORMANCE_CONTEXT environment variable not set. " "Expected JSON with client_id, client_secret, and/or private_key_pem." ) try: return json.loads(context_json) except json.JSONDecodeError as e: raise RuntimeError(f"Failed to parse MCP_CONFORMANCE_CONTEXT as JSON: {e}") from e class InMemoryTokenStorage(TokenStorage): """Simple in-memory token storage for conformance testing.""" def __init__(self) -> None: self._tokens: OAuthToken | None = None self._client_info: OAuthClientInformationFull | None = None async def get_tokens(self) -> OAuthToken | None: return self._tokens async def set_tokens(self, tokens: OAuthToken) -> None: self._tokens = tokens async def get_client_info(self) -> OAuthClientInformationFull | None: return self._client_info async def set_client_info(self, client_info: OAuthClientInformationFull) -> None: self._client_info = client_info class ConformanceOAuthCallbackHandler: """OAuth callback handler that automatically fetches the authorization URL and extracts the auth code, without requiring user interaction. 
""" def __init__(self) -> None: self._auth_code: str | None = None self._state: str | None = None async def handle_redirect(self, authorization_url: str) -> None: """Fetch the authorization URL and extract the auth code from the redirect.""" logger.debug(f"Fetching authorization URL: {authorization_url}") async with httpx.AsyncClient() as client: response = await client.get( authorization_url, follow_redirects=False, ) if response.status_code in (301, 302, 303, 307, 308): location = cast(str, response.headers.get("location")) if location: redirect_url = urlparse(location) query_params: dict[str, list[str]] = parse_qs(redirect_url.query) if "code" in query_params: self._auth_code = query_params["code"][0] state_values = query_params.get("state") self._state = state_values[0] if state_values else None logger.debug(f"Got auth code from redirect: {self._auth_code[:10]}...") return else: raise RuntimeError(f"No auth code in redirect URL: {location}") else: raise RuntimeError(f"No redirect location received from {authorization_url}") else: raise RuntimeError(f"Expected redirect response, got {response.status_code} from {authorization_url}") async def handle_callback(self) -> tuple[str, str | None]: """Return the captured auth code and state.""" if self._auth_code is None: raise RuntimeError("No authorization code available - was handle_redirect called?") auth_code = self._auth_code state = self._state self._auth_code = None self._state = None return auth_code, state # --- Scenario Handlers --- @register("initialize") async def run_initialize(server_url: str) -> None: """Connect, initialize, list tools, close.""" async with streamable_http_client(url=server_url) as (read_stream, write_stream): async with ClientSession(read_stream, write_stream) as session: await session.initialize() logger.debug("Initialized successfully") await session.list_tools() logger.debug("Listed tools successfully") @register("tools_call") async def run_tools_call(server_url: str) -> None: 
"""Connect, initialize, list tools, call add_numbers(a=5, b=3), close.""" async with streamable_http_client(url=server_url) as (read_stream, write_stream): async with ClientSession(read_stream, write_stream) as session: await session.initialize() await session.list_tools() result = await session.call_tool("add_numbers", {"a": 5, "b": 3}) logger.debug(f"add_numbers result: {result}") @register("sse-retry") async def run_sse_retry(server_url: str) -> None: """Connect, initialize, list tools, call test_reconnection, close.""" async with streamable_http_client(url=server_url) as (read_stream, write_stream): async with ClientSession(read_stream, write_stream) as session: await session.initialize() await session.list_tools() result = await session.call_tool("test_reconnection", {}) logger.debug(f"test_reconnection result: {result}") async def default_elicitation_callback( context: ClientRequestContext, params: types.ElicitRequestParams, ) -> types.ElicitResult | types.ErrorData: """Accept elicitation and apply defaults from the schema (SEP-1034).""" content: dict[str, str | int | float | bool | list[str] | None] = {} # For form mode, extract defaults from the requested_schema if isinstance(params, types.ElicitRequestFormParams): schema = params.requested_schema logger.debug(f"Elicitation schema: {schema}") properties = schema.get("properties", {}) for prop_name, prop_schema in properties.items(): if "default" in prop_schema: content[prop_name] = prop_schema["default"] logger.debug(f"Applied defaults: {content}") return types.ElicitResult(action="accept", content=content) @register("elicitation-sep1034-client-defaults") async def run_elicitation_defaults(server_url: str) -> None: """Connect with elicitation callback that applies schema defaults.""" async with streamable_http_client(url=server_url) as (read_stream, write_stream): async with ClientSession( read_stream, write_stream, elicitation_callback=default_elicitation_callback ) as session: await session.initialize() 
await session.list_tools() result = await session.call_tool("test_client_elicitation_defaults", {}) logger.debug(f"test_client_elicitation_defaults result: {result}") @register("auth/client-credentials-jwt") async def run_client_credentials_jwt(server_url: str) -> None: """Client credentials flow with private_key_jwt authentication.""" context = get_conformance_context() client_id = context.get("client_id") private_key_pem = context.get("private_key_pem") signing_algorithm = context.get("signing_algorithm", "ES256") if not client_id: raise RuntimeError("MCP_CONFORMANCE_CONTEXT missing 'client_id'") if not private_key_pem: raise RuntimeError("MCP_CONFORMANCE_CONTEXT missing 'private_key_pem'") jwt_params = SignedJWTParameters( issuer=client_id, subject=client_id, signing_algorithm=signing_algorithm, signing_key=private_key_pem, ) oauth_auth = PrivateKeyJWTOAuthProvider( server_url=server_url, storage=InMemoryTokenStorage(), client_id=client_id, assertion_provider=jwt_params.create_assertion_provider(), ) await _run_auth_session(server_url, oauth_auth) @register("auth/client-credentials-basic") async def run_client_credentials_basic(server_url: str) -> None: """Client credentials flow with client_secret_basic authentication.""" context = get_conformance_context() client_id = context.get("client_id") client_secret = context.get("client_secret") if not client_id: raise RuntimeError("MCP_CONFORMANCE_CONTEXT missing 'client_id'") if not client_secret: raise RuntimeError("MCP_CONFORMANCE_CONTEXT missing 'client_secret'") oauth_auth = ClientCredentialsOAuthProvider( server_url=server_url, storage=InMemoryTokenStorage(), client_id=client_id, client_secret=client_secret, token_endpoint_auth_method="client_secret_basic", ) await _run_auth_session(server_url, oauth_auth) async def run_auth_code_client(server_url: str) -> None: """Authorization code flow (default for auth/* scenarios).""" callback_handler = ConformanceOAuthCallbackHandler() storage = InMemoryTokenStorage() # 
Check for pre-registered client credentials from context context_json = os.environ.get("MCP_CONFORMANCE_CONTEXT") if context_json: try: context = json.loads(context_json) client_id = context.get("client_id") client_secret = context.get("client_secret") if client_id: await storage.set_client_info( OAuthClientInformationFull( client_id=client_id, client_secret=client_secret, redirect_uris=[AnyUrl("http://localhost:3000/callback")], token_endpoint_auth_method="client_secret_basic" if client_secret else "none", ) ) logger.debug(f"Pre-loaded client credentials: client_id={client_id}") except json.JSONDecodeError: logger.exception("Failed to parse MCP_CONFORMANCE_CONTEXT") oauth_auth = OAuthClientProvider( server_url=server_url, client_metadata=OAuthClientMetadata( client_name="conformance-client", redirect_uris=[AnyUrl("http://localhost:3000/callback")], grant_types=["authorization_code", "refresh_token"], response_types=["code"], ), storage=storage, redirect_handler=callback_handler.handle_redirect, callback_handler=callback_handler.handle_callback, client_metadata_url="https://conformance-test.local/client-metadata.json", ) await _run_auth_session(server_url, oauth_auth) async def _run_auth_session(server_url: str, oauth_auth: OAuthClientProvider) -> None: """Common session logic for all OAuth flows.""" client = httpx.AsyncClient(auth=oauth_auth, timeout=30.0) async with streamable_http_client(url=server_url, http_client=client) as (read_stream, write_stream): async with ClientSession( read_stream, write_stream, elicitation_callback=default_elicitation_callback ) as session: await session.initialize() logger.debug("Initialized successfully") tools_result = await session.list_tools() logger.debug(f"Listed tools: {[t.name for t in tools_result.tools]}") # Call the first available tool (different tests have different tools) if tools_result.tools: tool_name = tools_result.tools[0].name try: result = await session.call_tool(tool_name, {}) logger.debug(f"Called {tool_name}, 
result: {result}") except Exception as e: logger.debug(f"Tool call result/error: {e}") logger.debug("Connection closed successfully") def main() -> None: """Main entry point for the conformance client.""" if len(sys.argv) < 2: print(f"Usage: {sys.argv[0]} <server-url>", file=sys.stderr) sys.exit(1) server_url = sys.argv[1] scenario = os.environ.get("MCP_CONFORMANCE_SCENARIO") if scenario: logger.debug(f"Running explicit scenario '{scenario}' against {server_url}") handler = HANDLERS.get(scenario) if handler: asyncio.run(handler(server_url)) elif scenario.startswith("auth/"): asyncio.run(run_auth_code_client(server_url)) else: print(f"Unknown scenario: {scenario}", file=sys.stderr) sys.exit(1) else: logger.debug(f"Running default auth flow against {server_url}") asyncio.run(run_auth_code_client(server_url)) if __name__ == "__main__": main()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": ".github/actions/conformance/client.py", "license": "MIT License", "lines": 296, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/types/jsonrpc.py
"""This module follows the JSON-RPC 2.0 specification: https://www.jsonrpc.org/specification.""" from __future__ import annotations from typing import Annotated, Any, Literal from pydantic import BaseModel, Field, TypeAdapter RequestId = Annotated[int, Field(strict=True)] | str """The ID of a JSON-RPC request.""" class JSONRPCRequest(BaseModel): """A JSON-RPC request that expects a response.""" jsonrpc: Literal["2.0"] id: RequestId method: str params: dict[str, Any] | None = None class JSONRPCNotification(BaseModel): """A JSON-RPC notification which does not expect a response.""" jsonrpc: Literal["2.0"] method: str params: dict[str, Any] | None = None # TODO(Marcelo): This is actually not correct. A JSONRPCResponse is the union of a successful response and an error. class JSONRPCResponse(BaseModel): """A successful (non-error) response to a request.""" jsonrpc: Literal["2.0"] id: RequestId result: dict[str, Any] # MCP-specific error codes in the range [-32000, -32099] URL_ELICITATION_REQUIRED = -32042 """Error code indicating that a URL mode elicitation is required before the request can be processed.""" # SDK error codes CONNECTION_CLOSED = -32000 REQUEST_TIMEOUT = -32001 # Standard JSON-RPC error codes PARSE_ERROR = -32700 INVALID_REQUEST = -32600 METHOD_NOT_FOUND = -32601 INVALID_PARAMS = -32602 INTERNAL_ERROR = -32603 class ErrorData(BaseModel): """Error information for JSON-RPC error responses.""" code: int """The error type that occurred.""" message: str """A short description of the error. The message SHOULD be limited to a concise single sentence. """ data: Any = None """Additional information about the error. The value of this member is defined by the sender (e.g. detailed error information, nested errors, etc.). 
""" class JSONRPCError(BaseModel): """A response to a request that indicates an error occurred.""" jsonrpc: Literal["2.0"] id: RequestId | None error: ErrorData JSONRPCMessage = JSONRPCRequest | JSONRPCNotification | JSONRPCResponse | JSONRPCError jsonrpc_message_adapter: TypeAdapter[JSONRPCMessage] = TypeAdapter(JSONRPCMessage)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/types/jsonrpc.py", "license": "MIT License", "lines": 54, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:src/mcp/client/_memory.py
"""In-memory transport for testing MCP servers without network overhead.""" from __future__ import annotations from collections.abc import AsyncIterator from contextlib import AbstractAsyncContextManager, asynccontextmanager from types import TracebackType from typing import Any import anyio from mcp.client._transport import TransportStreams from mcp.server import Server from mcp.server.mcpserver import MCPServer from mcp.shared.memory import create_client_server_memory_streams class InMemoryTransport: """In-memory transport for testing MCP servers without network overhead. This transport starts the server in a background task and provides streams for client-side communication. The server is automatically stopped when the context manager exits. """ def __init__(self, server: Server[Any] | MCPServer, *, raise_exceptions: bool = False) -> None: """Initialize the in-memory transport. Args: server: The MCP server to connect to (Server or MCPServer instance) raise_exceptions: Whether to raise exceptions from the server """ self._server = server self._raise_exceptions = raise_exceptions self._cm: AbstractAsyncContextManager[TransportStreams] | None = None @asynccontextmanager async def _connect(self) -> AsyncIterator[TransportStreams]: """Connect to the server and yield streams for communication.""" # Unwrap MCPServer to get underlying Server if isinstance(self._server, MCPServer): # TODO(Marcelo): Make `lowlevel_server` public. 
actual_server: Server[Any] = self._server._lowlevel_server # type: ignore[reportPrivateUsage] else: actual_server = self._server async with create_client_server_memory_streams() as (client_streams, server_streams): client_read, client_write = client_streams server_read, server_write = server_streams async with anyio.create_task_group() as tg: # Start server in background tg.start_soon( lambda: actual_server.run( server_read, server_write, actual_server.create_initialization_options(), raise_exceptions=self._raise_exceptions, ) ) try: yield client_read, client_write finally: tg.cancel_scope.cancel() async def __aenter__(self) -> TransportStreams: """Connect to the server and return streams for communication.""" self._cm = self._connect() return await self._cm.__aenter__() async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: """Close the transport and stop the server.""" if self._cm is not None: # pragma: no branch await self._cm.__aexit__(exc_type, exc_val, exc_tb) self._cm = None
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/client/_memory.py", "license": "MIT License", "lines": 63, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/client/client.py
"""Unified MCP Client that wraps ClientSession with transport management.""" from __future__ import annotations from contextlib import AsyncExitStack from dataclasses import KW_ONLY, dataclass, field from typing import Any from mcp.client._memory import InMemoryTransport from mcp.client._transport import Transport from mcp.client.session import ClientSession, ElicitationFnT, ListRootsFnT, LoggingFnT, MessageHandlerFnT, SamplingFnT from mcp.client.streamable_http import streamable_http_client from mcp.server import Server from mcp.server.mcpserver import MCPServer from mcp.shared.session import ProgressFnT from mcp.types import ( CallToolResult, CompleteResult, EmptyResult, GetPromptResult, Implementation, ListPromptsResult, ListResourcesResult, ListResourceTemplatesResult, ListToolsResult, LoggingLevel, PaginatedRequestParams, PromptReference, ReadResourceResult, RequestParamsMeta, ResourceTemplateReference, ServerCapabilities, ) @dataclass class Client: """A high-level MCP client for connecting to MCP servers. Supports in-memory transport for testing (pass a Server or MCPServer instance), Streamable HTTP transport (pass a URL string), or a custom Transport instance. Example: ```python from mcp.client import Client from mcp.server.mcpserver import MCPServer server = MCPServer("test") @server.tool() def add(a: int, b: int) -> int: return a + b async def main(): async with Client(server) as client: result = await client.call_tool("add", {"a": 1, "b": 2}) asyncio.run(main()) ``` """ server: Server[Any] | MCPServer | Transport | str """The MCP server to connect to. If the server is a `Server` or `MCPServer` instance, it will be wrapped in an `InMemoryTransport`. If the server is a URL string, it will be used as the URL for a `streamable_http_client` transport. If the server is a `Transport` instance, it will be used directly. """ _: KW_ONLY # TODO(Marcelo): When do `raise_exceptions=True` actually raises? 
raise_exceptions: bool = False """Whether to raise exceptions from the server.""" read_timeout_seconds: float | None = None """Timeout for read operations.""" sampling_callback: SamplingFnT | None = None """Callback for handling sampling requests.""" list_roots_callback: ListRootsFnT | None = None """Callback for handling list roots requests.""" logging_callback: LoggingFnT | None = None """Callback for handling logging notifications.""" # TODO(Marcelo): Why do we have both "callback" and "handler"? message_handler: MessageHandlerFnT | None = None """Callback for handling raw messages.""" client_info: Implementation | None = None """Client implementation info to send to server.""" elicitation_callback: ElicitationFnT | None = None """Callback for handling elicitation requests.""" _session: ClientSession | None = field(init=False, default=None) _exit_stack: AsyncExitStack | None = field(init=False, default=None) _transport: Transport = field(init=False) def __post_init__(self) -> None: if isinstance(self.server, Server | MCPServer): self._transport = InMemoryTransport(self.server, raise_exceptions=self.raise_exceptions) elif isinstance(self.server, str): self._transport = streamable_http_client(self.server) else: self._transport = self.server async def __aenter__(self) -> Client: """Enter the async context manager.""" if self._session is not None: raise RuntimeError("Client is already entered; cannot reenter") async with AsyncExitStack() as exit_stack: read_stream, write_stream = await exit_stack.enter_async_context(self._transport) self._session = await exit_stack.enter_async_context( ClientSession( read_stream=read_stream, write_stream=write_stream, read_timeout_seconds=self.read_timeout_seconds, sampling_callback=self.sampling_callback, list_roots_callback=self.list_roots_callback, logging_callback=self.logging_callback, message_handler=self.message_handler, client_info=self.client_info, elicitation_callback=self.elicitation_callback, ) ) await 
self._session.initialize() # Transfer ownership to self for __aexit__ to handle self._exit_stack = exit_stack.pop_all() return self async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any) -> None: """Exit the async context manager.""" if self._exit_stack: # pragma: no branch await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb) self._session = None @property def session(self) -> ClientSession: """Get the underlying ClientSession. This provides access to the full ClientSession API for advanced use cases. Raises: RuntimeError: If accessed before entering the context manager. """ if self._session is None: raise RuntimeError("Client must be used within an async context manager") return self._session @property def server_capabilities(self) -> ServerCapabilities | None: """The server capabilities received during initialization, or None if not yet initialized.""" return self.session.get_server_capabilities() async def send_ping(self, *, meta: RequestParamsMeta | None = None) -> EmptyResult: """Send a ping request to the server.""" return await self.session.send_ping(meta=meta) async def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None, message: str | None = None, ) -> None: """Send a progress notification to the server.""" await self.session.send_progress_notification( progress_token=progress_token, progress=progress, total=total, message=message, ) async def set_logging_level(self, level: LoggingLevel, *, meta: RequestParamsMeta | None = None) -> EmptyResult: """Set the logging level on the server.""" return await self.session.set_logging_level(level=level, meta=meta) async def list_resources( self, *, cursor: str | None = None, meta: RequestParamsMeta | None = None, ) -> ListResourcesResult: """List available resources from the server.""" return await self.session.list_resources(params=PaginatedRequestParams(cursor=cursor, _meta=meta)) async def 
list_resource_templates( self, *, cursor: str | None = None, meta: RequestParamsMeta | None = None, ) -> ListResourceTemplatesResult: """List available resource templates from the server.""" return await self.session.list_resource_templates(params=PaginatedRequestParams(cursor=cursor, _meta=meta)) async def read_resource(self, uri: str, *, meta: RequestParamsMeta | None = None) -> ReadResourceResult: """Read a resource from the server. Args: uri: The URI of the resource to read. meta: Additional metadata for the request. Returns: The resource content. """ return await self.session.read_resource(uri, meta=meta) async def subscribe_resource(self, uri: str, *, meta: RequestParamsMeta | None = None) -> EmptyResult: """Subscribe to resource updates.""" return await self.session.subscribe_resource(uri, meta=meta) async def unsubscribe_resource(self, uri: str, *, meta: RequestParamsMeta | None = None) -> EmptyResult: """Unsubscribe from resource updates.""" return await self.session.unsubscribe_resource(uri, meta=meta) async def call_tool( self, name: str, arguments: dict[str, Any] | None = None, read_timeout_seconds: float | None = None, progress_callback: ProgressFnT | None = None, *, meta: RequestParamsMeta | None = None, ) -> CallToolResult: """Call a tool on the server. Args: name: The name of the tool to call arguments: Arguments to pass to the tool read_timeout_seconds: Timeout for the tool call progress_callback: Callback for progress updates meta: Additional metadata for the request Returns: The tool result. 
""" return await self.session.call_tool( name=name, arguments=arguments, read_timeout_seconds=read_timeout_seconds, progress_callback=progress_callback, meta=meta, ) async def list_prompts( self, *, cursor: str | None = None, meta: RequestParamsMeta | None = None, ) -> ListPromptsResult: """List available prompts from the server.""" return await self.session.list_prompts(params=PaginatedRequestParams(cursor=cursor, _meta=meta)) async def get_prompt( self, name: str, arguments: dict[str, str] | None = None, *, meta: RequestParamsMeta | None = None ) -> GetPromptResult: """Get a prompt from the server. Args: name: The name of the prompt arguments: Arguments to pass to the prompt meta: Additional metadata for the request Returns: The prompt content. """ return await self.session.get_prompt(name=name, arguments=arguments, meta=meta) async def complete( self, ref: ResourceTemplateReference | PromptReference, argument: dict[str, str], context_arguments: dict[str, str] | None = None, ) -> CompleteResult: """Get completions for a prompt or resource template argument. Args: ref: Reference to the prompt or resource template argument: The argument to complete context_arguments: Additional context arguments Returns: Completion suggestions. """ return await self.session.complete(ref=ref, argument=argument, context_arguments=context_arguments) async def list_tools(self, *, cursor: str | None = None, meta: RequestParamsMeta | None = None) -> ListToolsResult: """List available tools from the server.""" return await self.session.list_tools(params=PaginatedRequestParams(cursor=cursor, _meta=meta)) async def send_roots_list_changed(self) -> None: """Send a notification that the roots list has changed.""" # TODO(Marcelo): Currently, there is no way for the server to handle this. We should add support. await self.session.send_roots_list_changed() # pragma: no cover
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/client/client.py", "license": "MIT License", "lines": 245, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:tests/client/test_client.py
"""Tests for the unified Client class.""" from __future__ import annotations from unittest.mock import patch import anyio import pytest from inline_snapshot import snapshot from mcp import types from mcp.client._memory import InMemoryTransport from mcp.client.client import Client from mcp.server import Server, ServerRequestContext from mcp.server.mcpserver import MCPServer from mcp.types import ( CallToolResult, EmptyResult, GetPromptResult, ListPromptsResult, ListResourcesResult, ListResourceTemplatesResult, ListToolsResult, Prompt, PromptArgument, PromptMessage, PromptsCapability, ReadResourceResult, Resource, ResourcesCapability, ServerCapabilities, TextContent, TextResourceContents, Tool, ToolsCapability, ) pytestmark = pytest.mark.anyio @pytest.fixture def simple_server() -> Server: """Create a simple MCP server for testing.""" async def handle_list_resources( ctx: ServerRequestContext, params: types.PaginatedRequestParams | None ) -> ListResourcesResult: return ListResourcesResult( resources=[Resource(uri="memory://test", name="Test Resource", description="A test resource")] ) async def handle_subscribe_resource(ctx: ServerRequestContext, params: types.SubscribeRequestParams) -> EmptyResult: return EmptyResult() async def handle_unsubscribe_resource( ctx: ServerRequestContext, params: types.UnsubscribeRequestParams ) -> EmptyResult: return EmptyResult() async def handle_set_logging_level(ctx: ServerRequestContext, params: types.SetLevelRequestParams) -> EmptyResult: return EmptyResult() async def handle_completion(ctx: ServerRequestContext, params: types.CompleteRequestParams) -> types.CompleteResult: return types.CompleteResult(completion=types.Completion(values=[])) return Server( name="test_server", on_list_resources=handle_list_resources, on_subscribe_resource=handle_subscribe_resource, on_unsubscribe_resource=handle_unsubscribe_resource, on_set_logging_level=handle_set_logging_level, on_completion=handle_completion, ) @pytest.fixture def app() -> 
MCPServer: """Create an MCPServer server for testing.""" server = MCPServer("test") @server.tool() def greet(name: str) -> str: """Greet someone by name.""" return f"Hello, {name}!" @server.resource("test://resource") def test_resource() -> str: """A test resource.""" return "Test content" @server.prompt() def greeting_prompt(name: str) -> str: """A greeting prompt.""" return f"Please greet {name} warmly." return server async def test_client_is_initialized(app: MCPServer): """Test that the client is initialized after entering context.""" async with Client(app) as client: assert client.server_capabilities == snapshot( ServerCapabilities( experimental={}, prompts=PromptsCapability(list_changed=False), resources=ResourcesCapability(subscribe=False, list_changed=False), tools=ToolsCapability(list_changed=False), ) ) async def test_client_with_simple_server(simple_server: Server): """Test that from_server works with a basic Server instance.""" async with Client(simple_server) as client: resources = await client.list_resources() assert resources == snapshot( ListResourcesResult( resources=[Resource(name="Test Resource", uri="memory://test", description="A test resource")] ) ) async def test_client_send_ping(app: MCPServer): async with Client(app) as client: result = await client.send_ping() assert result == snapshot(EmptyResult()) async def test_client_list_tools(app: MCPServer): async with Client(app) as client: result = await client.list_tools() assert result == snapshot( ListToolsResult( tools=[ Tool( name="greet", description="Greet someone by name.", input_schema={ "properties": {"name": {"title": "Name", "type": "string"}}, "required": ["name"], "title": "greetArguments", "type": "object", }, output_schema={ "properties": {"result": {"title": "Result", "type": "string"}}, "required": ["result"], "title": "greetOutput", "type": "object", }, ) ] ) ) async def test_client_call_tool(app: MCPServer): async with Client(app) as client: result = await 
client.call_tool("greet", {"name": "World"}) assert result == snapshot( CallToolResult( content=[TextContent(text="Hello, World!")], structured_content={"result": "Hello, World!"}, ) ) async def test_read_resource(app: MCPServer): """Test reading a resource.""" async with Client(app) as client: result = await client.read_resource("test://resource") assert result == snapshot( ReadResourceResult( contents=[TextResourceContents(uri="test://resource", mime_type="text/plain", text="Test content")] ) ) async def test_get_prompt(app: MCPServer): """Test getting a prompt.""" async with Client(app) as client: result = await client.get_prompt("greeting_prompt", {"name": "Alice"}) assert result == snapshot( GetPromptResult( description="A greeting prompt.", messages=[PromptMessage(role="user", content=TextContent(text="Please greet Alice warmly."))], ) ) def test_client_session_property_before_enter(app: MCPServer): """Test that accessing session before context manager raises RuntimeError.""" client = Client(app) with pytest.raises(RuntimeError, match="Client must be used within an async context manager"): client.session async def test_client_reentry_raises_runtime_error(app: MCPServer): """Test that reentering a client raises RuntimeError.""" async with Client(app) as client: with pytest.raises(RuntimeError, match="Client is already entered"): await client.__aenter__() async def test_client_send_progress_notification(): """Test sending progress notification.""" received_from_client = None event = anyio.Event() async def handle_progress(ctx: ServerRequestContext, params: types.ProgressNotificationParams) -> None: nonlocal received_from_client received_from_client = {"progress_token": params.progress_token, "progress": params.progress} event.set() server = Server(name="test_server", on_progress=handle_progress) async with Client(server) as client: await client.send_progress_notification(progress_token="token123", progress=50.0) await event.wait() assert received_from_client == 
snapshot({"progress_token": "token123", "progress": 50.0}) async def test_client_subscribe_resource(simple_server: Server): async with Client(simple_server) as client: result = await client.subscribe_resource("memory://test") assert result == snapshot(EmptyResult()) async def test_client_unsubscribe_resource(simple_server: Server): async with Client(simple_server) as client: result = await client.unsubscribe_resource("memory://test") assert result == snapshot(EmptyResult()) async def test_client_set_logging_level(simple_server: Server): """Test setting logging level.""" async with Client(simple_server) as client: result = await client.set_logging_level("debug") assert result == snapshot(EmptyResult()) async def test_client_list_resources_with_params(app: MCPServer): """Test listing resources with params parameter.""" async with Client(app) as client: result = await client.list_resources() assert result == snapshot( ListResourcesResult( resources=[ Resource( name="test_resource", uri="test://resource", description="A test resource.", mime_type="text/plain", ) ] ) ) async def test_client_list_resource_templates(app: MCPServer): """Test listing resource templates with params parameter.""" async with Client(app) as client: result = await client.list_resource_templates() assert result == snapshot(ListResourceTemplatesResult(resource_templates=[])) async def test_list_prompts(app: MCPServer): """Test listing prompts with params parameter.""" async with Client(app) as client: result = await client.list_prompts() assert result == snapshot( ListPromptsResult( prompts=[ Prompt( name="greeting_prompt", description="A greeting prompt.", arguments=[PromptArgument(name="name", required=True)], ) ] ) ) async def test_complete_with_prompt_reference(simple_server: Server): """Test getting completions for a prompt argument.""" async with Client(simple_server) as client: ref = types.PromptReference(type="ref/prompt", name="test_prompt") result = await client.complete(ref=ref, 
argument={"name": "arg", "value": "test"}) assert result == snapshot(types.CompleteResult(completion=types.Completion(values=[]))) def test_client_with_url_initializes_streamable_http_transport(): with patch("mcp.client.client.streamable_http_client") as mock: _ = Client("http://localhost:8000/mcp") mock.assert_called_once_with("http://localhost:8000/mcp") async def test_client_uses_transport_directly(app: MCPServer): transport = InMemoryTransport(app) async with Client(transport) as client: result = await client.call_tool("greet", {"name": "Transport"}) assert result == snapshot( CallToolResult( content=[TextContent(text="Hello, Transport!")], structured_content={"result": "Hello, Transport!"}, ) )
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/client/test_client.py", "license": "MIT License", "lines": 246, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/client/transports/test_memory.py
"""Tests for InMemoryTransport.""" import pytest from mcp import Client, types from mcp.client._memory import InMemoryTransport from mcp.server import Server, ServerRequestContext from mcp.server.mcpserver import MCPServer from mcp.types import ListResourcesResult, Resource @pytest.fixture def simple_server() -> Server: """Create a simple MCP server for testing.""" async def handle_list_resources( ctx: ServerRequestContext, params: types.PaginatedRequestParams | None ) -> ListResourcesResult: # pragma: no cover return ListResourcesResult( resources=[ Resource( uri="memory://test", name="Test Resource", description="A test resource", ) ] ) return Server(name="test_server", on_list_resources=handle_list_resources) @pytest.fixture def mcpserver_server() -> MCPServer: """Create an MCPServer server for testing.""" server = MCPServer("test") @server.tool() def greet(name: str) -> str: """Greet someone by name.""" return f"Hello, {name}!" @server.resource("test://resource") def test_resource() -> str: # pragma: no cover """A test resource.""" return "Test content" return server pytestmark = pytest.mark.anyio async def test_with_server(simple_server: Server): """Test creating transport with a Server instance.""" transport = InMemoryTransport(simple_server) async with transport as (read_stream, write_stream): assert read_stream is not None assert write_stream is not None async def test_with_mcpserver(mcpserver_server: MCPServer): """Test creating transport with an MCPServer instance.""" transport = InMemoryTransport(mcpserver_server) async with transport as (read_stream, write_stream): assert read_stream is not None assert write_stream is not None async def test_server_is_running(mcpserver_server: MCPServer): """Test that the server is running and responding to requests.""" async with Client(mcpserver_server) as client: assert client.server_capabilities is not None async def test_list_tools(mcpserver_server: MCPServer): """Test listing tools through the transport.""" async 
with Client(mcpserver_server) as client: tools_result = await client.list_tools() assert len(tools_result.tools) > 0 tool_names = [t.name for t in tools_result.tools] assert "greet" in tool_names async def test_call_tool(mcpserver_server: MCPServer): """Test calling a tool through the transport.""" async with Client(mcpserver_server) as client: result = await client.call_tool("greet", {"name": "World"}) assert result is not None assert len(result.content) > 0 assert "Hello, World!" in str(result.content[0]) async def test_raise_exceptions(mcpserver_server: MCPServer): """Test that raise_exceptions parameter is passed through.""" transport = InMemoryTransport(mcpserver_server, raise_exceptions=True) async with transport as (read_stream, _write_stream): assert read_stream is not None
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/client/transports/test_memory.py", "license": "MIT License", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/issues/test_973_url_decoding.py
"""Test that URL-encoded parameters are decoded in resource templates. Regression test for https://github.com/modelcontextprotocol/python-sdk/issues/973 """ from mcp.server.mcpserver.resources import ResourceTemplate def test_template_matches_decodes_space(): """Test that %20 is decoded to space.""" def search(query: str) -> str: # pragma: no cover return f"Results for: {query}" template = ResourceTemplate.from_function( fn=search, uri_template="search://{query}", name="search", ) params = template.matches("search://hello%20world") assert params is not None assert params["query"] == "hello world" def test_template_matches_decodes_accented_characters(): """Test that %C3%A9 is decoded to e with accent.""" def search(query: str) -> str: # pragma: no cover return f"Results for: {query}" template = ResourceTemplate.from_function( fn=search, uri_template="search://{query}", name="search", ) params = template.matches("search://caf%C3%A9") assert params is not None assert params["query"] == "café" def test_template_matches_decodes_complex_phrase(): """Test complex French phrase from the original issue.""" def search(query: str) -> str: # pragma: no cover return f"Results for: {query}" template = ResourceTemplate.from_function( fn=search, uri_template="search://{query}", name="search", ) params = template.matches("search://stick%20correcteur%20teint%C3%A9%20anti-imperfections") assert params is not None assert params["query"] == "stick correcteur teinté anti-imperfections" def test_template_matches_preserves_plus_sign(): """Test that plus sign remains as plus (not converted to space). In URI encoding, %20 is space. Plus-as-space is only for application/x-www-form-urlencoded (HTML forms). 
""" def search(query: str) -> str: # pragma: no cover return f"Results for: {query}" template = ResourceTemplate.from_function( fn=search, uri_template="search://{query}", name="search", ) params = template.matches("search://hello+world") assert params is not None assert params["query"] == "hello+world"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/issues/test_973_url_decoding.py", "license": "MIT License", "lines": 55, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/issues/test_1574_resource_uri_validation.py
"""Tests for issue #1574: Python SDK incorrectly validates Resource URIs. The Python SDK previously used Pydantic's AnyUrl for URI fields, which rejected relative paths like 'users/me' that are valid according to the MCP spec and accepted by the TypeScript SDK. The fix changed URI fields to plain strings to match the spec, which defines uri fields as strings with no JSON Schema format validation. These tests verify the fix works end-to-end through the JSON-RPC protocol. """ import pytest from mcp import Client, types from mcp.server import Server, ServerRequestContext from mcp.types import ( ListResourcesResult, PaginatedRequestParams, ReadResourceRequestParams, ReadResourceResult, TextResourceContents, ) pytestmark = pytest.mark.anyio async def test_relative_uri_roundtrip(): """Relative URIs survive the full server-client JSON-RPC roundtrip. This is the critical regression test - if someone reintroduces AnyUrl, the server would fail to serialize resources with relative URIs, or the URI would be transformed during the roundtrip. 
""" async def handle_list_resources( ctx: ServerRequestContext, params: PaginatedRequestParams | None ) -> ListResourcesResult: return ListResourcesResult( resources=[ types.Resource(name="user", uri="users/me"), types.Resource(name="config", uri="./config"), types.Resource(name="parent", uri="../parent/resource"), ] ) async def handle_read_resource(ctx: ServerRequestContext, params: ReadResourceRequestParams) -> ReadResourceResult: return ReadResourceResult( contents=[TextResourceContents(uri=str(params.uri), text=f"data for {params.uri}", mime_type="text/plain")] ) server = Server("test", on_list_resources=handle_list_resources, on_read_resource=handle_read_resource) async with Client(server) as client: # List should return the exact URIs we specified resources = await client.list_resources() uri_map = {r.uri: r for r in resources.resources} assert "users/me" in uri_map, f"Expected 'users/me' in {list(uri_map.keys())}" assert "./config" in uri_map, f"Expected './config' in {list(uri_map.keys())}" assert "../parent/resource" in uri_map, f"Expected '../parent/resource' in {list(uri_map.keys())}" # Read should work with each relative URI and preserve it in the response for uri_str in ["users/me", "./config", "../parent/resource"]: result = await client.read_resource(uri_str) assert len(result.contents) == 1 assert result.contents[0].uri == uri_str async def test_custom_scheme_uri_roundtrip(): """Custom scheme URIs work through the protocol. Some MCP servers use custom schemes like "custom://resource". These should work end-to-end. 
""" async def handle_list_resources( ctx: ServerRequestContext, params: PaginatedRequestParams | None ) -> ListResourcesResult: return ListResourcesResult( resources=[ types.Resource(name="custom", uri="custom://my-resource"), types.Resource(name="file", uri="file:///path/to/file"), ] ) async def handle_read_resource(ctx: ServerRequestContext, params: ReadResourceRequestParams) -> ReadResourceResult: return ReadResourceResult( contents=[TextResourceContents(uri=str(params.uri), text="data", mime_type="text/plain")] ) server = Server("test", on_list_resources=handle_list_resources, on_read_resource=handle_read_resource) async with Client(server) as client: resources = await client.list_resources() uri_map = {r.uri: r for r in resources.resources} assert "custom://my-resource" in uri_map assert "file:///path/to/file" in uri_map # Read with custom scheme result = await client.read_resource("custom://my-resource") assert len(result.contents) == 1 def test_uri_json_roundtrip_preserves_value(): """URI is preserved exactly through JSON serialization. This catches any Pydantic validation or normalization that would alter the URI during the JSON-RPC message flow. 
""" test_uris = [ "users/me", "custom://resource", "./relative", "../parent", "file:///absolute/path", "https://example.com/path", ] for uri_str in test_uris: resource = types.Resource(name="test", uri=uri_str) json_data = resource.model_dump(mode="json") restored = types.Resource.model_validate(json_data) assert restored.uri == uri_str, f"URI mutated: {uri_str} -> {restored.uri}" def test_resource_contents_uri_json_roundtrip(): """TextResourceContents URI is preserved through JSON serialization.""" test_uris = ["users/me", "./relative", "custom://resource"] for uri_str in test_uris: contents = types.TextResourceContents( uri=uri_str, text="data", mime_type="text/plain", ) json_data = contents.model_dump(mode="json") restored = types.TextResourceContents.model_validate(json_data) assert restored.uri == uri_str, f"URI mutated: {uri_str} -> {restored.uri}"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/issues/test_1574_resource_uri_validation.py", "license": "MIT License", "lines": 109, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/server/lowlevel/test_helper_types.py
"""Test helper_types.py meta field. These tests verify the changes made to helper_types.py:11 where we added: meta: dict[str, Any] | None = field(default=None) ReadResourceContents is the return type for resource read handlers. It's used internally by the low-level server to package resource content before sending it over the MCP protocol. """ from mcp.server.lowlevel.helper_types import ReadResourceContents def test_read_resource_contents_with_metadata(): """Test that ReadResourceContents accepts meta parameter. ReadResourceContents is an internal helper type used by the low-level MCP server. When a resource is read, the server creates a ReadResourceContents instance that contains the content, mime type, and now metadata. The low-level server then extracts the meta field and includes it in the protocol response as _meta. """ # Bridge between Resource.meta and MCP protocol _meta field (helper_types.py:11) metadata = {"version": "1.0", "cached": True} contents = ReadResourceContents( content="test content", mime_type="text/plain", meta=metadata, ) assert contents.meta is not None assert contents.meta == metadata assert contents.meta["version"] == "1.0" assert contents.meta["cached"] is True def test_read_resource_contents_without_metadata(): """Test that ReadResourceContents meta defaults to None.""" # Ensures backward compatibility - meta defaults to None, _meta omitted from protocol (helper_types.py:11) contents = ReadResourceContents( content="test content", mime_type="text/plain", ) assert contents.meta is None def test_read_resource_contents_with_bytes(): """Test that ReadResourceContents works with bytes content and meta.""" # Verifies meta works with both str and bytes content (binary resources like images, PDFs) metadata = {"encoding": "utf-8"} contents = ReadResourceContents( content=b"binary content", mime_type="application/octet-stream", meta=metadata, ) assert contents.content == b"binary content" assert contents.meta == metadata
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/server/lowlevel/test_helper_types.py", "license": "MIT License", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/server/test_stateless_mode.py
"""Tests for stateless HTTP mode limitations. Stateless HTTP mode does not support server-to-client requests because there is no persistent connection for bidirectional communication. These tests verify that appropriate errors are raised when attempting to use unsupported features. See: https://github.com/modelcontextprotocol/python-sdk/issues/1097 """ from collections.abc import AsyncGenerator from typing import Any import anyio import pytest from mcp import types from mcp.server.models import InitializationOptions from mcp.server.session import ServerSession from mcp.shared.exceptions import StatelessModeNotSupported from mcp.shared.message import SessionMessage from mcp.types import ServerCapabilities @pytest.fixture async def stateless_session() -> AsyncGenerator[ServerSession, None]: """Create a stateless ServerSession for testing.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage | Exception](1) init_options = InitializationOptions( server_name="test", server_version="0.1.0", capabilities=ServerCapabilities(), ) async with ( client_to_server_send, client_to_server_receive, server_to_client_send, server_to_client_receive, ): async with ServerSession( client_to_server_receive, server_to_client_send, init_options, stateless=True, ) as session: yield session @pytest.mark.anyio async def test_list_roots_fails_in_stateless_mode(stateless_session: ServerSession): """Test that list_roots raises StatelessModeNotSupported in stateless mode.""" with pytest.raises(StatelessModeNotSupported, match="list_roots"): await stateless_session.list_roots() @pytest.mark.anyio async def test_create_message_fails_in_stateless_mode(stateless_session: ServerSession): """Test that create_message raises StatelessModeNotSupported in stateless mode.""" with pytest.raises(StatelessModeNotSupported, match="sampling"): await 
stateless_session.create_message( messages=[ types.SamplingMessage( role="user", content=types.TextContent(type="text", text="hello"), ) ], max_tokens=100, ) @pytest.mark.anyio async def test_elicit_form_fails_in_stateless_mode(stateless_session: ServerSession): """Test that elicit_form raises StatelessModeNotSupported in stateless mode.""" with pytest.raises(StatelessModeNotSupported, match="elicitation"): await stateless_session.elicit_form( message="Please provide input", requested_schema={"type": "object", "properties": {}}, ) @pytest.mark.anyio async def test_elicit_url_fails_in_stateless_mode(stateless_session: ServerSession): """Test that elicit_url raises StatelessModeNotSupported in stateless mode.""" with pytest.raises(StatelessModeNotSupported, match="elicitation"): await stateless_session.elicit_url( message="Please authenticate", url="https://example.com/auth", elicitation_id="test-123", ) @pytest.mark.anyio async def test_elicit_deprecated_fails_in_stateless_mode(stateless_session: ServerSession): """Test that the deprecated elicit method also fails in stateless mode.""" with pytest.raises(StatelessModeNotSupported, match="elicitation"): await stateless_session.elicit( message="Please provide input", requested_schema={"type": "object", "properties": {}}, ) @pytest.mark.anyio async def test_stateless_error_message_is_actionable(stateless_session: ServerSession): """Test that the error message provides actionable guidance.""" with pytest.raises(StatelessModeNotSupported) as exc_info: await stateless_session.list_roots() error_message = str(exc_info.value) # Should mention it's stateless mode assert "stateless HTTP mode" in error_message # Should explain why it doesn't work assert "server-to-client requests" in error_message # Should tell user how to fix it assert "stateless_http=False" in error_message @pytest.mark.anyio async def test_exception_has_method_attribute(stateless_session: ServerSession): """Test that the exception has a method attribute for 
programmatic access.""" with pytest.raises(StatelessModeNotSupported) as exc_info: await stateless_session.list_roots() assert exc_info.value.method == "list_roots" @pytest.fixture async def stateful_session() -> AsyncGenerator[ServerSession, None]: """Create a stateful ServerSession for testing.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage | Exception](1) init_options = InitializationOptions( server_name="test", server_version="0.1.0", capabilities=ServerCapabilities(), ) async with ( client_to_server_send, client_to_server_receive, server_to_client_send, server_to_client_receive, ): async with ServerSession( client_to_server_receive, server_to_client_send, init_options, stateless=False, ) as session: yield session @pytest.mark.anyio async def test_stateful_mode_does_not_raise_stateless_error( stateful_session: ServerSession, monkeypatch: pytest.MonkeyPatch ): """Test that StatelessModeNotSupported is not raised in stateful mode. We mock send_request to avoid blocking on I/O while still verifying that the stateless check passes. """ send_request_called = False async def mock_send_request(*_: Any, **__: Any) -> types.ListRootsResult: nonlocal send_request_called send_request_called = True return types.ListRootsResult(roots=[]) monkeypatch.setattr(stateful_session, "send_request", mock_send_request) # This should NOT raise StatelessModeNotSupported result = await stateful_session.list_roots() assert send_request_called assert isinstance(result, types.ListRootsResult)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/server/test_stateless_mode.py", "license": "MIT License", "lines": 141, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/issues/test_1754_mime_type_parameters.py
"""Test for GitHub issue #1754: MIME type validation rejects valid RFC 2045 parameters. The MIME type validation regex was too restrictive and rejected valid MIME types with parameters like 'text/html;profile=mcp-app' which are valid per RFC 2045. """ import pytest from mcp import Client from mcp.server.mcpserver import MCPServer pytestmark = pytest.mark.anyio async def test_mime_type_with_parameters(): """Test that MIME types with parameters are accepted (RFC 2045).""" mcp = MCPServer("test") # This should NOT raise a validation error @mcp.resource("ui://widget", mime_type="text/html;profile=mcp-app") def widget() -> str: raise NotImplementedError() resources = await mcp.list_resources() assert len(resources) == 1 assert resources[0].mime_type == "text/html;profile=mcp-app" async def test_mime_type_with_parameters_and_space(): """Test MIME type with space after semicolon.""" mcp = MCPServer("test") @mcp.resource("data://json", mime_type="application/json; charset=utf-8") def data() -> str: raise NotImplementedError() resources = await mcp.list_resources() assert len(resources) == 1 assert resources[0].mime_type == "application/json; charset=utf-8" async def test_mime_type_with_multiple_parameters(): """Test MIME type with multiple parameters.""" mcp = MCPServer("test") @mcp.resource("data://multi", mime_type="text/plain; charset=utf-8; format=fixed") def data() -> str: raise NotImplementedError() resources = await mcp.list_resources() assert len(resources) == 1 assert resources[0].mime_type == "text/plain; charset=utf-8; format=fixed" async def test_mime_type_preserved_in_read_resource(): """Test that MIME type with parameters is preserved when reading resource.""" mcp = MCPServer("test") @mcp.resource("ui://my-widget", mime_type="text/html;profile=mcp-app") def my_widget() -> str: return "<html><body>Hello MCP-UI</body></html>" async with Client(mcp) as client: # Read the resource result = await client.read_resource("ui://my-widget") assert len(result.contents) 
== 1 assert result.contents[0].mime_type == "text/html;profile=mcp-app"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/issues/test_1754_mime_type_parameters.py", "license": "MIT License", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/issues/test_1363_race_condition_streamable_http.py
"""Test for issue #1363 - Race condition in StreamableHTTP transport causes ClosedResourceError. This test reproduces the race condition described in issue #1363 where MCP servers in HTTP Streamable mode experience ClosedResourceError exceptions when requests fail validation early (e.g., due to incorrect Accept headers). The race condition occurs because: 1. Transport setup creates a message_router task 2. Message router enters async for write_stream_reader loop 3. write_stream_reader calls checkpoint() in receive(), yielding control 4. Request handling processes HTTP request 5. If validation fails early, request returns immediately 6. Transport termination closes all streams including write_stream_reader 7. Message router may still be in checkpoint() yield and hasn't returned to check stream state 8. When message router resumes, it encounters a closed stream, raising ClosedResourceError """ import logging import threading from collections.abc import AsyncGenerator from contextlib import asynccontextmanager import anyio import httpx import pytest from starlette.applications import Starlette from starlette.routing import Mount from mcp.server import Server from mcp.server.streamable_http_manager import StreamableHTTPSessionManager SERVER_NAME = "test_race_condition_server" class RaceConditionTestServer(Server): def __init__(self): super().__init__(SERVER_NAME) def create_app(json_response: bool = False) -> Starlette: """Create a Starlette application for testing.""" app = RaceConditionTestServer() # Create session manager session_manager = StreamableHTTPSessionManager( app=app, json_response=json_response, stateless=True, # Use stateless mode to trigger the race condition ) # Create Starlette app with lifespan @asynccontextmanager async def lifespan(app: Starlette) -> AsyncGenerator[None, None]: async with session_manager.run(): yield routes = [ Mount("/", app=session_manager.handle_request), ] return Starlette(routes=routes, lifespan=lifespan) class 
ServerThread(threading.Thread): """Thread that runs the ASGI application lifespan in a separate event loop.""" def __init__(self, app: Starlette): super().__init__(daemon=True) self.app = app self._stop_event = threading.Event() def run(self) -> None: """Run the lifespan in a new event loop.""" # Create a new event loop for this thread async def run_lifespan(): # Use the lifespan context (always present in our tests) lifespan_context = getattr(self.app.router, "lifespan_context", None) assert lifespan_context is not None # Tests always create apps with lifespan async with lifespan_context(self.app): # Wait until stop is requested while not self._stop_event.is_set(): await anyio.sleep(0.1) anyio.run(run_lifespan) def stop(self) -> None: """Signal the thread to stop.""" self._stop_event.set() def check_logs_for_race_condition_errors(caplog: pytest.LogCaptureFixture, test_name: str) -> None: """Check logs for ClosedResourceError and other race condition errors. Args: caplog: pytest log capture fixture test_name: Name of the test for better error messages """ # Check for specific race condition errors in logs errors_found: list[str] = [] for record in caplog.records: # pragma: lax no cover message = record.getMessage() if "ClosedResourceError" in message: errors_found.append("ClosedResourceError") if "Error in message router" in message: errors_found.append("Error in message router") if "anyio.ClosedResourceError" in message: errors_found.append("anyio.ClosedResourceError") # Assert no race condition errors occurred if errors_found: # pragma: no cover error_msg = f"Test '{test_name}' found race condition errors in logs: {', '.join(set(errors_found))}\n" error_msg += "Log records:\n" for record in caplog.records: if any(err in record.getMessage() for err in ["ClosedResourceError", "Error in message router"]): error_msg += f" {record.levelname}: {record.getMessage()}\n" pytest.fail(error_msg) @pytest.mark.anyio async def test_race_condition_invalid_accept_headers(caplog: 
pytest.LogCaptureFixture): """Test the race condition with invalid Accept headers. This test reproduces the exact scenario described in issue #1363: - Send POST request with incorrect Accept headers (missing either application/json or text/event-stream) - Request fails validation early and returns quickly - This should trigger the race condition where message_router encounters ClosedResourceError """ app = create_app() server_thread = ServerThread(app) server_thread.start() try: # Give the server thread a moment to start await anyio.sleep(0.1) # Suppress WARNING logs (expected validation errors) and capture ERROR logs with caplog.at_level(logging.ERROR): # Test with missing text/event-stream in Accept header async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://testserver", timeout=5.0 ) as client: response = await client.post( "/", json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}}, headers={ "Accept": "application/json", # Missing text/event-stream "Content-Type": "application/json", }, ) # Should get 406 Not Acceptable due to missing text/event-stream assert response.status_code == 406 # Test with missing application/json in Accept header async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://testserver", timeout=5.0 ) as client: response = await client.post( "/", json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}}, headers={ "Accept": "text/event-stream", # Missing application/json "Content-Type": "application/json", }, ) # Should get 406 Not Acceptable due to missing application/json assert response.status_code == 406 # Test with completely invalid Accept header async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://testserver", timeout=5.0 ) as client: response = await client.post( "/", json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}}, headers={ "Accept": "text/plain", # Invalid Accept header 
"Content-Type": "application/json", }, ) # Should get 406 Not Acceptable assert response.status_code == 406 # Give background tasks time to complete await anyio.sleep(0.2) finally: server_thread.stop() server_thread.join(timeout=5.0) # Check logs for race condition errors check_logs_for_race_condition_errors(caplog, "test_race_condition_invalid_accept_headers") @pytest.mark.anyio async def test_race_condition_invalid_content_type(caplog: pytest.LogCaptureFixture): """Test the race condition with invalid Content-Type headers. This test reproduces the race condition scenario with Content-Type validation failure. """ app = create_app() server_thread = ServerThread(app) server_thread.start() try: # Give the server thread a moment to start await anyio.sleep(0.1) # Suppress WARNING logs (expected validation errors) and capture ERROR logs with caplog.at_level(logging.ERROR): # Test with invalid Content-Type async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://testserver", timeout=5.0 ) as client: response = await client.post( "/", json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}}, headers={ "Accept": "application/json, text/event-stream", "Content-Type": "text/plain", # Invalid Content-Type }, ) assert response.status_code == 400 # Give background tasks time to complete await anyio.sleep(0.2) finally: server_thread.stop() server_thread.join(timeout=5.0) # Check logs for race condition errors check_logs_for_race_condition_errors(caplog, "test_race_condition_invalid_content_type") @pytest.mark.anyio async def test_race_condition_message_router_async_for(caplog: pytest.LogCaptureFixture): """Uses json_response=True to trigger the `if self.is_json_response_enabled` branch, which reproduces the ClosedResourceError when message_router is suspended in async for loop while transport cleanup closes streams concurrently. 
""" app = create_app(json_response=True) server_thread = ServerThread(app) server_thread.start() try: # Give the server thread a moment to start await anyio.sleep(0.1) # Suppress WARNING logs (expected validation errors) and capture ERROR logs with caplog.at_level(logging.ERROR): # Use httpx.ASGITransport to test the ASGI app directly async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://testserver", timeout=5.0 ) as client: # Send a valid initialize request response = await client.post( "/", json={"jsonrpc": "2.0", "method": "initialize", "id": 1, "params": {}}, headers={ "Accept": "application/json, text/event-stream", "Content-Type": "application/json", }, ) # Should get a successful response assert response.status_code in (200, 201) # Give background tasks time to complete await anyio.sleep(0.2) finally: server_thread.stop() server_thread.join(timeout=5.0) # Check logs for race condition errors in message router check_logs_for_race_condition_errors(caplog, "test_race_condition_message_router_async_for")
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/issues/test_1363_race_condition_streamable_http.py", "license": "MIT License", "lines": 226, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:examples/clients/sse-polling-client/mcp_sse_polling_client/main.py
"""SSE Polling Demo Client Demonstrates the client-side auto-reconnect for SSE polling pattern. This client connects to the SSE Polling Demo server and calls process_batch, which triggers periodic server-side stream closes. The client automatically reconnects using Last-Event-ID and resumes receiving messages. Run with: # First start the server: uv run mcp-sse-polling-demo --port 3000 # Then run this client: uv run mcp-sse-polling-client --url http://localhost:3000/mcp """ import asyncio import logging import click from mcp import ClientSession from mcp.client.streamable_http import streamable_http_client async def run_demo(url: str, items: int, checkpoint_every: int) -> None: """Run the SSE polling demo.""" print(f"\n{'=' * 60}") print("SSE Polling Demo Client") print(f"{'=' * 60}") print(f"Server URL: {url}") print(f"Processing {items} items with checkpoints every {checkpoint_every}") print(f"{'=' * 60}\n") async with streamable_http_client(url) as (read_stream, write_stream): async with ClientSession(read_stream, write_stream) as session: # Initialize the connection print("Initializing connection...") await session.initialize() print("Connected!\n") # List available tools tools = await session.list_tools() print(f"Available tools: {[t.name for t in tools.tools]}\n") # Call the process_batch tool print(f"Calling process_batch(items={items}, checkpoint_every={checkpoint_every})...\n") print("-" * 40) result = await session.call_tool( "process_batch", { "items": items, "checkpoint_every": checkpoint_every, }, ) print("-" * 40) if result.content: content = result.content[0] text = getattr(content, "text", str(content)) print(f"\nResult: {text}") else: print("\nResult: No content") print(f"{'=' * 60}\n") @click.command() @click.option( "--url", default="http://localhost:3000/mcp", help="Server URL", ) @click.option( "--items", default=10, help="Number of items to process", ) @click.option( "--checkpoint-every", default=3, help="Checkpoint interval", ) @click.option( 
"--log-level", default="INFO", help="Logging level", ) def main(url: str, items: int, checkpoint_every: int, log_level: str) -> None: """Run the SSE Polling Demo client.""" logging.basicConfig( level=getattr(logging, log_level.upper()), format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # Suppress noisy HTTP client logging logging.getLogger("httpx").setLevel(logging.WARNING) logging.getLogger("httpcore").setLevel(logging.WARNING) asyncio.run(run_demo(url, items, checkpoint_every)) if __name__ == "__main__": main()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/clients/sse-polling-client/mcp_sse_polling_client/main.py", "license": "MIT License", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:examples/servers/sse-polling-demo/mcp_sse_polling_demo/event_store.py
"""In-memory event store for demonstrating resumability functionality. This is a simple implementation intended for examples and testing, not for production use where a persistent storage solution would be more appropriate. """ import logging from collections import deque from dataclasses import dataclass from uuid import uuid4 from mcp.server.streamable_http import EventCallback, EventId, EventMessage, EventStore, StreamId from mcp.types import JSONRPCMessage logger = logging.getLogger(__name__) @dataclass class EventEntry: """Represents an event entry in the event store.""" event_id: EventId stream_id: StreamId message: JSONRPCMessage | None # None for priming events class InMemoryEventStore(EventStore): """Simple in-memory implementation of the EventStore interface for resumability. This is primarily intended for examples and testing, not for production use where a persistent storage solution would be more appropriate. This implementation keeps only the last N events per stream for memory efficiency. """ def __init__(self, max_events_per_stream: int = 100): """Initialize the event store. Args: max_events_per_stream: Maximum number of events to keep per stream """ self.max_events_per_stream = max_events_per_stream # for maintaining last N events per stream self.streams: dict[StreamId, deque[EventEntry]] = {} # event_id -> EventEntry for quick lookup self.event_index: dict[EventId, EventEntry] = {} async def store_event(self, stream_id: StreamId, message: JSONRPCMessage | None) -> EventId: """Stores an event with a generated event ID. 
Args: stream_id: ID of the stream the event belongs to message: The message to store, or None for priming events """ event_id = str(uuid4()) event_entry = EventEntry(event_id=event_id, stream_id=stream_id, message=message) # Get or create deque for this stream if stream_id not in self.streams: self.streams[stream_id] = deque(maxlen=self.max_events_per_stream) # If deque is full, the oldest event will be automatically removed # We need to remove it from the event_index as well if len(self.streams[stream_id]) == self.max_events_per_stream: oldest_event = self.streams[stream_id][0] self.event_index.pop(oldest_event.event_id, None) # Add new event self.streams[stream_id].append(event_entry) self.event_index[event_id] = event_entry return event_id async def replay_events_after( self, last_event_id: EventId, send_callback: EventCallback, ) -> StreamId | None: """Replays events that occurred after the specified event ID.""" if last_event_id not in self.event_index: logger.warning(f"Event ID {last_event_id} not found in store") return None # Get the stream and find events after the last one last_event = self.event_index[last_event_id] stream_id = last_event.stream_id stream_events = self.streams.get(last_event.stream_id, deque()) # Events in deque are already in chronological order found_last = False for event in stream_events: if found_last: # Skip priming events (None messages) during replay if event.message is not None: await send_callback(EventMessage(event.message, event.event_id)) elif event.event_id == last_event_id: found_last = True return stream_id
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/servers/sse-polling-demo/mcp_sse_polling_demo/event_store.py", "license": "MIT License", "lines": 76, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:examples/servers/sse-polling-demo/mcp_sse_polling_demo/server.py
"""SSE Polling Demo Server Demonstrates the SSE polling pattern with close_sse_stream() for long-running tasks. Features demonstrated: - Priming events (automatic with EventStore) - Server-initiated stream close via close_sse_stream callback - Client auto-reconnect with Last-Event-ID - Progress notifications during long-running tasks Run with: uv run mcp-sse-polling-demo --port 3000 """ import contextlib import logging from collections.abc import AsyncIterator import anyio import click from mcp import types from mcp.server import Server, ServerRequestContext from mcp.server.streamable_http_manager import StreamableHTTPSessionManager from starlette.applications import Starlette from starlette.routing import Mount from starlette.types import Receive, Scope, Send from .event_store import InMemoryEventStore logger = logging.getLogger(__name__) async def handle_list_tools( ctx: ServerRequestContext, params: types.PaginatedRequestParams | None ) -> types.ListToolsResult: """List available tools.""" return types.ListToolsResult( tools=[ types.Tool( name="process_batch", description=( "Process a batch of items with periodic checkpoints. " "Demonstrates SSE polling where server closes stream periodically." 
), input_schema={ "type": "object", "properties": { "items": { "type": "integer", "description": "Number of items to process (1-100)", "default": 10, }, "checkpoint_every": { "type": "integer", "description": "Close stream after this many items (1-20)", "default": 3, }, }, }, ) ] ) async def handle_call_tool(ctx: ServerRequestContext, params: types.CallToolRequestParams) -> types.CallToolResult: """Handle tool calls.""" arguments = params.arguments or {} if params.name == "process_batch": items = arguments.get("items", 10) checkpoint_every = arguments.get("checkpoint_every", 3) if items < 1 or items > 100: return types.CallToolResult( content=[types.TextContent(type="text", text="Error: items must be between 1 and 100")] ) if checkpoint_every < 1 or checkpoint_every > 20: return types.CallToolResult( content=[types.TextContent(type="text", text="Error: checkpoint_every must be between 1 and 20")] ) await ctx.session.send_log_message( level="info", data=f"Starting batch processing of {items} items...", logger="process_batch", related_request_id=ctx.request_id, ) for i in range(1, items + 1): # Simulate work await anyio.sleep(0.5) # Report progress await ctx.session.send_log_message( level="info", data=f"[{i}/{items}] Processing item {i}", logger="process_batch", related_request_id=ctx.request_id, ) # Checkpoint: close stream to trigger client reconnect if i % checkpoint_every == 0 and i < items: await ctx.session.send_log_message( level="info", data=f"Checkpoint at item {i} - closing SSE stream for polling", logger="process_batch", related_request_id=ctx.request_id, ) if ctx.close_sse_stream: logger.info(f"Closing SSE stream at checkpoint {i}") await ctx.close_sse_stream() # Wait for client to reconnect (must be > retry_interval of 100ms) await anyio.sleep(0.2) return types.CallToolResult( content=[ types.TextContent( type="text", text=f"Successfully processed {items} items with checkpoints every {checkpoint_every} items", ) ] ) return 
types.CallToolResult(content=[types.TextContent(type="text", text=f"Unknown tool: {params.name}")]) @click.command() @click.option("--port", default=3000, help="Port to listen on") @click.option( "--log-level", default="INFO", help="Logging level (DEBUG, INFO, WARNING, ERROR)", ) @click.option( "--retry-interval", default=100, help="SSE retry interval in milliseconds (sent to client)", ) def main(port: int, log_level: str, retry_interval: int) -> int: """Run the SSE Polling Demo server.""" logging.basicConfig( level=getattr(logging, log_level.upper()), format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) app = Server( "sse-polling-demo", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) # Create event store for resumability event_store = InMemoryEventStore() # Create session manager with event store and retry interval session_manager = StreamableHTTPSessionManager( app=app, event_store=event_store, retry_interval=retry_interval, ) async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None: await session_manager.handle_request(scope, receive, send) @contextlib.asynccontextmanager async def lifespan(starlette_app: Starlette) -> AsyncIterator[None]: async with session_manager.run(): logger.info(f"SSE Polling Demo server started on port {port}") logger.info("Try: POST /mcp with tools/call for 'process_batch'") yield logger.info("Server shutting down...") starlette_app = Starlette( debug=True, routes=[ Mount("/mcp", app=handle_streamable_http), ], lifespan=lifespan, ) import uvicorn uvicorn.run(starlette_app, host="127.0.0.1", port=port) return 0 if __name__ == "__main__": main()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/servers/sse-polling-demo/mcp_sse_polling_demo/server.py", "license": "MIT License", "lines": 157, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:examples/clients/simple-task-client/mcp_simple_task_client/main.py
"""Simple task client demonstrating MCP tasks polling over streamable HTTP.""" import asyncio import click from mcp import ClientSession from mcp.client.streamable_http import streamable_http_client from mcp.types import CallToolResult, TextContent async def run(url: str) -> None: async with streamable_http_client(url) as (read, write): async with ClientSession(read, write) as session: await session.initialize() # List tools tools = await session.list_tools() print(f"Available tools: {[t.name for t in tools.tools]}") # Call the tool as a task print("\nCalling tool as a task...") result = await session.experimental.call_tool_as_task( "long_running_task", arguments={}, ttl=60000, ) task_id = result.task.task_id print(f"Task created: {task_id}") status = None # Poll until done (respects server's pollInterval hint) async for status in session.experimental.poll_task(task_id): print(f" Status: {status.status} - {status.status_message or ''}") # Check final status if status and status.status != "completed": print(f"Task ended with status: {status.status}") return # Get the result task_result = await session.experimental.get_task_result(task_id, CallToolResult) content = task_result.content[0] if isinstance(content, TextContent): print(f"\nResult: {content.text}") @click.command() @click.option("--url", default="http://localhost:8000/mcp", help="Server URL") def main(url: str) -> int: asyncio.run(run(url)) return 0 if __name__ == "__main__": main()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/clients/simple-task-client/mcp_simple_task_client/main.py", "license": "MIT License", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:examples/clients/simple-task-interactive-client/mcp_simple_task_interactive_client/main.py
"""Simple interactive task client demonstrating elicitation and sampling responses.

This example demonstrates the spec-compliant polling pattern:
1. Poll tasks/get watching for status changes
2. On input_required, call tasks/result to receive elicitation/sampling requests
3. Continue until terminal status, then retrieve final result
"""

import asyncio

import click

from mcp import ClientSession
from mcp.client.context import ClientRequestContext
from mcp.client.streamable_http import streamable_http_client
from mcp.types import (
    CallToolResult,
    CreateMessageRequestParams,
    CreateMessageResult,
    ElicitRequestParams,
    ElicitResult,
    TextContent,
)


async def elicitation_callback(
    context: ClientRequestContext,
    params: ElicitRequestParams,
) -> ElicitResult:
    """Handle elicitation requests from the server.

    Prompts the user on the terminal and answers with an "accept" action
    whose content carries a single ``confirm`` boolean.
    """
    print(f"\n[Elicitation] Server asks: {params.message}")

    # Simple terminal prompt
    response = input("Your response (y/n): ").strip().lower()
    # Any of these spellings counts as a yes; everything else is a no.
    confirmed = response in ("y", "yes", "true", "1")

    print(f"[Elicitation] Responding with: confirm={confirmed}")
    return ElicitResult(action="accept", content={"confirm": confirmed})


async def sampling_callback(
    context: ClientRequestContext,
    params: CreateMessageRequestParams,
) -> CreateMessageResult:
    """Handle sampling requests from the server.

    Returns a canned haiku instead of calling a real LLM; the prompt is
    extracted only so it can be echoed for demonstration purposes.
    """
    # Get the prompt from the first message
    prompt = "unknown"
    if params.messages:
        content = params.messages[0].content
        if isinstance(content, TextContent):
            prompt = content.text

    print(f"\n[Sampling] Server requests LLM completion for: {prompt}")

    # Return a hardcoded haiku (in real use, call your LLM here)
    haiku = """Cherry blossoms fall
Softly on the quiet pond
Spring whispers goodbye"""

    print("[Sampling] Responding with haiku")
    return CreateMessageResult(
        model="mock-haiku-model",
        role="assistant",
        content=TextContent(type="text", text=haiku),
    )


def get_text(result: CallToolResult) -> str:
    """Extract text from a CallToolResult.

    Only the first content item is inspected; non-text (or empty) content
    yields the "(no text)" placeholder.
    """
    if result.content and isinstance(result.content[0], TextContent):
        return result.content[0].text
    return "(no text)"


async def run(url: str) -> None:
    """Connect to the server at ``url`` and run both task demos."""
    async with streamable_http_client(url) as (read, write):
        async with ClientSession(
            read,
            write,
            elicitation_callback=elicitation_callback,
            sampling_callback=sampling_callback,
        ) as session:
            await session.initialize()

            # List tools
            tools = await session.list_tools()
            print(f"Available tools: {[t.name for t in tools.tools]}")

            # Demo 1: Elicitation (confirm_delete)
            print("\n--- Demo 1: Elicitation ---")
            print("Calling confirm_delete tool...")
            elicit_task = await session.experimental.call_tool_as_task("confirm_delete", {"filename": "important.txt"})
            elicit_task_id = elicit_task.task.task_id
            print(f"Task created: {elicit_task_id}")

            # Poll until terminal, calling tasks/result on input_required
            async for status in session.experimental.poll_task(elicit_task_id):
                print(f"[Poll] Status: {status.status}")
                if status.status == "input_required":
                    # Server needs input - tasks/result delivers the elicitation request
                    # NOTE(review): this call presumably blocks until the final result
                    # arrives (after our elicitation_callback answers) — confirm in SDK.
                    elicit_result = await session.experimental.get_task_result(elicit_task_id, CallToolResult)
                    break
            else:
                # poll_task exited due to terminal status
                # (for/else: runs only when the loop ended without `break`)
                elicit_result = await session.experimental.get_task_result(elicit_task_id, CallToolResult)

            print(f"Result: {get_text(elicit_result)}")

            # Demo 2: Sampling (write_haiku)
            print("\n--- Demo 2: Sampling ---")
            print("Calling write_haiku tool...")
            sampling_task = await session.experimental.call_tool_as_task("write_haiku", {"topic": "autumn leaves"})
            sampling_task_id = sampling_task.task.task_id
            print(f"Task created: {sampling_task_id}")

            # Poll until terminal, calling tasks/result on input_required
            async for status in session.experimental.poll_task(sampling_task_id):
                print(f"[Poll] Status: {status.status}")
                if status.status == "input_required":
                    sampling_result = await session.experimental.get_task_result(sampling_task_id, CallToolResult)
                    break
            else:
                sampling_result = await session.experimental.get_task_result(sampling_task_id, CallToolResult)

            print(f"Result:\n{get_text(sampling_result)}")


@click.command()
@click.option("--url", default="http://localhost:8000/mcp", help="Server URL")
def main(url: str) -> int:
    asyncio.run(run(url))
    return 0


if __name__ == "__main__":
    main()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/clients/simple-task-interactive-client/mcp_simple_task_interactive_client/main.py", "license": "MIT License", "lines": 108, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:examples/servers/simple-task-interactive/mcp_simple_task_interactive/server.py
"""Simple interactive task server demonstrating elicitation and sampling.

This example shows the simplified task API where:
- server.experimental.enable_tasks() sets up all infrastructure
- ctx.experimental.run_task() handles task lifecycle automatically
- ServerTaskContext.elicit() and ServerTaskContext.create_message() queue requests properly
"""

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import Any

import click
import uvicorn
from mcp import types
from mcp.server import Server, ServerRequestContext
from mcp.server.experimental.task_context import ServerTaskContext
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.applications import Starlette
from starlette.routing import Mount


async def handle_list_tools(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
    """Advertise the two demo tools; both require task-augmented invocation."""
    return types.ListToolsResult(
        tools=[
            types.Tool(
                name="confirm_delete",
                description="Asks for confirmation before deleting (demonstrates elicitation)",
                input_schema={
                    "type": "object",
                    "properties": {"filename": {"type": "string"}},
                },
                execution=types.ToolExecution(task_support=types.TASK_REQUIRED),
            ),
            types.Tool(
                name="write_haiku",
                description="Asks LLM to write a haiku (demonstrates sampling)",
                input_schema={"type": "object", "properties": {"topic": {"type": "string"}}},
                execution=types.ToolExecution(task_support=types.TASK_REQUIRED),
            ),
        ]
    )


async def handle_confirm_delete(ctx: ServerRequestContext, arguments: dict[str, Any]) -> types.CreateTaskResult:
    """Handle the confirm_delete tool - demonstrates elicitation.

    Creates a task whose work function elicits a {"confirm": bool} answer from
    the client, then completes with a success or cancellation message.
    """
    ctx.experimental.validate_task_mode(types.TASK_REQUIRED)
    filename = arguments.get("filename", "unknown.txt")
    # FIX: interpolate the actual filename — these f-strings previously contained
    # no placeholder (they printed the literal text '(unknown)'), leaving the
    # `filename` variable unused, unlike the parallel write_haiku handler.
    print(f"\n[Server] confirm_delete called for '{filename}'")

    async def work(task: ServerTaskContext) -> types.CallToolResult:
        print(f"[Server] Task {task.task_id} starting elicitation...")

        result = await task.elicit(
            message=f"Are you sure you want to delete '{filename}'?",
            requested_schema={
                "type": "object",
                "properties": {"confirm": {"type": "boolean"}},
                "required": ["confirm"],
            },
        )

        print(f"[Server] Received elicitation response: action={result.action}, content={result.content}")

        # Anything other than an explicit accept-with-confirm is treated as a no.
        if result.action == "accept" and result.content:
            confirmed = result.content.get("confirm", False)
            text = f"Deleted '{filename}'" if confirmed else "Deletion cancelled"
        else:
            text = "Deletion cancelled"

        print(f"[Server] Completing task with result: {text}")
        return types.CallToolResult(content=[types.TextContent(type="text", text=text)])

    return await ctx.experimental.run_task(work)


async def handle_write_haiku(ctx: ServerRequestContext, arguments: dict[str, Any]) -> types.CreateTaskResult:
    """Handle the write_haiku tool - demonstrates sampling.

    Creates a task whose work function asks the client's LLM for a haiku and
    completes with the returned text.
    """
    ctx.experimental.validate_task_mode(types.TASK_REQUIRED)
    topic = arguments.get("topic", "nature")
    print(f"\n[Server] write_haiku called for topic '{topic}'")

    async def work(task: ServerTaskContext) -> types.CallToolResult:
        print(f"[Server] Task {task.task_id} starting sampling...")

        result = await task.create_message(
            messages=[
                types.SamplingMessage(
                    role="user",
                    content=types.TextContent(type="text", text=f"Write a haiku about {topic}"),
                )
            ],
            max_tokens=50,
        )

        haiku = "No response"
        if isinstance(result.content, types.TextContent):
            haiku = result.content.text

        print(f"[Server] Received sampling response: {haiku[:50]}...")
        return types.CallToolResult(content=[types.TextContent(type="text", text=f"Haiku:\n{haiku}")])

    return await ctx.experimental.run_task(work)


async def handle_call_tool(
    ctx: ServerRequestContext, params: types.CallToolRequestParams
) -> types.CallToolResult | types.CreateTaskResult:
    """Dispatch tool calls to their handlers."""
    arguments = params.arguments or {}

    if params.name == "confirm_delete":
        return await handle_confirm_delete(ctx, arguments)
    elif params.name == "write_haiku":
        return await handle_write_haiku(ctx, arguments)

    # Unknown tool: report an error result rather than raising.
    return types.CallToolResult(
        content=[types.TextContent(type="text", text=f"Unknown tool: {params.name}")],
        is_error=True,
    )


server = Server(
    "simple-task-interactive",
    on_list_tools=handle_list_tools,
    on_call_tool=handle_call_tool,
)
# Enable task support - this auto-registers all handlers
server.experimental.enable_tasks()


def create_app(session_manager: StreamableHTTPSessionManager) -> Starlette:
    """Build the Starlette app, tying the session manager to the app lifespan."""

    @asynccontextmanager
    async def app_lifespan(app: Starlette) -> AsyncIterator[None]:
        # Run the session manager for the whole life of the application.
        async with session_manager.run():
            yield

    return Starlette(
        routes=[Mount("/mcp", app=session_manager.handle_request)],
        lifespan=app_lifespan,
    )


@click.command()
@click.option("--port", default=8000, help="Port to listen on")
def main(port: int) -> int:
    session_manager = StreamableHTTPSessionManager(app=server)
    starlette_app = create_app(session_manager)

    print(f"Starting server on http://localhost:{port}/mcp")
    uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    return 0
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/servers/simple-task-interactive/mcp_simple_task_interactive/server.py", "license": "MIT License", "lines": 122, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:examples/servers/simple-task/mcp_simple_task/server.py
"""Simple task server demonstrating MCP tasks over streamable HTTP."""

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager

import anyio
import click
import uvicorn
from mcp import types
from mcp.server import Server, ServerRequestContext
from mcp.server.experimental.task_context import ServerTaskContext
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.applications import Starlette
from starlette.routing import Mount


async def handle_list_tools(
    ctx: ServerRequestContext, params: types.PaginatedRequestParams | None
) -> types.ListToolsResult:
    """Advertise the single demo tool, which must be invoked as a task."""
    long_task_tool = types.Tool(
        name="long_running_task",
        description="A task that takes a few seconds to complete with status updates",
        input_schema={"type": "object", "properties": {}},
        execution=types.ToolExecution(task_support=types.TASK_REQUIRED),
    )
    return types.ListToolsResult(tools=[long_task_tool])


async def handle_call_tool(
    ctx: ServerRequestContext, params: types.CallToolRequestParams
) -> types.CallToolResult | types.CreateTaskResult:
    """Dispatch tool calls to their handlers."""
    # Guard clause: anything but the known tool is reported as an error result.
    if params.name != "long_running_task":
        return types.CallToolResult(
            content=[types.TextContent(type="text", text=f"Unknown tool: {params.name}")],
            is_error=True,
        )

    ctx.experimental.validate_task_mode(types.TASK_REQUIRED)

    async def run_steps(task: ServerTaskContext) -> types.CallToolResult:
        # Emit each progress message, pausing a second between steps.
        for note in ("Starting work...", "Processing step 1...", "Processing step 2..."):
            await task.update_status(note)
            await anyio.sleep(1)
        return types.CallToolResult(content=[types.TextContent(type="text", text="Task completed!")])

    return await ctx.experimental.run_task(run_steps)


server = Server(
    "simple-task-server",
    on_list_tools=handle_list_tools,
    on_call_tool=handle_call_tool,
)
# One-line setup: auto-registers get_task, get_task_result, list_tasks, cancel_task
server.experimental.enable_tasks()


@click.command()
@click.option("--port", default=8000, help="Port to listen on")
def main(port: int) -> int:
    session_manager = StreamableHTTPSessionManager(app=server)

    @asynccontextmanager
    async def app_lifespan(app: Starlette) -> AsyncIterator[None]:
        # Keep the session manager running for the app's whole lifetime.
        async with session_manager.run():
            yield

    starlette_app = Starlette(
        routes=[Mount("/mcp", app=session_manager.handle_request)],
        lifespan=app_lifespan,
    )

    print(f"Starting server on http://localhost:{port}/mcp")
    uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    return 0
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "examples/servers/simple-task/mcp_simple_task/server.py", "license": "MIT License", "lines": 66, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:src/mcp/client/experimental/task_handlers.py
"""Experimental task handler protocols for server -> client requests.

This module provides Protocol types and default handlers for when servers
send task-related requests to clients (the reverse of normal client -> server flow).

WARNING: These APIs are experimental and may change without notice.

Use cases:
- Server sends task-augmented sampling/elicitation request to client
- Client creates a local task, spawns background work, returns CreateTaskResult
- Server polls client's task status via tasks/get, tasks/result, etc.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Protocol

from pydantic import TypeAdapter

from mcp import types
from mcp.shared._context import RequestContext
from mcp.shared.session import RequestResponder

if TYPE_CHECKING:
    from mcp.client.session import ClientSession


class GetTaskHandlerFnT(Protocol):
    """Handler for tasks/get requests from server.

    WARNING: This is experimental and may change without notice.
    """

    async def __call__(
        self,
        context: RequestContext[ClientSession],
        params: types.GetTaskRequestParams,
    ) -> types.GetTaskResult | types.ErrorData: ...  # pragma: no branch


class GetTaskResultHandlerFnT(Protocol):
    """Handler for tasks/result requests from server.

    WARNING: This is experimental and may change without notice.
    """

    async def __call__(
        self,
        context: RequestContext[ClientSession],
        params: types.GetTaskPayloadRequestParams,
    ) -> types.GetTaskPayloadResult | types.ErrorData: ...  # pragma: no branch


class ListTasksHandlerFnT(Protocol):
    """Handler for tasks/list requests from server.

    WARNING: This is experimental and may change without notice.
    """

    async def __call__(
        self,
        context: RequestContext[ClientSession],
        params: types.PaginatedRequestParams | None,
    ) -> types.ListTasksResult | types.ErrorData: ...  # pragma: no branch


class CancelTaskHandlerFnT(Protocol):
    """Handler for tasks/cancel requests from server.

    WARNING: This is experimental and may change without notice.
    """

    async def __call__(
        self,
        context: RequestContext[ClientSession],
        params: types.CancelTaskRequestParams,
    ) -> types.CancelTaskResult | types.ErrorData: ...  # pragma: no branch


class TaskAugmentedSamplingFnT(Protocol):
    """Handler for task-augmented sampling/createMessage requests from server.

    When server sends a CreateMessageRequest with task field, this callback
    is invoked. The callback should create a task, spawn background work,
    and return CreateTaskResult immediately.

    WARNING: This is experimental and may change without notice.
    """

    async def __call__(
        self,
        context: RequestContext[ClientSession],
        params: types.CreateMessageRequestParams,
        task_metadata: types.TaskMetadata,
    ) -> types.CreateTaskResult | types.ErrorData: ...  # pragma: no branch


class TaskAugmentedElicitationFnT(Protocol):
    """Handler for task-augmented elicitation/create requests from server.

    When server sends an ElicitRequest with task field, this callback is
    invoked. The callback should create a task, spawn background work,
    and return CreateTaskResult immediately.

    WARNING: This is experimental and may change without notice.
    """

    async def __call__(
        self,
        context: RequestContext[ClientSession],
        params: types.ElicitRequestParams,
        task_metadata: types.TaskMetadata,
    ) -> types.CreateTaskResult | types.ErrorData: ...  # pragma: no branch


# --- Default handlers -------------------------------------------------------
# Each default declines its request: pure task requests answer METHOD_NOT_FOUND,
# task-augmented ones answer INVALID_REQUEST. build_capability() below compares
# against these identities to decide which capabilities to advertise.


async def default_get_task_handler(
    context: RequestContext[ClientSession],
    params: types.GetTaskRequestParams,
) -> types.GetTaskResult | types.ErrorData:
    return types.ErrorData(
        code=types.METHOD_NOT_FOUND,
        message="tasks/get not supported",
    )


async def default_get_task_result_handler(
    context: RequestContext[ClientSession],
    params: types.GetTaskPayloadRequestParams,
) -> types.GetTaskPayloadResult | types.ErrorData:
    return types.ErrorData(
        code=types.METHOD_NOT_FOUND,
        message="tasks/result not supported",
    )


async def default_list_tasks_handler(
    context: RequestContext[ClientSession],
    params: types.PaginatedRequestParams | None,
) -> types.ListTasksResult | types.ErrorData:
    return types.ErrorData(
        code=types.METHOD_NOT_FOUND,
        message="tasks/list not supported",
    )


async def default_cancel_task_handler(
    context: RequestContext[ClientSession],
    params: types.CancelTaskRequestParams,
) -> types.CancelTaskResult | types.ErrorData:
    return types.ErrorData(
        code=types.METHOD_NOT_FOUND,
        message="tasks/cancel not supported",
    )


async def default_task_augmented_sampling(
    context: RequestContext[ClientSession],
    params: types.CreateMessageRequestParams,
    task_metadata: types.TaskMetadata,
) -> types.CreateTaskResult | types.ErrorData:
    return types.ErrorData(
        code=types.INVALID_REQUEST,
        message="Task-augmented sampling not supported",
    )


async def default_task_augmented_elicitation(
    context: RequestContext[ClientSession],
    params: types.ElicitRequestParams,
    task_metadata: types.TaskMetadata,
) -> types.CreateTaskResult | types.ErrorData:
    return types.ErrorData(
        code=types.INVALID_REQUEST,
        message="Task-augmented elicitation not supported",
    )


@dataclass
class ExperimentalTaskHandlers:
    """Container for experimental task handlers.

    Groups all task-related handlers that handle server -> client requests.
    This includes both pure task requests (get, list, cancel, result) and
    task-augmented request handlers (sampling, elicitation with task field).

    WARNING: These APIs are experimental and may change without notice.

    Example:
        ```python
        handlers = ExperimentalTaskHandlers(
            get_task=my_get_task_handler,
            list_tasks=my_list_tasks_handler,
        )
        session = ClientSession(..., experimental_task_handlers=handlers)
        ```
    """

    # Pure task request handlers
    get_task: GetTaskHandlerFnT = field(default=default_get_task_handler)
    get_task_result: GetTaskResultHandlerFnT = field(default=default_get_task_result_handler)
    list_tasks: ListTasksHandlerFnT = field(default=default_list_tasks_handler)
    cancel_task: CancelTaskHandlerFnT = field(default=default_cancel_task_handler)

    # Task-augmented request handlers
    augmented_sampling: TaskAugmentedSamplingFnT = field(default=default_task_augmented_sampling)
    augmented_elicitation: TaskAugmentedElicitationFnT = field(default=default_task_augmented_elicitation)

    def build_capability(self) -> types.ClientTasksCapability | None:
        """Build ClientTasksCapability from the configured handlers.

        Returns a capability object that reflects which handlers are
        configured (i.e., not using the default "not supported" handlers).

        Returns:
            ClientTasksCapability if any handlers are provided, None otherwise
        """
        # Identity comparison against the module-level defaults detects
        # whether the caller supplied a real handler.
        # NOTE(review): get_task/get_task_result are not consulted here —
        # presumably the capability schema has no fields for them; confirm.
        has_list = self.list_tasks is not default_list_tasks_handler
        has_cancel = self.cancel_task is not default_cancel_task_handler
        has_sampling = self.augmented_sampling is not default_task_augmented_sampling
        has_elicitation = self.augmented_elicitation is not default_task_augmented_elicitation

        # If no handlers are provided, return None
        if not any([has_list, has_cancel, has_sampling, has_elicitation]):
            return None

        # Build requests capability if any request handlers are provided
        requests_capability: types.ClientTasksRequestsCapability | None = None
        if has_sampling or has_elicitation:
            requests_capability = types.ClientTasksRequestsCapability(
                sampling=types.TasksSamplingCapability(create_message=types.TasksCreateMessageCapability())
                if has_sampling
                else None,
                elicitation=types.TasksElicitationCapability(create=types.TasksCreateElicitationCapability())
                if has_elicitation
                else None,
            )

        return types.ClientTasksCapability(
            list=types.TasksListCapability() if has_list else None,
            cancel=types.TasksCancelCapability() if has_cancel else None,
            requests=requests_capability,
        )

    @staticmethod
    def handles_request(request: types.ServerRequest) -> bool:
        """Check if this handler handles the given request type."""
        return isinstance(
            request,
            types.GetTaskRequest | types.GetTaskPayloadRequest | types.ListTasksRequest | types.CancelTaskRequest,
        )

    async def handle_request(
        self,
        ctx: RequestContext[ClientSession],
        responder: RequestResponder[types.ServerRequest, types.ClientResult],
    ) -> None:
        """Handle a task-related request from the server.

        Call handles_request() first to check if this handler can handle
        the request.
        """
        # Handlers may return either a result or ErrorData; validate into
        # the union the responder expects before replying.
        client_response_type: TypeAdapter[types.ClientResult | types.ErrorData] = TypeAdapter(
            types.ClientResult | types.ErrorData
        )

        match responder.request:
            case types.GetTaskRequest(params=params):
                response = await self.get_task(ctx, params)
                client_response = client_response_type.validate_python(response)
                await responder.respond(client_response)
            case types.GetTaskPayloadRequest(params=params):
                response = await self.get_task_result(ctx, params)
                client_response = client_response_type.validate_python(response)
                await responder.respond(client_response)
            case types.ListTasksRequest(params=params):
                response = await self.list_tasks(ctx, params)
                client_response = client_response_type.validate_python(response)
                await responder.respond(client_response)
            case types.CancelTaskRequest(params=params):
                response = await self.cancel_task(ctx, params)
                client_response = client_response_type.validate_python(response)
                await responder.respond(client_response)
            case _:  # pragma: no cover
                raise ValueError(f"Unhandled request type: {type(responder.request)}")


# Backwards compatibility aliases
default_task_augmented_sampling_callback = default_task_augmented_sampling
default_task_augmented_elicitation_callback = default_task_augmented_elicitation
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/client/experimental/task_handlers.py", "license": "MIT License", "lines": 224, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/client/experimental/tasks.py
"""Experimental client-side task support.

This module provides client methods for interacting with MCP tasks.

WARNING: These APIs are experimental and may change without notice.

Example:
    ```python
    # Call a tool as a task
    result = await session.experimental.call_tool_as_task("tool_name", {"arg": "value"})
    task_id = result.task.task_id

    # Get task status
    status = await session.experimental.get_task(task_id)

    # Get task result when complete
    if status.status == "completed":
        result = await session.experimental.get_task_result(task_id, CallToolResult)

    # List all tasks
    tasks = await session.experimental.list_tasks()

    # Cancel a task
    await session.experimental.cancel_task(task_id)
    ```
"""

from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, Any, TypeVar

from mcp import types
from mcp.shared.experimental.tasks.polling import poll_until_terminal
from mcp.types._types import RequestParamsMeta

if TYPE_CHECKING:
    from mcp.client.session import ClientSession

# Result subtype expected back from a task (e.g. CallToolResult).
ResultT = TypeVar("ResultT", bound=types.Result)


class ExperimentalClientFeatures:
    """Experimental client features for tasks and other experimental APIs.

    Thin wrapper over ``ClientSession.send_request`` that builds the
    task-related request/result types.

    WARNING: These APIs are experimental and may change without notice.

    Access via session.experimental:
        status = await session.experimental.get_task(task_id)
    """

    def __init__(self, session: "ClientSession") -> None:
        self._session = session

    async def call_tool_as_task(
        self,
        name: str,
        arguments: dict[str, Any] | None = None,
        *,
        ttl: int = 60000,
        meta: RequestParamsMeta | None = None,
    ) -> types.CreateTaskResult:
        """Call a tool as a task, returning a CreateTaskResult for polling.

        This is a convenience method for calling tools that support task execution.
        The server will return a task reference instead of the immediate result,
        which can then be polled via `get_task()` and retrieved via `get_task_result()`.

        Args:
            name: The tool name
            arguments: Tool arguments
            ttl: Task time-to-live in milliseconds (default: 60000 = 1 minute)
            meta: Optional metadata to include in the request

        Returns:
            CreateTaskResult containing the task reference

        Example:
            ```python
            # Create task
            result = await session.experimental.call_tool_as_task(
                "long_running_tool", {"input": "data"}
            )
            task_id = result.task.task_id

            # Poll for completion
            while True:
                status = await session.experimental.get_task(task_id)
                if status.status == "completed":
                    break
                await anyio.sleep(0.5)

            # Get result
            final = await session.experimental.get_task_result(task_id, CallToolResult)
            ```
        """
        # Presence of the `task` field in params is what makes this a
        # task-augmented call rather than a normal tools/call.
        return await self._session.send_request(
            types.CallToolRequest(
                params=types.CallToolRequestParams(
                    name=name,
                    arguments=arguments,
                    task=types.TaskMetadata(ttl=ttl),
                    _meta=meta,
                ),
            ),
            types.CreateTaskResult,
        )

    async def get_task(self, task_id: str) -> types.GetTaskResult:
        """Get the current status of a task.

        Args:
            task_id: The task identifier

        Returns:
            GetTaskResult containing the task status and metadata
        """
        return await self._session.send_request(
            types.GetTaskRequest(params=types.GetTaskRequestParams(task_id=task_id)),
            types.GetTaskResult,
        )

    async def get_task_result(
        self,
        task_id: str,
        result_type: type[ResultT],
    ) -> ResultT:
        """Get the result of a completed task.

        The result type depends on the original request type:
        - tools/call tasks return CallToolResult
        - Other request types return their corresponding result type

        Args:
            task_id: The task identifier
            result_type: The expected result type (e.g., CallToolResult)

        Returns:
            The task result, validated against result_type
        """
        return await self._session.send_request(
            types.GetTaskPayloadRequest(
                params=types.GetTaskPayloadRequestParams(task_id=task_id),
            ),
            result_type,
        )

    async def list_tasks(
        self,
        cursor: str | None = None,
    ) -> types.ListTasksResult:
        """List all tasks.

        Args:
            cursor: Optional pagination cursor

        Returns:
            ListTasksResult containing tasks and optional next cursor
        """
        # A None cursor means "first page": send no params at all.
        params = types.PaginatedRequestParams(cursor=cursor) if cursor else None
        return await self._session.send_request(
            types.ListTasksRequest(params=params),
            types.ListTasksResult,
        )

    async def cancel_task(self, task_id: str) -> types.CancelTaskResult:
        """Cancel a running task.

        Args:
            task_id: The task identifier

        Returns:
            CancelTaskResult with the updated task state
        """
        return await self._session.send_request(
            types.CancelTaskRequest(
                params=types.CancelTaskRequestParams(task_id=task_id),
            ),
            types.CancelTaskResult,
        )

    async def poll_task(self, task_id: str) -> AsyncIterator[types.GetTaskResult]:
        """Poll a task until it reaches a terminal status.

        Yields GetTaskResult for each poll, allowing the caller to react
        to status changes (e.g., handle input_required). Exits when the
        task reaches a terminal status (completed, failed, cancelled).

        Respects the pollInterval hint from the server.

        Args:
            task_id: The task identifier

        Yields:
            GetTaskResult for each poll

        Example:
            ```python
            async for status in session.experimental.poll_task(task_id):
                print(f"Status: {status.status}")
                if status.status == "input_required":
                    # Handle elicitation request via tasks/result
                    pass
            # Task is now terminal, get the result
            result = await session.experimental.get_task_result(task_id, CallToolResult)
            ```
        """
        # Delegates timing/terminal detection to the shared polling helper.
        async for status in poll_until_terminal(self.get_task, task_id):
            yield status
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/client/experimental/tasks.py", "license": "MIT License", "lines": 165, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/server/experimental/request_context.py
"""Experimental request context features.

This module provides the Experimental class which gives access to
experimental features within a request context, such as task-augmented
request handling.

WARNING: These APIs are experimental and may change without notice.
"""

from collections.abc import Awaitable, Callable
from dataclasses import dataclass, field
from typing import Any

from mcp.server.experimental.task_context import ServerTaskContext
from mcp.server.experimental.task_support import TaskSupport
from mcp.server.session import ServerSession
from mcp.shared.exceptions import MCPError
from mcp.shared.experimental.tasks.helpers import MODEL_IMMEDIATE_RESPONSE_KEY, is_terminal
from mcp.types import (
    METHOD_NOT_FOUND,
    TASK_FORBIDDEN,
    TASK_REQUIRED,
    ClientCapabilities,
    CreateTaskResult,
    ErrorData,
    Result,
    TaskExecutionMode,
    TaskMetadata,
    Tool,
)


@dataclass
class Experimental:
    """Experimental features context for task-augmented requests.

    Provides helpers for validating task execution compatibility and
    running tasks with automatic lifecycle management.

    WARNING: This API is experimental and may change without notice.
    """

    # TaskMetadata from the incoming request's `task` field; None for
    # non-task-augmented requests.
    task_metadata: TaskMetadata | None = None
    # Internals injected by the framework; excluded from repr.
    _client_capabilities: ClientCapabilities | None = field(default=None, repr=False)
    _session: ServerSession | None = field(default=None, repr=False)
    _task_support: TaskSupport | None = field(default=None, repr=False)

    @property
    def is_task(self) -> bool:
        """Check if this request is task-augmented."""
        return self.task_metadata is not None

    @property
    def client_supports_tasks(self) -> bool:
        """Check if the client declared task support."""
        if self._client_capabilities is None:
            return False
        return self._client_capabilities.tasks is not None

    def validate_task_mode(
        self, tool_task_mode: TaskExecutionMode | None, *, raise_error: bool = True
    ) -> ErrorData | None:
        """Validate that the request is compatible with the tool's task execution mode.

        Per MCP spec:
        - "required": Clients MUST invoke as a task. Server returns -32601 if not.
        - "forbidden" (or None): Clients MUST NOT invoke as a task. Server returns -32601 if they do.
        - "optional": Either is acceptable.

        Args:
            tool_task_mode: The tool's execution.taskSupport value
                           ("forbidden", "optional", "required", or None)
            raise_error: If True, raises MCPError on validation failure.
                        If False, returns ErrorData.

        Returns:
            None if valid, ErrorData if invalid and raise_error=False

        Raises:
            MCPError: If invalid and raise_error=True
        """
        # A missing mode is treated the same as an explicit "forbidden".
        mode = tool_task_mode or TASK_FORBIDDEN
        error: ErrorData | None = None

        if mode == TASK_REQUIRED and not self.is_task:
            error = ErrorData(code=METHOD_NOT_FOUND, message="This tool requires task-augmented invocation")
        elif mode == TASK_FORBIDDEN and self.is_task:
            error = ErrorData(code=METHOD_NOT_FOUND, message="This tool does not support task-augmented invocation")

        if error is not None and raise_error:
            raise MCPError.from_error_data(error)
        return error

    def validate_for_tool(self, tool: Tool, *, raise_error: bool = True) -> ErrorData | None:
        """Validate that the request is compatible with the given tool.

        Convenience wrapper around validate_task_mode that extracts the mode
        from a Tool.

        Args:
            tool: The Tool definition
            raise_error: If True, raises MCPError on validation failure.

        Returns:
            None if valid, ErrorData if invalid and raise_error=False
        """
        mode = tool.execution.task_support if tool.execution else None
        return self.validate_task_mode(mode, raise_error=raise_error)

    def can_use_tool(self, tool_task_mode: TaskExecutionMode | None) -> bool:
        """Check if this client can use a tool with the given task mode.

        Useful for filtering tool lists or providing warnings.
        Returns False if the tool's task mode is "required" but the client
        doesn't support tasks.

        Args:
            tool_task_mode: The tool's execution.taskSupport value

        Returns:
            True if the client can use this tool, False otherwise
        """
        mode = tool_task_mode or TASK_FORBIDDEN
        if mode == TASK_REQUIRED and not self.client_supports_tasks:
            return False
        return True

    async def run_task(
        self,
        work: Callable[[ServerTaskContext], Awaitable[Result]],
        *,
        task_id: str | None = None,
        model_immediate_response: str | None = None,
    ) -> CreateTaskResult:
        """Create a task, spawn background work, and return CreateTaskResult immediately.

        This is the recommended way to handle task-augmented tool calls.
        It:
        1. Creates a task in the store
        2. Spawns the work function in a background task
        3. Returns CreateTaskResult immediately

        The work function receives a ServerTaskContext with:
        - elicit() for sending elicitation requests
        - create_message() for sampling requests
        - update_status() for progress updates
        - complete()/fail() for finishing the task

        When work() returns a Result, the task is auto-completed with that result.
        If work() raises an exception, the task is auto-failed.

        Args:
            work: Async function that does the actual work
            task_id: Optional task ID (generated if not provided)
            model_immediate_response: Optional string to include in _meta as
                io.modelcontextprotocol/model-immediate-response

        Returns:
            CreateTaskResult to return to the client

        Raises:
            RuntimeError: If task support is not enabled or task_metadata is missing

        Example:
            ```python
            async def handle_tool(ctx: RequestContext, params: CallToolRequestParams) -> CallToolResult:
                async def work(task: ServerTaskContext) -> CallToolResult:
                    result = await task.elicit(
                        message="Are you sure?", requested_schema={"type": "object", ...}
                    )
                    confirmed = result.content.get("confirm", False)
                    return CallToolResult(content=[TextContent(text="Done" if confirmed else "Cancelled")])

                return await ctx.experimental.run_task(work)
            ```

        WARNING: This API is experimental and may change without notice.
        """
        if self._task_support is None:
            raise RuntimeError("Task support not enabled. Call server.experimental.enable_tasks() first.")
        if self._session is None:
            raise RuntimeError("Session not available.")
        if self.task_metadata is None:
            raise RuntimeError(
                "Request is not task-augmented (no task field in params). "
                "The client must send a task-augmented request."
            )

        support = self._task_support
        # Access task_group via TaskSupport - raises if not in run() context
        task_group = support.task_group

        task = await support.store.create_task(self.task_metadata, task_id)
        task_ctx = ServerTaskContext(
            task=task,
            store=support.store,
            session=self._session,
            queue=support.queue,
            handler=support.handler,
        )

        async def execute() -> None:
            # Auto-complete/auto-fail only if the work function did not
            # already drive the task to a terminal status itself.
            try:
                result = await work(task_ctx)
                if not is_terminal(task_ctx.task.status):
                    await task_ctx.complete(result)
            except Exception as e:
                if not is_terminal(task_ctx.task.status):
                    await task_ctx.fail(str(e))

        # Fire and forget: the response below returns before work finishes.
        task_group.start_soon(execute)

        meta: dict[str, Any] | None = None
        if model_immediate_response is not None:
            meta = {MODEL_IMMEDIATE_RESPONSE_KEY: model_immediate_response}

        return CreateTaskResult(task=task, **{"_meta": meta} if meta else {})
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/experimental/request_context.py", "license": "MIT License", "lines": 171, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/server/experimental/session_features.py
"""Experimental server session features for server→client task operations. This module provides the server-side equivalent of ExperimentalClientFeatures, allowing the server to send task-augmented requests to the client and poll for results. WARNING: These APIs are experimental and may change without notice. """ from collections.abc import AsyncIterator from typing import TYPE_CHECKING, Any, TypeVar from mcp import types from mcp.server.validation import validate_sampling_tools, validate_tool_use_result_messages from mcp.shared.experimental.tasks.capabilities import ( require_task_augmented_elicitation, require_task_augmented_sampling, ) from mcp.shared.experimental.tasks.polling import poll_until_terminal if TYPE_CHECKING: from mcp.server.session import ServerSession ResultT = TypeVar("ResultT", bound=types.Result) class ExperimentalServerSessionFeatures: """Experimental server session features for server→client task operations. This provides the server-side equivalent of ExperimentalClientFeatures, allowing the server to send task-augmented requests to the client and poll for results. WARNING: These APIs are experimental and may change without notice. Access via session.experimental: result = await session.experimental.elicit_as_task(...) """ def __init__(self, session: "ServerSession") -> None: self._session = session async def get_task(self, task_id: str) -> types.GetTaskResult: """Send tasks/get to the client to get task status. Args: task_id: The task identifier Returns: GetTaskResult containing the task status """ return await self._session.send_request( types.GetTaskRequest(params=types.GetTaskRequestParams(task_id=task_id)), types.GetTaskResult, ) async def get_task_result( self, task_id: str, result_type: type[ResultT], ) -> ResultT: """Send tasks/result to the client to retrieve the final result. 
Args: task_id: The task identifier result_type: The expected result type Returns: The task result, validated against result_type """ return await self._session.send_request( types.GetTaskPayloadRequest(params=types.GetTaskPayloadRequestParams(task_id=task_id)), result_type, ) async def poll_task(self, task_id: str) -> AsyncIterator[types.GetTaskResult]: """Poll a client task until it reaches terminal status. Yields GetTaskResult for each poll, allowing the caller to react to status changes. Exits when task reaches a terminal status. Respects the pollInterval hint from the client. Args: task_id: The task identifier Yields: GetTaskResult for each poll """ async for status in poll_until_terminal(self.get_task, task_id): yield status async def elicit_as_task( self, message: str, requested_schema: types.ElicitRequestedSchema, *, ttl: int = 60000, ) -> types.ElicitResult: """Send a task-augmented elicitation to the client and poll until complete. The client will create a local task, process the elicitation asynchronously, and return the result when ready. This method handles the full flow: 1. Send elicitation with task field 2. Receive CreateTaskResult from client 3. Poll client's task until terminal 4. 
Retrieve and return the final ElicitResult Args: message: The message to present to the user requested_schema: Schema defining the expected response ttl: Task time-to-live in milliseconds Returns: The client's elicitation response Raises: MCPError: If client doesn't support task-augmented elicitation """ client_caps = self._session.client_params.capabilities if self._session.client_params else None require_task_augmented_elicitation(client_caps) create_result = await self._session.send_request( types.ElicitRequest( params=types.ElicitRequestFormParams( message=message, requested_schema=requested_schema, task=types.TaskMetadata(ttl=ttl), ) ), types.CreateTaskResult, ) task_id = create_result.task.task_id async for _ in self.poll_task(task_id): pass return await self.get_task_result(task_id, types.ElicitResult) async def create_message_as_task( self, messages: list[types.SamplingMessage], *, max_tokens: int, ttl: int = 60000, system_prompt: str | None = None, include_context: types.IncludeContext | None = None, temperature: float | None = None, stop_sequences: list[str] | None = None, metadata: dict[str, Any] | None = None, model_preferences: types.ModelPreferences | None = None, tools: list[types.Tool] | None = None, tool_choice: types.ToolChoice | None = None, ) -> types.CreateMessageResult: """Send a task-augmented sampling request and poll until complete. The client will create a local task, process the sampling request asynchronously, and return the result when ready. 
Args: messages: The conversation messages for sampling max_tokens: Maximum tokens in the response ttl: Task time-to-live in milliseconds system_prompt: Optional system prompt include_context: Context inclusion strategy temperature: Sampling temperature stop_sequences: Stop sequences metadata: Additional metadata model_preferences: Model selection preferences tools: Optional list of tools the LLM can use during sampling tool_choice: Optional control over tool usage behavior Returns: The sampling result from the client Raises: MCPError: If client doesn't support task-augmented sampling or tools ValueError: If tool_use or tool_result message structure is invalid """ client_caps = self._session.client_params.capabilities if self._session.client_params else None require_task_augmented_sampling(client_caps) validate_sampling_tools(client_caps, tools, tool_choice) validate_tool_use_result_messages(messages) create_result = await self._session.send_request( types.CreateMessageRequest( params=types.CreateMessageRequestParams( messages=messages, max_tokens=max_tokens, system_prompt=system_prompt, include_context=include_context, temperature=temperature, stop_sequences=stop_sequences, metadata=metadata, model_preferences=model_preferences, tools=tools, tool_choice=tool_choice, task=types.TaskMetadata(ttl=ttl), ) ), types.CreateTaskResult, ) task_id = create_result.task.task_id async for _ in self.poll_task(task_id): pass return await self.get_task_result(task_id, types.CreateMessageResult)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/experimental/session_features.py", "license": "MIT License", "lines": 168, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/server/experimental/task_context.py
"""ServerTaskContext - Server-integrated task context with elicitation and sampling. This wraps the pure TaskContext and adds server-specific functionality: - Elicitation (task.elicit()) - Sampling (task.create_message()) - Status notifications """ from typing import Any import anyio from mcp.server.experimental.task_result_handler import TaskResultHandler from mcp.server.session import ServerSession from mcp.server.validation import validate_sampling_tools, validate_tool_use_result_messages from mcp.shared.exceptions import MCPError from mcp.shared.experimental.tasks.capabilities import ( require_task_augmented_elicitation, require_task_augmented_sampling, ) from mcp.shared.experimental.tasks.context import TaskContext from mcp.shared.experimental.tasks.message_queue import QueuedMessage, TaskMessageQueue from mcp.shared.experimental.tasks.resolver import Resolver from mcp.shared.experimental.tasks.store import TaskStore from mcp.types import ( INVALID_REQUEST, TASK_STATUS_INPUT_REQUIRED, TASK_STATUS_WORKING, ClientCapabilities, CreateMessageResult, CreateTaskResult, ElicitationCapability, ElicitRequestedSchema, ElicitResult, IncludeContext, ModelPreferences, RequestId, Result, SamplingCapability, SamplingMessage, Task, TaskMetadata, TaskStatusNotification, TaskStatusNotificationParams, Tool, ToolChoice, ) class ServerTaskContext: """Server-integrated task context with elicitation and sampling. 
This wraps a pure TaskContext and adds server-specific functionality: - elicit() for sending elicitation requests to the client - create_message() for sampling requests - Status notifications via the session Example: ```python async def my_task_work(task: ServerTaskContext) -> CallToolResult: await task.update_status("Starting...") result = await task.elicit( message="Continue?", requested_schema={"type": "object", "properties": {"ok": {"type": "boolean"}}} ) if result.content.get("ok"): return CallToolResult(content=[TextContent(text="Done!")]) else: return CallToolResult(content=[TextContent(text="Cancelled")]) ``` """ def __init__( self, *, task: Task, store: TaskStore, session: ServerSession, queue: TaskMessageQueue, handler: TaskResultHandler | None = None, ): """Create a ServerTaskContext. Args: task: The Task object store: The task store session: The server session queue: The message queue for elicitation/sampling handler: The result handler for response routing (required for elicit/create_message) """ self._ctx = TaskContext(task=task, store=store) self._session = session self._queue = queue self._handler = handler self._store = store # Delegate pure properties to inner context @property def task_id(self) -> str: """The task identifier.""" return self._ctx.task_id @property def task(self) -> Task: """The current task state.""" return self._ctx.task @property def is_cancelled(self) -> bool: """Whether cancellation has been requested.""" return self._ctx.is_cancelled def request_cancellation(self) -> None: """Request cancellation of this task.""" self._ctx.request_cancellation() # Enhanced methods with notifications async def update_status(self, message: str, *, notify: bool = True) -> None: """Update the task's status message. 
Args: message: The new status message notify: Whether to send a notification to the client """ await self._ctx.update_status(message) if notify: await self._send_notification() async def complete(self, result: Result, *, notify: bool = True) -> None: """Mark the task as completed with the given result. Args: result: The task result notify: Whether to send a notification to the client """ await self._ctx.complete(result) if notify: await self._send_notification() async def fail(self, error: str, *, notify: bool = True) -> None: """Mark the task as failed with an error message. Args: error: The error message notify: Whether to send a notification to the client """ await self._ctx.fail(error) if notify: await self._send_notification() async def _send_notification(self) -> None: """Send a task status notification to the client.""" task = self._ctx.task await self._session.send_notification( TaskStatusNotification( params=TaskStatusNotificationParams( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=task.poll_interval, ) ) ) # Server-specific methods: elicitation and sampling def _check_elicitation_capability(self) -> None: """Check if the client supports elicitation.""" if not self._session.check_client_capability(ClientCapabilities(elicitation=ElicitationCapability())): raise MCPError(code=INVALID_REQUEST, message="Client does not support elicitation capability") def _check_sampling_capability(self) -> None: """Check if the client supports sampling.""" if not self._session.check_client_capability(ClientCapabilities(sampling=SamplingCapability())): raise MCPError(code=INVALID_REQUEST, message="Client does not support sampling capability") async def elicit( self, message: str, requested_schema: ElicitRequestedSchema, ) -> ElicitResult: """Send an elicitation request via the task message queue. This method: 1. Checks client capability 2. 
Updates task status to "input_required" 3. Queues the elicitation request 4. Waits for the response (delivered via tasks/result round-trip) 5. Updates task status back to "working" 6. Returns the result Args: message: The message to present to the user requested_schema: Schema defining the expected response structure Returns: The client's response Raises: MCPError: If client doesn't support elicitation capability """ self._check_elicitation_capability() if self._handler is None: raise RuntimeError("handler is required for elicit(). Pass handler= to ServerTaskContext.") # Update status to input_required await self._store.update_task(self.task_id, status=TASK_STATUS_INPUT_REQUIRED) # Build the request using session's helper request = self._session._build_elicit_form_request( # pyright: ignore[reportPrivateUsage] message=message, requested_schema=requested_schema, related_task_id=self.task_id, ) request_id: RequestId = request.id resolver: Resolver[dict[str, Any]] = Resolver() self._handler._pending_requests[request_id] = resolver # pyright: ignore[reportPrivateUsage] queued = QueuedMessage( type="request", message=request, resolver=resolver, original_request_id=request_id, ) await self._queue.enqueue(self.task_id, queued) try: # Wait for response (routed back via TaskResultHandler) response_data = await resolver.wait() await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) return ElicitResult.model_validate(response_data) except anyio.get_cancelled_exc_class(): # This path is tested in test_elicit_restores_status_on_cancellation # which verifies status is restored to "working" after cancellation. await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) raise async def elicit_url( self, message: str, url: str, elicitation_id: str, ) -> ElicitResult: """Send a URL mode elicitation request via the task message queue. 
This directs the user to an external URL for out-of-band interactions like OAuth flows, credential collection, or payment processing. This method: 1. Checks client capability 2. Updates task status to "input_required" 3. Queues the elicitation request 4. Waits for the response (delivered via tasks/result round-trip) 5. Updates task status back to "working" 6. Returns the result Args: message: Human-readable explanation of why the interaction is needed url: The URL the user should navigate to elicitation_id: Unique identifier for tracking this elicitation Returns: The client's response indicating acceptance, decline, or cancellation Raises: MCPError: If client doesn't support elicitation capability RuntimeError: If handler is not configured """ self._check_elicitation_capability() if self._handler is None: raise RuntimeError("handler is required for elicit_url(). Pass handler= to ServerTaskContext.") # Update status to input_required await self._store.update_task(self.task_id, status=TASK_STATUS_INPUT_REQUIRED) # Build the request using session's helper request = self._session._build_elicit_url_request( # pyright: ignore[reportPrivateUsage] message=message, url=url, elicitation_id=elicitation_id, related_task_id=self.task_id, ) request_id: RequestId = request.id resolver: Resolver[dict[str, Any]] = Resolver() self._handler._pending_requests[request_id] = resolver # pyright: ignore[reportPrivateUsage] queued = QueuedMessage( type="request", message=request, resolver=resolver, original_request_id=request_id, ) await self._queue.enqueue(self.task_id, queued) try: # Wait for response (routed back via TaskResultHandler) response_data = await resolver.wait() await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) return ElicitResult.model_validate(response_data) except anyio.get_cancelled_exc_class(): # pragma: no cover await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) raise async def create_message( self, messages: 
list[SamplingMessage], *, max_tokens: int, system_prompt: str | None = None, include_context: IncludeContext | None = None, temperature: float | None = None, stop_sequences: list[str] | None = None, metadata: dict[str, Any] | None = None, model_preferences: ModelPreferences | None = None, tools: list[Tool] | None = None, tool_choice: ToolChoice | None = None, ) -> CreateMessageResult: """Send a sampling request via the task message queue. This method: 1. Checks client capability 2. Updates task status to "input_required" 3. Queues the sampling request 4. Waits for the response (delivered via tasks/result round-trip) 5. Updates task status back to "working" 6. Returns the result Args: messages: The conversation messages for sampling max_tokens: Maximum tokens in the response system_prompt: Optional system prompt include_context: Context inclusion strategy temperature: Sampling temperature stop_sequences: Stop sequences metadata: Additional metadata model_preferences: Model selection preferences tools: Optional list of tools the LLM can use during sampling tool_choice: Optional control over tool usage behavior Returns: The sampling result from the client Raises: MCPError: If client doesn't support sampling capability or tools ValueError: If tool_use or tool_result message structure is invalid """ self._check_sampling_capability() client_caps = self._session.client_params.capabilities if self._session.client_params else None validate_sampling_tools(client_caps, tools, tool_choice) validate_tool_use_result_messages(messages) if self._handler is None: raise RuntimeError("handler is required for create_message(). 
Pass handler= to ServerTaskContext.") # Update status to input_required await self._store.update_task(self.task_id, status=TASK_STATUS_INPUT_REQUIRED) # Build the request using session's helper request = self._session._build_create_message_request( # pyright: ignore[reportPrivateUsage] messages=messages, max_tokens=max_tokens, system_prompt=system_prompt, include_context=include_context, temperature=temperature, stop_sequences=stop_sequences, metadata=metadata, model_preferences=model_preferences, tools=tools, tool_choice=tool_choice, related_task_id=self.task_id, ) request_id: RequestId = request.id resolver: Resolver[dict[str, Any]] = Resolver() self._handler._pending_requests[request_id] = resolver # pyright: ignore[reportPrivateUsage] queued = QueuedMessage( type="request", message=request, resolver=resolver, original_request_id=request_id, ) await self._queue.enqueue(self.task_id, queued) try: # Wait for response (routed back via TaskResultHandler) response_data = await resolver.wait() await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) return CreateMessageResult.model_validate(response_data) except anyio.get_cancelled_exc_class(): # This path is tested in test_create_message_restores_status_on_cancellation # which verifies status is restored to "working" after cancellation. await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) raise async def elicit_as_task( self, message: str, requested_schema: ElicitRequestedSchema, *, ttl: int = 60000, ) -> ElicitResult: """Send a task-augmented elicitation via the queue, then poll client. This is for use inside a task-augmented tool call when you want the client to handle the elicitation as its own task. The elicitation request is queued and delivered when the client calls tasks/result. After the client responds with CreateTaskResult, we poll the client's task until complete. 
Args: message: The message to present to the user requested_schema: Schema defining the expected response structure ttl: Task time-to-live in milliseconds for the client's task Returns: The client's elicitation response Raises: MCPError: If client doesn't support task-augmented elicitation RuntimeError: If handler is not configured """ client_caps = self._session.client_params.capabilities if self._session.client_params else None require_task_augmented_elicitation(client_caps) if self._handler is None: raise RuntimeError("handler is required for elicit_as_task()") # Update status to input_required await self._store.update_task(self.task_id, status=TASK_STATUS_INPUT_REQUIRED) request = self._session._build_elicit_form_request( # pyright: ignore[reportPrivateUsage] message=message, requested_schema=requested_schema, related_task_id=self.task_id, task=TaskMetadata(ttl=ttl), ) request_id: RequestId = request.id resolver: Resolver[dict[str, Any]] = Resolver() self._handler._pending_requests[request_id] = resolver # pyright: ignore[reportPrivateUsage] queued = QueuedMessage( type="request", message=request, resolver=resolver, original_request_id=request_id, ) await self._queue.enqueue(self.task_id, queued) try: # Wait for initial response (CreateTaskResult from client) response_data = await resolver.wait() create_result = CreateTaskResult.model_validate(response_data) client_task_id = create_result.task.task_id # Poll the client's task using session.experimental async for _ in self._session.experimental.poll_task(client_task_id): pass # Get final result from client result = await self._session.experimental.get_task_result( client_task_id, ElicitResult, ) await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) return result except anyio.get_cancelled_exc_class(): # pragma: no cover await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) raise async def create_message_as_task( self, messages: list[SamplingMessage], *, max_tokens: int, ttl: 
int = 60000, system_prompt: str | None = None, include_context: IncludeContext | None = None, temperature: float | None = None, stop_sequences: list[str] | None = None, metadata: dict[str, Any] | None = None, model_preferences: ModelPreferences | None = None, tools: list[Tool] | None = None, tool_choice: ToolChoice | None = None, ) -> CreateMessageResult: """Send a task-augmented sampling request via the queue, then poll client. This is for use inside a task-augmented tool call when you want the client to handle the sampling as its own task. The request is queued and delivered when the client calls tasks/result. After the client responds with CreateTaskResult, we poll the client's task until complete. Args: messages: The conversation messages for sampling max_tokens: Maximum tokens in the response ttl: Task time-to-live in milliseconds for the client's task system_prompt: Optional system prompt include_context: Context inclusion strategy temperature: Sampling temperature stop_sequences: Stop sequences metadata: Additional metadata model_preferences: Model selection preferences tools: Optional list of tools the LLM can use during sampling tool_choice: Optional control over tool usage behavior Returns: The sampling result from the client Raises: MCPError: If client doesn't support task-augmented sampling or tools ValueError: If tool_use or tool_result message structure is invalid RuntimeError: If handler is not configured """ client_caps = self._session.client_params.capabilities if self._session.client_params else None require_task_augmented_sampling(client_caps) validate_sampling_tools(client_caps, tools, tool_choice) validate_tool_use_result_messages(messages) if self._handler is None: raise RuntimeError("handler is required for create_message_as_task()") # Update status to input_required await self._store.update_task(self.task_id, status=TASK_STATUS_INPUT_REQUIRED) # Build request WITH task field for task-augmented sampling request = 
self._session._build_create_message_request( # pyright: ignore[reportPrivateUsage] messages=messages, max_tokens=max_tokens, system_prompt=system_prompt, include_context=include_context, temperature=temperature, stop_sequences=stop_sequences, metadata=metadata, model_preferences=model_preferences, tools=tools, tool_choice=tool_choice, related_task_id=self.task_id, task=TaskMetadata(ttl=ttl), ) request_id: RequestId = request.id resolver: Resolver[dict[str, Any]] = Resolver() self._handler._pending_requests[request_id] = resolver # pyright: ignore[reportPrivateUsage] queued = QueuedMessage( type="request", message=request, resolver=resolver, original_request_id=request_id, ) await self._queue.enqueue(self.task_id, queued) try: # Wait for initial response (CreateTaskResult from client) response_data = await resolver.wait() create_result = CreateTaskResult.model_validate(response_data) client_task_id = create_result.task.task_id # Poll the client's task using session.experimental async for _ in self._session.experimental.poll_task(client_task_id): pass # Get final result from client result = await self._session.experimental.get_task_result( client_task_id, CreateMessageResult, ) await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) return result except anyio.get_cancelled_exc_class(): # pragma: no cover await self._store.update_task(self.task_id, status=TASK_STATUS_WORKING) raise
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/experimental/task_context.py", "license": "MIT License", "lines": 495, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/server/experimental/task_result_handler.py
"""TaskResultHandler - Integrated handler for tasks/result endpoint. This implements the dequeue-send-wait pattern from the MCP Tasks spec: 1. Dequeue all pending messages for the task 2. Send them to the client via transport with relatedRequestId routing 3. Wait if task is not in terminal state 4. Return final result when task completes This is the core of the task message queue pattern. """ import logging from typing import Any import anyio from mcp.server.session import ServerSession from mcp.shared.exceptions import MCPError from mcp.shared.experimental.tasks.helpers import RELATED_TASK_METADATA_KEY, is_terminal from mcp.shared.experimental.tasks.message_queue import TaskMessageQueue from mcp.shared.experimental.tasks.resolver import Resolver from mcp.shared.experimental.tasks.store import TaskStore from mcp.shared.message import ServerMessageMetadata, SessionMessage from mcp.types import ( INVALID_PARAMS, ErrorData, GetTaskPayloadRequest, GetTaskPayloadResult, RelatedTaskMetadata, RequestId, ) logger = logging.getLogger(__name__) class TaskResultHandler: """Handler for tasks/result that implements the message queue pattern. This handler: 1. Dequeues pending messages (elicitations, notifications) for the task 2. Sends them to the client via the response stream 3. Waits for responses and resolves them back to callers 4. Blocks until task reaches terminal state 5. Returns the final result Usage: async def handle_task_result( ctx: ServerRequestContext, params: GetTaskPayloadRequestParams ) -> GetTaskPayloadResult: ... server.experimental.enable_tasks( on_task_result=handle_task_result, ) """ def __init__( self, store: TaskStore, queue: TaskMessageQueue, ): self._store = store self._queue = queue # Map from internal request ID to resolver for routing responses self._pending_requests: dict[RequestId, Resolver[dict[str, Any]]] = {} async def send_message( self, session: ServerSession, message: SessionMessage, ) -> None: """Send a message via the session. 
This is a helper for delivering queued task messages. """ await session.send_message(message) async def handle( self, request: GetTaskPayloadRequest, session: ServerSession, request_id: RequestId, ) -> GetTaskPayloadResult: """Handle a tasks/result request. This implements the dequeue-send-wait loop: 1. Dequeue all pending messages 2. Send each via transport with relatedRequestId = this request's ID 3. If task not terminal, wait for status change 4. Loop until task is terminal 5. Return final result Args: request: The GetTaskPayloadRequest session: The server session for sending messages request_id: The request ID for relatedRequestId routing Returns: GetTaskPayloadResult with the task's final payload """ task_id = request.params.task_id while True: task = await self._store.get_task(task_id) if task is None: raise MCPError(code=INVALID_PARAMS, message=f"Task not found: {task_id}") await self._deliver_queued_messages(task_id, session, request_id) # If task is terminal, return result if is_terminal(task.status): result = await self._store.get_result(task_id) # GetTaskPayloadResult is a Result with extra="allow" # The stored result contains the actual payload data # Per spec: tasks/result MUST include _meta with related-task metadata related_task = RelatedTaskMetadata(task_id=task_id) related_task_meta: dict[str, Any] = {RELATED_TASK_METADATA_KEY: related_task.model_dump(by_alias=True)} if result is not None: result_data = result.model_dump(by_alias=True) existing_meta: dict[str, Any] = result_data.get("_meta") or {} result_data["_meta"] = {**existing_meta, **related_task_meta} return GetTaskPayloadResult.model_validate(result_data) return GetTaskPayloadResult.model_validate({"_meta": related_task_meta}) # Wait for task update (status change or new messages) await self._wait_for_task_update(task_id) async def _deliver_queued_messages( self, task_id: str, session: ServerSession, request_id: RequestId, ) -> None: """Dequeue and send all pending messages for a task. 
Each message is sent via the session's write stream with relatedRequestId set so responses route back to this stream. """ while True: message = await self._queue.dequeue(task_id) if message is None: break # If this is a request (not notification), wait for response if message.type == "request" and message.resolver is not None: # Store the resolver so we can route the response back original_id = message.original_request_id if original_id is not None: self._pending_requests[original_id] = message.resolver logger.debug("Delivering queued message for task %s: %s", task_id, message.type) # Send the message with relatedRequestId for routing session_message = SessionMessage( message=message.message, metadata=ServerMessageMetadata(related_request_id=request_id), ) await self.send_message(session, session_message) async def _wait_for_task_update(self, task_id: str) -> None: """Wait for task to be updated (status change or new message). Races between store update and queue message - first one wins. """ async with anyio.create_task_group() as tg: async def wait_for_store() -> None: try: await self._store.wait_for_update(task_id) except Exception: pass finally: tg.cancel_scope.cancel() async def wait_for_queue() -> None: try: await self._queue.wait_for_message(task_id) except Exception: pass finally: tg.cancel_scope.cancel() tg.start_soon(wait_for_store) tg.start_soon(wait_for_queue) def route_response(self, request_id: RequestId, response: dict[str, Any]) -> bool: """Route a response back to the waiting resolver. This is called when a response arrives for a queued request. 
Args: request_id: The request ID from the response response: The response data Returns: True if response was routed, False if no pending request """ resolver = self._pending_requests.pop(request_id, None) if resolver is not None and not resolver.done(): resolver.set_result(response) return True return False def route_error(self, request_id: RequestId, error: ErrorData) -> bool: """Route an error back to the waiting resolver. Args: request_id: The request ID from the error response error: The error data Returns: True if error was routed, False if no pending request """ resolver = self._pending_requests.pop(request_id, None) if resolver is not None and not resolver.done(): resolver.set_exception(MCPError.from_error_data(error)) return True return False
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/experimental/task_result_handler.py", "license": "MIT License", "lines": 179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/server/experimental/task_support.py
"""TaskSupport - Configuration for experimental task support.

This module provides the TaskSupport class which encapsulates all the
infrastructure needed for task-augmented requests: store, queue, and handler.
"""

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass, field

import anyio
from anyio.abc import TaskGroup

from mcp.server.experimental.task_result_handler import TaskResultHandler
from mcp.server.session import ServerSession
from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore
from mcp.shared.experimental.tasks.message_queue import InMemoryTaskMessageQueue, TaskMessageQueue
from mcp.shared.experimental.tasks.store import TaskStore


@dataclass
class TaskSupport:
    """Bundle of infrastructure backing experimental task support.

    Holds the task store, the message queue, the result handler derived from
    them, and (while running) a task group used to spawn background work.

    When enabled on a server this automatically:
    - wires response routing into each session,
    - supplies default handlers for the task endpoints,
    - owns the task group in which background tasks execute.

    Example:
        Simple in-memory setup:

        ```python
        server.experimental.enable_tasks()
        ```

        Custom store/queue for distributed systems:

        ```python
        server.experimental.enable_tasks(
            store=RedisTaskStore(redis_url),
            queue=RedisTaskMessageQueue(redis_url),
        )
        ```
    """

    store: TaskStore
    queue: TaskMessageQueue
    handler: TaskResultHandler = field(init=False)
    _task_group: TaskGroup | None = field(init=False, default=None)

    def __post_init__(self) -> None:
        """Derive the result handler from the configured store and queue."""
        self.handler = TaskResultHandler(self.store, self.queue)

    @property
    def task_group(self) -> TaskGroup:
        """The task group used to spawn background work.

        Raises:
            RuntimeError: If accessed outside of a run() context
        """
        tg = self._task_group
        if tg is None:
            raise RuntimeError("TaskSupport not running. Ensure Server.run() is active.")
        return tg

    @asynccontextmanager
    async def run(self) -> AsyncIterator[None]:
        """Manage the task-support lifecycle.

        Opens a task group for spawning background task work; Server.run()
        enters this context automatically.

        Usage:
            async with task_support.run():
                # Task group is now available
                ...
        """
        async with anyio.create_task_group() as tg:
            self._task_group = tg
            try:
                yield
            finally:
                # Always drop the reference so task_group raises after exit.
                self._task_group = None

    def configure_session(self, session: ServerSession) -> None:
        """Wire task support into a session.

        Registers the result handler as a response router so replies to
        queued requests (elicitation, sampling) reach their waiting
        resolvers. Server.run() calls this for every new session.

        Args:
            session: The session to configure
        """
        session.add_response_router(self.handler)

    @classmethod
    def in_memory(cls) -> "TaskSupport":
        """Build a TaskSupport backed entirely by in-memory components.

        Suitable for development, testing, and single-process servers;
        distributed deployments should supply their own store and queue.

        Returns:
            TaskSupport configured with in-memory store and queue
        """
        return cls(store=InMemoryTaskStore(), queue=InMemoryTaskMessageQueue())
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/experimental/task_support.py", "license": "MIT License", "lines": 90, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/server/lowlevel/experimental.py
"""Experimental handlers for the low-level MCP server.

WARNING: These APIs are experimental and may change without notice.
"""

from __future__ import annotations

import logging
from collections.abc import Awaitable, Callable
from typing import Any, Generic

from typing_extensions import TypeVar

from mcp.server.context import ServerRequestContext
from mcp.server.experimental.task_support import TaskSupport
from mcp.shared.exceptions import MCPError
from mcp.shared.experimental.tasks.helpers import cancel_task
from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore
from mcp.shared.experimental.tasks.message_queue import InMemoryTaskMessageQueue, TaskMessageQueue
from mcp.shared.experimental.tasks.store import TaskStore
from mcp.types import (
    INVALID_PARAMS,
    CancelTaskRequestParams,
    CancelTaskResult,
    GetTaskPayloadRequest,
    GetTaskPayloadRequestParams,
    GetTaskPayloadResult,
    GetTaskRequestParams,
    GetTaskResult,
    ListTasksResult,
    PaginatedRequestParams,
    ServerCapabilities,
    ServerTasksCapability,
    ServerTasksRequestsCapability,
    TasksCallCapability,
    TasksCancelCapability,
    TasksListCapability,
    TasksToolsCapability,
)

logger = logging.getLogger(__name__)

LifespanResultT = TypeVar("LifespanResultT", default=Any)


class ExperimentalHandlers(Generic[LifespanResultT]):
    """Experimental request/notification handlers.

    Bridges task-related request handling onto the low-level server via two
    callbacks supplied by the server: one to register a handler for a method
    name, and one to query whether a handler is already registered.

    WARNING: These APIs are experimental and may change without notice.
    """

    def __init__(
        self,
        add_request_handler: Callable[
            [str, Callable[[ServerRequestContext[LifespanResultT], Any], Awaitable[Any]]], None
        ],
        has_handler: Callable[[str], bool],
    ) -> None:
        """Store the server's handler-registration callbacks.

        Args:
            add_request_handler: Registers a handler for a JSON-RPC method name.
            has_handler: Returns True if a handler exists for a method name.
        """
        self._add_request_handler = add_request_handler
        self._has_handler = has_handler
        # Populated by enable_tasks(); None until tasks are enabled.
        self._task_support: TaskSupport | None = None

    @property
    def task_support(self) -> TaskSupport | None:
        """Get the task support configuration, if enabled."""
        return self._task_support

    def update_capabilities(self, capabilities: ServerCapabilities) -> None:
        """Advertise task capabilities based on which handlers are registered.

        Mutates ``capabilities`` in place; does nothing if no tasks/* handler
        has been registered at all.
        """
        # Only add tasks capability if handlers are registered
        if not any(self._has_handler(method) for method in ["tasks/get", "tasks/list", "tasks/cancel", "tasks/result"]):
            return
        capabilities.tasks = ServerTasksCapability()
        if self._has_handler("tasks/list"):
            capabilities.tasks.list = TasksListCapability()
        if self._has_handler("tasks/cancel"):
            capabilities.tasks.cancel = TasksCancelCapability()
        # Task-augmented tools/call is advertised unconditionally once any
        # tasks handler exists.
        capabilities.tasks.requests = ServerTasksRequestsCapability(
            tools=TasksToolsCapability(call=TasksCallCapability())
        )  # assuming always supported for now

    def enable_tasks(
        self,
        store: TaskStore | None = None,
        queue: TaskMessageQueue | None = None,
        *,
        on_get_task: Callable[[ServerRequestContext[LifespanResultT], GetTaskRequestParams], Awaitable[GetTaskResult]]
        | None = None,
        on_task_result: Callable[
            [ServerRequestContext[LifespanResultT], GetTaskPayloadRequestParams], Awaitable[GetTaskPayloadResult]
        ]
        | None = None,
        on_list_tasks: Callable[
            [ServerRequestContext[LifespanResultT], PaginatedRequestParams | None], Awaitable[ListTasksResult]
        ]
        | None = None,
        on_cancel_task: Callable[
            [ServerRequestContext[LifespanResultT], CancelTaskRequestParams], Awaitable[CancelTaskResult]
        ]
        | None = None,
    ) -> TaskSupport:
        """Enable experimental task support.

        This sets up the task infrastructure and registers handlers for
        tasks/get, tasks/result, tasks/list, and tasks/cancel. Custom handlers
        can be provided via the on_* kwargs; any not provided will use defaults.

        Args:
            store: Custom TaskStore implementation (defaults to InMemoryTaskStore)
            queue: Custom TaskMessageQueue implementation (defaults to InMemoryTaskMessageQueue)
            on_get_task: Custom handler for tasks/get
            on_task_result: Custom handler for tasks/result
            on_list_tasks: Custom handler for tasks/list
            on_cancel_task: Custom handler for tasks/cancel

        Returns:
            The TaskSupport configuration object

        Example:
            Simple in-memory setup:

            ```python
            server.experimental.enable_tasks()
            ```

            Custom store/queue for distributed systems:

            ```python
            server.experimental.enable_tasks(
                store=RedisTaskStore(redis_url),
                queue=RedisTaskMessageQueue(redis_url),
            )
            ```

        WARNING: This API is experimental and may change without notice.
        """
        if store is None:
            store = InMemoryTaskStore()
        if queue is None:
            queue = InMemoryTaskMessageQueue()
        self._task_support = TaskSupport(store=store, queue=queue)
        # Bind to a local so the default handler closures capture the
        # TaskSupport created here, not whatever self._task_support is later.
        task_support = self._task_support

        # Register user-provided handlers first: defaults below are only
        # installed for methods that still have no handler.
        if on_get_task is not None:
            self._add_request_handler("tasks/get", on_get_task)
        if on_task_result is not None:
            self._add_request_handler("tasks/result", on_task_result)
        if on_list_tasks is not None:
            self._add_request_handler("tasks/list", on_list_tasks)
        if on_cancel_task is not None:
            self._add_request_handler("tasks/cancel", on_cancel_task)

        # Fill in defaults for any not provided
        if not self._has_handler("tasks/get"):

            async def _default_get_task(
                ctx: ServerRequestContext[LifespanResultT], params: GetTaskRequestParams
            ) -> GetTaskResult:
                # Look up the task and project its public fields into the result.
                task = await task_support.store.get_task(params.task_id)
                if task is None:
                    raise MCPError(code=INVALID_PARAMS, message=f"Task not found: {params.task_id}")
                return GetTaskResult(
                    task_id=task.task_id,
                    status=task.status,
                    status_message=task.status_message,
                    created_at=task.created_at,
                    last_updated_at=task.last_updated_at,
                    ttl=task.ttl,
                    poll_interval=task.poll_interval,
                )

            self._add_request_handler("tasks/get", _default_get_task)

        if not self._has_handler("tasks/result"):

            async def _default_get_task_result(
                ctx: ServerRequestContext[LifespanResultT], params: GetTaskPayloadRequestParams
            ) -> GetTaskPayloadResult:
                # Delegate to the result handler, which streams queued messages
                # and the final payload back over this request.
                assert ctx.request_id is not None
                req = GetTaskPayloadRequest(params=params)
                result = await task_support.handler.handle(req, ctx.session, ctx.request_id)
                return result

            self._add_request_handler("tasks/result", _default_get_task_result)

        if not self._has_handler("tasks/list"):

            async def _default_list_tasks(
                ctx: ServerRequestContext[LifespanResultT], params: PaginatedRequestParams | None
            ) -> ListTasksResult:
                # params may be absent entirely; treat that as "first page".
                cursor = params.cursor if params else None
                tasks, next_cursor = await task_support.store.list_tasks(cursor)
                return ListTasksResult(tasks=tasks, next_cursor=next_cursor)

            self._add_request_handler("tasks/list", _default_list_tasks)

        if not self._has_handler("tasks/cancel"):

            async def _default_cancel_task(
                ctx: ServerRequestContext[LifespanResultT], params: CancelTaskRequestParams
            ) -> CancelTaskResult:
                # cancel_task performs spec-compliant validation (rejects
                # unknown tasks and terminal-state tasks with INVALID_PARAMS).
                result = await cancel_task(task_support.store, params.task_id)
                return result

            self._add_request_handler("tasks/cancel", _default_cancel_task)

        return task_support
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/lowlevel/experimental.py", "license": "MIT License", "lines": 171, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/server/validation.py
"""Shared validation functions for server requests.

This module provides validation logic for sampling and elicitation requests
that is shared across normal and task-augmented code paths.
"""

from mcp.shared.exceptions import MCPError
from mcp.types import INVALID_PARAMS, ClientCapabilities, SamplingMessage, Tool, ToolChoice


def check_sampling_tools_capability(client_caps: ClientCapabilities | None) -> bool:
    """Report whether the client declared the sampling tools capability.

    Args:
        client_caps: The client's declared capabilities

    Returns:
        True if client supports sampling.tools, False otherwise
    """
    return (
        client_caps is not None
        and client_caps.sampling is not None
        and client_caps.sampling.tools is not None
    )


def validate_sampling_tools(
    client_caps: ClientCapabilities | None,
    tools: list[Tool] | None,
    tool_choice: ToolChoice | None,
) -> None:
    """Ensure any use of sampling tools is backed by a client capability.

    Args:
        client_caps: The client's declared capabilities
        tools: The tools list, if provided
        tool_choice: The tool choice setting, if provided

    Raises:
        MCPError: If tools/tool_choice are provided but client doesn't support them
    """
    if tools is None and tool_choice is None:
        return
    if not check_sampling_tools_capability(client_caps):
        raise MCPError(code=INVALID_PARAMS, message="Client does not support sampling tools capability")


def validate_tool_use_result_messages(messages: list[SamplingMessage]) -> None:
    """Validate tool_use/tool_result message structure per SEP-1577.

    This validation ensures:
    1. Messages with tool_result content contain ONLY tool_result content
    2. tool_result messages are preceded by a message with tool_use
    3. tool_result IDs match the tool_use IDs from the previous message

    See: https://github.com/modelcontextprotocol/modelcontextprotocol/issues/1577

    Args:
        messages: The list of sampling messages to validate

    Raises:
        ValueError: If the message structure is invalid
    """
    if not messages:
        return

    tail = messages[-1].content_as_list
    tail_has_results = any(block.type == "tool_result" for block in tail)

    prior = messages[-2].content_as_list if len(messages) >= 2 else None
    prior_has_tool_use = bool(prior) and any(block.type == "tool_use" for block in prior)

    if tail_has_results:
        # Per spec: "SamplingMessage with tool result content blocks
        # MUST NOT contain other content types."
        if not all(block.type == "tool_result" for block in tail):
            raise ValueError("The last message must contain only tool_result content if any is present")
        if prior is None:
            raise ValueError("tool_result requires a previous message containing tool_use")
        if not prior_has_tool_use:
            raise ValueError("tool_result blocks do not match any tool_use in the previous message")

    # If the previous message used tools, the final message's results must
    # cover exactly those tool_use ids (this also rejects a tool_use message
    # that is followed by no results at all).
    if prior_has_tool_use and prior:
        use_ids = {block.id for block in prior if block.type == "tool_use"}
        result_ids = {block.tool_use_id for block in tail if block.type == "tool_result"}
        if use_ids != result_ids:
            raise ValueError("ids of tool_result blocks and tool_use blocks from previous message do not match")
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/server/validation.py", "license": "MIT License", "lines": 68, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/capabilities.py
"""Tasks capability checking utilities.

This module provides functions for checking and requiring task-related capabilities.
All tasks capability logic is centralized here to keep the main session code clean.

WARNING: These APIs are experimental and may change without notice.
"""

from mcp.shared.exceptions import MCPError
from mcp.types import INVALID_REQUEST, ClientCapabilities, ClientTasksCapability


def check_tasks_capability(
    required: ClientTasksCapability,
    client: ClientTasksCapability,
) -> bool:
    """Check whether the client's tasks capability satisfies a requirement.

    Args:
        required: The capability being checked for
        client: The client's declared capabilities

    Returns:
        True if client has the required capability, False otherwise
    """
    need = required.requests
    if need is None:
        # Nothing specific required -> trivially satisfied.
        return True
    have = client.requests
    if have is None:
        return False

    # elicitation.create
    if need.elicitation is not None:
        if have.elicitation is None:
            return False
        if need.elicitation.create is not None and have.elicitation.create is None:
            return False

    # sampling.createMessage
    if need.sampling is not None:
        if have.sampling is None:
            return False
        if need.sampling.create_message is not None and have.sampling.create_message is None:
            return False

    return True


def has_task_augmented_elicitation(caps: ClientCapabilities) -> bool:
    """Check if capabilities include task-augmented elicitation support."""
    tasks = caps.tasks
    return (
        tasks is not None
        and tasks.requests is not None
        and tasks.requests.elicitation is not None
        and tasks.requests.elicitation.create is not None
    )


def has_task_augmented_sampling(caps: ClientCapabilities) -> bool:
    """Check if capabilities include task-augmented sampling support."""
    tasks = caps.tasks
    return (
        tasks is not None
        and tasks.requests is not None
        and tasks.requests.sampling is not None
        and tasks.requests.sampling.create_message is not None
    )


def require_task_augmented_elicitation(client_caps: ClientCapabilities | None) -> None:
    """Raise MCPError if client doesn't support task-augmented elicitation.

    Args:
        client_caps: The client's declared capabilities, or None if not initialized

    Raises:
        MCPError: If client doesn't support task-augmented elicitation
    """
    supported = client_caps is not None and has_task_augmented_elicitation(client_caps)
    if not supported:
        raise MCPError(code=INVALID_REQUEST, message="Client does not support task-augmented elicitation")


def require_task_augmented_sampling(client_caps: ClientCapabilities | None) -> None:
    """Raise MCPError if client doesn't support task-augmented sampling.

    Args:
        client_caps: The client's declared capabilities, or None if not initialized

    Raises:
        MCPError: If client doesn't support task-augmented sampling
    """
    supported = client_caps is not None and has_task_augmented_sampling(client_caps)
    if not supported:
        raise MCPError(code=INVALID_REQUEST, message="Client does not support task-augmented sampling")
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/capabilities.py", "license": "MIT License", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/context.py
"""TaskContext - Pure task state management.

This module provides TaskContext, which manages task state without any
server/session dependencies. It can be used standalone for distributed
workers or wrapped by ServerTaskContext for full server integration.
"""

from mcp.shared.experimental.tasks.store import TaskStore
from mcp.types import TASK_STATUS_COMPLETED, TASK_STATUS_FAILED, Result, Task


class TaskContext:
    """Task state management with no session dependencies.

    Responsibilities:
    - Track the task's current state (status, result)
    - Track cancellation requests
    - Talk to the backing store

    Server-integrated features (elicit, create_message, notifications) live in
    ServerTaskContext from mcp.server.experimental instead.

    Example (distributed worker):
        async def worker_job(task_id: str):
            store = RedisTaskStore(redis_url)
            task = await store.get_task(task_id)
            ctx = TaskContext(task=task, store=store)
            await ctx.update_status("Working...")
            result = await do_work()
            await ctx.complete(result)
    """

    def __init__(self, task: Task, store: TaskStore):
        self._task = task
        self._store = store
        self._cancelled = False

    @property
    def task_id(self) -> str:
        """The task identifier."""
        return self._task.task_id

    @property
    def task(self) -> Task:
        """The current task state."""
        return self._task

    @property
    def is_cancelled(self) -> bool:
        """Whether cancellation has been requested."""
        return self._cancelled

    def request_cancellation(self) -> None:
        """Flag this task for cancellation.

        Only sets is_cancelled=True; the task's work is expected to poll the
        flag periodically and wind down gracefully.
        """
        self._cancelled = True

    async def update_status(self, message: str) -> None:
        """Replace the task's status message in the store.

        Args:
            message: The new status message
        """
        self._task = await self._store.update_task(self.task_id, status_message=message)

    async def complete(self, result: Result) -> None:
        """Record the result and move the task to the completed state.

        Args:
            result: The task result
        """
        # Persist the payload first so it is available once the status flips.
        await self._store.store_result(self.task_id, result)
        self._task = await self._store.update_task(self.task_id, status=TASK_STATUS_COMPLETED)

    async def fail(self, error: str) -> None:
        """Move the task to the failed state with an error message.

        Args:
            error: The error message
        """
        self._task = await self._store.update_task(
            self.task_id,
            status=TASK_STATUS_FAILED,
            status_message=error,
        )
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/context.py", "license": "MIT License", "lines": 75, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/helpers.py
"""Helper functions for pure task management.

These helpers work with pure TaskContext and don't require server dependencies.
For server-integrated task helpers, use mcp.server.experimental.
"""

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from uuid import uuid4

from mcp.shared.exceptions import MCPError
from mcp.shared.experimental.tasks.context import TaskContext
from mcp.shared.experimental.tasks.store import TaskStore
from mcp.types import (
    INVALID_PARAMS,
    TASK_STATUS_CANCELLED,
    TASK_STATUS_COMPLETED,
    TASK_STATUS_FAILED,
    TASK_STATUS_WORKING,
    CancelTaskResult,
    Task,
    TaskMetadata,
    TaskStatus,
)

# Metadata key for model-immediate-response (per MCP spec)
# Servers MAY include this in CreateTaskResult._meta to provide an immediate
# response string while the task executes in the background.
MODEL_IMMEDIATE_RESPONSE_KEY = "io.modelcontextprotocol/model-immediate-response"

# Metadata key for associating requests with a task (per MCP spec)
RELATED_TASK_METADATA_KEY = "io.modelcontextprotocol/related-task"


def is_terminal(status: TaskStatus) -> bool:
    """Check if a task status represents a terminal state.

    Terminal states are those where the task has finished and will not change.

    Args:
        status: The task status to check

    Returns:
        True if the status is terminal (completed, failed, or cancelled)
    """
    return status in {TASK_STATUS_COMPLETED, TASK_STATUS_FAILED, TASK_STATUS_CANCELLED}


async def cancel_task(
    store: TaskStore,
    task_id: str,
) -> CancelTaskResult:
    """Cancel a task with spec-compliant validation.

    Per spec: "Receivers MUST reject cancellation of terminal status tasks
    with -32602 (Invalid params)". The task must exist and must not already
    be terminal before it is moved to "cancelled".

    Args:
        store: The task store
        task_id: The task identifier to cancel

    Returns:
        CancelTaskResult with the cancelled task state

    Raises:
        MCPError: With INVALID_PARAMS (-32602) if:
            - Task does not exist
            - Task is already in a terminal state (completed, failed, cancelled)

    Example:
        ```python
        async def handle_cancel(ctx, params: CancelTaskRequestParams) -> CancelTaskResult:
            return await cancel_task(store, params.task_id)
        ```
    """
    current = await store.get_task(task_id)
    if current is None:
        raise MCPError(code=INVALID_PARAMS, message=f"Task not found: {task_id}")
    if is_terminal(current.status):
        raise MCPError(code=INVALID_PARAMS, message=f"Cannot cancel task in terminal state '{current.status}'")

    # Validation passed; flip the task to cancelled.
    updated = await store.update_task(task_id, status=TASK_STATUS_CANCELLED)
    return CancelTaskResult(**updated.model_dump())


def generate_task_id() -> str:
    """Generate a unique task ID."""
    return str(uuid4())


def create_task_state(
    metadata: TaskMetadata,
    task_id: str | None = None,
) -> Task:
    """Create a Task object with initial state.

    This is a helper for TaskStore implementations.

    Args:
        metadata: Task metadata
        task_id: Optional task ID (generated if not provided)

    Returns:
        A new Task in "working" status
    """
    timestamp = datetime.now(timezone.utc)
    return Task(
        task_id=task_id or generate_task_id(),
        status=TASK_STATUS_WORKING,
        created_at=timestamp,
        last_updated_at=timestamp,
        ttl=metadata.ttl,
        poll_interval=500,  # Default 500ms poll interval
    )


@asynccontextmanager
async def task_execution(
    task_id: str,
    store: TaskStore,
) -> AsyncIterator[TaskContext]:
    """Context manager for safe task execution (pure, no server dependencies).

    Loads a task from the store and provides a TaskContext for the work.
    If an unhandled exception occurs, the task is automatically marked as
    failed and the exception is suppressed (since the failure is captured
    in task state). Useful for distributed workers without a server session.

    Args:
        task_id: The task identifier to execute
        store: The task store (must be accessible by the worker)

    Yields:
        TaskContext for updating status and completing/failing the task

    Raises:
        ValueError: If the task is not found in the store

    Example (distributed worker):
        async def worker_process(task_id: str):
            store = RedisTaskStore(redis_url)
            async with task_execution(task_id, store) as ctx:
                await ctx.update_status("Working...")
                result = await do_work()
                await ctx.complete(result)
    """
    loaded = await store.get_task(task_id)
    if loaded is None:
        raise ValueError(f"Task {task_id} not found")

    ctx = TaskContext(loaded, store)
    try:
        yield ctx
    except Exception as exc:
        # Record the failure in task state instead of propagating it, unless
        # the work already drove the task to a terminal status itself.
        if not is_terminal(ctx.task.status):
            await ctx.fail(str(exc))
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/helpers.py", "license": "MIT License", "lines": 130, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/in_memory_task_store.py
"""In-memory implementation of TaskStore for demonstration purposes.

This implementation stores all tasks in memory and provides automatic cleanup
based on the TTL duration specified in the task metadata using lazy expiration.

Note: This is not suitable for production use as all data is lost on restart.
For production, consider implementing TaskStore with a database or distributed cache.
"""

from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone

import anyio

from mcp.shared.experimental.tasks.helpers import create_task_state, is_terminal
from mcp.shared.experimental.tasks.store import TaskStore
from mcp.types import Result, Task, TaskMetadata, TaskStatus


@dataclass
class StoredTask:
    """Internal storage representation of a task."""

    # The task's public state.
    task: Task
    # The stored result payload, if any.
    result: Result | None = None
    # Time when this task should be removed (None = never)
    expires_at: datetime | None = field(default=None)


class InMemoryTaskStore(TaskStore):
    """A simple in-memory implementation of TaskStore.

    Features:
    - Automatic TTL-based cleanup (lazy expiration)
    - Thread-safe for single-process async use
    - Pagination support for list_tasks

    Limitations:
    - All data lost on restart
    - Not suitable for distributed systems
    - No persistence

    For production, implement TaskStore with Redis, PostgreSQL, etc.
    """

    def __init__(self, page_size: int = 10) -> None:
        """Initialize the store.

        Args:
            page_size: Maximum number of tasks returned per list_tasks() page.
        """
        self._tasks: dict[str, StoredTask] = {}
        self._page_size = page_size
        # One pending wake-up event per task_id; see wait_for_update().
        self._update_events: dict[str, anyio.Event] = {}

    def _calculate_expiry(self, ttl_ms: int | None) -> datetime | None:
        """Calculate expiry time from TTL in milliseconds."""
        if ttl_ms is None:
            return None
        return datetime.now(timezone.utc) + timedelta(milliseconds=ttl_ms)

    def _is_expired(self, stored: StoredTask) -> bool:
        """Check if a task has expired."""
        if stored.expires_at is None:
            return False
        return datetime.now(timezone.utc) >= stored.expires_at

    def _cleanup_expired(self) -> None:
        """Remove all expired tasks. Called lazily during access operations."""
        expired_ids = [task_id for task_id, stored in self._tasks.items() if self._is_expired(stored)]
        for task_id in expired_ids:
            del self._tasks[task_id]

    async def create_task(
        self,
        metadata: TaskMetadata,
        task_id: str | None = None,
    ) -> Task:
        """Create a new task with the given metadata.

        Args:
            metadata: Task metadata (including optional TTL)
            task_id: Optional explicit task ID (generated if not provided)

        Returns:
            A copy of the newly created task

        Raises:
            ValueError: If a task with the same ID already exists
        """
        # Cleanup expired tasks on access
        self._cleanup_expired()

        task = create_task_state(metadata, task_id)
        if task.task_id in self._tasks:
            raise ValueError(f"Task with ID {task.task_id} already exists")

        stored = StoredTask(
            task=task,
            expires_at=self._calculate_expiry(metadata.ttl),
        )
        self._tasks[task.task_id] = stored

        # Return a copy to prevent external modification
        return Task(**task.model_dump())

    async def get_task(self, task_id: str) -> Task | None:
        """Get a task by ID, or None if it does not exist (or has expired)."""
        # Cleanup expired tasks on access
        self._cleanup_expired()

        stored = self._tasks.get(task_id)
        if stored is None:
            return None

        # Return a copy to prevent external modification
        return Task(**stored.task.model_dump())

    async def update_task(
        self,
        task_id: str,
        status: TaskStatus | None = None,
        status_message: str | None = None,
    ) -> Task:
        """Update a task's status and/or message.

        Args:
            task_id: The task to update
            status: New status, if changing
            status_message: New status message, if changing

        Returns:
            A copy of the updated task

        Raises:
            ValueError: If the task does not exist, or if the update would
                transition a terminal task to a different status
        """
        stored = self._tasks.get(task_id)
        if stored is None:
            raise ValueError(f"Task with ID {task_id} not found")

        # Per spec: Terminal states MUST NOT transition to any other status
        # (re-asserting the same terminal status is a harmless no-op).
        if status is not None and status != stored.task.status and is_terminal(stored.task.status):
            raise ValueError(f"Cannot transition from terminal status '{stored.task.status}'")

        status_changed = False
        if status is not None and stored.task.status != status:
            stored.task.status = status
            status_changed = True
        if status_message is not None:
            stored.task.status_message = status_message

        # Update last_updated_at on any change
        stored.task.last_updated_at = datetime.now(timezone.utc)

        # If task is now terminal and has TTL, reset expiry timer
        if status is not None and is_terminal(status) and stored.task.ttl is not None:
            stored.expires_at = self._calculate_expiry(stored.task.ttl)

        # Notify waiters if status changed
        if status_changed:
            await self.notify_update(task_id)

        return Task(**stored.task.model_dump())

    async def store_result(self, task_id: str, result: Result) -> None:
        """Store the result for a task.

        Raises:
            ValueError: If the task does not exist
        """
        stored = self._tasks.get(task_id)
        if stored is None:
            raise ValueError(f"Task with ID {task_id} not found")
        stored.result = result

    async def get_result(self, task_id: str) -> Result | None:
        """Get the stored result for a task, or None if absent."""
        stored = self._tasks.get(task_id)
        if stored is None:
            return None
        return stored.result

    async def list_tasks(
        self,
        cursor: str | None = None,
    ) -> tuple[list[Task], str | None]:
        """List tasks with pagination.

        Args:
            cursor: Task ID of the last item of the previous page, or None
                for the first page

        Returns:
            (tasks, next_cursor) where next_cursor is None on the last page

        Raises:
            ValueError: If the cursor does not reference a known task
        """
        # Cleanup expired tasks on access
        self._cleanup_expired()

        all_task_ids = list(self._tasks.keys())
        start_index = 0

        if cursor is not None:
            try:
                cursor_index = all_task_ids.index(cursor)
                start_index = cursor_index + 1
            except ValueError:
                # `from None`: the internal list.index failure is an
                # implementation detail; surface only the cursor error.
                raise ValueError(f"Invalid cursor: {cursor}") from None

        page_task_ids = all_task_ids[start_index : start_index + self._page_size]
        tasks = [Task(**self._tasks[tid].task.model_dump()) for tid in page_task_ids]

        # Determine next cursor
        next_cursor = None
        if start_index + self._page_size < len(all_task_ids) and page_task_ids:
            next_cursor = page_task_ids[-1]

        return tasks, next_cursor

    async def delete_task(self, task_id: str) -> bool:
        """Delete a task. Returns True if it existed, False otherwise."""
        if task_id not in self._tasks:
            return False
        del self._tasks[task_id]
        return True

    async def wait_for_update(self, task_id: str) -> None:
        """Wait until the task status changes.

        Raises:
            ValueError: If the task does not exist
        """
        if task_id not in self._tasks:
            raise ValueError(f"Task with ID {task_id} not found")
        # BUGFIX: previously a fresh anyio.Event unconditionally replaced any
        # existing one, so with two concurrent waiters notify_update() only
        # set the newest event and the earlier waiter hung forever. Reuse the
        # pending (unset) event so every waiter shares the same wake-up;
        # allocate a new one only if none exists or it has already fired
        # (anyio.Event cannot be cleared).
        event = self._update_events.get(task_id)
        if event is None or event.is_set():
            event = anyio.Event()
            self._update_events[task_id] = event
        await event.wait()

    async def notify_update(self, task_id: str) -> None:
        """Signal that a task has been updated."""
        if task_id in self._update_events:
            self._update_events[task_id].set()

    # --- Testing/debugging helpers ---

    def cleanup(self) -> None:
        """Cleanup all tasks (useful for testing or graceful shutdown)."""
        self._tasks.clear()
        self._update_events.clear()

    def get_all_tasks(self) -> list[Task]:
        """Get all tasks (useful for debugging). Returns copies to prevent modification."""
        self._cleanup_expired()
        return [Task(**stored.task.model_dump()) for stored in self._tasks.values()]
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/in_memory_task_store.py", "license": "MIT License", "lines": 166, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/message_queue.py
"""TaskMessageQueue - FIFO queue for task-related messages.

This implements the core message queue pattern from the MCP Tasks spec.
When a handler needs to send a request (like elicitation) during a
task-augmented request, the message is enqueued instead of sent directly.
Messages are delivered to the client only through the `tasks/result` endpoint.

This pattern enables:
1. Decoupling request handling from message delivery
2. Proper bidirectional communication via the tasks/result stream
3. Automatic status management (working <-> input_required)
"""

from abc import ABC, abstractmethod
from collections import deque
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Literal

import anyio

from mcp.shared.experimental.tasks.resolver import Resolver
from mcp.types import JSONRPCNotification, JSONRPCRequest, RequestId


@dataclass
class QueuedMessage:
    """A message queued for delivery via tasks/result.

    Messages are stored with their type and a resolver for requests
    that expect responses.
    """

    type: Literal["request", "notification"]
    """Whether this is a request (expects response) or notification (one-way)."""

    message: JSONRPCRequest | JSONRPCNotification
    """The JSON-RPC message to send."""

    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    """When the message was enqueued."""

    resolver: Resolver[dict[str, Any]] | None = None
    """Resolver to set when response arrives (only for requests)."""

    original_request_id: RequestId | None = None
    """The original request ID used internally, for routing responses back."""


class TaskMessageQueue(ABC):
    """Abstract interface for task message queuing.

    This is a FIFO queue that stores messages to be delivered via `tasks/result`.
    When a task-augmented handler calls elicit() or sends a notification,
    the message is enqueued here instead of being sent directly to the client.

    The `tasks/result` handler then dequeues and sends these messages through
    the transport, with `relatedRequestId` set to the tasks/result request ID
    so responses are routed correctly.

    Implementations can use in-memory storage, Redis, etc.
    """

    @abstractmethod
    async def enqueue(self, task_id: str, message: QueuedMessage) -> None:
        """Add a message to the queue for a task.

        Args:
            task_id: The task identifier
            message: The message to enqueue
        """

    @abstractmethod
    async def dequeue(self, task_id: str) -> QueuedMessage | None:
        """Remove and return the next message from the queue.

        Args:
            task_id: The task identifier

        Returns:
            The next message, or None if queue is empty
        """

    @abstractmethod
    async def peek(self, task_id: str) -> QueuedMessage | None:
        """Return the next message without removing it.

        Args:
            task_id: The task identifier

        Returns:
            The next message, or None if queue is empty
        """

    @abstractmethod
    async def is_empty(self, task_id: str) -> bool:
        """Check if the queue is empty for a task.

        Args:
            task_id: The task identifier

        Returns:
            True if no messages are queued
        """

    @abstractmethod
    async def clear(self, task_id: str) -> list[QueuedMessage]:
        """Remove and return all messages from the queue.

        This is useful for cleanup when a task is cancelled or completed.

        Args:
            task_id: The task identifier

        Returns:
            All queued messages (may be empty)
        """

    @abstractmethod
    async def wait_for_message(self, task_id: str) -> None:
        """Wait until a message is available in the queue.

        This blocks until either:
        1. A message is enqueued for this task
        2. The wait is cancelled

        Args:
            task_id: The task identifier
        """

    @abstractmethod
    async def notify_message_available(self, task_id: str) -> None:
        """Signal that a message is available for a task.

        This wakes up any coroutines waiting in wait_for_message().

        Args:
            task_id: The task identifier
        """


class InMemoryTaskMessageQueue(TaskMessageQueue):
    """In-memory implementation of TaskMessageQueue.

    This is suitable for single-process servers. For distributed systems,
    implement TaskMessageQueue with Redis, RabbitMQ, etc.

    Features:
    - FIFO ordering per task
    - Async wait for message availability
    - Thread-safe for single-process async use
    """

    def __init__(self) -> None:
        self._queues: dict[str, deque[QueuedMessage]] = {}
        # One pending wake-up event per task_id; see wait_for_message().
        self._events: dict[str, anyio.Event] = {}

    def _get_queue(self, task_id: str) -> deque[QueuedMessage]:
        """Get or create the queue for a task."""
        if task_id not in self._queues:
            self._queues[task_id] = deque()
        return self._queues[task_id]

    async def enqueue(self, task_id: str, message: QueuedMessage) -> None:
        """Add a message to the queue."""
        queue = self._get_queue(task_id)
        queue.append(message)
        # Signal that a message is available
        await self.notify_message_available(task_id)

    async def dequeue(self, task_id: str) -> QueuedMessage | None:
        """Remove and return the next message."""
        queue = self._get_queue(task_id)
        if not queue:
            return None
        return queue.popleft()

    async def peek(self, task_id: str) -> QueuedMessage | None:
        """Return the next message without removing it."""
        queue = self._get_queue(task_id)
        if not queue:
            return None
        return queue[0]

    async def is_empty(self, task_id: str) -> bool:
        """Check if the queue is empty."""
        queue = self._get_queue(task_id)
        return len(queue) == 0

    async def clear(self, task_id: str) -> list[QueuedMessage]:
        """Remove and return all messages."""
        queue = self._get_queue(task_id)
        messages = list(queue)
        queue.clear()
        return messages

    async def wait_for_message(self, task_id: str) -> None:
        """Wait until a message is available."""
        # Check if there are already messages
        if not await self.is_empty(task_id):
            return

        # BUGFIX: previously a fresh anyio.Event unconditionally replaced any
        # existing one, so with two concurrent waiters only the newest event
        # was set by notify_message_available() and the earlier waiter hung.
        # Reuse the pending (unset) event so all waiters share one wake-up;
        # allocate a new event only if none exists or it already fired
        # (anyio.Event cannot be cleared).
        event = self._events.get(task_id)
        if event is None or event.is_set():
            event = anyio.Event()
            self._events[task_id] = event

        # Double-check after registering the event (avoid race condition with
        # an enqueue that happened in between)
        if not await self.is_empty(task_id):
            return

        # Wait for a new message
        await event.wait()

    async def notify_message_available(self, task_id: str) -> None:
        """Signal that a message is available."""
        if task_id in self._events:
            self._events[task_id].set()

    def cleanup(self, task_id: str | None = None) -> None:
        """Clean up queues and events.

        Args:
            task_id: If provided, clean up only this task. Otherwise clean up all.
        """
        if task_id is not None:
            self._queues.pop(task_id, None)
            self._events.pop(task_id, None)
        else:
            self._queues.clear()
            self._events.clear()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/message_queue.py", "license": "MIT License", "lines": 173, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/polling.py
"""Shared polling utilities for task operations. This module provides generic polling logic that works for both client→server and server→client task polling. WARNING: These APIs are experimental and may change without notice. """ from collections.abc import AsyncIterator, Awaitable, Callable import anyio from mcp.shared.experimental.tasks.helpers import is_terminal from mcp.types import GetTaskResult async def poll_until_terminal( get_task: Callable[[str], Awaitable[GetTaskResult]], task_id: str, default_interval_ms: int = 500, ) -> AsyncIterator[GetTaskResult]: """Poll a task until it reaches terminal status. This is a generic utility that works for both client→server and server→client polling. The caller provides the get_task function appropriate for their direction. Args: get_task: Async function that takes task_id and returns GetTaskResult task_id: The task to poll default_interval_ms: Fallback poll interval if server doesn't specify Yields: GetTaskResult for each poll """ while True: status = await get_task(task_id) yield status if is_terminal(status.status): break interval_ms = status.poll_interval if status.poll_interval is not None else default_interval_ms await anyio.sleep(interval_ms / 1000)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/polling.py", "license": "MIT License", "lines": 31, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/resolver.py
"""Resolver - An anyio-compatible future-like object for async result passing. This provides a simple way to pass a result (or exception) from one coroutine to another without depending on asyncio.Future. """ from typing import Generic, TypeVar, cast import anyio T = TypeVar("T") class Resolver(Generic[T]): """A simple resolver for passing results between coroutines. Unlike asyncio.Future, this works with any anyio-compatible async backend. Usage: resolver: Resolver[str] = Resolver() # In one coroutine: resolver.set_result("hello") # In another coroutine: result = await resolver.wait() # returns "hello" """ def __init__(self) -> None: self._event = anyio.Event() self._value: T | None = None self._exception: BaseException | None = None def set_result(self, value: T) -> None: """Set the result value and wake up waiters.""" if self._event.is_set(): raise RuntimeError("Resolver already completed") self._value = value self._event.set() def set_exception(self, exc: BaseException) -> None: """Set an exception and wake up waiters.""" if self._event.is_set(): raise RuntimeError("Resolver already completed") self._exception = exc self._event.set() async def wait(self) -> T: """Wait for the result and return it, or raise the exception.""" await self._event.wait() if self._exception is not None: raise self._exception # If we reach here, set_result() was called, so _value is set return cast(T, self._value) def done(self) -> bool: """Return True if the resolver has been completed.""" return self._event.is_set()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/resolver.py", "license": "MIT License", "lines": 43, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
modelcontextprotocol/python-sdk:src/mcp/shared/experimental/tasks/store.py
"""TaskStore - Abstract interface for task state storage.""" from abc import ABC, abstractmethod from mcp.types import Result, Task, TaskMetadata, TaskStatus class TaskStore(ABC): """Abstract interface for task state storage. This is a pure storage interface - it doesn't manage execution. Implementations can use in-memory storage, databases, Redis, etc. All methods are async to support various backends. """ @abstractmethod async def create_task( self, metadata: TaskMetadata, task_id: str | None = None, ) -> Task: """Create a new task. Args: metadata: Task metadata (ttl, etc.) task_id: Optional task ID. If None, implementation should generate one. Returns: The created Task with status="working" Raises: ValueError: If task_id already exists """ @abstractmethod async def get_task(self, task_id: str) -> Task | None: """Get a task by ID. Args: task_id: The task identifier Returns: The Task, or None if not found """ @abstractmethod async def update_task( self, task_id: str, status: TaskStatus | None = None, status_message: str | None = None, ) -> Task: """Update a task's status and/or message. Args: task_id: The task identifier status: New status (if changing) status_message: New status message (if changing) Returns: The updated Task Raises: ValueError: If task not found ValueError: If attempting to transition from a terminal status (completed, failed, cancelled). Per spec, terminal states MUST NOT transition to any other status. """ @abstractmethod async def store_result(self, task_id: str, result: Result) -> None: """Store the result for a task. Args: task_id: The task identifier result: The result to store Raises: ValueError: If task not found """ @abstractmethod async def get_result(self, task_id: str) -> Result | None: """Get the stored result for a task. 
Args: task_id: The task identifier Returns: The stored Result, or None if not available """ @abstractmethod async def list_tasks( self, cursor: str | None = None, ) -> tuple[list[Task], str | None]: """List tasks with pagination. Args: cursor: Optional cursor for pagination Returns: Tuple of (tasks, next_cursor). next_cursor is None if no more pages. """ @abstractmethod async def delete_task(self, task_id: str) -> bool: """Delete a task. Args: task_id: The task identifier Returns: True if deleted, False if not found """ @abstractmethod async def wait_for_update(self, task_id: str) -> None: """Wait until the task status changes. This blocks until either: 1. The task status changes 2. The wait is cancelled Used by tasks/result to wait for task completion or status changes. Args: task_id: The task identifier Raises: ValueError: If task not found """ @abstractmethod async def notify_update(self, task_id: str) -> None: """Signal that a task has been updated. This wakes up any coroutines waiting in wait_for_update(). Args: task_id: The task identifier """
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/experimental/tasks/store.py", "license": "MIT License", "lines": 107, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:src/mcp/shared/response_router.py
"""ResponseRouter - Protocol for pluggable response routing. This module defines a protocol for routing JSON-RPC responses to alternative handlers before falling back to the default response stream mechanism. The primary use case is task-augmented requests: when a TaskSession enqueues a request (like elicitation), the response needs to be routed back to the waiting resolver instead of the normal response stream. Design: - Protocol-based for testability and flexibility - Returns bool to indicate if response was handled - Supports both success responses and errors """ from typing import Any, Protocol from mcp.types import ErrorData, RequestId class ResponseRouter(Protocol): """Protocol for routing responses to alternative handlers. Implementations check if they have a pending request for the given ID and deliver the response/error to the appropriate handler. Example: ```python class TaskResultHandler(ResponseRouter): def route_response(self, request_id, response): resolver = self._pending_requests.pop(request_id, None) if resolver: resolver.set_result(response) return True return False ``` """ def route_response(self, request_id: RequestId, response: dict[str, Any]) -> bool: """Try to route a response to a pending request handler. Args: request_id: The JSON-RPC request ID from the response response: The response result data Returns: True if the response was handled, False otherwise """ ... # pragma: no cover def route_error(self, request_id: RequestId, error: ErrorData) -> bool: """Try to route an error to a pending request handler. Args: request_id: The JSON-RPC request ID from the error response error: The error data Returns: True if the error was handled, False otherwise """ ... # pragma: no cover
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "src/mcp/shared/response_router.py", "license": "MIT License", "lines": 46, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
modelcontextprotocol/python-sdk:tests/experimental/tasks/client/test_capabilities.py
"""Tests for client task capabilities declaration during initialization.""" import anyio import pytest from mcp import ClientCapabilities, types from mcp.client.experimental.task_handlers import ExperimentalTaskHandlers from mcp.client.session import ClientSession from mcp.shared._context import RequestContext from mcp.shared.message import SessionMessage from mcp.types import ( LATEST_PROTOCOL_VERSION, Implementation, InitializeRequest, InitializeResult, JSONRPCRequest, JSONRPCResponse, ServerCapabilities, client_request_adapter, ) @pytest.mark.anyio async def test_client_capabilities_without_tasks(): """Test that tasks capability is None when not provided.""" client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](1) server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) received_capabilities = None async def mock_server(): nonlocal received_capabilities session_message = await client_to_server_receive.receive() jsonrpc_request = session_message.message assert isinstance(jsonrpc_request, JSONRPCRequest) request = client_request_adapter.validate_python( jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request, InitializeRequest) received_capabilities = request.params.capabilities result = InitializeResult( protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), server_info=Implementation(name="mock-server", version="0.1.0"), ) async with server_to_client_send: await server_to_client_send.send( SessionMessage( JSONRPCResponse( jsonrpc="2.0", id=jsonrpc_request.id, result=result.model_dump(by_alias=True, mode="json", exclude_none=True), ) ) ) await client_to_server_receive.receive() async with ( ClientSession( server_to_client_receive, client_to_server_send, ) as session, anyio.create_task_group() as tg, client_to_server_send, client_to_server_receive, server_to_client_send, server_to_client_receive, ): 
tg.start_soon(mock_server) await session.initialize() # Assert that tasks capability is None when not provided assert received_capabilities is not None assert received_capabilities.tasks is None @pytest.mark.anyio async def test_client_capabilities_with_tasks(): """Test that tasks capability is properly set when handlers are provided.""" client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](1) server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) received_capabilities: ClientCapabilities | None = None # Define custom handlers to trigger capability building (never actually called) async def my_list_tasks_handler( context: RequestContext[ClientSession], params: types.PaginatedRequestParams | None, ) -> types.ListTasksResult | types.ErrorData: raise NotImplementedError async def my_cancel_task_handler( context: RequestContext[ClientSession], params: types.CancelTaskRequestParams, ) -> types.CancelTaskResult | types.ErrorData: raise NotImplementedError async def mock_server(): nonlocal received_capabilities session_message = await client_to_server_receive.receive() jsonrpc_request = session_message.message assert isinstance(jsonrpc_request, JSONRPCRequest) request = client_request_adapter.validate_python( jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request, InitializeRequest) received_capabilities = request.params.capabilities result = InitializeResult( protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), server_info=Implementation(name="mock-server", version="0.1.0"), ) async with server_to_client_send: await server_to_client_send.send( SessionMessage( JSONRPCResponse( jsonrpc="2.0", id=jsonrpc_request.id, result=result.model_dump(by_alias=True, mode="json", exclude_none=True), ) ) ) await client_to_server_receive.receive() # Create handlers container task_handlers = ExperimentalTaskHandlers( 
list_tasks=my_list_tasks_handler, cancel_task=my_cancel_task_handler, ) async with ( ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as session, anyio.create_task_group() as tg, client_to_server_send, client_to_server_receive, server_to_client_send, server_to_client_receive, ): tg.start_soon(mock_server) await session.initialize() # Assert that tasks capability is properly set from handlers assert received_capabilities is not None assert received_capabilities.tasks is not None assert isinstance(received_capabilities.tasks, types.ClientTasksCapability) assert received_capabilities.tasks.list is not None assert received_capabilities.tasks.cancel is not None @pytest.mark.anyio async def test_client_capabilities_auto_built_from_handlers(): """Test that tasks capability is automatically built from provided handlers.""" client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](1) server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) received_capabilities: ClientCapabilities | None = None # Define custom handlers (not defaults) async def my_list_tasks_handler( context: RequestContext[ClientSession], params: types.PaginatedRequestParams | None, ) -> types.ListTasksResult | types.ErrorData: raise NotImplementedError async def my_cancel_task_handler( context: RequestContext[ClientSession], params: types.CancelTaskRequestParams, ) -> types.CancelTaskResult | types.ErrorData: raise NotImplementedError async def mock_server(): nonlocal received_capabilities session_message = await client_to_server_receive.receive() jsonrpc_request = session_message.message assert isinstance(jsonrpc_request, JSONRPCRequest) request = client_request_adapter.validate_python( jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request, InitializeRequest) received_capabilities = request.params.capabilities 
result = InitializeResult( protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), server_info=Implementation(name="mock-server", version="0.1.0"), ) async with server_to_client_send: await server_to_client_send.send( SessionMessage( JSONRPCResponse( jsonrpc="2.0", id=jsonrpc_request.id, result=result.model_dump(by_alias=True, mode="json", exclude_none=True), ) ) ) await client_to_server_receive.receive() # Provide handlers via ExperimentalTaskHandlers task_handlers = ExperimentalTaskHandlers( list_tasks=my_list_tasks_handler, cancel_task=my_cancel_task_handler, ) async with ( ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as session, anyio.create_task_group() as tg, client_to_server_send, client_to_server_receive, server_to_client_send, server_to_client_receive, ): tg.start_soon(mock_server) await session.initialize() # Assert that tasks capability was auto-built from handlers assert received_capabilities is not None assert received_capabilities.tasks is not None assert received_capabilities.tasks.list is not None assert received_capabilities.tasks.cancel is not None # requests should be None since we didn't provide task-augmented handlers assert received_capabilities.tasks.requests is None @pytest.mark.anyio async def test_client_capabilities_with_task_augmented_handlers(): """Test that requests capability is built when augmented handlers are provided.""" client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](1) server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](1) received_capabilities: ClientCapabilities | None = None # Define task-augmented handler async def my_augmented_sampling_handler( context: RequestContext[ClientSession], params: types.CreateMessageRequestParams, task_metadata: types.TaskMetadata, ) -> types.CreateTaskResult | types.ErrorData: raise NotImplementedError async def 
mock_server(): nonlocal received_capabilities session_message = await client_to_server_receive.receive() jsonrpc_request = session_message.message assert isinstance(jsonrpc_request, JSONRPCRequest) request = client_request_adapter.validate_python( jsonrpc_request.model_dump(by_alias=True, mode="json", exclude_none=True) ) assert isinstance(request, InitializeRequest) received_capabilities = request.params.capabilities result = InitializeResult( protocol_version=LATEST_PROTOCOL_VERSION, capabilities=ServerCapabilities(), server_info=Implementation(name="mock-server", version="0.1.0"), ) async with server_to_client_send: await server_to_client_send.send( SessionMessage( JSONRPCResponse( jsonrpc="2.0", id=jsonrpc_request.id, result=result.model_dump(by_alias=True, mode="json", exclude_none=True), ) ) ) await client_to_server_receive.receive() # Provide task-augmented sampling handler task_handlers = ExperimentalTaskHandlers( augmented_sampling=my_augmented_sampling_handler, ) async with ( ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as session, anyio.create_task_group() as tg, client_to_server_send, client_to_server_receive, server_to_client_send, server_to_client_receive, ): tg.start_soon(mock_server) await session.initialize() # Assert that tasks capability includes requests.sampling assert received_capabilities is not None assert received_capabilities.tasks is not None assert received_capabilities.tasks.requests is not None assert received_capabilities.tasks.requests.sampling is not None assert received_capabilities.tasks.requests.elicitation is None # Not provided
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/client/test_capabilities.py", "license": "MIT License", "lines": 266, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/client/test_handlers.py
"""Tests for client-side task management handlers (server -> client requests). These tests verify that clients can handle task-related requests from servers: - GetTaskRequest - server polling client's task status - GetTaskPayloadRequest - server getting result from client's task - ListTasksRequest - server listing client's tasks - CancelTaskRequest - server cancelling client's task This is the inverse of the existing tests in test_tasks.py, which test client -> server task requests. """ from collections.abc import AsyncIterator from dataclasses import dataclass import anyio import pytest from anyio import Event from anyio.abc import TaskGroup from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from mcp import types from mcp.client.experimental.task_handlers import ExperimentalTaskHandlers from mcp.client.session import ClientSession from mcp.shared._context import RequestContext from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.shared.message import SessionMessage from mcp.shared.session import RequestResponder from mcp.types import ( CancelTaskRequest, CancelTaskRequestParams, CancelTaskResult, ClientResult, CreateMessageRequest, CreateMessageRequestParams, CreateMessageResult, CreateTaskResult, ElicitRequest, ElicitRequestFormParams, ElicitRequestParams, ElicitResult, ErrorData, GetTaskPayloadRequest, GetTaskPayloadRequestParams, GetTaskPayloadResult, GetTaskRequest, GetTaskRequestParams, GetTaskResult, ListTasksRequest, ListTasksResult, SamplingMessage, ServerNotification, ServerRequest, TaskMetadata, TextContent, ) # Buffer size for test streams STREAM_BUFFER_SIZE = 10 @dataclass class ClientTestStreams: """Bidirectional message streams for client/server communication in tests.""" server_send: MemoryObjectSendStream[SessionMessage] server_receive: MemoryObjectReceiveStream[SessionMessage] client_send: MemoryObjectSendStream[SessionMessage] client_receive: 
MemoryObjectReceiveStream[SessionMessage] @pytest.fixture async def client_streams() -> AsyncIterator[ClientTestStreams]: """Create bidirectional message streams for client tests. Automatically closes all streams after the test completes. """ server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage]( STREAM_BUFFER_SIZE ) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage]( STREAM_BUFFER_SIZE ) streams = ClientTestStreams( server_send=server_to_client_send, server_receive=client_to_server_receive, client_send=client_to_server_send, client_receive=server_to_client_receive, ) yield streams # Cleanup await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() async def _default_message_handler( message: RequestResponder[ServerRequest, ClientResult] | ServerNotification | Exception, ) -> None: """Default message handler that ignores messages (tests handle them explicitly).""" ... 
@pytest.mark.anyio
async def test_client_handles_get_task_request(client_streams: ClientTestStreams) -> None:
    """Test that client can respond to GetTaskRequest from server."""
    with anyio.fail_after(10):
        store = InMemoryTaskStore()
        received_task_id: str | None = None

        async def get_task_handler(
            context: RequestContext[ClientSession],
            params: GetTaskRequestParams,
        ) -> GetTaskResult | ErrorData:
            nonlocal received_task_id
            received_task_id = params.task_id
            task = await store.get_task(params.task_id)
            assert task is not None, f"Test setup error: task {params.task_id} should exist"
            return GetTaskResult(
                task_id=task.task_id,
                status=task.status,
                status_message=task.status_message,
                created_at=task.created_at,
                last_updated_at=task.last_updated_at,
                ttl=task.ttl,
                poll_interval=task.poll_interval,
            )

        # Seed a task the handler can look up before the request arrives.
        await store.create_task(TaskMetadata(ttl=60000), task_id="test-task-123")
        task_handlers = ExperimentalTaskHandlers(get_task=get_task_handler)
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                    experimental_task_handlers=task_handlers,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            # Send a raw JSON-RPC request as the server would.
            typed_request = GetTaskRequest(params=GetTaskRequestParams(task_id="test-task-123"))
            request = types.JSONRPCRequest(jsonrpc="2.0", id="req-1", **typed_request.model_dump(by_alias=True))
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCResponse)
            assert response.id == "req-1"
            result = GetTaskResult.model_validate(response.result)
            assert result.task_id == "test-task-123"
            assert result.status == "working"
            assert received_task_id == "test-task-123"

            tg.cancel_scope.cancel()

        store.cleanup()


@pytest.mark.anyio
async def test_client_handles_get_task_result_request(client_streams: ClientTestStreams) -> None:
    """Test that client can respond to GetTaskPayloadRequest from server."""
    with anyio.fail_after(10):
        store = InMemoryTaskStore()

        async def get_task_result_handler(
            context: RequestContext[ClientSession],
            params: GetTaskPayloadRequestParams,
        ) -> GetTaskPayloadResult | ErrorData:
            result = await store.get_result(params.task_id)
            assert result is not None, f"Test setup error: result for {params.task_id} should exist"
            assert isinstance(result, types.CallToolResult)
            return GetTaskPayloadResult(**result.model_dump())

        # Seed a completed task with a stored result for the handler to return.
        await store.create_task(TaskMetadata(ttl=60000), task_id="test-task-456")
        await store.store_result(
            "test-task-456",
            types.CallToolResult(content=[TextContent(type="text", text="Task completed successfully!")]),
        )
        await store.update_task("test-task-456", status="completed")

        task_handlers = ExperimentalTaskHandlers(get_task_result=get_task_result_handler)
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                    experimental_task_handlers=task_handlers,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="test-task-456"))
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-2",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCResponse)
            assert isinstance(response.result, dict)
            result_dict = response.result
            assert "content" in result_dict
            assert len(result_dict["content"]) == 1
            assert result_dict["content"][0]["text"] == "Task completed successfully!"

            tg.cancel_scope.cancel()

        store.cleanup()


@pytest.mark.anyio
async def test_client_handles_list_tasks_request(client_streams: ClientTestStreams) -> None:
    """Test that client can respond to ListTasksRequest from server."""
    with anyio.fail_after(10):
        store = InMemoryTaskStore()

        async def list_tasks_handler(
            context: RequestContext[ClientSession],
            params: types.PaginatedRequestParams | None,
        ) -> ListTasksResult | ErrorData:
            cursor = params.cursor if params else None
            tasks_list, next_cursor = await store.list_tasks(cursor=cursor)
            return ListTasksResult(tasks=tasks_list, next_cursor=next_cursor)

        await store.create_task(TaskMetadata(ttl=60000), task_id="task-1")
        await store.create_task(TaskMetadata(ttl=60000), task_id="task-2")

        task_handlers = ExperimentalTaskHandlers(list_tasks=list_tasks_handler)
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                    experimental_task_handlers=task_handlers,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = ListTasksRequest()
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-3",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCResponse)
            result = ListTasksResult.model_validate(response.result)
            assert len(result.tasks) == 2

            tg.cancel_scope.cancel()

        store.cleanup()


@pytest.mark.anyio
async def test_client_handles_cancel_task_request(client_streams: ClientTestStreams) -> None:
    """Test that client can respond to CancelTaskRequest from server."""
    with anyio.fail_after(10):
        store = InMemoryTaskStore()

        async def cancel_task_handler(
            context: RequestContext[ClientSession],
            params: CancelTaskRequestParams,
        ) -> CancelTaskResult | ErrorData:
            task = await store.get_task(params.task_id)
            assert task is not None, f"Test setup error: task {params.task_id} should exist"
            await store.update_task(params.task_id, status="cancelled")
            # Re-read so the returned timestamps reflect the update.
            updated = await store.get_task(params.task_id)
            assert updated is not None
            return CancelTaskResult(
                task_id=updated.task_id,
                status=updated.status,
                created_at=updated.created_at,
                last_updated_at=updated.last_updated_at,
                ttl=updated.ttl,
            )

        await store.create_task(TaskMetadata(ttl=60000), task_id="task-to-cancel")
        task_handlers = ExperimentalTaskHandlers(cancel_task=cancel_task_handler)
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                    experimental_task_handlers=task_handlers,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = CancelTaskRequest(params=CancelTaskRequestParams(task_id="task-to-cancel"))
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-4",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCResponse)
            result = CancelTaskResult.model_validate(response.result)
            assert result.task_id == "task-to-cancel"
            assert result.status == "cancelled"

            tg.cancel_scope.cancel()

        store.cleanup()


@pytest.mark.anyio
async def test_client_task_augmented_sampling(client_streams: ClientTestStreams) -> None:
    """Test that client can handle task-augmented sampling request from server."""
    with anyio.fail_after(10):
        store = InMemoryTaskStore()
        sampling_completed = Event()
        # Single-element lists give the nested callbacks writable slots
        # without needing `nonlocal` across function boundaries.
        created_task_id: list[str | None] = [None]
        background_tg: list[TaskGroup | None] = [None]

        async def task_augmented_sampling_callback(
            context: RequestContext[ClientSession],
            params: CreateMessageRequestParams,
            task_metadata: TaskMetadata,
        ) -> CreateTaskResult:
            task = await store.create_task(task_metadata)
            created_task_id[0] = task.task_id

            async def do_sampling() -> None:
                result = CreateMessageResult(
                    role="assistant",
                    content=TextContent(type="text", text="Sampled response"),
                    model="test-model",
                    stop_reason="endTurn",
                )
                await store.store_result(task.task_id, result)
                await store.update_task(task.task_id, status="completed")
                sampling_completed.set()

            assert background_tg[0] is not None
            background_tg[0].start_soon(do_sampling)
            return CreateTaskResult(task=task)

        async def get_task_handler(
            context: RequestContext[ClientSession],
            params: GetTaskRequestParams,
        ) -> GetTaskResult | ErrorData:
            task = await store.get_task(params.task_id)
            assert task is not None, f"Test setup error: task {params.task_id} should exist"
            return GetTaskResult(
                task_id=task.task_id,
                status=task.status,
                status_message=task.status_message,
                created_at=task.created_at,
                last_updated_at=task.last_updated_at,
                ttl=task.ttl,
                poll_interval=task.poll_interval,
            )

        async def get_task_result_handler(
            context: RequestContext[ClientSession],
            params: GetTaskPayloadRequestParams,
        ) -> GetTaskPayloadResult | ErrorData:
            result = await store.get_result(params.task_id)
            assert result is not None, f"Test setup error: result for {params.task_id} should exist"
            assert isinstance(result, CreateMessageResult)
            return GetTaskPayloadResult(**result.model_dump())

        task_handlers = ExperimentalTaskHandlers(
            augmented_sampling=task_augmented_sampling_callback,
            get_task=get_task_handler,
            get_task_result=get_task_result_handler,
        )
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:
            background_tg[0] = tg

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                    experimental_task_handlers=task_handlers,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            # Step 1: Server sends task-augmented CreateMessageRequest
            typed_request = CreateMessageRequest(
                params=CreateMessageRequestParams(
                    messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))],
                    max_tokens=100,
                    task=TaskMetadata(ttl=60000),
                )
            )
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-sampling",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            # Step 2: Client responds with CreateTaskResult
            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCResponse)
            task_result = CreateTaskResult.model_validate(response.result)
            task_id = task_result.task.task_id
            assert task_id == created_task_id[0]

            # Step 3: Wait for background sampling
            await sampling_completed.wait()

            # Step 4: Server polls task status
            typed_poll = GetTaskRequest(params=GetTaskRequestParams(task_id=task_id))
            poll_request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-poll",
                **typed_poll.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(poll_request))

            poll_response_msg = await client_streams.server_receive.receive()
            poll_response = poll_response_msg.message
            assert isinstance(poll_response, types.JSONRPCResponse)
            status = GetTaskResult.model_validate(poll_response.result)
            assert status.status == "completed"

            # Step 5: Server gets result
            typed_result_req = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task_id))
            result_request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-result",
                **typed_result_req.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(result_request))

            result_response_msg = await client_streams.server_receive.receive()
            result_response = result_response_msg.message
            assert isinstance(result_response, types.JSONRPCResponse)
            assert isinstance(result_response.result, dict)
            assert result_response.result["role"] == "assistant"

            tg.cancel_scope.cancel()

        store.cleanup()


@pytest.mark.anyio
async def test_client_task_augmented_elicitation(client_streams: ClientTestStreams) -> None:
    """Test that client can handle task-augmented elicitation request from server."""
    with anyio.fail_after(10):
        store = InMemoryTaskStore()
        elicitation_completed = Event()
        created_task_id: list[str | None] = [None]
        background_tg: list[TaskGroup | None] = [None]

        async def task_augmented_elicitation_callback(
            context: RequestContext[ClientSession],
            params: ElicitRequestParams,
            task_metadata: TaskMetadata,
        ) -> CreateTaskResult | ErrorData:
            task = await store.create_task(task_metadata)
            created_task_id[0] = task.task_id

            async def do_elicitation() -> None:
                # Simulate user providing elicitation response
                result = ElicitResult(action="accept", content={"name": "Test User"})
                await store.store_result(task.task_id, result)
                await store.update_task(task.task_id, status="completed")
                elicitation_completed.set()

            assert background_tg[0] is not None
            background_tg[0].start_soon(do_elicitation)
            return CreateTaskResult(task=task)

        async def get_task_handler(
            context: RequestContext[ClientSession],
            params: GetTaskRequestParams,
        ) -> GetTaskResult | ErrorData:
            task = await store.get_task(params.task_id)
            assert task is not None, f"Test setup error: task {params.task_id} should exist"
            return GetTaskResult(
                task_id=task.task_id,
                status=task.status,
                status_message=task.status_message,
                created_at=task.created_at,
                last_updated_at=task.last_updated_at,
                ttl=task.ttl,
                poll_interval=task.poll_interval,
            )

        async def get_task_result_handler(
            context: RequestContext[ClientSession],
            params: GetTaskPayloadRequestParams,
        ) -> GetTaskPayloadResult | ErrorData:
            result = await store.get_result(params.task_id)
            assert result is not None, f"Test setup error: result for {params.task_id} should exist"
            assert isinstance(result, ElicitResult)
            return GetTaskPayloadResult(**result.model_dump())

        task_handlers = ExperimentalTaskHandlers(
            augmented_elicitation=task_augmented_elicitation_callback,
            get_task=get_task_handler,
            get_task_result=get_task_result_handler,
        )
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:
            background_tg[0] = tg

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                    experimental_task_handlers=task_handlers,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            # Step 1: Server sends task-augmented ElicitRequest
            typed_request = ElicitRequest(
                params=ElicitRequestFormParams(
                    message="What is your name?",
                    requested_schema={"type": "object", "properties": {"name": {"type": "string"}}},
                    task=TaskMetadata(ttl=60000),
                )
            )
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-elicit",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            # Step 2: Client responds with CreateTaskResult
            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCResponse)
            task_result = CreateTaskResult.model_validate(response.result)
            task_id = task_result.task.task_id
            assert task_id == created_task_id[0]

            # Step 3: Wait for background elicitation
            await elicitation_completed.wait()

            # Step 4: Server polls task status
            typed_poll = GetTaskRequest(params=GetTaskRequestParams(task_id=task_id))
            poll_request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-poll",
                **typed_poll.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(poll_request))

            poll_response_msg = await client_streams.server_receive.receive()
            poll_response = poll_response_msg.message
            assert isinstance(poll_response, types.JSONRPCResponse)
            status = GetTaskResult.model_validate(poll_response.result)
            assert status.status == "completed"

            # Step 5: Server gets result
            typed_result_req = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task_id))
            result_request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-result",
                **typed_result_req.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(result_request))

            result_response_msg = await client_streams.server_receive.receive()
            result_response = result_response_msg.message
            assert isinstance(result_response, types.JSONRPCResponse)

            # Verify the elicitation result
            assert isinstance(result_response.result, dict)
            assert result_response.result["action"] == "accept"
            assert result_response.result["content"] == {"name": "Test User"}

            tg.cancel_scope.cancel()

        store.cleanup()


@pytest.mark.anyio
async def test_client_returns_error_for_unhandled_task_request(client_streams: ClientTestStreams) -> None:
    """Test that client returns error when no handler is registered for task request."""
    with anyio.fail_after(10):
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = GetTaskRequest(params=GetTaskRequestParams(task_id="nonexistent"))
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-unhandled",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCError)
            # Accept either wording of the rejection message.
            assert (
                "not supported" in response.error.message.lower()
                or "method not found" in response.error.message.lower()
            )

            tg.cancel_scope.cancel()


@pytest.mark.anyio
async def test_client_returns_error_for_unhandled_task_result_request(client_streams: ClientTestStreams) -> None:
    """Test that client returns error for unhandled tasks/result request."""
    with anyio.fail_after(10):
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="nonexistent"))
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-result",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCError)
            assert "not supported" in response.error.message.lower()

            tg.cancel_scope.cancel()


@pytest.mark.anyio
async def test_client_returns_error_for_unhandled_list_tasks_request(client_streams: ClientTestStreams) -> None:
    """Test that client returns error for unhandled tasks/list request."""
    with anyio.fail_after(10):
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = ListTasksRequest()
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-list",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCError)
            assert "not supported" in response.error.message.lower()

            tg.cancel_scope.cancel()


@pytest.mark.anyio
async def test_client_returns_error_for_unhandled_cancel_task_request(client_streams: ClientTestStreams) -> None:
    """Test that client returns error for unhandled tasks/cancel request."""
    with anyio.fail_after(10):
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            typed_request = CancelTaskRequest(params=CancelTaskRequestParams(task_id="nonexistent"))
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-cancel",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCError)
            assert "not supported" in response.error.message.lower()

            tg.cancel_scope.cancel()


@pytest.mark.anyio
async def test_client_returns_error_for_unhandled_task_augmented_sampling(client_streams: ClientTestStreams) -> None:
    """Test that client returns error for task-augmented sampling without handler."""
    with anyio.fail_after(10):
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                # No task handlers provided - uses defaults
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            # Send task-augmented sampling request
            typed_request = CreateMessageRequest(
                params=CreateMessageRequestParams(
                    messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))],
                    max_tokens=100,
                    task=TaskMetadata(ttl=60000),
                )
            )
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-sampling",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCError)
            assert "not supported" in response.error.message.lower()

            tg.cancel_scope.cancel()


@pytest.mark.anyio
async def test_client_returns_error_for_unhandled_task_augmented_elicitation(
    client_streams: ClientTestStreams,
) -> None:
    """Test that client returns error for task-augmented elicitation without handler."""
    with anyio.fail_after(10):
        client_ready = anyio.Event()

        async with anyio.create_task_group() as tg:

            async def run_client() -> None:
                # No task handlers provided - uses defaults
                async with ClientSession(
                    client_streams.client_receive,
                    client_streams.client_send,
                    message_handler=_default_message_handler,
                ):
                    client_ready.set()
                    await anyio.sleep_forever()

            tg.start_soon(run_client)
            await client_ready.wait()

            # Send task-augmented elicitation request
            typed_request = ElicitRequest(
                params=ElicitRequestFormParams(
                    message="What is your name?",
                    requested_schema={"type": "object", "properties": {"name": {"type": "string"}}},
                    task=TaskMetadata(ttl=60000),
                )
            )
            request = types.JSONRPCRequest(
                jsonrpc="2.0",
                id="req-elicit",
                **typed_request.model_dump(by_alias=True),
            )
            await client_streams.server_send.send(SessionMessage(request))

            response_msg = await client_streams.server_receive.receive()
            response = response_msg.message
            assert isinstance(response, types.JSONRPCError)
            assert "not supported" in response.error.message.lower()

            tg.cancel_scope.cancel()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/client/test_handlers.py", "license": "MIT License", "lines": 711, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/client/test_poll_task.py
"""Tests for poll_task async iterator.""" from collections.abc import Callable, Coroutine from datetime import datetime, timezone from typing import Any from unittest.mock import AsyncMock import pytest from mcp.client.experimental.tasks import ExperimentalClientFeatures from mcp.types import GetTaskResult, TaskStatus def make_task_result( status: TaskStatus = "working", poll_interval: int = 0, task_id: str = "test-task", status_message: str | None = None, ) -> GetTaskResult: """Create GetTaskResult with sensible defaults.""" now = datetime.now(timezone.utc) return GetTaskResult( task_id=task_id, status=status, status_message=status_message, created_at=now, last_updated_at=now, ttl=60000, poll_interval=poll_interval, ) def make_status_sequence( *statuses: TaskStatus, task_id: str = "test-task", ) -> Callable[[str], Coroutine[Any, Any, GetTaskResult]]: """Create mock get_task that returns statuses in sequence.""" status_iter = iter(statuses) async def mock_get_task(tid: str) -> GetTaskResult: return make_task_result(status=next(status_iter), task_id=tid) return mock_get_task @pytest.fixture def mock_session() -> AsyncMock: return AsyncMock() @pytest.fixture def features(mock_session: AsyncMock) -> ExperimentalClientFeatures: return ExperimentalClientFeatures(mock_session) @pytest.mark.anyio async def test_poll_task_yields_until_completed(features: ExperimentalClientFeatures) -> None: """poll_task yields each status until terminal.""" features.get_task = make_status_sequence("working", "working", "completed") # type: ignore[method-assign] statuses = [s.status async for s in features.poll_task("test-task")] assert statuses == ["working", "working", "completed"] @pytest.mark.anyio @pytest.mark.parametrize("terminal_status", ["completed", "failed", "cancelled"]) async def test_poll_task_exits_on_terminal(features: ExperimentalClientFeatures, terminal_status: TaskStatus) -> None: """poll_task exits immediately when task is already terminal.""" features.get_task = 
make_status_sequence(terminal_status) # type: ignore[method-assign] statuses = [s.status async for s in features.poll_task("test-task")] assert statuses == [terminal_status] @pytest.mark.anyio async def test_poll_task_continues_through_input_required(features: ExperimentalClientFeatures) -> None: """poll_task yields input_required and continues (non-terminal).""" features.get_task = make_status_sequence("working", "input_required", "working", "completed") # type: ignore[method-assign] statuses = [s.status async for s in features.poll_task("test-task")] assert statuses == ["working", "input_required", "working", "completed"] @pytest.mark.anyio async def test_poll_task_passes_task_id(features: ExperimentalClientFeatures) -> None: """poll_task passes correct task_id to get_task.""" received_ids: list[str] = [] async def mock_get_task(task_id: str) -> GetTaskResult: received_ids.append(task_id) return make_task_result(status="completed", task_id=task_id) features.get_task = mock_get_task # type: ignore[method-assign] _ = [s async for s in features.poll_task("my-task-123")] assert received_ids == ["my-task-123"] @pytest.mark.anyio async def test_poll_task_yields_full_result(features: ExperimentalClientFeatures) -> None: """poll_task yields complete GetTaskResult objects.""" async def mock_get_task(task_id: str) -> GetTaskResult: return make_task_result( status="completed", task_id=task_id, status_message="All done!", ) features.get_task = mock_get_task # type: ignore[method-assign] results = [r async for r in features.poll_task("test-task")] assert len(results) == 1 assert results[0].status == "completed" assert results[0].status_message == "All done!" assert results[0].task_id == "test-task"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/client/test_poll_task.py", "license": "MIT License", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/client/test_tasks.py
"""Tests for the experimental client task methods (session.experimental).""" from collections.abc import AsyncIterator from contextlib import asynccontextmanager from dataclasses import dataclass, field import anyio import pytest from anyio import Event from anyio.abc import TaskGroup from mcp import Client from mcp.server import Server, ServerRequestContext from mcp.shared.experimental.tasks.helpers import task_execution from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.types import ( CallToolRequest, CallToolRequestParams, CallToolResult, CancelTaskRequestParams, CancelTaskResult, CreateTaskResult, GetTaskPayloadRequestParams, GetTaskPayloadResult, GetTaskRequestParams, GetTaskResult, ListTasksResult, ListToolsResult, PaginatedRequestParams, TaskMetadata, TextContent, ) pytestmark = pytest.mark.anyio @dataclass class AppContext: """Application context passed via lifespan_context.""" task_group: TaskGroup store: InMemoryTaskStore task_done_events: dict[str, Event] = field(default_factory=lambda: {}) async def _handle_list_tools( ctx: ServerRequestContext[AppContext], params: PaginatedRequestParams | None ) -> ListToolsResult: raise NotImplementedError async def _handle_call_tool_with_done_event( ctx: ServerRequestContext[AppContext], params: CallToolRequestParams, *, result_text: str = "Done" ) -> CallToolResult | CreateTaskResult: app = ctx.lifespan_context if ctx.experimental.is_task: task_metadata = ctx.experimental.task_metadata assert task_metadata is not None task = await app.store.create_task(task_metadata) done_event = Event() app.task_done_events[task.task_id] = done_event async def do_work() -> None: async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.complete(CallToolResult(content=[TextContent(type="text", text=result_text)])) done_event.set() app.task_group.start_soon(do_work) return CreateTaskResult(task=task) raise NotImplementedError def _make_lifespan(store: InMemoryTaskStore, 
task_done_events: dict[str, Event]): @asynccontextmanager async def app_lifespan(server: Server[AppContext]) -> AsyncIterator[AppContext]: async with anyio.create_task_group() as tg: yield AppContext(task_group=tg, store=store, task_done_events=task_done_events) return app_lifespan async def test_session_experimental_get_task() -> None: """Test session.experimental.get_task() method.""" store = InMemoryTaskStore() task_done_events: dict[str, Event] = {} async def handle_get_task(ctx: ServerRequestContext[AppContext], params: GetTaskRequestParams) -> GetTaskResult: app = ctx.lifespan_context task = await app.store.get_task(params.task_id) assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=task.poll_interval, ) server: Server[AppContext] = Server( "test-server", lifespan=_make_lifespan(store, task_done_events), on_list_tools=_handle_list_tools, on_call_tool=_handle_call_tool_with_done_event, ) server.experimental.enable_tasks(on_get_task=handle_get_task) async with Client(server) as client: # Create a task create_result = await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="test_tool", arguments={}, task=TaskMetadata(ttl=60000), ) ), CreateTaskResult, ) task_id = create_result.task.task_id # Wait for task to complete await task_done_events[task_id].wait() # Use session.experimental to get task status task_status = await client.session.experimental.get_task(task_id) assert task_status.task_id == task_id assert task_status.status == "completed" async def test_session_experimental_get_task_result() -> None: """Test session.experimental.get_task_result() method.""" store = InMemoryTaskStore() task_done_events: dict[str, Event] = {} async def handle_call_tool( ctx: ServerRequestContext[AppContext], params: 
CallToolRequestParams ) -> CallToolResult | CreateTaskResult: return await _handle_call_tool_with_done_event(ctx, params, result_text="Task result content") async def handle_get_task_result( ctx: ServerRequestContext[AppContext], params: GetTaskPayloadRequestParams ) -> GetTaskPayloadResult: app = ctx.lifespan_context result = await app.store.get_result(params.task_id) assert result is not None, f"Test setup error: result for {params.task_id} should exist" assert isinstance(result, CallToolResult) return GetTaskPayloadResult(**result.model_dump()) server: Server[AppContext] = Server( "test-server", lifespan=_make_lifespan(store, task_done_events), on_list_tools=_handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks(on_task_result=handle_get_task_result) async with Client(server) as client: # Create a task create_result = await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="test_tool", arguments={}, task=TaskMetadata(ttl=60000), ) ), CreateTaskResult, ) task_id = create_result.task.task_id # Wait for task to complete await task_done_events[task_id].wait() # Use TaskClient to get task result task_result = await client.session.experimental.get_task_result(task_id, CallToolResult) assert len(task_result.content) == 1 content = task_result.content[0] assert isinstance(content, TextContent) assert content.text == "Task result content" async def test_session_experimental_list_tasks() -> None: """Test TaskClient.list_tasks() method.""" store = InMemoryTaskStore() task_done_events: dict[str, Event] = {} async def handle_list_tasks( ctx: ServerRequestContext[AppContext], params: PaginatedRequestParams | None ) -> ListTasksResult: app = ctx.lifespan_context cursor = params.cursor if params else None tasks_list, next_cursor = await app.store.list_tasks(cursor=cursor) return ListTasksResult(tasks=tasks_list, next_cursor=next_cursor) server: Server[AppContext] = Server( "test-server", 
lifespan=_make_lifespan(store, task_done_events), on_list_tools=_handle_list_tools, on_call_tool=_handle_call_tool_with_done_event, ) server.experimental.enable_tasks(on_list_tasks=handle_list_tasks) async with Client(server) as client: # Create two tasks for _ in range(2): create_result = await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="test_tool", arguments={}, task=TaskMetadata(ttl=60000), ) ), CreateTaskResult, ) await task_done_events[create_result.task.task_id].wait() # Use TaskClient to list tasks list_result = await client.session.experimental.list_tasks() assert len(list_result.tasks) == 2 async def test_session_experimental_cancel_task() -> None: """Test TaskClient.cancel_task() method.""" store = InMemoryTaskStore() task_done_events: dict[str, Event] = {} async def handle_call_tool_no_work( ctx: ServerRequestContext[AppContext], params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: app = ctx.lifespan_context if ctx.experimental.is_task: task_metadata = ctx.experimental.task_metadata assert task_metadata is not None task = await app.store.create_task(task_metadata) # Don't start any work - task stays in "working" status return CreateTaskResult(task=task) raise NotImplementedError async def handle_get_task(ctx: ServerRequestContext[AppContext], params: GetTaskRequestParams) -> GetTaskResult: app = ctx.lifespan_context task = await app.store.get_task(params.task_id) assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=task.poll_interval, ) async def handle_cancel_task( ctx: ServerRequestContext[AppContext], params: CancelTaskRequestParams ) -> CancelTaskResult: app = ctx.lifespan_context task = await app.store.get_task(params.task_id) assert task is not None, f"Test setup error: task 
{params.task_id} should exist" await app.store.update_task(params.task_id, status="cancelled") updated_task = await app.store.get_task(params.task_id) assert updated_task is not None return CancelTaskResult( task_id=updated_task.task_id, status=updated_task.status, created_at=updated_task.created_at, last_updated_at=updated_task.last_updated_at, ttl=updated_task.ttl, ) server: Server[AppContext] = Server( "test-server", lifespan=_make_lifespan(store, task_done_events), on_list_tools=_handle_list_tools, on_call_tool=handle_call_tool_no_work, ) server.experimental.enable_tasks(on_get_task=handle_get_task, on_cancel_task=handle_cancel_task) async with Client(server) as client: # Create a task (but don't complete it) create_result = await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="test_tool", arguments={}, task=TaskMetadata(ttl=60000), ) ), CreateTaskResult, ) task_id = create_result.task.task_id # Verify task is working status_before = await client.session.experimental.get_task(task_id) assert status_before.status == "working" # Cancel the task await client.session.experimental.cancel_task(task_id) # Verify task is cancelled status_after = await client.session.experimental.get_task(task_id) assert status_after.status == "cancelled"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/client/test_tasks.py", "license": "MIT License", "lines": 257, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_context.py
"""Tests for TaskContext and helper functions.""" import pytest from mcp.shared.experimental.tasks.context import TaskContext from mcp.shared.experimental.tasks.helpers import create_task_state, task_execution from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.types import CallToolResult, TaskMetadata, TextContent @pytest.mark.anyio async def test_task_context_properties() -> None: """Test TaskContext basic properties.""" store = InMemoryTaskStore() task = await store.create_task(metadata=TaskMetadata(ttl=60000)) ctx = TaskContext(task, store) assert ctx.task_id == task.task_id assert ctx.task.task_id == task.task_id assert ctx.task.status == "working" assert ctx.is_cancelled is False store.cleanup() @pytest.mark.anyio async def test_task_context_update_status() -> None: """Test TaskContext.update_status.""" store = InMemoryTaskStore() task = await store.create_task(metadata=TaskMetadata(ttl=60000)) ctx = TaskContext(task, store) await ctx.update_status("Processing step 1...") # Check status message was updated updated = await store.get_task(task.task_id) assert updated is not None assert updated.status_message == "Processing step 1..." 
store.cleanup() @pytest.mark.anyio async def test_task_context_complete() -> None: """Test TaskContext.complete.""" store = InMemoryTaskStore() task = await store.create_task(metadata=TaskMetadata(ttl=60000)) ctx = TaskContext(task, store) result = CallToolResult(content=[TextContent(type="text", text="Done!")]) await ctx.complete(result) # Check task status updated = await store.get_task(task.task_id) assert updated is not None assert updated.status == "completed" # Check result is stored stored_result = await store.get_result(task.task_id) assert stored_result is not None store.cleanup() @pytest.mark.anyio async def test_task_context_fail() -> None: """Test TaskContext.fail.""" store = InMemoryTaskStore() task = await store.create_task(metadata=TaskMetadata(ttl=60000)) ctx = TaskContext(task, store) await ctx.fail("Something went wrong!") # Check task status updated = await store.get_task(task.task_id) assert updated is not None assert updated.status == "failed" assert updated.status_message == "Something went wrong!" 
store.cleanup() @pytest.mark.anyio async def test_task_context_cancellation() -> None: """Test TaskContext cancellation request.""" store = InMemoryTaskStore() task = await store.create_task(metadata=TaskMetadata(ttl=60000)) ctx = TaskContext(task, store) assert ctx.is_cancelled is False ctx.request_cancellation() assert ctx.is_cancelled is True store.cleanup() def test_create_task_state_generates_id() -> None: """create_task_state generates a unique task ID when none provided.""" task1 = create_task_state(TaskMetadata(ttl=60000)) task2 = create_task_state(TaskMetadata(ttl=60000)) assert task1.task_id != task2.task_id def test_create_task_state_uses_provided_id() -> None: """create_task_state uses the provided task ID.""" task = create_task_state(TaskMetadata(ttl=60000), task_id="my-task-123") assert task.task_id == "my-task-123" def test_create_task_state_null_ttl() -> None: """create_task_state handles null TTL.""" task = create_task_state(TaskMetadata(ttl=None)) assert task.ttl is None def test_create_task_state_has_created_at() -> None: """create_task_state sets createdAt timestamp.""" task = create_task_state(TaskMetadata(ttl=60000)) assert task.created_at is not None @pytest.mark.anyio async def test_task_execution_provides_context() -> None: """task_execution provides a TaskContext for the task.""" store = InMemoryTaskStore() await store.create_task(TaskMetadata(ttl=60000), task_id="exec-test-1") async with task_execution("exec-test-1", store) as ctx: assert ctx.task_id == "exec-test-1" assert ctx.task.status == "working" store.cleanup() @pytest.mark.anyio async def test_task_execution_auto_fails_on_exception() -> None: """task_execution automatically fails task on unhandled exception.""" store = InMemoryTaskStore() await store.create_task(TaskMetadata(ttl=60000), task_id="exec-fail-1") async with task_execution("exec-fail-1", store): raise RuntimeError("Oops!") # Task should be failed failed_task = await store.get_task("exec-fail-1") assert failed_task is 
not None assert failed_task.status == "failed" assert "Oops!" in (failed_task.status_message or "") store.cleanup() @pytest.mark.anyio async def test_task_execution_doesnt_fail_if_already_terminal() -> None: """task_execution doesn't re-fail if task already terminal.""" store = InMemoryTaskStore() await store.create_task(TaskMetadata(ttl=60000), task_id="exec-term-1") async with task_execution("exec-term-1", store) as ctx: # Complete the task first await ctx.complete(CallToolResult(content=[TextContent(type="text", text="Done")])) # Then raise - shouldn't change status raise RuntimeError("This shouldn't matter") # Task should remain completed final_task = await store.get_task("exec-term-1") assert final_task is not None assert final_task.status == "completed" store.cleanup() @pytest.mark.anyio async def test_task_execution_not_found() -> None: """task_execution raises ValueError for non-existent task.""" store = InMemoryTaskStore() with pytest.raises(ValueError, match="not found"): async with task_execution("nonexistent", store): ...
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_context.py", "license": "MIT License", "lines": 129, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_integration.py
"""End-to-end integration tests for tasks functionality. These tests demonstrate the full task lifecycle: 1. Client sends task-augmented request (tools/call with task metadata) 2. Server creates task and returns CreateTaskResult immediately 3. Background work executes (using task_execution context manager) 4. Client polls with tasks/get 5. Client retrieves result with tasks/result """ from collections.abc import AsyncIterator from contextlib import asynccontextmanager from dataclasses import dataclass, field import anyio import pytest from anyio import Event from anyio.abc import TaskGroup from mcp import Client from mcp.server import Server, ServerRequestContext from mcp.shared.experimental.tasks.helpers import task_execution from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.types import ( CallToolRequest, CallToolRequestParams, CallToolResult, CreateTaskResult, GetTaskPayloadRequestParams, GetTaskPayloadResult, GetTaskRequestParams, GetTaskResult, ListTasksResult, ListToolsResult, PaginatedRequestParams, TaskMetadata, TextContent, ) pytestmark = pytest.mark.anyio @dataclass class AppContext: """Application context passed via lifespan_context.""" task_group: TaskGroup store: InMemoryTaskStore task_done_events: dict[str, Event] = field(default_factory=lambda: {}) def _make_lifespan(store: InMemoryTaskStore, task_done_events: dict[str, Event]): @asynccontextmanager async def app_lifespan(server: Server[AppContext]) -> AsyncIterator[AppContext]: async with anyio.create_task_group() as tg: yield AppContext(task_group=tg, store=store, task_done_events=task_done_events) return app_lifespan async def test_task_lifecycle_with_task_execution() -> None: """Test the complete task lifecycle using the task_execution pattern.""" store = InMemoryTaskStore() task_done_events: dict[str, Event] = {} async def handle_list_tools( ctx: ServerRequestContext[AppContext], params: PaginatedRequestParams | None ) -> ListToolsResult: raise 
NotImplementedError async def handle_call_tool( ctx: ServerRequestContext[AppContext], params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: app = ctx.lifespan_context if params.name == "process_data" and ctx.experimental.is_task: task_metadata = ctx.experimental.task_metadata assert task_metadata is not None task = await app.store.create_task(task_metadata) done_event = Event() app.task_done_events[task.task_id] = done_event async def do_work() -> None: async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.update_status("Processing input...") input_value = (params.arguments or {}).get("input", "") result_text = f"Processed: {input_value.upper()}" await task_ctx.complete(CallToolResult(content=[TextContent(type="text", text=result_text)])) done_event.set() app.task_group.start_soon(do_work) return CreateTaskResult(task=task) raise NotImplementedError async def handle_get_task(ctx: ServerRequestContext[AppContext], params: GetTaskRequestParams) -> GetTaskResult: app = ctx.lifespan_context task = await app.store.get_task(params.task_id) assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=task.poll_interval, ) async def handle_get_task_result( ctx: ServerRequestContext[AppContext], params: GetTaskPayloadRequestParams ) -> GetTaskPayloadResult: app = ctx.lifespan_context result = await app.store.get_result(params.task_id) assert result is not None, f"Test setup error: result for {params.task_id} should exist" assert isinstance(result, CallToolResult) return GetTaskPayloadResult(**result.model_dump()) async def handle_list_tasks( ctx: ServerRequestContext[AppContext], params: PaginatedRequestParams | None ) -> ListTasksResult: raise NotImplementedError server: Server[AppContext] = Server( "test-tasks", 
lifespan=_make_lifespan(store, task_done_events), on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks( on_get_task=handle_get_task, on_task_result=handle_get_task_result, on_list_tasks=handle_list_tasks, ) async with Client(server) as client: # Step 1: Send task-augmented tool call create_result = await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="process_data", arguments={"input": "hello world"}, task=TaskMetadata(ttl=60000), ), ), CreateTaskResult, ) assert isinstance(create_result, CreateTaskResult) assert create_result.task.status == "working" task_id = create_result.task.task_id # Step 2: Wait for task to complete await task_done_events[task_id].wait() task_status = await client.session.experimental.get_task(task_id) assert task_status.task_id == task_id assert task_status.status == "completed" # Step 3: Retrieve the actual result task_result = await client.session.experimental.get_task_result(task_id, CallToolResult) assert len(task_result.content) == 1 content = task_result.content[0] assert isinstance(content, TextContent) assert content.text == "Processed: HELLO WORLD" async def test_task_auto_fails_on_exception() -> None: """Test that task_execution automatically fails the task on unhandled exception.""" store = InMemoryTaskStore() task_done_events: dict[str, Event] = {} async def handle_list_tools( ctx: ServerRequestContext[AppContext], params: PaginatedRequestParams | None ) -> ListToolsResult: raise NotImplementedError async def handle_call_tool( ctx: ServerRequestContext[AppContext], params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: app = ctx.lifespan_context if params.name == "failing_task" and ctx.experimental.is_task: task_metadata = ctx.experimental.task_metadata assert task_metadata is not None task = await app.store.create_task(task_metadata) done_event = Event() app.task_done_events[task.task_id] = done_event async def do_failing_work() -> 
None: async with task_execution(task.task_id, app.store) as task_ctx: await task_ctx.update_status("About to fail...") raise RuntimeError("Something went wrong!") # This line is reached because task_execution suppresses the exception done_event.set() app.task_group.start_soon(do_failing_work) return CreateTaskResult(task=task) raise NotImplementedError async def handle_get_task(ctx: ServerRequestContext[AppContext], params: GetTaskRequestParams) -> GetTaskResult: app = ctx.lifespan_context task = await app.store.get_task(params.task_id) assert task is not None, f"Test setup error: task {params.task_id} should exist" return GetTaskResult( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=task.poll_interval, ) server: Server[AppContext] = Server( "test-tasks-failure", lifespan=_make_lifespan(store, task_done_events), on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks(on_get_task=handle_get_task) async with Client(server) as client: # Send task request create_result = await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="failing_task", arguments={}, task=TaskMetadata(ttl=60000), ), ), CreateTaskResult, ) task_id = create_result.task.task_id # Wait for task to complete (even though it fails) await task_done_events[task_id].wait() # Check that task was auto-failed task_status = await client.session.experimental.get_task(task_id) assert task_status.status == "failed" assert task_status.status_message == "Something went wrong!"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_integration.py", "license": "MIT License", "lines": 203, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_run_task_flow.py
"""Tests for the simplified task API: enable_tasks() + run_task() This tests the recommended user flow: 1. server.experimental.enable_tasks() - one-line setup 2. ctx.experimental.run_task(work) - spawns work, returns CreateTaskResult 3. work function uses ServerTaskContext for elicit/create_message These are integration tests that verify the complete flow works end-to-end. """ from unittest.mock import Mock import anyio import pytest from anyio import Event from mcp import Client from mcp.server import Server, ServerRequestContext from mcp.server.experimental.request_context import Experimental from mcp.server.experimental.task_context import ServerTaskContext from mcp.server.experimental.task_support import TaskSupport from mcp.server.lowlevel import NotificationOptions from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.shared.experimental.tasks.message_queue import InMemoryTaskMessageQueue from mcp.types import ( TASK_REQUIRED, CallToolRequestParams, CallToolResult, CreateTaskResult, GetTaskRequestParams, GetTaskResult, ListToolsResult, PaginatedRequestParams, TextContent, ) pytestmark = pytest.mark.anyio async def _handle_list_tools_simple_task( ctx: ServerRequestContext, params: PaginatedRequestParams | None ) -> ListToolsResult: raise NotImplementedError async def test_run_task_basic_flow() -> None: """Test the basic run_task flow without elicitation.""" work_completed = Event() received_meta: list[str | None] = [None] async def handle_call_tool( ctx: ServerRequestContext, params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) if ctx.meta is not None: # pragma: no branch received_meta[0] = ctx.meta.get("custom_field") async def work(task: ServerTaskContext) -> CallToolResult: await task.update_status("Working...") input_val = (params.arguments or {}).get("input", "default") result = CallToolResult(content=[TextContent(type="text", text=f"Processed: 
{input_val}")]) work_completed.set() return result return await ctx.experimental.run_task(work) server = Server( "test-run-task", on_list_tools=_handle_list_tools_simple_task, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks() async with Client(server) as client: result = await client.session.experimental.call_tool_as_task( "simple_task", {"input": "hello"}, meta={"custom_field": "test_value"}, ) task_id = result.task.task_id assert result.task.status == "working" with anyio.fail_after(5): await work_completed.wait() with anyio.fail_after(5): while True: task_status = await client.session.experimental.get_task(task_id) if task_status.status == "completed": # pragma: no branch break assert received_meta[0] == "test_value" async def test_run_task_auto_fails_on_exception() -> None: """Test that run_task automatically fails the task when work raises.""" work_failed = Event() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool( ctx: ServerRequestContext, params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: work_failed.set() raise RuntimeError("Something went wrong!") return await ctx.experimental.run_task(work) server = Server( "test-run-task-fail", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks() async with Client(server) as client: result = await client.session.experimental.call_tool_as_task("failing_task", {}) task_id = result.task.task_id with anyio.fail_after(5): await work_failed.wait() with anyio.fail_after(5): while True: task_status = await client.session.experimental.get_task(task_id) if task_status.status == "failed": # pragma: no branch break assert "Something went wrong" in (task_status.status_message or "") async def 
test_enable_tasks_auto_registers_handlers() -> None: """Test that enable_tasks() auto-registers get_task, list_tasks, cancel_task handlers.""" server = Server("test-enable-tasks") # Before enable_tasks, no task capabilities caps_before = server.get_capabilities(NotificationOptions(), {}) assert caps_before.tasks is None # Enable tasks server.experimental.enable_tasks() # After enable_tasks, should have task capabilities caps_after = server.get_capabilities(NotificationOptions(), {}) assert caps_after.tasks is not None assert caps_after.tasks.list is not None assert caps_after.tasks.cancel is not None assert caps_after.tasks.requests is not None assert caps_after.tasks.requests.tools is not None assert caps_after.tasks.requests.tools.call is not None async def test_enable_tasks_with_custom_store_and_queue() -> None: """Test that enable_tasks() uses provided store and queue instead of defaults.""" server = Server("test-custom-store-queue") custom_store = InMemoryTaskStore() custom_queue = InMemoryTaskMessageQueue() task_support = server.experimental.enable_tasks(store=custom_store, queue=custom_queue) assert task_support.store is custom_store assert task_support.queue is custom_queue async def test_enable_tasks_skips_default_handlers_when_custom_registered() -> None: """Test that enable_tasks() doesn't override already-registered handlers.""" server = Server("test-custom-handlers") # Register custom handlers via enable_tasks kwargs async def custom_get_task(ctx: ServerRequestContext, params: GetTaskRequestParams) -> GetTaskResult: raise NotImplementedError server.experimental.enable_tasks(on_get_task=custom_get_task) # Verify handler is registered assert server._has_handler("tasks/get") assert server._has_handler("tasks/list") assert server._has_handler("tasks/cancel") assert server._has_handler("tasks/result") async def test_run_task_without_enable_tasks_raises() -> None: """Test that run_task raises when enable_tasks() wasn't called.""" experimental = Experimental( 
task_metadata=None, _client_capabilities=None, _session=None, _task_support=None, # Not enabled ) async def work(task: ServerTaskContext) -> CallToolResult: raise NotImplementedError with pytest.raises(RuntimeError, match="Task support not enabled"): await experimental.run_task(work) async def test_task_support_task_group_before_run_raises() -> None: """Test that accessing task_group before run() raises RuntimeError.""" task_support = TaskSupport.in_memory() with pytest.raises(RuntimeError, match="TaskSupport not running"): _ = task_support.task_group async def test_run_task_without_session_raises() -> None: """Test that run_task raises when session is not available.""" task_support = TaskSupport.in_memory() experimental = Experimental( task_metadata=None, _client_capabilities=None, _session=None, # No session _task_support=task_support, ) async def work(task: ServerTaskContext) -> CallToolResult: raise NotImplementedError with pytest.raises(RuntimeError, match="Session not available"): await experimental.run_task(work) async def test_run_task_without_task_metadata_raises() -> None: """Test that run_task raises when request is not task-augmented.""" task_support = TaskSupport.in_memory() mock_session = Mock() experimental = Experimental( task_metadata=None, # Not a task-augmented request _client_capabilities=None, _session=mock_session, _task_support=task_support, ) async def work(task: ServerTaskContext) -> CallToolResult: raise NotImplementedError with pytest.raises(RuntimeError, match="Request is not task-augmented"): await experimental.run_task(work) async def test_run_task_with_model_immediate_response() -> None: """Test that run_task includes model_immediate_response in CreateTaskResult._meta.""" work_completed = Event() immediate_response_text = "Processing your request..." 
async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool( ctx: ServerRequestContext, params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: work_completed.set() return CallToolResult(content=[TextContent(type="text", text="Done")]) return await ctx.experimental.run_task(work, model_immediate_response=immediate_response_text) server = Server( "test-run-task-immediate", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks() async with Client(server) as client: result = await client.session.experimental.call_tool_as_task("task_with_immediate", {}) assert result.meta is not None assert "io.modelcontextprotocol/model-immediate-response" in result.meta assert result.meta["io.modelcontextprotocol/model-immediate-response"] == immediate_response_text with anyio.fail_after(5): await work_completed.wait() async def test_run_task_doesnt_complete_if_already_terminal() -> None: """Test that run_task doesn't auto-complete if work manually completed the task.""" work_completed = Event() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool( ctx: ServerRequestContext, params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: manual_result = CallToolResult(content=[TextContent(type="text", text="Manually completed")]) await task.complete(manual_result, notify=False) work_completed.set() return CallToolResult(content=[TextContent(type="text", text="This should be ignored")]) return await ctx.experimental.run_task(work) server = Server( "test-already-complete", 
on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks() async with Client(server) as client: result = await client.session.experimental.call_tool_as_task("manual_complete_task", {}) task_id = result.task.task_id with anyio.fail_after(5): await work_completed.wait() with anyio.fail_after(5): while True: status = await client.session.experimental.get_task(task_id) if status.status == "completed": # pragma: no branch break async def test_run_task_doesnt_fail_if_already_terminal() -> None: """Test that run_task doesn't auto-fail if work manually failed/cancelled the task.""" work_completed = Event() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool( ctx: ServerRequestContext, params: CallToolRequestParams ) -> CallToolResult | CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: await task.fail("Manually failed", notify=False) work_completed.set() raise RuntimeError("This error should not change status") return await ctx.experimental.run_task(work) server = Server( "test-already-failed", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool, ) server.experimental.enable_tasks() async with Client(server) as client: result = await client.session.experimental.call_tool_as_task("manual_cancel_task", {}) task_id = result.task.task_id with anyio.fail_after(5): await work_completed.wait() with anyio.fail_after(5): while True: status = await client.session.experimental.get_task(task_id) if status.status == "failed": # pragma: no branch break assert status.status_message == "Manually failed"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_run_task_flow.py", "license": "MIT License", "lines": 273, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_server.py
"""Tests for server-side task support (handlers, capabilities, integration).""" from datetime import datetime, timezone from typing import Any import anyio import pytest from mcp import Client from mcp.client.session import ClientSession from mcp.server import Server, ServerRequestContext from mcp.server.lowlevel import NotificationOptions from mcp.server.models import InitializationOptions from mcp.server.session import ServerSession from mcp.shared.exceptions import MCPError from mcp.shared.message import ServerMessageMetadata, SessionMessage from mcp.shared.response_router import ResponseRouter from mcp.shared.session import RequestResponder from mcp.types import ( INVALID_REQUEST, TASK_FORBIDDEN, TASK_OPTIONAL, TASK_REQUIRED, CallToolRequest, CallToolRequestParams, CallToolResult, CancelTaskRequestParams, CancelTaskResult, ClientResult, ErrorData, GetTaskPayloadRequest, GetTaskPayloadRequestParams, GetTaskPayloadResult, GetTaskRequestParams, GetTaskResult, JSONRPCError, JSONRPCNotification, JSONRPCResponse, ListTasksResult, ListToolsResult, PaginatedRequestParams, SamplingMessage, ServerCapabilities, ServerNotification, ServerRequest, Task, TaskMetadata, TextContent, Tool, ToolExecution, ) pytestmark = pytest.mark.anyio async def test_list_tasks_handler() -> None: """Test that experimental list_tasks handler works via Client.""" now = datetime.now(timezone.utc) test_tasks = [ Task(task_id="task-1", status="working", created_at=now, last_updated_at=now, ttl=60000, poll_interval=1000), Task(task_id="task-2", status="completed", created_at=now, last_updated_at=now, ttl=60000, poll_interval=1000), ] async def handle_list_tasks(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListTasksResult: return ListTasksResult(tasks=test_tasks) server = Server("test") server.experimental.enable_tasks(on_list_tasks=handle_list_tasks) async with Client(server) as client: result = await client.session.experimental.list_tasks() assert len(result.tasks) == 2 
assert result.tasks[0].task_id == "task-1" assert result.tasks[1].task_id == "task-2" async def test_get_task_handler() -> None: """Test that experimental get_task handler works via Client.""" async def handle_get_task(ctx: ServerRequestContext, params: GetTaskRequestParams) -> GetTaskResult: now = datetime.now(timezone.utc) return GetTaskResult( task_id=params.task_id, status="working", created_at=now, last_updated_at=now, ttl=60000, poll_interval=1000, ) server = Server("test") server.experimental.enable_tasks(on_get_task=handle_get_task) async with Client(server) as client: result = await client.session.experimental.get_task("test-task-123") assert result.task_id == "test-task-123" assert result.status == "working" async def test_get_task_result_handler() -> None: """Test that experimental get_task_result handler works via Client.""" async def handle_get_task_result( ctx: ServerRequestContext, params: GetTaskPayloadRequestParams ) -> GetTaskPayloadResult: return GetTaskPayloadResult() server = Server("test") server.experimental.enable_tasks(on_task_result=handle_get_task_result) async with Client(server) as client: result = await client.session.send_request( GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="test-task-123")), GetTaskPayloadResult, ) assert isinstance(result, GetTaskPayloadResult) async def test_cancel_task_handler() -> None: """Test that experimental cancel_task handler works via Client.""" async def handle_cancel_task(ctx: ServerRequestContext, params: CancelTaskRequestParams) -> CancelTaskResult: now = datetime.now(timezone.utc) return CancelTaskResult( task_id=params.task_id, status="cancelled", created_at=now, last_updated_at=now, ttl=60000, ) server = Server("test") server.experimental.enable_tasks(on_cancel_task=handle_cancel_task) async with Client(server) as client: result = await client.session.experimental.cancel_task("test-task-123") assert result.task_id == "test-task-123" assert result.status == "cancelled" async def 
test_server_capabilities_include_tasks() -> None: """Test that server capabilities include tasks when handlers are registered.""" server = Server("test") async def noop_list_tasks(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListTasksResult: raise NotImplementedError async def noop_cancel_task(ctx: ServerRequestContext, params: CancelTaskRequestParams) -> CancelTaskResult: raise NotImplementedError server.experimental.enable_tasks(on_list_tasks=noop_list_tasks, on_cancel_task=noop_cancel_task) capabilities = server.get_capabilities(notification_options=NotificationOptions(), experimental_capabilities={}) assert capabilities.tasks is not None assert capabilities.tasks.list is not None assert capabilities.tasks.cancel is not None assert capabilities.tasks.requests is not None assert capabilities.tasks.requests.tools is not None @pytest.mark.skip( reason="TODO(maxisbey): enable_tasks registers default handlers for all task methods, " "so partial capabilities aren't possible yet. Low-level API should support " "selectively enabling/disabling task capabilities." 
) async def test_server_capabilities_partial_tasks() -> None: # pragma: no cover """Test capabilities with only some task handlers registered.""" server = Server("test") async def noop_list_tasks(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListTasksResult: raise NotImplementedError # Only list_tasks registered, not cancel_task server.experimental.enable_tasks(on_list_tasks=noop_list_tasks) capabilities = server.get_capabilities(notification_options=NotificationOptions(), experimental_capabilities={}) assert capabilities.tasks is not None assert capabilities.tasks.list is not None assert capabilities.tasks.cancel is None # Not registered async def test_tool_with_task_execution_metadata() -> None: """Test that tools can declare task execution mode.""" async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: return ListToolsResult( tools=[ Tool( name="quick_tool", description="Fast tool", input_schema={"type": "object", "properties": {}}, execution=ToolExecution(task_support=TASK_FORBIDDEN), ), Tool( name="long_tool", description="Long running tool", input_schema={"type": "object", "properties": {}}, execution=ToolExecution(task_support=TASK_REQUIRED), ), Tool( name="flexible_tool", description="Can be either", input_schema={"type": "object", "properties": {}}, execution=ToolExecution(task_support=TASK_OPTIONAL), ), ] ) server = Server("test", on_list_tools=handle_list_tools) async with Client(server) as client: result = await client.list_tools() tools = result.tools assert tools[0].execution is not None assert tools[0].execution.task_support == TASK_FORBIDDEN assert tools[1].execution is not None assert tools[1].execution.task_support == TASK_REQUIRED assert tools[2].execution is not None assert tools[2].execution.task_support == TASK_OPTIONAL async def test_task_metadata_in_call_tool_request() -> None: """Test that task metadata is accessible via ctx when calling a tool.""" 
captured_task_metadata: TaskMetadata | None = None async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult: nonlocal captured_task_metadata captured_task_metadata = ctx.experimental.task_metadata return CallToolResult(content=[TextContent(type="text", text="done")]) server = Server("test", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) async with Client(server) as client: # Call tool with task metadata await client.session.send_request( CallToolRequest( params=CallToolRequestParams( name="long_task", arguments={}, task=TaskMetadata(ttl=60000), ), ), CallToolResult, ) assert captured_task_metadata is not None assert captured_task_metadata.ttl == 60000 async def test_task_metadata_is_task_property() -> None: """Test that ctx.experimental.is_task works correctly.""" is_task_values: list[bool] = [] async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult: is_task_values.append(ctx.experimental.is_task) return CallToolResult(content=[TextContent(type="text", text="done")]) server = Server("test", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) async with Client(server) as client: # Call without task metadata await client.session.send_request( CallToolRequest(params=CallToolRequestParams(name="test_tool", arguments={})), CallToolResult, ) # Call with task metadata await client.session.send_request( CallToolRequest( params=CallToolRequestParams(name="test_tool", arguments={}, task=TaskMetadata(ttl=60000)), ), CallToolResult, ) assert len(is_task_values) == 2 assert is_task_values[0] is False # First call without task assert is_task_values[1] is True # Second call with task 
async def test_update_capabilities_no_handlers() -> None: """Test that update_capabilities returns early when no task handlers are registered.""" server = Server("test-no-handlers") _ = server.experimental caps = server.get_capabilities(NotificationOptions(), {}) assert caps.tasks is None async def test_update_capabilities_partial_handlers() -> None: """Test that update_capabilities skips list/cancel when only tasks/get is registered.""" server = Server("test-partial") # Access .experimental to create the ExperimentalHandlers instance exp = server.experimental # Second access returns the same cached instance assert server.experimental is exp async def noop_get(ctx: ServerRequestContext, params: GetTaskRequestParams) -> GetTaskResult: raise NotImplementedError server._add_request_handler("tasks/get", noop_get) caps = server.get_capabilities(NotificationOptions(), {}) assert caps.tasks is not None assert caps.tasks.list is None assert caps.tasks.cancel is None async def test_default_task_handlers_via_enable_tasks() -> None: """Test that enable_tasks() auto-registers working default handlers.""" server = Server("test-default-handlers") task_support = server.experimental.enable_tasks() store = task_support.store server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def message_handler( message: RequestResponder[ServerRequest, ClientResult] | ServerNotification | Exception, ) -> None: ... 
# pragma: no branch async def run_server() -> None: async with task_support.run(): async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=server.get_capabilities( notification_options=NotificationOptions(), experimental_capabilities={}, ), ), ) as server_session: task_support.configure_session(server_session) async for message in server_session.incoming_messages: await server._handle_message(message, server_session, {}, False) async with anyio.create_task_group() as tg: tg.start_soon(run_server) async with ClientSession( server_to_client_receive, client_to_server_send, message_handler=message_handler, ) as client_session: await client_session.initialize() # Create a task directly in the store for testing task = await store.create_task(TaskMetadata(ttl=60000)) # Test list_tasks (default handler) list_result = await client_session.experimental.list_tasks() assert len(list_result.tasks) == 1 assert list_result.tasks[0].task_id == task.task_id # Test get_task (default handler - found) get_result = await client_session.experimental.get_task(task.task_id) assert get_result.task_id == task.task_id assert get_result.status == "working" # Test get_task (default handler - not found path) with pytest.raises(MCPError, match="not found"): await client_session.experimental.get_task("nonexistent-task") # Create a completed task to test get_task_result completed_task = await store.create_task(TaskMetadata(ttl=60000)) await store.store_result( completed_task.task_id, CallToolResult(content=[TextContent(type="text", text="Test result")]) ) await store.update_task(completed_task.task_id, status="completed") # Test get_task_result (default handler) payload_result = await client_session.send_request( GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=completed_task.task_id)), GetTaskPayloadResult, ) # The result should have the related-task metadata assert 
payload_result.meta is not None assert "io.modelcontextprotocol/related-task" in payload_result.meta # Test cancel_task (default handler) cancel_result = await client_session.experimental.cancel_task(task.task_id) assert cancel_result.task_id == task.task_id assert cancel_result.status == "cancelled" tg.cancel_scope.cancel() @pytest.mark.anyio async def test_build_elicit_form_request() -> None: """Test that _build_elicit_form_request builds a proper elicitation request.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions(server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities()), ) as server_session: # Test without task_id request = server_session._build_elicit_form_request( message="Test message", requested_schema={"type": "object", "properties": {"answer": {"type": "string"}}}, ) assert request.method == "elicitation/create" assert request.params is not None assert request.params["message"] == "Test message" # Test with related_task_id (adds related-task metadata) request_with_task = server_session._build_elicit_form_request( message="Task message", requested_schema={"type": "object"}, related_task_id="test-task-123", ) assert request_with_task.method == "elicitation/create" assert request_with_task.params is not None assert "_meta" in request_with_task.params assert "io.modelcontextprotocol/related-task" in request_with_task.params["_meta"] assert ( request_with_task.params["_meta"]["io.modelcontextprotocol/related-task"]["taskId"] == "test-task-123" ) finally: await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_build_elicit_url_request() -> 
None: """Test that _build_elicit_url_request builds a proper URL mode elicitation request.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions(server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities()), ) as server_session: # Test without related_task_id request = server_session._build_elicit_url_request( message="Please authorize with GitHub", url="https://github.com/login/oauth/authorize", elicitation_id="oauth-123", ) assert request.method == "elicitation/create" assert request.params is not None assert request.params["message"] == "Please authorize with GitHub" assert request.params["url"] == "https://github.com/login/oauth/authorize" assert request.params["elicitationId"] == "oauth-123" assert request.params["mode"] == "url" # Test with related_task_id (adds related-task metadata) request_with_task = server_session._build_elicit_url_request( message="OAuth required", url="https://example.com/oauth", elicitation_id="oauth-456", related_task_id="test-task-789", ) assert request_with_task.method == "elicitation/create" assert request_with_task.params is not None assert "_meta" in request_with_task.params assert "io.modelcontextprotocol/related-task" in request_with_task.params["_meta"] assert ( request_with_task.params["_meta"]["io.modelcontextprotocol/related-task"]["taskId"] == "test-task-789" ) finally: await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_build_create_message_request() -> None: """Test that _build_create_message_request builds a proper sampling request.""" server_to_client_send, server_to_client_receive = 
anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities(), ), ) as server_session: messages = [ SamplingMessage(role="user", content=TextContent(type="text", text="Hello")), ] # Test without task_id request = server_session._build_create_message_request( messages=messages, max_tokens=100, system_prompt="You are helpful", ) assert request.method == "sampling/createMessage" assert request.params is not None assert request.params["maxTokens"] == 100 # Test with related_task_id (adds related-task metadata) request_with_task = server_session._build_create_message_request( messages=messages, max_tokens=50, related_task_id="sampling-task-456", ) assert request_with_task.method == "sampling/createMessage" assert request_with_task.params is not None assert "_meta" in request_with_task.params assert "io.modelcontextprotocol/related-task" in request_with_task.params["_meta"] assert ( request_with_task.params["_meta"]["io.modelcontextprotocol/related-task"]["taskId"] == "sampling-task-456" ) finally: await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_send_message() -> None: """Test that send_message sends a raw session message.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities(), ), ) as server_session: # Create a test 
message notification = JSONRPCNotification(jsonrpc="2.0", method="test/notification") message = SessionMessage( message=notification, metadata=ServerMessageMetadata(related_request_id="test-req-1"), ) # Send the message await server_session.send_message(message) # Verify it was sent to the stream received = await server_to_client_receive.receive() assert isinstance(received.message, JSONRPCNotification) assert received.message.method == "test/notification" finally: # pragma: lax no cover await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_response_routing_success() -> None: """Test that response routing works for success responses.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) # Track routed responses with event for synchronization routed_responses: list[dict[str, Any]] = [] response_received = anyio.Event() class TestRouter(ResponseRouter): def route_response(self, request_id: str | int, response: dict[str, Any]) -> bool: routed_responses.append({"id": request_id, "response": response}) response_received.set() return True # Handled def route_error(self, request_id: str | int, error: ErrorData) -> bool: raise NotImplementedError try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities(), ), ) as server_session: router = TestRouter() server_session.add_response_router(router) # Simulate receiving a response from client response = JSONRPCResponse(jsonrpc="2.0", id="test-req-1", result={"status": "ok"}) message = SessionMessage(message=response) # Send from "client" side await client_to_server_send.send(message) # Wait for response to be routed 
with anyio.fail_after(5): await response_received.wait() # Verify response was routed assert len(routed_responses) == 1 assert routed_responses[0]["id"] == "test-req-1" assert routed_responses[0]["response"]["status"] == "ok" finally: # pragma: lax no cover await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_response_routing_error() -> None: """Test that error routing works for error responses.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) # Track routed errors with event for synchronization routed_errors: list[dict[str, Any]] = [] error_received = anyio.Event() class TestRouter(ResponseRouter): def route_response(self, request_id: str | int, response: dict[str, Any]) -> bool: raise NotImplementedError def route_error(self, request_id: str | int, error: ErrorData) -> bool: routed_errors.append({"id": request_id, "error": error}) error_received.set() return True # Handled try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities(), ), ) as server_session: router = TestRouter() server_session.add_response_router(router) # Simulate receiving an error response from client error_data = ErrorData(code=INVALID_REQUEST, message="Test error") error_response = JSONRPCError(jsonrpc="2.0", id="test-req-2", error=error_data) message = SessionMessage(message=error_response) # Send from "client" side await client_to_server_send.send(message) # Wait for error to be routed with anyio.fail_after(5): await error_received.wait() # Verify error was routed assert len(routed_errors) == 1 assert routed_errors[0]["id"] == "test-req-2" assert 
routed_errors[0]["error"].message == "Test error" finally: # pragma: lax no cover await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_response_routing_skips_non_matching_routers() -> None: """Test that routing continues to next router when first doesn't match.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) # Track which routers were called router_calls: list[str] = [] response_received = anyio.Event() class NonMatchingRouter(ResponseRouter): def route_response(self, request_id: str | int, response: dict[str, Any]) -> bool: router_calls.append("non_matching_response") return False # Doesn't handle it def route_error(self, request_id: str | int, error: ErrorData) -> bool: raise NotImplementedError class MatchingRouter(ResponseRouter): def route_response(self, request_id: str | int, response: dict[str, Any]) -> bool: router_calls.append("matching_response") response_received.set() return True # Handles it def route_error(self, request_id: str | int, error: ErrorData) -> bool: raise NotImplementedError try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities(), ), ) as server_session: # Add non-matching router first, then matching router server_session.add_response_router(NonMatchingRouter()) server_session.add_response_router(MatchingRouter()) # Send a response - should skip first router and be handled by second response = JSONRPCResponse(jsonrpc="2.0", id="test-req-1", result={"status": "ok"}) message = SessionMessage(message=response) await client_to_server_send.send(message) with anyio.fail_after(5): await response_received.wait() # Verify both 
routers were called (first returned False, second returned True) assert router_calls == ["non_matching_response", "matching_response"] finally: # pragma: lax no cover await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose() @pytest.mark.anyio async def test_error_routing_skips_non_matching_routers() -> None: """Test that error routing continues to next router when first doesn't match.""" server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) # Track which routers were called router_calls: list[str] = [] error_received = anyio.Event() class NonMatchingRouter(ResponseRouter): def route_response(self, request_id: str | int, response: dict[str, Any]) -> bool: raise NotImplementedError def route_error(self, request_id: str | int, error: ErrorData) -> bool: router_calls.append("non_matching_error") return False # Doesn't handle it class MatchingRouter(ResponseRouter): def route_response(self, request_id: str | int, response: dict[str, Any]) -> bool: raise NotImplementedError def route_error(self, request_id: str | int, error: ErrorData) -> bool: router_calls.append("matching_error") error_received.set() return True # Handles it try: async with ServerSession( client_to_server_receive, server_to_client_send, InitializationOptions( server_name="test-server", server_version="1.0.0", capabilities=ServerCapabilities(), ), ) as server_session: # Add non-matching router first, then matching router server_session.add_response_router(NonMatchingRouter()) server_session.add_response_router(MatchingRouter()) # Send an error - should skip first router and be handled by second error_data = ErrorData(code=INVALID_REQUEST, message="Test error") error_response = JSONRPCError(jsonrpc="2.0", id="test-req-2", error=error_data) message = 
SessionMessage(message=error_response) await client_to_server_send.send(message) with anyio.fail_after(5): await error_received.wait() # Verify both routers were called (first returned False, second returned True) assert router_calls == ["non_matching_error", "matching_error"] finally: # pragma: lax no cover await server_to_client_send.aclose() await server_to_client_receive.aclose() await client_to_server_send.aclose() await client_to_server_receive.aclose()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_server.py", "license": "MIT License", "lines": 656, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_server_task_context.py
"""Tests for ServerTaskContext.""" import asyncio from unittest.mock import AsyncMock, Mock import anyio import pytest from mcp.server.experimental.task_context import ServerTaskContext from mcp.server.experimental.task_result_handler import TaskResultHandler from mcp.shared.exceptions import MCPError from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.shared.experimental.tasks.message_queue import InMemoryTaskMessageQueue from mcp.types import ( CallToolResult, ClientCapabilities, ClientTasksCapability, ClientTasksRequestsCapability, Implementation, InitializeRequestParams, JSONRPCRequest, SamplingMessage, TaskMetadata, TasksCreateElicitationCapability, TasksCreateMessageCapability, TasksElicitationCapability, TasksSamplingCapability, TextContent, ) @pytest.mark.anyio async def test_server_task_context_properties() -> None: """Test ServerTaskContext property accessors.""" store = InMemoryTaskStore() mock_session = Mock() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-123") ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, ) assert ctx.task_id == "test-123" assert ctx.task.task_id == "test-123" assert ctx.is_cancelled is False store.cleanup() @pytest.mark.anyio async def test_server_task_context_request_cancellation() -> None: """Test ServerTaskContext.request_cancellation().""" store = InMemoryTaskStore() mock_session = Mock() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, ) assert ctx.is_cancelled is False ctx.request_cancellation() assert ctx.is_cancelled is True store.cleanup() @pytest.mark.anyio async def test_server_task_context_update_status_with_notify() -> None: """Test update_status sends notification when notify=True.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.send_notification 
= AsyncMock() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, ) await ctx.update_status("Working...", notify=True) mock_session.send_notification.assert_called_once() store.cleanup() @pytest.mark.anyio async def test_server_task_context_update_status_without_notify() -> None: """Test update_status skips notification when notify=False.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.send_notification = AsyncMock() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, ) await ctx.update_status("Working...", notify=False) mock_session.send_notification.assert_not_called() store.cleanup() @pytest.mark.anyio async def test_server_task_context_complete_with_notify() -> None: """Test complete sends notification when notify=True.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.send_notification = AsyncMock() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, ) result = CallToolResult(content=[TextContent(type="text", text="Done")]) await ctx.complete(result, notify=True) mock_session.send_notification.assert_called_once() store.cleanup() @pytest.mark.anyio async def test_server_task_context_fail_with_notify() -> None: """Test fail sends notification when notify=True.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.send_notification = AsyncMock() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, ) await ctx.fail("Something went wrong", notify=True) mock_session.send_notification.assert_called_once() store.cleanup() @pytest.mark.anyio 
async def test_elicit_raises_when_client_lacks_capability() -> None: """Test that elicit() raises MCPError when client doesn't support elicitation.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.check_client_capability = Mock(return_value=False) queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=handler, ) with pytest.raises(MCPError) as exc_info: await ctx.elicit(message="Test?", requested_schema={"type": "object"}) assert "elicitation capability" in exc_info.value.error.message mock_session.check_client_capability.assert_called_once() store.cleanup() @pytest.mark.anyio async def test_create_message_raises_when_client_lacks_capability() -> None: """Test that create_message() raises MCPError when client doesn't support sampling.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.check_client_capability = Mock(return_value=False) queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=handler, ) with pytest.raises(MCPError) as exc_info: await ctx.create_message(messages=[], max_tokens=100) assert "sampling capability" in exc_info.value.error.message mock_session.check_client_capability.assert_called_once() store.cleanup() @pytest.mark.anyio async def test_elicit_raises_without_handler() -> None: """Test that elicit() raises when handler is not provided.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=None, ) with pytest.raises(RuntimeError, 
match="handler is required"): await ctx.elicit(message="Test?", requested_schema={"type": "object"}) store.cleanup() @pytest.mark.anyio async def test_elicit_url_raises_without_handler() -> None: """Test that elicit_url() raises when handler is not provided.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=None, ) with pytest.raises(RuntimeError, match="handler is required for elicit_url"): await ctx.elicit_url( message="Please authorize", url="https://example.com/oauth", elicitation_id="oauth-123", ) store.cleanup() @pytest.mark.anyio async def test_create_message_raises_without_handler() -> None: """Test that create_message() raises when handler is not provided.""" store = InMemoryTaskStore() mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=None, ) with pytest.raises(RuntimeError, match="handler is required"): await ctx.create_message(messages=[], max_tokens=100) store.cleanup() @pytest.mark.anyio async def test_elicit_queues_request_and_waits_for_response() -> None: """Test that elicit() queues request and waits for response.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) mock_session._build_elicit_form_request = Mock( return_value=JSONRPCRequest( jsonrpc="2.0", id="test-req-1", method="elicitation/create", params={"message": "Test?", "_meta": {}}, ) ) ctx = ServerTaskContext( task=task, store=store, 
session=mock_session, queue=queue, handler=handler, ) elicit_result = None async def run_elicit() -> None: nonlocal elicit_result elicit_result = await ctx.elicit( message="Test?", requested_schema={"type": "object"}, ) async with anyio.create_task_group() as tg: tg.start_soon(run_elicit) # Wait for request to be queued await queue.wait_for_message(task.task_id) # Verify task is in input_required status updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Dequeue and simulate response msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None # Resolve with mock elicitation response msg.resolver.set_result({"action": "accept", "content": {"name": "Alice"}}) # Verify result assert elicit_result is not None assert elicit_result.action == "accept" assert elicit_result.content == {"name": "Alice"} # Verify task is back to working final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" store.cleanup() @pytest.mark.anyio async def test_elicit_url_queues_request_and_waits_for_response() -> None: """Test that elicit_url() queues request and waits for response.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) mock_session._build_elicit_url_request = Mock( return_value=JSONRPCRequest( jsonrpc="2.0", id="test-url-req-1", method="elicitation/create", params={"message": "Authorize", "url": "https://example.com", "elicitationId": "123", "mode": "url"}, ) ) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=handler, ) elicit_result = None async def run_elicit_url() -> None: nonlocal elicit_result elicit_result = await ctx.elicit_url( message="Authorize", 
url="https://example.com/oauth", elicitation_id="oauth-123", ) async with anyio.create_task_group() as tg: tg.start_soon(run_elicit_url) # Wait for request to be queued await queue.wait_for_message(task.task_id) # Verify task is in input_required status updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Dequeue and simulate response msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None # Resolve with mock elicitation response (URL mode just returns action) msg.resolver.set_result({"action": "accept"}) # Verify result assert elicit_result is not None assert elicit_result.action == "accept" # Verify task is back to working final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" store.cleanup() @pytest.mark.anyio async def test_create_message_queues_request_and_waits_for_response() -> None: """Test that create_message() queues request and waits for response.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) mock_session._build_create_message_request = Mock( return_value=JSONRPCRequest( jsonrpc="2.0", id="test-req-2", method="sampling/createMessage", params={"messages": [], "maxTokens": 100, "_meta": {}}, ) ) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=handler, ) sampling_result = None async def run_sampling() -> None: nonlocal sampling_result sampling_result = await ctx.create_message( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], max_tokens=100, ) async with anyio.create_task_group() as tg: tg.start_soon(run_sampling) # Wait for request to be queued await queue.wait_for_message(task.task_id) # Verify task 
is in input_required status updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Dequeue and simulate response msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None # Resolve with mock sampling response msg.resolver.set_result( { "role": "assistant", "content": {"type": "text", "text": "Hello back!"}, "model": "test-model", "stopReason": "endTurn", } ) # Verify result assert sampling_result is not None assert sampling_result.role == "assistant" assert sampling_result.model == "test-model" # Verify task is back to working final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" store.cleanup() @pytest.mark.anyio async def test_elicit_restores_status_on_cancellation() -> None: """Test that elicit() restores task status to working when cancelled.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) mock_session._build_elicit_form_request = Mock( return_value=JSONRPCRequest( jsonrpc="2.0", id="test-req-cancel", method="elicitation/create", params={"message": "Test?", "_meta": {}}, ) ) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=handler, ) cancelled_error_raised = False async with anyio.create_task_group() as tg: async def do_elicit() -> None: nonlocal cancelled_error_raised try: await ctx.elicit( message="Test?", requested_schema={"type": "object"}, ) except anyio.get_cancelled_exc_class(): cancelled_error_raised = True # Don't re-raise - let the test continue tg.start_soon(do_elicit) # Wait for request to be queued await queue.wait_for_message(task.task_id) # Verify task is in input_required status updated_task = await store.get_task(task.task_id) assert 
updated_task is not None assert updated_task.status == "input_required" # Get the queued message and set cancellation exception on its resolver msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None # Trigger cancellation by setting exception (use asyncio.CancelledError directly) msg.resolver.set_exception(asyncio.CancelledError()) # Verify task is back to working after cancellation final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" assert cancelled_error_raised store.cleanup() @pytest.mark.anyio async def test_create_message_restores_status_on_cancellation() -> None: """Test that create_message() restores task status to working when cancelled.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() handler = TaskResultHandler(store, queue) task = await store.create_task(TaskMetadata(ttl=60000)) mock_session = Mock() mock_session.check_client_capability = Mock(return_value=True) mock_session._build_create_message_request = Mock( return_value=JSONRPCRequest( jsonrpc="2.0", id="test-req-cancel-2", method="sampling/createMessage", params={"messages": [], "maxTokens": 100, "_meta": {}}, ) ) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=handler, ) cancelled_error_raised = False async with anyio.create_task_group() as tg: async def do_sampling() -> None: nonlocal cancelled_error_raised try: await ctx.create_message( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], max_tokens=100, ) except anyio.get_cancelled_exc_class(): cancelled_error_raised = True # Don't re-raise tg.start_soon(do_sampling) # Wait for request to be queued await queue.wait_for_message(task.task_id) # Verify task is in input_required status updated_task = await store.get_task(task.task_id) assert updated_task is not None assert updated_task.status == "input_required" # Get the queued message and set 
cancellation exception on its resolver msg = await queue.dequeue(task.task_id) assert msg is not None assert msg.resolver is not None # Trigger cancellation by setting exception (use asyncio.CancelledError directly) msg.resolver.set_exception(asyncio.CancelledError()) # Verify task is back to working after cancellation final_task = await store.get_task(task.task_id) assert final_task is not None assert final_task.status == "working" assert cancelled_error_raised store.cleanup() @pytest.mark.anyio async def test_elicit_as_task_raises_without_handler() -> None: """Test that elicit_as_task() raises when handler is not provided.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) # Create mock session with proper client capabilities mock_session = Mock() mock_session.client_params = InitializeRequestParams( protocol_version="2025-01-01", capabilities=ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) ), client_info=Implementation(name="test", version="1.0"), ) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=None, ) with pytest.raises(RuntimeError, match="handler is required for elicit_as_task"): await ctx.elicit_as_task(message="Test?", requested_schema={"type": "object"}) store.cleanup() @pytest.mark.anyio async def test_create_message_as_task_raises_without_handler() -> None: """Test that create_message_as_task() raises when handler is not provided.""" store = InMemoryTaskStore() queue = InMemoryTaskMessageQueue() task = await store.create_task(TaskMetadata(ttl=60000)) # Create mock session with proper client capabilities mock_session = Mock() mock_session.client_params = InitializeRequestParams( protocol_version="2025-01-01", capabilities=ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( 
sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) ), client_info=Implementation(name="test", version="1.0"), ) ctx = ServerTaskContext( task=task, store=store, session=mock_session, queue=queue, handler=None, ) with pytest.raises(RuntimeError, match="handler is required for create_message_as_task"): await ctx.create_message_as_task( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], max_tokens=100, ) store.cleanup()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_server_task_context.py", "license": "MIT License", "lines": 569, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_store.py
"""Tests for InMemoryTaskStore.""" from collections.abc import AsyncIterator from datetime import datetime, timedelta, timezone import pytest from mcp.shared.exceptions import MCPError from mcp.shared.experimental.tasks.helpers import cancel_task from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.types import INVALID_PARAMS, CallToolResult, TaskMetadata, TextContent @pytest.fixture async def store() -> AsyncIterator[InMemoryTaskStore]: """Provide a clean InMemoryTaskStore for each test with automatic cleanup.""" store = InMemoryTaskStore() yield store store.cleanup() @pytest.mark.anyio async def test_create_and_get(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore create and get operations.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) assert task.task_id is not None assert task.status == "working" assert task.ttl == 60000 retrieved = await store.get_task(task.task_id) assert retrieved is not None assert retrieved.task_id == task.task_id assert retrieved.status == "working" @pytest.mark.anyio async def test_create_with_custom_id(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore create with custom task ID.""" task = await store.create_task( metadata=TaskMetadata(ttl=60000), task_id="my-custom-id", ) assert task.task_id == "my-custom-id" assert task.status == "working" retrieved = await store.get_task("my-custom-id") assert retrieved is not None assert retrieved.task_id == "my-custom-id" @pytest.mark.anyio async def test_create_duplicate_id_raises(store: InMemoryTaskStore) -> None: """Test that creating a task with duplicate ID raises.""" await store.create_task(metadata=TaskMetadata(ttl=60000), task_id="duplicate") with pytest.raises(ValueError, match="already exists"): await store.create_task(metadata=TaskMetadata(ttl=60000), task_id="duplicate") @pytest.mark.anyio async def test_get_nonexistent_returns_none(store: InMemoryTaskStore) -> None: """Test that getting a nonexistent task 
returns None.""" retrieved = await store.get_task("nonexistent") assert retrieved is None @pytest.mark.anyio async def test_update_status(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore status updates.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) updated = await store.update_task(task.task_id, status="completed", status_message="All done!") assert updated.status == "completed" assert updated.status_message == "All done!" retrieved = await store.get_task(task.task_id) assert retrieved is not None assert retrieved.status == "completed" assert retrieved.status_message == "All done!" @pytest.mark.anyio async def test_update_nonexistent_raises(store: InMemoryTaskStore) -> None: """Test that updating a nonexistent task raises.""" with pytest.raises(ValueError, match="not found"): await store.update_task("nonexistent", status="completed") @pytest.mark.anyio async def test_store_and_get_result(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore result storage and retrieval.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) # Store result result = CallToolResult(content=[TextContent(type="text", text="Result data")]) await store.store_result(task.task_id, result) # Retrieve result retrieved_result = await store.get_result(task.task_id) assert retrieved_result == result @pytest.mark.anyio async def test_get_result_nonexistent_returns_none(store: InMemoryTaskStore) -> None: """Test that getting result for nonexistent task returns None.""" result = await store.get_result("nonexistent") assert result is None @pytest.mark.anyio async def test_get_result_no_result_returns_none(store: InMemoryTaskStore) -> None: """Test that getting result when none stored returns None.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) result = await store.get_result(task.task_id) assert result is None @pytest.mark.anyio async def test_list_tasks(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore list 
operation.""" # Create multiple tasks for _ in range(3): await store.create_task(metadata=TaskMetadata(ttl=60000)) tasks, next_cursor = await store.list_tasks() assert len(tasks) == 3 assert next_cursor is None # Less than page size @pytest.mark.anyio async def test_list_tasks_pagination() -> None: """Test InMemoryTaskStore pagination.""" # Needs custom page_size, can't use fixture store = InMemoryTaskStore(page_size=2) # Create 5 tasks for _ in range(5): await store.create_task(metadata=TaskMetadata(ttl=60000)) # First page tasks, next_cursor = await store.list_tasks() assert len(tasks) == 2 assert next_cursor is not None # Second page tasks, next_cursor = await store.list_tasks(cursor=next_cursor) assert len(tasks) == 2 assert next_cursor is not None # Third page (last) tasks, next_cursor = await store.list_tasks(cursor=next_cursor) assert len(tasks) == 1 assert next_cursor is None store.cleanup() @pytest.mark.anyio async def test_list_tasks_invalid_cursor(store: InMemoryTaskStore) -> None: """Test that invalid cursor raises.""" await store.create_task(metadata=TaskMetadata(ttl=60000)) with pytest.raises(ValueError, match="Invalid cursor"): await store.list_tasks(cursor="invalid-cursor") @pytest.mark.anyio async def test_delete_task(store: InMemoryTaskStore) -> None: """Test InMemoryTaskStore delete operation.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) deleted = await store.delete_task(task.task_id) assert deleted is True retrieved = await store.get_task(task.task_id) assert retrieved is None # Delete non-existent deleted = await store.delete_task(task.task_id) assert deleted is False @pytest.mark.anyio async def test_get_all_tasks_helper(store: InMemoryTaskStore) -> None: """Test the get_all_tasks debugging helper.""" await store.create_task(metadata=TaskMetadata(ttl=60000)) await store.create_task(metadata=TaskMetadata(ttl=60000)) all_tasks = store.get_all_tasks() assert len(all_tasks) == 2 @pytest.mark.anyio async def 
test_store_result_nonexistent_raises(store: InMemoryTaskStore) -> None: """Test that storing result for nonexistent task raises ValueError.""" result = CallToolResult(content=[TextContent(type="text", text="Result")]) with pytest.raises(ValueError, match="not found"): await store.store_result("nonexistent-id", result) @pytest.mark.anyio async def test_create_task_with_null_ttl(store: InMemoryTaskStore) -> None: """Test creating task with null TTL (never expires).""" task = await store.create_task(metadata=TaskMetadata(ttl=None)) assert task.ttl is None # Task should persist (not expire) retrieved = await store.get_task(task.task_id) assert retrieved is not None @pytest.mark.anyio async def test_task_expiration_cleanup(store: InMemoryTaskStore) -> None: """Test that expired tasks are cleaned up lazily.""" # Create a task with very short TTL task = await store.create_task(metadata=TaskMetadata(ttl=1)) # 1ms TTL # Manually force the expiry to be in the past stored = store._tasks.get(task.task_id) assert stored is not None stored.expires_at = datetime.now(timezone.utc) - timedelta(seconds=10) # Task should still exist in internal dict but be expired assert task.task_id in store._tasks # Any access operation should clean up expired tasks # list_tasks triggers cleanup tasks, _ = await store.list_tasks() # Expired task should be cleaned up assert task.task_id not in store._tasks assert len(tasks) == 0 @pytest.mark.anyio async def test_task_with_null_ttl_never_expires(store: InMemoryTaskStore) -> None: """Test that tasks with null TTL never expire during cleanup.""" # Create task with null TTL task = await store.create_task(metadata=TaskMetadata(ttl=None)) # Verify internal storage has no expiry stored = store._tasks.get(task.task_id) assert stored is not None assert stored.expires_at is None # Access operations should NOT remove this task await store.list_tasks() await store.get_task(task.task_id) # Task should still exist assert task.task_id in store._tasks retrieved = 
await store.get_task(task.task_id) assert retrieved is not None @pytest.mark.anyio async def test_terminal_task_ttl_reset(store: InMemoryTaskStore) -> None: """Test that TTL is reset when task enters terminal state.""" # Create task with short TTL task = await store.create_task(metadata=TaskMetadata(ttl=60000)) # 60s # Get the initial expiry stored = store._tasks.get(task.task_id) assert stored is not None initial_expiry = stored.expires_at assert initial_expiry is not None # Update to terminal state (completed) await store.update_task(task.task_id, status="completed") # Expiry should be reset to a new time (from now + TTL) new_expiry = stored.expires_at assert new_expiry is not None assert new_expiry >= initial_expiry @pytest.mark.anyio async def test_terminal_status_transition_rejected(store: InMemoryTaskStore) -> None: """Test that transitions from terminal states are rejected. Per spec: Terminal states (completed, failed, cancelled) MUST NOT transition to any other status. """ # Test each terminal status for terminal_status in ("completed", "failed", "cancelled"): task = await store.create_task(metadata=TaskMetadata(ttl=60000)) # Move to terminal state await store.update_task(task.task_id, status=terminal_status) # Attempting to transition to any other status should raise with pytest.raises(ValueError, match="Cannot transition from terminal status"): await store.update_task(task.task_id, status="working") # Also test transitioning to another terminal state other_terminal = "failed" if terminal_status != "failed" else "completed" with pytest.raises(ValueError, match="Cannot transition from terminal status"): await store.update_task(task.task_id, status=other_terminal) @pytest.mark.anyio async def test_terminal_status_allows_same_status(store: InMemoryTaskStore) -> None: """Test that setting the same terminal status doesn't raise. This is not a transition, so it should be allowed (no-op). 
""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) await store.update_task(task.task_id, status="completed") # Setting the same status should not raise updated = await store.update_task(task.task_id, status="completed") assert updated.status == "completed" # Updating just the message should also work updated = await store.update_task(task.task_id, status_message="Updated message") assert updated.status_message == "Updated message" @pytest.mark.anyio async def test_wait_for_update_nonexistent_raises(store: InMemoryTaskStore) -> None: """Test that wait_for_update raises for nonexistent task.""" with pytest.raises(ValueError, match="not found"): await store.wait_for_update("nonexistent-task-id") @pytest.mark.anyio async def test_cancel_task_succeeds_for_working_task(store: InMemoryTaskStore) -> None: """Test cancel_task helper succeeds for a working task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) assert task.status == "working" result = await cancel_task(store, task.task_id) assert result.task_id == task.task_id assert result.status == "cancelled" # Verify store is updated retrieved = await store.get_task(task.task_id) assert retrieved is not None assert retrieved.status == "cancelled" @pytest.mark.anyio async def test_cancel_task_rejects_nonexistent_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises MCPError with INVALID_PARAMS for nonexistent task.""" with pytest.raises(MCPError) as exc_info: await cancel_task(store, "nonexistent-task-id") assert exc_info.value.error.code == INVALID_PARAMS assert "not found" in exc_info.value.error.message @pytest.mark.anyio async def test_cancel_task_rejects_completed_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises MCPError with INVALID_PARAMS for completed task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) await store.update_task(task.task_id, status="completed") with pytest.raises(MCPError) as exc_info: await cancel_task(store, 
task.task_id) assert exc_info.value.error.code == INVALID_PARAMS assert "terminal state 'completed'" in exc_info.value.error.message @pytest.mark.anyio async def test_cancel_task_rejects_failed_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises MCPError with INVALID_PARAMS for failed task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) await store.update_task(task.task_id, status="failed") with pytest.raises(MCPError) as exc_info: await cancel_task(store, task.task_id) assert exc_info.value.error.code == INVALID_PARAMS assert "terminal state 'failed'" in exc_info.value.error.message @pytest.mark.anyio async def test_cancel_task_rejects_already_cancelled_task(store: InMemoryTaskStore) -> None: """Test cancel_task raises MCPError with INVALID_PARAMS for already cancelled task.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) await store.update_task(task.task_id, status="cancelled") with pytest.raises(MCPError) as exc_info: await cancel_task(store, task.task_id) assert exc_info.value.error.code == INVALID_PARAMS assert "terminal state 'cancelled'" in exc_info.value.error.message @pytest.mark.anyio async def test_cancel_task_succeeds_for_input_required_task(store: InMemoryTaskStore) -> None: """Test cancel_task helper succeeds for a task in input_required status.""" task = await store.create_task(metadata=TaskMetadata(ttl=60000)) await store.update_task(task.task_id, status="input_required") result = await cancel_task(store, task.task_id) assert result.task_id == task.task_id assert result.status == "cancelled"
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_store.py", "license": "MIT License", "lines": 292, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/server/test_task_result_handler.py
"""Tests for TaskResultHandler.""" from collections.abc import AsyncIterator from typing import Any from unittest.mock import AsyncMock, Mock import anyio import pytest from mcp.server.experimental.task_result_handler import TaskResultHandler from mcp.shared.exceptions import MCPError from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.shared.experimental.tasks.message_queue import InMemoryTaskMessageQueue, QueuedMessage from mcp.shared.experimental.tasks.resolver import Resolver from mcp.shared.message import SessionMessage from mcp.types import ( INVALID_REQUEST, CallToolResult, ErrorData, GetTaskPayloadRequest, GetTaskPayloadRequestParams, GetTaskPayloadResult, JSONRPCRequest, TaskMetadata, TextContent, ) @pytest.fixture async def store() -> AsyncIterator[InMemoryTaskStore]: """Provide a clean store for each test.""" s = InMemoryTaskStore() yield s s.cleanup() @pytest.fixture def queue() -> InMemoryTaskMessageQueue: """Provide a clean queue for each test.""" return InMemoryTaskMessageQueue() @pytest.fixture def handler(store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue) -> TaskResultHandler: """Provide a handler for each test.""" return TaskResultHandler(store, queue) @pytest.mark.anyio async def test_handle_returns_result_for_completed_task( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that handle() returns the stored result for a completed task.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") result = CallToolResult(content=[TextContent(type="text", text="Done!")]) await store.store_result(task.task_id, result) await store.update_task(task.task_id, status="completed") mock_session = Mock() mock_session.send_message = AsyncMock() request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) response = await handler.handle(request, mock_session, "req-1") assert response is not None assert 
response.meta is not None assert "io.modelcontextprotocol/related-task" in response.meta @pytest.mark.anyio async def test_handle_raises_for_nonexistent_task( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that handle() raises MCPError for nonexistent task.""" mock_session = Mock() request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id="nonexistent")) with pytest.raises(MCPError) as exc_info: await handler.handle(request, mock_session, "req-1") assert "not found" in exc_info.value.error.message @pytest.mark.anyio async def test_handle_returns_empty_result_when_no_result_stored( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that handle() returns minimal result when task completed without stored result.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") await store.update_task(task.task_id, status="completed") mock_session = Mock() mock_session.send_message = AsyncMock() request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) response = await handler.handle(request, mock_session, "req-1") assert response is not None assert response.meta is not None assert "io.modelcontextprotocol/related-task" in response.meta @pytest.mark.anyio async def test_handle_delivers_queued_messages( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that handle() delivers queued messages before returning.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") queued_msg = QueuedMessage( type="notification", message=JSONRPCRequest( jsonrpc="2.0", id="notif-1", method="test/notification", params={}, ), ) await queue.enqueue(task.task_id, queued_msg) await store.update_task(task.task_id, status="completed") sent_messages: list[SessionMessage] = [] async def track_send(msg: SessionMessage) -> None: 
sent_messages.append(msg) mock_session = Mock() mock_session.send_message = track_send request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) await handler.handle(request, mock_session, "req-1") assert len(sent_messages) == 1 @pytest.mark.anyio async def test_handle_waits_for_task_completion( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that handle() waits for task to complete before returning.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") mock_session = Mock() mock_session.send_message = AsyncMock() request = GetTaskPayloadRequest(params=GetTaskPayloadRequestParams(task_id=task.task_id)) result_holder: list[GetTaskPayloadResult | None] = [None] async def run_handle() -> None: result_holder[0] = await handler.handle(request, mock_session, "req-1") async with anyio.create_task_group() as tg: tg.start_soon(run_handle) # Wait for handler to start waiting (event gets created when wait starts) while task.task_id not in store._update_events: await anyio.sleep(0) await store.store_result(task.task_id, CallToolResult(content=[TextContent(type="text", text="Done")])) await store.update_task(task.task_id, status="completed") assert result_holder[0] is not None @pytest.mark.anyio async def test_route_response_resolves_pending_request( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that route_response() resolves a pending request.""" resolver: Resolver[dict[str, Any]] = Resolver() handler._pending_requests["req-123"] = resolver result = handler.route_response("req-123", {"status": "ok"}) assert result is True assert resolver.done() assert await resolver.wait() == {"status": "ok"} @pytest.mark.anyio async def test_route_response_returns_false_for_unknown_request( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that route_response() 
returns False for unknown request ID.""" result = handler.route_response("unknown-req", {"status": "ok"}) assert result is False @pytest.mark.anyio async def test_route_response_returns_false_for_already_done_resolver( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that route_response() returns False if resolver already completed.""" resolver: Resolver[dict[str, Any]] = Resolver() resolver.set_result({"already": "done"}) handler._pending_requests["req-123"] = resolver result = handler.route_response("req-123", {"new": "data"}) assert result is False @pytest.mark.anyio async def test_route_error_resolves_pending_request_with_exception( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that route_error() sets exception on pending request.""" resolver: Resolver[dict[str, Any]] = Resolver() handler._pending_requests["req-123"] = resolver error = ErrorData(code=INVALID_REQUEST, message="Something went wrong") result = handler.route_error("req-123", error) assert result is True assert resolver.done() with pytest.raises(MCPError) as exc_info: await resolver.wait() assert exc_info.value.error.message == "Something went wrong" @pytest.mark.anyio async def test_route_error_returns_false_for_unknown_request( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that route_error() returns False for unknown request ID.""" error = ErrorData(code=INVALID_REQUEST, message="Error") result = handler.route_error("unknown-req", error) assert result is False @pytest.mark.anyio async def test_deliver_registers_resolver_for_request_messages( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that _deliver_queued_messages registers resolvers for request messages.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") resolver: Resolver[dict[str, 
Any]] = Resolver() queued_msg = QueuedMessage( type="request", message=JSONRPCRequest( jsonrpc="2.0", id="inner-req-1", method="elicitation/create", params={}, ), resolver=resolver, original_request_id="inner-req-1", ) await queue.enqueue(task.task_id, queued_msg) mock_session = Mock() mock_session.send_message = AsyncMock() await handler._deliver_queued_messages(task.task_id, mock_session, "outer-req-1") assert "inner-req-1" in handler._pending_requests assert handler._pending_requests["inner-req-1"] is resolver @pytest.mark.anyio async def test_deliver_skips_resolver_registration_when_no_original_id( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that _deliver_queued_messages skips resolver registration when original_request_id is None.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") resolver: Resolver[dict[str, Any]] = Resolver() queued_msg = QueuedMessage( type="request", message=JSONRPCRequest( jsonrpc="2.0", id="inner-req-1", method="elicitation/create", params={}, ), resolver=resolver, original_request_id=None, # No original request ID ) await queue.enqueue(task.task_id, queued_msg) mock_session = Mock() mock_session.send_message = AsyncMock() await handler._deliver_queued_messages(task.task_id, mock_session, "outer-req-1") # Resolver should NOT be registered since original_request_id is None assert len(handler._pending_requests) == 0 # But the message should still be sent mock_session.send_message.assert_called_once() @pytest.mark.anyio async def test_wait_for_task_update_handles_store_exception( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that _wait_for_task_update handles store exception gracefully.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") # Make wait_for_update raise an exception async def failing_wait(task_id: str) -> None: raise RuntimeError("Store error") 
store.wait_for_update = failing_wait # type: ignore[method-assign] # Queue a message to unblock the race via the queue path async def enqueue_later() -> None: # Wait for queue to start waiting (event gets created when wait starts) while task.task_id not in queue._events: await anyio.sleep(0) await queue.enqueue( task.task_id, QueuedMessage( type="notification", message=JSONRPCRequest( jsonrpc="2.0", id="notif-1", method="test/notification", params={}, ), ), ) async with anyio.create_task_group() as tg: tg.start_soon(enqueue_later) # This should complete via the queue path even though store raises await handler._wait_for_task_update(task.task_id) @pytest.mark.anyio async def test_wait_for_task_update_handles_queue_exception( store: InMemoryTaskStore, queue: InMemoryTaskMessageQueue, handler: TaskResultHandler ) -> None: """Test that _wait_for_task_update handles queue exception gracefully.""" task = await store.create_task(TaskMetadata(ttl=60000), task_id="test-task") # Make wait_for_message raise an exception async def failing_wait(task_id: str) -> None: raise RuntimeError("Queue error") queue.wait_for_message = failing_wait # type: ignore[method-assign] # Update the store to unblock the race via the store path async def update_later() -> None: # Wait for store to start waiting (event gets created when wait starts) while task.task_id not in store._update_events: await anyio.sleep(0) await store.update_task(task.task_id, status="completed") async with anyio.create_task_group() as tg: tg.start_soon(update_later) # This should complete via the store path even though queue raises await handler._wait_for_task_update(task.task_id)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/server/test_task_result_handler.py", "license": "MIT License", "lines": 273, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/test_capabilities.py
"""Tests for tasks capability checking utilities.""" import pytest from mcp import MCPError from mcp.shared.experimental.tasks.capabilities import ( check_tasks_capability, has_task_augmented_elicitation, has_task_augmented_sampling, require_task_augmented_elicitation, require_task_augmented_sampling, ) from mcp.types import ( ClientCapabilities, ClientTasksCapability, ClientTasksRequestsCapability, TasksCreateElicitationCapability, TasksCreateMessageCapability, TasksElicitationCapability, TasksSamplingCapability, ) class TestCheckTasksCapability: """Tests for check_tasks_capability function.""" def test_required_requests_none_returns_true(self) -> None: """When required.requests is None, should return True.""" required = ClientTasksCapability() client = ClientTasksCapability() assert check_tasks_capability(required, client) is True def test_client_requests_none_returns_false(self) -> None: """When client.requests is None but required.requests is set, should return False.""" required = ClientTasksCapability(requests=ClientTasksRequestsCapability()) client = ClientTasksCapability() assert check_tasks_capability(required, client) is False def test_elicitation_required_but_client_missing(self) -> None: """When elicitation is required but client doesn't have it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability(elicitation=TasksElicitationCapability()) ) client = ClientTasksCapability(requests=ClientTasksRequestsCapability()) assert check_tasks_capability(required, client) is False def test_elicitation_create_required_but_client_missing(self) -> None: """When elicitation.create is required but client doesn't have it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability() # No create ) ) assert check_tasks_capability(required, 
client) is False def test_elicitation_create_present(self) -> None: """When elicitation.create is required and client has it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) assert check_tasks_capability(required, client) is True def test_sampling_required_but_client_missing(self) -> None: """When sampling is required but client doesn't have it.""" required = ClientTasksCapability(requests=ClientTasksRequestsCapability(sampling=TasksSamplingCapability())) client = ClientTasksCapability(requests=ClientTasksRequestsCapability()) assert check_tasks_capability(required, client) is False def test_sampling_create_message_required_but_client_missing(self) -> None: """When sampling.createMessage is required but client doesn't have it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability() # No createMessage ) ) assert check_tasks_capability(required, client) is False def test_sampling_create_message_present(self) -> None: """When sampling.createMessage is required and client has it.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) assert check_tasks_capability(required, client) is True def test_both_elicitation_and_sampling_present(self) -> None: """When both elicitation.create and sampling.createMessage are required and client has 
both.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()), sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()), ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()), sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()), ) ) assert check_tasks_capability(required, client) is True def test_elicitation_without_create_required(self) -> None: """When elicitation is required but not create specifically.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability() # No create ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) assert check_tasks_capability(required, client) is True def test_sampling_without_create_message_required(self) -> None: """When sampling is required but not createMessage specifically.""" required = ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability() # No createMessage ) ) client = ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) assert check_tasks_capability(required, client) is True class TestHasTaskAugmentedElicitation: """Tests for has_task_augmented_elicitation function.""" def test_tasks_none(self) -> None: """Returns False when caps.tasks is None.""" caps = ClientCapabilities() assert has_task_augmented_elicitation(caps) is False def test_requests_none(self) -> None: """Returns False when caps.tasks.requests is None.""" caps = ClientCapabilities(tasks=ClientTasksCapability()) assert has_task_augmented_elicitation(caps) is False def test_elicitation_none(self) -> None: 
"""Returns False when caps.tasks.requests.elicitation is None.""" caps = ClientCapabilities(tasks=ClientTasksCapability(requests=ClientTasksRequestsCapability())) assert has_task_augmented_elicitation(caps) is False def test_create_none(self) -> None: """Returns False when caps.tasks.requests.elicitation.create is None.""" caps = ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability(elicitation=TasksElicitationCapability()) ) ) assert has_task_augmented_elicitation(caps) is False def test_create_present(self) -> None: """Returns True when full capability path is present.""" caps = ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) ) assert has_task_augmented_elicitation(caps) is True class TestHasTaskAugmentedSampling: """Tests for has_task_augmented_sampling function.""" def test_tasks_none(self) -> None: """Returns False when caps.tasks is None.""" caps = ClientCapabilities() assert has_task_augmented_sampling(caps) is False def test_requests_none(self) -> None: """Returns False when caps.tasks.requests is None.""" caps = ClientCapabilities(tasks=ClientTasksCapability()) assert has_task_augmented_sampling(caps) is False def test_sampling_none(self) -> None: """Returns False when caps.tasks.requests.sampling is None.""" caps = ClientCapabilities(tasks=ClientTasksCapability(requests=ClientTasksRequestsCapability())) assert has_task_augmented_sampling(caps) is False def test_create_message_none(self) -> None: """Returns False when caps.tasks.requests.sampling.createMessage is None.""" caps = ClientCapabilities( tasks=ClientTasksCapability(requests=ClientTasksRequestsCapability(sampling=TasksSamplingCapability())) ) assert has_task_augmented_sampling(caps) is False def test_create_message_present(self) -> None: """Returns True when full capability path is present.""" caps = ClientCapabilities( 
tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) ) assert has_task_augmented_sampling(caps) is True class TestRequireTaskAugmentedElicitation: """Tests for require_task_augmented_elicitation function.""" def test_raises_when_none(self) -> None: """Raises MCPError when client_caps is None.""" with pytest.raises(MCPError) as exc_info: require_task_augmented_elicitation(None) assert "task-augmented elicitation" in str(exc_info.value) def test_raises_when_missing(self) -> None: """Raises MCPError when capability is missing.""" caps = ClientCapabilities() with pytest.raises(MCPError) as exc_info: require_task_augmented_elicitation(caps) assert "task-augmented elicitation" in str(exc_info.value) def test_passes_when_present(self) -> None: """Does not raise when capability is present.""" caps = ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( elicitation=TasksElicitationCapability(create=TasksCreateElicitationCapability()) ) ) ) require_task_augmented_elicitation(caps) class TestRequireTaskAugmentedSampling: """Tests for require_task_augmented_sampling function.""" def test_raises_when_none(self) -> None: """Raises MCPError when client_caps is None.""" with pytest.raises(MCPError) as exc_info: require_task_augmented_sampling(None) assert "task-augmented sampling" in str(exc_info.value) def test_raises_when_missing(self) -> None: """Raises MCPError when capability is missing.""" caps = ClientCapabilities() with pytest.raises(MCPError) as exc_info: require_task_augmented_sampling(caps) assert "task-augmented sampling" in str(exc_info.value) def test_passes_when_present(self) -> None: """Does not raise when capability is present.""" caps = ClientCapabilities( tasks=ClientTasksCapability( requests=ClientTasksRequestsCapability( sampling=TasksSamplingCapability(create_message=TasksCreateMessageCapability()) ) ) ) 
require_task_augmented_sampling(caps)
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/test_capabilities.py", "license": "MIT License", "lines": 244, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
modelcontextprotocol/python-sdk:tests/experimental/tasks/test_elicitation_scenarios.py
"""Tests for the four elicitation scenarios with tasks. This tests all combinations of tool call types and elicitation types: 1. Normal tool call + Normal elicitation (session.elicit) 2. Normal tool call + Task-augmented elicitation (session.experimental.elicit_as_task) 3. Task-augmented tool call + Normal elicitation (task.elicit) 4. Task-augmented tool call + Task-augmented elicitation (task.elicit_as_task) And the same for sampling (create_message). """ from typing import Any import anyio import pytest from anyio import Event from mcp.client.experimental.task_handlers import ExperimentalTaskHandlers from mcp.client.session import ClientSession from mcp.server import Server, ServerRequestContext from mcp.server.experimental.task_context import ServerTaskContext from mcp.server.lowlevel import NotificationOptions from mcp.shared._context import RequestContext from mcp.shared.experimental.tasks.helpers import is_terminal from mcp.shared.experimental.tasks.in_memory_task_store import InMemoryTaskStore from mcp.shared.message import SessionMessage from mcp.types import ( TASK_REQUIRED, CallToolRequestParams, CallToolResult, CreateMessageRequestParams, CreateMessageResult, CreateTaskResult, ElicitRequestParams, ElicitResult, ErrorData, GetTaskPayloadResult, GetTaskResult, ListToolsResult, PaginatedRequestParams, SamplingMessage, TaskMetadata, TextContent, Tool, ) def create_client_task_handlers( client_task_store: InMemoryTaskStore, elicit_received: Event, ) -> ExperimentalTaskHandlers: """Create task handlers for client to handle task-augmented elicitation from server.""" elicit_response = ElicitResult(action="accept", content={"confirm": True}) task_complete_events: dict[str, Event] = {} async def handle_augmented_elicitation( context: RequestContext[ClientSession], params: ElicitRequestParams, task_metadata: TaskMetadata, ) -> CreateTaskResult: """Handle task-augmented elicitation by creating a client-side task.""" elicit_received.set() task = await 
client_task_store.create_task(task_metadata) task_complete_events[task.task_id] = Event() async def complete_task() -> None: # Store result before updating status to avoid race condition await client_task_store.store_result(task.task_id, elicit_response) await client_task_store.update_task(task.task_id, status="completed") task_complete_events[task.task_id].set() context.session._task_group.start_soon(complete_task) # pyright: ignore[reportPrivateUsage] return CreateTaskResult(task=task) async def handle_get_task( context: RequestContext[ClientSession], params: Any, ) -> GetTaskResult: """Handle tasks/get from server.""" task = await client_task_store.get_task(params.task_id) assert task is not None, f"Task not found: {params.task_id}" return GetTaskResult( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=100, ) async def handle_get_task_result( context: RequestContext[ClientSession], params: Any, ) -> GetTaskPayloadResult | ErrorData: """Handle tasks/result from server.""" event = task_complete_events.get(params.task_id) assert event is not None, f"No completion event for task: {params.task_id}" await event.wait() result = await client_task_store.get_result(params.task_id) assert result is not None, f"Result not found for task: {params.task_id}" return GetTaskPayloadResult.model_validate(result.model_dump(by_alias=True)) return ExperimentalTaskHandlers( augmented_elicitation=handle_augmented_elicitation, get_task=handle_get_task, get_task_result=handle_get_task_result, ) def create_sampling_task_handlers( client_task_store: InMemoryTaskStore, sampling_received: Event, ) -> ExperimentalTaskHandlers: """Create task handlers for client to handle task-augmented sampling from server.""" sampling_response = CreateMessageResult( role="assistant", content=TextContent(type="text", text="Hello from the model!"), model="test-model", ) 
task_complete_events: dict[str, Event] = {} async def handle_augmented_sampling( context: RequestContext[ClientSession], params: CreateMessageRequestParams, task_metadata: TaskMetadata, ) -> CreateTaskResult: """Handle task-augmented sampling by creating a client-side task.""" sampling_received.set() task = await client_task_store.create_task(task_metadata) task_complete_events[task.task_id] = Event() async def complete_task() -> None: # Store result before updating status to avoid race condition await client_task_store.store_result(task.task_id, sampling_response) await client_task_store.update_task(task.task_id, status="completed") task_complete_events[task.task_id].set() context.session._task_group.start_soon(complete_task) # pyright: ignore[reportPrivateUsage] return CreateTaskResult(task=task) async def handle_get_task( context: RequestContext[ClientSession], params: Any, ) -> GetTaskResult: """Handle tasks/get from server.""" task = await client_task_store.get_task(params.task_id) assert task is not None, f"Task not found: {params.task_id}" return GetTaskResult( task_id=task.task_id, status=task.status, status_message=task.status_message, created_at=task.created_at, last_updated_at=task.last_updated_at, ttl=task.ttl, poll_interval=100, ) async def handle_get_task_result( context: RequestContext[ClientSession], params: Any, ) -> GetTaskPayloadResult | ErrorData: """Handle tasks/result from server.""" event = task_complete_events.get(params.task_id) assert event is not None, f"No completion event for task: {params.task_id}" await event.wait() result = await client_task_store.get_result(params.task_id) assert result is not None, f"Result not found for task: {params.task_id}" return GetTaskPayloadResult.model_validate(result.model_dump(by_alias=True)) return ExperimentalTaskHandlers( augmented_sampling=handle_augmented_sampling, get_task=handle_get_task, get_task_result=handle_get_task_result, ) @pytest.mark.anyio async def 
test_scenario1_normal_tool_normal_elicitation() -> None: """Scenario 1: Normal tool call with normal elicitation. Server calls session.elicit() directly, client responds immediately. """ elicit_received = Event() tool_result: list[str] = [] async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: return ListToolsResult( tools=[ Tool( name="confirm_action", description="Confirm an action", input_schema={"type": "object"}, ) ] ) async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult: # Normal elicitation - expects immediate response result = await ctx.session.elicit( message="Please confirm the action", requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ) confirmed = result.content.get("confirm", False) if result.content else False tool_result.append("confirmed" if confirmed else "cancelled") return CallToolResult(content=[TextContent(type="text", text="confirmed" if confirmed else "cancelled")]) server = Server("test-scenario1", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) # Elicitation callback for client async def elicitation_callback( context: RequestContext[ClientSession], params: ElicitRequestParams, ) -> ElicitResult: elicit_received.set() return ElicitResult(action="accept", content={"confirm": True}) # Set up streams server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def run_server() -> None: await server.run( client_to_server_receive, server_to_client_send, server.create_initialization_options( notification_options=NotificationOptions(), experimental_capabilities={}, ), ) async def run_client() -> None: async with ClientSession( server_to_client_receive, client_to_server_send, elicitation_callback=elicitation_callback, ) as client_session: 
await client_session.initialize() # Call tool normally (not as task) result = await client_session.call_tool("confirm_action", {}) # Verify elicitation was received and tool completed assert elicit_received.is_set() assert len(result.content) > 0 assert isinstance(result.content[0], TextContent) assert result.content[0].text == "confirmed" async with anyio.create_task_group() as tg: tg.start_soon(run_server) tg.start_soon(run_client) assert tool_result[0] == "confirmed" @pytest.mark.anyio async def test_scenario2_normal_tool_task_augmented_elicitation() -> None: """Scenario 2: Normal tool call with task-augmented elicitation. Server calls session.experimental.elicit_as_task(), client creates a task for the elicitation and returns CreateTaskResult. Server polls client. """ elicit_received = Event() tool_result: list[str] = [] # Client-side task store for handling task-augmented elicitation client_task_store = InMemoryTaskStore() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: return ListToolsResult( tools=[ Tool( name="confirm_action", description="Confirm an action", input_schema={"type": "object"}, ) ] ) async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult: # Task-augmented elicitation - server polls client result = await ctx.session.experimental.elicit_as_task( message="Please confirm the action", requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ttl=60000, ) confirmed = result.content.get("confirm", False) if result.content else False tool_result.append("confirmed" if confirmed else "cancelled") return CallToolResult(content=[TextContent(type="text", text="confirmed" if confirmed else "cancelled")]) server = Server("test-scenario2", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) task_handlers = create_client_task_handlers(client_task_store, elicit_received) # Set up streams server_to_client_send, 
server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def run_server() -> None: await server.run( client_to_server_receive, server_to_client_send, server.create_initialization_options( notification_options=NotificationOptions(), experimental_capabilities={}, ), ) async def run_client() -> None: async with ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as client_session: await client_session.initialize() # Call tool normally (not as task) result = await client_session.call_tool("confirm_action", {}) # Verify elicitation was received and tool completed assert elicit_received.is_set() assert len(result.content) > 0 assert isinstance(result.content[0], TextContent) assert result.content[0].text == "confirmed" async with anyio.create_task_group() as tg: tg.start_soon(run_server) tg.start_soon(run_client) assert tool_result[0] == "confirmed" client_task_store.cleanup() @pytest.mark.anyio async def test_scenario3_task_augmented_tool_normal_elicitation() -> None: """Scenario 3: Task-augmented tool call with normal elicitation. Client calls tool as task. Inside the task, server uses task.elicit() which queues the request and delivers via tasks/result. 
""" elicit_received = Event() work_completed = Event() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: # Normal elicitation within task - queued and delivered via tasks/result result = await task.elicit( message="Please confirm the action", requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ) confirmed = result.content.get("confirm", False) if result.content else False work_completed.set() return CallToolResult(content=[TextContent(type="text", text="confirmed" if confirmed else "cancelled")]) return await ctx.experimental.run_task(work) server = Server("test-scenario3", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) server.experimental.enable_tasks() # Elicitation callback for client async def elicitation_callback( context: RequestContext[ClientSession], params: ElicitRequestParams, ) -> ElicitResult: elicit_received.set() return ElicitResult(action="accept", content={"confirm": True}) # Set up streams server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def run_server() -> None: await server.run( client_to_server_receive, server_to_client_send, server.create_initialization_options( notification_options=NotificationOptions(), experimental_capabilities={}, ), ) async def run_client() -> None: async with ClientSession( server_to_client_receive, client_to_server_send, elicitation_callback=elicitation_callback, ) as client_session: await client_session.initialize() # Call tool as task create_result = await 
client_session.experimental.call_tool_as_task("confirm_action", {}) task_id = create_result.task.task_id assert create_result.task.status == "working" # Poll until input_required, then call tasks/result found_input_required = False async for status in client_session.experimental.poll_task(task_id): # pragma: no branch if status.status == "input_required": # pragma: no branch found_input_required = True break assert found_input_required, "Expected to see input_required status" # This will deliver the elicitation and get the response final_result = await client_session.experimental.get_task_result(task_id, CallToolResult) # Verify assert elicit_received.is_set() assert len(final_result.content) > 0 assert isinstance(final_result.content[0], TextContent) assert final_result.content[0].text == "confirmed" async with anyio.create_task_group() as tg: tg.start_soon(run_server) tg.start_soon(run_client) assert work_completed.is_set() @pytest.mark.anyio async def test_scenario4_task_augmented_tool_task_augmented_elicitation() -> None: """Scenario 4: Task-augmented tool call with task-augmented elicitation. Client calls tool as task. Inside the task, server uses task.elicit_as_task() which sends task-augmented elicitation. Client creates its own task for the elicitation, and server polls the client. This tests the full bidirectional flow where: 1. Client calls tasks/result on server (for tool task) 2. Server delivers task-augmented elicitation through that stream 3. Client creates its own task and returns CreateTaskResult 4. Server polls the client's task while the client's tasks/result is still open 5. Server gets the ElicitResult and completes the tool task 6. 
Client's tasks/result returns with the CallToolResult """ elicit_received = Event() work_completed = Event() # Client-side task store for handling task-augmented elicitation client_task_store = InMemoryTaskStore() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: # Task-augmented elicitation within task - server polls client result = await task.elicit_as_task( message="Please confirm the action", requested_schema={"type": "object", "properties": {"confirm": {"type": "boolean"}}}, ttl=60000, ) confirmed = result.content.get("confirm", False) if result.content else False work_completed.set() return CallToolResult(content=[TextContent(type="text", text="confirmed" if confirmed else "cancelled")]) return await ctx.experimental.run_task(work) server = Server("test-scenario4", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) server.experimental.enable_tasks() task_handlers = create_client_task_handlers(client_task_store, elicit_received) # Set up streams server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def run_server() -> None: await server.run( client_to_server_receive, server_to_client_send, server.create_initialization_options( notification_options=NotificationOptions(), experimental_capabilities={}, ), ) async def run_client() -> None: async with ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as client_session: await client_session.initialize() # Call tool as task create_result = await 
client_session.experimental.call_tool_as_task("confirm_action", {}) task_id = create_result.task.task_id assert create_result.task.status == "working" # Poll until input_required or terminal, then call tasks/result found_expected_status = False async for status in client_session.experimental.poll_task(task_id): # pragma: no branch if status.status == "input_required" or is_terminal(status.status): # pragma: no branch found_expected_status = True break assert found_expected_status, "Expected to see input_required or terminal status" # This will deliver the task-augmented elicitation, # server will poll client, and eventually return the tool result final_result = await client_session.experimental.get_task_result(task_id, CallToolResult) # Verify assert elicit_received.is_set() assert len(final_result.content) > 0 assert isinstance(final_result.content[0], TextContent) assert final_result.content[0].text == "confirmed" async with anyio.create_task_group() as tg: tg.start_soon(run_server) tg.start_soon(run_client) assert work_completed.is_set() client_task_store.cleanup() @pytest.mark.anyio async def test_scenario2_sampling_normal_tool_task_augmented_sampling() -> None: """Scenario 2 for sampling: Normal tool call with task-augmented sampling. Server calls session.experimental.create_message_as_task(), client creates a task for the sampling and returns CreateTaskResult. Server polls client. 
""" sampling_received = Event() tool_result: list[str] = [] # Client-side task store for handling task-augmented sampling client_task_store = InMemoryTaskStore() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: return ListToolsResult( tools=[ Tool( name="generate_text", description="Generate text using sampling", input_schema={"type": "object"}, ) ] ) async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult: # Task-augmented sampling - server polls client result = await ctx.session.experimental.create_message_as_task( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], max_tokens=100, ttl=60000, ) assert isinstance(result.content, TextContent), "Expected TextContent response" response_text = result.content.text tool_result.append(response_text) return CallToolResult(content=[TextContent(type="text", text=response_text)]) server = Server("test-scenario2-sampling", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) task_handlers = create_sampling_task_handlers(client_task_store, sampling_received) # Set up streams server_to_client_send, server_to_client_receive = anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def run_server() -> None: await server.run( client_to_server_receive, server_to_client_send, server.create_initialization_options( notification_options=NotificationOptions(), experimental_capabilities={}, ), ) async def run_client() -> None: async with ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as client_session: await client_session.initialize() # Call tool normally (not as task) result = await client_session.call_tool("generate_text", {}) # Verify sampling was received and tool completed assert sampling_received.is_set() 
assert len(result.content) > 0 assert isinstance(result.content[0], TextContent) assert result.content[0].text == "Hello from the model!" async with anyio.create_task_group() as tg: tg.start_soon(run_server) tg.start_soon(run_client) assert tool_result[0] == "Hello from the model!" client_task_store.cleanup() @pytest.mark.anyio async def test_scenario4_sampling_task_augmented_tool_task_augmented_sampling() -> None: """Scenario 4 for sampling: Task-augmented tool call with task-augmented sampling. Client calls tool as task. Inside the task, server uses task.create_message_as_task() which sends task-augmented sampling. Client creates its own task for the sampling, and server polls the client. """ sampling_received = Event() work_completed = Event() # Client-side task store for handling task-augmented sampling client_task_store = InMemoryTaskStore() async def handle_list_tools(ctx: ServerRequestContext, params: PaginatedRequestParams | None) -> ListToolsResult: raise NotImplementedError async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CreateTaskResult: ctx.experimental.validate_task_mode(TASK_REQUIRED) async def work(task: ServerTaskContext) -> CallToolResult: # Task-augmented sampling within task - server polls client result = await task.create_message_as_task( messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))], max_tokens=100, ttl=60000, ) assert isinstance(result.content, TextContent), "Expected TextContent response" response_text = result.content.text work_completed.set() return CallToolResult(content=[TextContent(type="text", text=response_text)]) return await ctx.experimental.run_task(work) server = Server("test-scenario4-sampling", on_list_tools=handle_list_tools, on_call_tool=handle_call_tool) server.experimental.enable_tasks() task_handlers = create_sampling_task_handlers(client_task_store, sampling_received) # Set up streams server_to_client_send, server_to_client_receive = 
anyio.create_memory_object_stream[SessionMessage](10) client_to_server_send, client_to_server_receive = anyio.create_memory_object_stream[SessionMessage](10) async def run_server() -> None: await server.run( client_to_server_receive, server_to_client_send, server.create_initialization_options( notification_options=NotificationOptions(), experimental_capabilities={}, ), ) async def run_client() -> None: async with ClientSession( server_to_client_receive, client_to_server_send, experimental_task_handlers=task_handlers, ) as client_session: await client_session.initialize() # Call tool as task create_result = await client_session.experimental.call_tool_as_task("generate_text", {}) task_id = create_result.task.task_id assert create_result.task.status == "working" # Poll until input_required or terminal found_expected_status = False async for status in client_session.experimental.poll_task(task_id): # pragma: no branch if status.status == "input_required" or is_terminal(status.status): # pragma: no branch found_expected_status = True break assert found_expected_status, "Expected to see input_required or terminal status" final_result = await client_session.experimental.get_task_result(task_id, CallToolResult) # Verify assert sampling_received.is_set() assert len(final_result.content) > 0 assert isinstance(final_result.content[0], TextContent) assert final_result.content[0].text == "Hello from the model!" async with anyio.create_task_group() as tg: tg.start_soon(run_server) tg.start_soon(run_client) assert work_completed.is_set() client_task_store.cleanup()
{ "repo_id": "modelcontextprotocol/python-sdk", "file_path": "tests/experimental/tasks/test_elicitation_scenarios.py", "license": "MIT License", "lines": 567, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test