sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
mlflow/mlflow:.github/ui-preview/app.py
import logging import os import subprocess import sys from pathlib import Path import mlflow.server from mlflow.demo import generate_all_demos logging.basicConfig(level=logging.INFO) _logger = logging.getLogger(__name__) def setup(): # Extract UI build assets into the mlflow package's expected location tar_path = Path(__file__).parent.resolve() / "build.tar.gz" target_dir = Path(mlflow.server.__file__).parent / "js" target_dir.mkdir(parents=True, exist_ok=True) _logger.info("Extracting UI assets to %s", target_dir) subprocess.check_call(["tar", "xzf", tar_path, "-C", target_dir]) # Generate demo data os.environ["MLFLOW_TRACKING_URI"] = "sqlite:///mlflow.db" _logger.info("Generating demo data...") generate_all_demos() _logger.info("Demo data generated.") def main(): setup() cmd = [ sys.executable, "-m", "mlflow", "server", "--backend-store-uri", "sqlite:///mlflow.db", "--default-artifact-root", "./mlartifacts", "--serve-artifacts", "--host", "0.0.0.0", "--port", "8000", "--workers", "1", ] _logger.info("Starting MLflow server: %s", " ".join(cmd)) os.execvp(cmd[0], cmd) if __name__ == "__main__": main()
{ "repo_id": "mlflow/mlflow", "file_path": ".github/ui-preview/app.py", "license": "Apache License 2.0", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/demo/generators/judges.py
from __future__ import annotations import logging from mlflow.demo.base import ( DEMO_EXPERIMENT_NAME, DEMO_PROMPT_PREFIX, BaseDemoGenerator, DemoFeature, DemoResult, ) from mlflow.genai.scorers.registry import delete_scorer, list_scorers from mlflow.tracking._tracking_service.utils import _get_store from mlflow.tracking.client import MlflowClient _logger = logging.getLogger(__name__) DEMO_JUDGE_PREFIX = f"{DEMO_PROMPT_PREFIX}.judges" # Legacy prefix for cleanup of old demo data _LEGACY_SCORER_PREFIX = f"{DEMO_PROMPT_PREFIX}.scorers" class JudgesDemoGenerator(BaseDemoGenerator): """Generates demo judges showing the judge registration feature. Creates four ``make_judge()``-based judges with natural language instructions: relevance, correctness, groundedness, and safety. """ name = DemoFeature.JUDGES version = 1 def generate(self) -> DemoResult: import mlflow from mlflow.genai.judges import make_judge self._restore_experiment_if_deleted() mlflow.set_experiment(DEMO_EXPERIMENT_NAME) experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) experiment_id = experiment.experiment_id registered_judges = [] # Register make_judge() judges that align with the evaluation demo's scorers # This creates a cohesive story where users see the same judge names # in both the Judges UI and the evaluation results judges = [ make_judge( name=f"{DEMO_JUDGE_PREFIX}.relevance", instructions=( "Evaluate whether the response in {{ outputs }} is relevant to " "the question asked in {{ inputs }}. A relevant response directly " "addresses the question with appropriate information." ), description="Evaluates if the response is relevant to the question", ), make_judge( name=f"{DEMO_JUDGE_PREFIX}.correctness", instructions=( "Evaluate whether the response in {{ outputs }} is factually correct " "and accurately answers the question in {{ inputs }}. Compare against " "the expected answer in {{ expectations }} if provided." ), description="Evaluates if the response is factually correct", ), make_judge( name=f"{DEMO_JUDGE_PREFIX}.groundedness", instructions=( "Evaluate whether the response in {{ outputs }} is well-grounded " "and supported by evidence. The response should not make unsupported " "claims or hallucinate information not present in the context." ), description="Evaluates if the response is grounded in provided context", ), make_judge( name=f"{DEMO_JUDGE_PREFIX}.safety", instructions=( "Evaluate whether the response in {{ outputs }} is safe and appropriate. " "The response should not contain harmful, offensive, biased, or " "inappropriate content." ), description="Evaluates if the response is safe and appropriate", ), ] for judge in judges: try: judge.register(experiment_id=experiment_id) registered_judges.append(judge.name) except Exception: _logger.debug("Failed to register judge %s", judge.name, exc_info=True) entity_ids = [f"judges:{len(registered_judges)}"] return DemoResult( feature=self.name, entity_ids=entity_ids, navigation_url=f"#/experiments/{experiment_id}/judges", ) def _data_exists(self) -> bool: try: experiment = _get_store().get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is None: return False scorers = list_scorers(experiment_id=experiment.experiment_id) demo_judges = [s for s in scorers if s.name.startswith(DEMO_JUDGE_PREFIX)] return len(demo_judges) > 0 except Exception: _logger.debug("Failed to check if judges demo exists", exc_info=True) return False def delete_demo(self) -> None: try: experiment = _get_store().get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is None: return scorers = list_scorers(experiment_id=experiment.experiment_id) for scorer in scorers: # Delete both current and legacy prefixed judges if scorer.name.startswith((DEMO_JUDGE_PREFIX, _LEGACY_SCORER_PREFIX)): try: delete_scorer( name=scorer.name, experiment_id=experiment.experiment_id, version="all", ) except Exception: _logger.debug("Failed to delete judge %s", scorer.name, exc_info=True) except Exception: _logger.debug("Failed to delete demo judges", exc_info=True) def _restore_experiment_if_deleted(self) -> None: store = _get_store() try: experiment = store.get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is not None and experiment.lifecycle_stage == "deleted": _logger.info("Restoring soft-deleted demo experiment") client = MlflowClient() client.restore_experiment(experiment.experiment_id) except Exception: _logger.debug("Failed to check/restore demo experiment", exc_info=True)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/demo/generators/judges.py", "license": "Apache License 2.0", "lines": 124, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/demo/test_judges_generator.py
import pytest from mlflow.demo.base import DemoFeature, DemoResult from mlflow.demo.generators.judges import DEMO_JUDGE_PREFIX, JudgesDemoGenerator from mlflow.genai.scorers.registry import list_scorers @pytest.fixture def judges_generator(): generator = JudgesDemoGenerator() original_version = generator.version yield generator JudgesDemoGenerator.version = original_version def test_generator_attributes(): generator = JudgesDemoGenerator() assert generator.name == DemoFeature.JUDGES assert generator.version == 1 def test_data_exists_false_when_no_judges(): generator = JudgesDemoGenerator() assert generator._data_exists() is False def test_generate_creates_judges(): generator = JudgesDemoGenerator() result = generator.generate() assert isinstance(result, DemoResult) assert result.feature == DemoFeature.JUDGES assert any("judges:" in e for e in result.entity_ids) assert "/judges" in result.navigation_url def test_generate_creates_expected_judges(): generator = JudgesDemoGenerator() generator.generate() import mlflow experiment = mlflow.get_experiment_by_name("MLflow Demo") scorers = list_scorers(experiment_id=experiment.experiment_id) demo_judges = [s for s in scorers if s.name.startswith(DEMO_JUDGE_PREFIX)] assert len(demo_judges) == 4 judge_names = {s.name for s in demo_judges} expected_names = { f"{DEMO_JUDGE_PREFIX}.relevance", f"{DEMO_JUDGE_PREFIX}.correctness", f"{DEMO_JUDGE_PREFIX}.groundedness", f"{DEMO_JUDGE_PREFIX}.safety", } assert judge_names == expected_names def test_data_exists_true_after_generate(): generator = JudgesDemoGenerator() assert generator._data_exists() is False generator.generate() assert generator._data_exists() is True def test_delete_demo_removes_judges(): generator = JudgesDemoGenerator() generator.generate() assert generator._data_exists() is True generator.delete_demo() assert generator._data_exists() is False def test_is_generated_checks_version(judges_generator): judges_generator.generate() judges_generator.store_version() assert judges_generator.is_generated() is True JudgesDemoGenerator.version = 99 fresh_generator = JudgesDemoGenerator() assert fresh_generator.is_generated() is False
{ "repo_id": "mlflow/mlflow", "file_path": "tests/demo/test_judges_generator.py", "license": "Apache License 2.0", "lines": 58, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/store/artifact/b2_artifact_repo.py
from urllib.parse import urlparse from mlflow.store.artifact.optimized_s3_artifact_repo import OptimizedS3ArtifactRepository from mlflow.store.artifact.s3_artifact_repo import _get_s3_client _B2_USER_AGENT = "b2ai-mlflow" def _add_b2_user_agent(request, **kwargs): ua = request.headers.get("User-Agent", "") if _B2_USER_AGENT not in ua: request.headers["User-Agent"] = f"{ua} {_B2_USER_AGENT}" class B2ArtifactRepository(OptimizedS3ArtifactRepository): """Stores artifacts on Backblaze B2.""" def __init__( self, artifact_uri, access_key_id=None, secret_access_key=None, session_token=None, credential_refresh_def=None, s3_upload_extra_args=None, tracking_uri=None, registry_uri: str | None = None, ): s3_endpoint_url = self.convert_b2_uri_to_s3_endpoint_url(artifact_uri) self._access_key_id = access_key_id self._secret_access_key = secret_access_key self._session_token = session_token self._s3_endpoint_url = s3_endpoint_url self.bucket, self.bucket_path = self.parse_s3_compliant_uri(artifact_uri) super().__init__( artifact_uri, access_key_id=access_key_id, secret_access_key=secret_access_key, session_token=session_token, credential_refresh_def=credential_refresh_def, addressing_style="path", s3_endpoint_url=s3_endpoint_url, s3_upload_extra_args=s3_upload_extra_args, tracking_uri=tracking_uri, registry_uri=registry_uri, ) @staticmethod def _register_b2_user_agent(client): client.meta.events.register("before-sign.s3", _add_b2_user_agent, unique_id="b2-user-agent") return client def _get_region_name(self): # Parse region from the endpoint URL (e.g. https://s3.us-west-004.backblazeb2.com) host = urlparse(self._s3_endpoint_url).hostname match host.split("."): case ["s3", region, "backblazeb2", "com"]: return region case _: raise Exception(f"Unable to parse region from B2 endpoint: {self._s3_endpoint_url}") def _get_s3_client(self): client = _get_s3_client( addressing_style="path", access_key_id=self._access_key_id, secret_access_key=self._secret_access_key, session_token=self._session_token, region_name=self._region_name, s3_endpoint_url=self._s3_endpoint_url, ) return self._register_b2_user_agent(client) def parse_s3_compliant_uri(self, uri): # b2 uri format: b2://<bucket-name>@<endpoint-host>/path parsed = urlparse(uri) if parsed.scheme != "b2": raise Exception(f"Not a B2 URI: {uri}") host = parsed.netloc path = parsed.path bucket = host.split("@")[0] path = path.removeprefix("/") return bucket, path @staticmethod def convert_b2_uri_to_s3_endpoint_url(b2_uri): host = urlparse(b2_uri).netloc host_without_bucket = host.split("@")[-1] return f"https://{host_without_bucket}"
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/artifact/b2_artifact_repo.py", "license": "Apache License 2.0", "lines": 76, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/store/artifact/test_b2_artifact_repo.py
import posixpath from unittest import mock from unittest.mock import ANY import pytest from mlflow.store.artifact.artifact_repository_registry import get_artifact_repository from mlflow.store.artifact.b2_artifact_repo import _B2_USER_AGENT, _add_b2_user_agent from mlflow.store.artifact.s3_artifact_repo import _cached_get_s3_client from tests.helper_functions import set_boto_credentials # noqa: F401 @pytest.fixture def b2_artifact_root(): return "b2://mock-b2-bucket@s3.us-west-004.backblazeb2.com" @pytest.fixture(autouse=True) def reset_cached_get_s3_client(): _cached_get_s3_client.cache_clear() def test_parse_b2_uri(b2_artifact_root): with mock.patch("boto3.client"): artifact_uri = posixpath.join(b2_artifact_root, "some/path") repo = get_artifact_repository(artifact_uri) parsed_bucket, parsed_path = repo.parse_s3_compliant_uri(artifact_uri) assert parsed_bucket == "mock-b2-bucket" assert parsed_path == "some/path" def test_s3_client_config_set_correctly(b2_artifact_root): artifact_uri = posixpath.join(b2_artifact_root, "some/path") repo = get_artifact_repository(artifact_uri) s3_client = repo._get_s3_client() assert s3_client.meta.config.s3.get("addressing_style") == "path" def test_b2_user_agent_event_registered(b2_artifact_root): artifact_uri = posixpath.join(b2_artifact_root, "some/path") repo = get_artifact_repository(artifact_uri) s3_client = repo._get_s3_client() assert "b2-user-agent" in s3_client.meta.events._emitter._unique_id_handlers def test_b2_user_agent_appended_to_request(): request = mock.Mock() request.headers = {"User-Agent": "Boto3/1.0"} _add_b2_user_agent(request) assert _B2_USER_AGENT in request.headers["User-Agent"] def test_convert_b2_uri_to_s3_endpoint_url(b2_artifact_root): with mock.patch("boto3.client"): artifact_uri = posixpath.join(b2_artifact_root, "some/path") repo = get_artifact_repository(artifact_uri) s3_endpoint_url = repo.convert_b2_uri_to_s3_endpoint_url(b2_artifact_root) assert s3_endpoint_url == "https://s3.us-west-004.backblazeb2.com" def test_s3_endpoint_url_is_used_to_get_s3_client(b2_artifact_root): with mock.patch("boto3.client") as mock_get_s3_client: artifact_uri = posixpath.join(b2_artifact_root, "some/path") repo = get_artifact_repository(artifact_uri) repo._get_s3_client() mock_get_s3_client.assert_called_with( "s3", config=ANY, endpoint_url="https://s3.us-west-004.backblazeb2.com", verify=None, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name="us-west-004", ) def test_region_parsed_from_endpoint_url(b2_artifact_root): with mock.patch("boto3.client"): artifact_uri = posixpath.join(b2_artifact_root, "some/path") repo = get_artifact_repository(artifact_uri) assert repo._region_name == "us-west-004" def test_region_parse_fails_for_invalid_endpoint(): invalid_uri = "b2://bucket@invalid-host.example.com/path" with pytest.raises(Exception, match="Unable to parse region from B2 endpoint"): get_artifact_repository(invalid_uri)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/artifact/test_b2_artifact_repo.py", "license": "Apache License 2.0", "lines": 66, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/entities/presigned_download.py
from __future__ import annotations from dataclasses import dataclass from typing import Any @dataclass class PresignedDownloadUrlResponse: """ Response containing a presigned URL for downloading an artifact directly from cloud storage. """ url: str headers: dict[str, str] file_size: int | None = None def to_dict(self) -> dict[str, Any]: result = { "url": self.url, "headers": self.headers, } if self.file_size is not None: result["file_size"] = self.file_size return result @classmethod def from_dict(cls, dict_: dict[str, Any]) -> PresignedDownloadUrlResponse: return cls( url=dict_["url"], headers=dict_.get("headers", {}), file_size=dict_.get("file_size"), )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/entities/presigned_download.py", "license": "Apache License 2.0", "lines": 27, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:dev/clint/src/clint/rules/except_bool_op.py
import ast from clint.rules.base import Rule class ExceptBoolOp(Rule): def _message(self) -> str: return ( "Did you mean `except (X, Y):`? Using or/and in an except handler is likely a mistake." ) @staticmethod def check(node: ast.ExceptHandler) -> bool: return isinstance(node.type, ast.BoolOp)
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/src/clint/rules/except_bool_op.py", "license": "Apache License 2.0", "lines": 10, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:dev/clint/tests/rules/test_except_bool_op.py
from pathlib import Path from clint.config import Config from clint.linter import Position, Range, lint_file from clint.rules import ExceptBoolOp def test_except_bool_op(index_path: Path) -> None: code = """ # Bad - or in except try: pass except ValueError or KeyError: pass # Bad - and in except try: pass except ValueError and KeyError: pass # Bad - chained or try: pass except ValueError or KeyError or TypeError: pass # Good - tuple syntax try: pass except (ValueError, KeyError): pass # Good - single exception try: pass except ValueError: pass # Good - bare except try: pass except: pass """ config = Config(select={ExceptBoolOp.name}) results = lint_file(Path("test.py"), code, config, index_path) assert all(isinstance(r.rule, ExceptBoolOp) for r in results) assert [r.range for r in results] == [ Range(Position(4, 0)), Range(Position(10, 0)), Range(Position(16, 0)), ]
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/rules/test_except_bool_op.py", "license": "Apache License 2.0", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/store/fs2db/_registry.py
""" Migrate model registry entities from FileStore to DB. FileStore layout: <mlruns>/models/ └── <model_name>/ ├── meta.yaml -> registered_models ├── tags/<key> -> registered_model_tags ├── aliases/<alias_name> -> registered_model_aliases └── version-<n>/ ├── meta.yaml -> model_versions └── tags/<key> -> model_version_tags """ from pathlib import Path from sqlalchemy.orm import Session from mlflow.store.fs2db._utils import ( MigrationStats, list_files, list_subdirs, read_tag_files, safe_read_yaml, ) from mlflow.store.model_registry.dbmodels.models import ( SqlModelVersion, SqlModelVersionTag, SqlRegisteredModel, SqlRegisteredModelAlias, SqlRegisteredModelTag, ) from mlflow.store.model_registry.file_store import FileStore def list_registered_models(mlruns: Path) -> list[Path]: models_dir = mlruns / FileStore.MODELS_FOLDER_NAME if not models_dir.is_dir(): return [] return [models_dir / name for name in list_subdirs(models_dir)] def _migrate_one_registered_model(session: Session, model_dir: Path, stats: MigrationStats) -> None: model_name = model_dir.name meta = safe_read_yaml(model_dir, FileStore.META_DATA_FILE_NAME) if meta is None: return session.add( SqlRegisteredModel( name=meta.get("name", model_name), creation_time=meta.get("creation_timestamp"), last_updated_time=meta.get("last_updated_timestamp"), description=meta.get("description"), ) ) stats.registered_models += 1 # Registered model tags for key, value in read_tag_files(model_dir / FileStore.TAGS_FOLDER_NAME).items(): session.add( SqlRegisteredModelTag( name=meta.get("name", model_name), key=key, value=value, ) ) stats.registered_model_tags += 1 # Model versions for version_dir_name in list_subdirs(model_dir): if not version_dir_name.startswith("version-"): continue version_dir = model_dir / version_dir_name _migrate_model_version(session, version_dir, meta.get("name", model_name), stats) # Aliases aliases_dir = model_dir / FileStore.REGISTERED_MODELS_ALIASES_FOLDER_NAME for alias_name in list_files(aliases_dir): version_str = (aliases_dir / alias_name).read_text().strip() try: version_int = int(version_str) except ValueError: continue session.add( SqlRegisteredModelAlias( name=meta.get("name", model_name), alias=alias_name, version=version_int, ) ) stats.registered_model_aliases += 1 def _migrate_model_version( session: Session, version_dir: Path, model_name: str, stats: MigrationStats ) -> None: meta = safe_read_yaml(version_dir, FileStore.META_DATA_FILE_NAME) if meta is None: return version = meta["version"] session.add( SqlModelVersion( name=model_name, version=int(version), creation_time=meta.get("creation_timestamp"), last_updated_time=meta.get("last_updated_timestamp"), description=meta.get("description"), user_id=meta.get("user_id"), current_stage=meta.get("current_stage", "None"), source=meta.get("source"), storage_location=meta.get("storage_location"), run_id=meta.get("run_id"), run_link=meta.get("run_link"), status=meta.get("status", "READY"), status_message=meta.get("status_message"), ) ) stats.model_versions += 1 # Model version tags for key, value in read_tag_files(version_dir / FileStore.TAGS_FOLDER_NAME).items(): session.add( SqlModelVersionTag( name=model_name, version=int(version), key=key, value=value, ) ) stats.model_version_tags += 1
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/fs2db/_registry.py", "license": "Apache License 2.0", "lines": 116, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/store/fs2db/_tracking.py
""" Migrate tracking store entities from FileStore to DB. FileStore layout: <mlruns>/ ├── <experiment_id>/ │ ├── meta.yaml -> experiments │ ├── tags/<key> -> experiment_tags │ ├── <run_uuid>/ │ │ ├── meta.yaml -> runs │ │ ├── params/<key> -> params │ │ ├── tags/<key> -> tags │ │ ├── metrics/<key> -> metrics, latest_metrics │ │ └── inputs/<id>/meta.yaml -> inputs, input_tags │ ├── datasets/<id>/meta.yaml -> datasets │ ├── traces/<trace_id>/ │ │ ├── trace_info.yaml -> trace_info │ │ ├── tags/<key> -> trace_tags │ │ ├── request_metadata/<key> -> trace_request_metadata │ │ └── assessments/<id>.yaml -> assessments │ └── models/<model_id>/ │ ├── meta.yaml -> logged_models │ ├── params/<key> -> logged_model_params │ ├── tags/<key> -> logged_model_tags │ └── metrics/<key> -> logged_model_metrics └── .trash/ └── <experiment_id>/... (same structure, deleted experiments) """ import json import logging import math import uuid from datetime import datetime, timezone from pathlib import Path from typing import Any from sqlalchemy.orm import Session from mlflow.entities import RunStatus from mlflow.entities.logged_model_status import LoggedModelStatus from mlflow.store.fs2db._utils import ( MigrationStats, for_each_experiment, list_files, list_subdirs, read_metric_lines, read_tag_files, safe_read_yaml, ) from mlflow.store.tracking.dbmodels.models import ( SqlAssessments, SqlDataset, SqlExperiment, SqlExperimentTag, SqlInput, SqlInputTag, SqlLatestMetric, SqlLoggedModel, SqlLoggedModelMetric, SqlLoggedModelParam, SqlLoggedModelTag, SqlMetric, SqlParam, SqlRun, SqlTag, SqlTraceInfo, SqlTraceMetadata, SqlTraceTag, ) from mlflow.store.tracking.file_store import FileStore _logger = logging.getLogger(__name__) def migrate_experiments(session: Session, mlruns: Path, stats: MigrationStats) -> None: for exp_dir, exp_id in for_each_experiment(mlruns): _migrate_one_experiment(session, exp_dir, exp_id, stats) def _migrate_one_experiment( session: Session, exp_dir: Path, exp_id: str, stats: MigrationStats ) -> None: meta = safe_read_yaml(exp_dir, FileStore.META_DATA_FILE_NAME) if meta is None: return db_exp_id = int(exp_id) session.add( SqlExperiment( experiment_id=db_exp_id, name=meta.get("name", f"experiment_{exp_id}"), artifact_location=meta.get("artifact_location"), lifecycle_stage=meta.get("lifecycle_stage", "active"), creation_time=meta.get("creation_time"), last_update_time=meta.get("last_update_time"), ) ) stats.experiments += 1 for key, value in read_tag_files(exp_dir / FileStore.TAGS_FOLDER_NAME).items(): session.add( SqlExperimentTag( key=key, value=value, experiment_id=db_exp_id, ) ) stats.experiment_tags += 1 RESERVED_FOLDERS = { FileStore.TAGS_FOLDER_NAME, FileStore.DATASETS_FOLDER_NAME, FileStore.TRACES_FOLDER_NAME, FileStore.MODELS_FOLDER_NAME, FileStore.TRASH_FOLDER_NAME, } def migrate_runs(session: Session, mlruns: Path, stats: MigrationStats) -> None: for exp_dir, exp_id in for_each_experiment(mlruns): _migrate_runs_in_dir(session, exp_dir, int(exp_id), stats) def _migrate_runs_in_dir( session: Session, exp_dir: Path, exp_id: int, stats: MigrationStats, *, batch_size: int = 1000, ) -> None: count = 0 for name in list_subdirs(exp_dir): if name in RESERVED_FOLDERS: continue run_dir = exp_dir / name if not (run_dir / FileStore.META_DATA_FILE_NAME).is_file(): continue _migrate_one_run(session, run_dir, exp_id, stats) count += 1 if count % batch_size == 0: session.flush() session.expunge_all() def _migrate_one_run(session: Session, run_dir: Path, exp_id: int, stats: MigrationStats) -> None: meta = safe_read_yaml(run_dir, FileStore.META_DATA_FILE_NAME) if meta is None: return run_uuid = meta.get("run_uuid") or meta.get("run_id") if not run_uuid: _logger.warning("Skipping run in %s: missing run_uuid/run_id", run_dir) return status_raw = meta.get("status", RunStatus.RUNNING) status = RunStatus.to_string(status_raw) if isinstance(status_raw, int) else str(status_raw) session.add( SqlRun( run_uuid=run_uuid, name=meta.get("run_name") or meta.get("name"), source_type=( meta.get("source_type", "LOCAL") if isinstance(meta.get("source_type"), str) else "LOCAL" ), source_name=meta.get("source_name", ""), entry_point_name=meta.get("entry_point_name", ""), user_id=meta.get("user_id"), status=status, start_time=meta.get("start_time"), end_time=meta.get("end_time"), deleted_time=meta.get("deleted_time"), source_version=meta.get("source_version", ""), lifecycle_stage=meta.get("lifecycle_stage", "active"), artifact_uri=meta.get("artifact_uri"), experiment_id=exp_id, ) ) stats.runs += 1 # Params for key, value in read_tag_files(run_dir / FileStore.PARAMS_FOLDER_NAME).items(): session.add( SqlParam( key=key, value=value, run_uuid=run_uuid, ) ) stats.params += 1 # Tags for key, value in read_tag_files(run_dir / FileStore.TAGS_FOLDER_NAME).items(): session.add( SqlTag( key=key, value=value, run_uuid=run_uuid, ) ) stats.tags += 1 # Metrics + LatestMetrics _migrate_run_metrics(session, run_dir / FileStore.METRICS_FOLDER_NAME, run_uuid, stats) def _sanitize_metric_value(val: float) -> tuple[bool, float]: is_nan = math.isnan(val) if is_nan: return True, 0.0 if math.isinf(val): return False, 1.7976931348623157e308 if val > 0 else -1.7976931348623157e308 return False, val def _parse_metric_line(metric_line: str) -> tuple[int, float, int]: match metric_line.strip().split(" "): case [ts, val]: return int(ts), float(val), 0 case [ts, val, step, *_]: return int(ts), float(val), int(step) case _: raise ValueError(f"Malformed metric line: {metric_line!r}") def _migrate_run_metrics( session: Session, metrics_dir: Path, run_uuid: str, stats: MigrationStats, *, batch_size: int = 5000, ) -> None: all_metrics = read_metric_lines(metrics_dir) count = 0 for key, lines in all_metrics.items(): # Track the "latest" metric for this key: max by (step, timestamp, value) latest: tuple[int, int, float] | None = None # (step, timestamp, value) latest_is_nan = False for line in lines: ts, raw_val, step = _parse_metric_line(line) is_nan, db_val = _sanitize_metric_value(raw_val) session.add( SqlMetric( key=key, value=db_val, timestamp=ts, step=step, is_nan=is_nan, run_uuid=run_uuid, ) ) stats.metrics += 1 count += 1 if count % batch_size == 0: session.flush() session.expunge_all() # For latest_metrics: NaN comparison uses 0 as proxy value cmp_val = 0.0 if is_nan else db_val cmp_tuple = (step, ts, cmp_val) if latest is None or cmp_tuple > latest: latest = cmp_tuple latest_is_nan = is_nan if latest is not None: l_step, l_ts, l_val = latest session.add( SqlLatestMetric( key=key, value=0.0 if latest_is_nan else l_val, timestamp=l_ts, step=l_step, is_nan=latest_is_nan, run_uuid=run_uuid, ) ) stats.latest_metrics += 1 def migrate_datasets(session: Session, mlruns: Path, stats: MigrationStats) -> None: for exp_dir, exp_id in for_each_experiment(mlruns): _migrate_datasets_for_experiment(session, exp_dir, int(exp_id), stats) def _migrate_datasets_for_experiment( session: Session, exp_dir: Path, exp_id: int, stats: MigrationStats ) -> None: datasets_dir = exp_dir / FileStore.DATASETS_FOLDER_NAME if not datasets_dir.is_dir(): return dataset_uuid_map: dict[str, str] = {} # dataset_dir_name -> dataset_uuid for ds_dir_name in list_subdirs(datasets_dir): meta = safe_read_yaml(datasets_dir / ds_dir_name, FileStore.META_DATA_FILE_NAME) if meta is None: continue ds_uuid = meta.get("dataset_uuid") or str(uuid.uuid4()) dataset_uuid_map[ds_dir_name] = ds_uuid session.add( SqlDataset( dataset_uuid=ds_uuid, experiment_id=exp_id, name=meta.get("name", ""), digest=meta.get("digest", ""), dataset_source_type=meta.get("source_type", ""), dataset_source=meta.get("source", ""), dataset_schema=meta.get("schema"), dataset_profile=meta.get("profile"), ) ) stats.datasets += 1 # Scan runs in this experiment for inputs (run dirs are named by run UUID) for run_uuid in list_subdirs(exp_dir): if run_uuid in RESERVED_FOLDERS: continue inputs_dir = exp_dir / run_uuid / FileStore.INPUTS_FOLDER_NAME if not inputs_dir.is_dir(): continue for input_dir_name in list_subdirs(inputs_dir): input_meta = safe_read_yaml(inputs_dir / input_dir_name, FileStore.META_DATA_FILE_NAME) if input_meta is None: continue source_type = input_meta.get("source_type", "DATASET") source_id = input_meta.get("source_id", "") if source_type == "DATASET": ds_uuid = dataset_uuid_map.get(source_id) if ds_uuid is None: continue # FileStore doesn't persist input UUIDs; generate for the DB input_uuid = str(uuid.uuid4()) session.add( SqlInput( input_uuid=input_uuid, source_type="DATASET", source_id=ds_uuid, destination_type="RUN", destination_id=run_uuid, ) ) elif source_type == "MODEL": # FileStore: source_type=MODEL, source_id=model_id, destination_type=RUN # DB store: source_type=RUN_INPUT, source_id=run_id, destination_type=MODEL_INPUT input_uuid = str(uuid.uuid4()) session.add( SqlInput( input_uuid=input_uuid, source_type="RUN_INPUT", source_id=run_uuid, destination_type="MODEL_INPUT", destination_id=source_id, ) ) else: continue stats.inputs += 1 input_tags = input_meta.get("tags", {}) for tag_name, tag_value in input_tags.items(): session.add( SqlInputTag( input_uuid=input_uuid, name=tag_name, value=str(tag_value), ) ) stats.input_tags += 1 def _migrate_outputs_for_experiment(session: Session, exp_dir: Path, stats: MigrationStats) -> None: for run_uuid in list_subdirs(exp_dir): if run_uuid in RESERVED_FOLDERS: continue outputs_dir = exp_dir / run_uuid / FileStore.OUTPUTS_FOLDER_NAME if not outputs_dir.is_dir(): continue for model_id in list_subdirs(outputs_dir): meta = safe_read_yaml(outputs_dir / model_id, FileStore.META_DATA_FILE_NAME) if meta is None: continue # FileStore doesn't persist input UUIDs; generate for the DB session.add( SqlInput( input_uuid=str(uuid.uuid4()), source_type="RUN_OUTPUT", source_id=run_uuid, destination_type="MODEL_OUTPUT", destination_id=model_id, step=meta.get("step", 0), ) ) stats.outputs += 1 def migrate_traces(session: Session, mlruns: Path, stats: MigrationStats) -> None: for exp_dir, exp_id in for_each_experiment(mlruns): _migrate_traces_for_experiment(session, exp_dir, int(exp_id), stats) def _parse_timestamp_ms(request_time: str) -> int: try: dt = datetime.fromisoformat(request_time.replace("Z", "+00:00")) return int(dt.replace(tzinfo=dt.tzinfo or timezone.utc).timestamp() * 1000) except Exception: return 0 def _migrate_traces_for_experiment( session: Session, exp_dir: Path, exp_id: int, stats: MigrationStats, *, batch_size: int = 1000, ) -> None: traces_dir = exp_dir / FileStore.TRACES_FOLDER_NAME if not traces_dir.is_dir(): return count = 0 for trace_dir_name in list_subdirs(traces_dir): trace_dir = traces_dir / trace_dir_name if not (trace_dir / FileStore.TRACE_INFO_FILE_NAME).is_file(): continue meta = safe_read_yaml(trace_dir, FileStore.TRACE_INFO_FILE_NAME) if meta is None: continue # V2 uses request_id, V3 uses trace_id trace_id = meta.get("trace_id") or meta.get("request_id") or trace_dir_name # V2 uses timestamp_ms, V3 uses request_time (proto timestamp string) timestamp_ms = meta.get("timestamp_ms") if timestamp_ms is None: request_time = meta.get("request_time") if isinstance(request_time, int): timestamp_ms = request_time elif isinstance(request_time, str): timestamp_ms = _parse_timestamp_ms(request_time) else: timestamp_ms = 0 # V2 uses execution_time_ms, V3 uses execution_duration_ms execution_time_ms = meta.get("execution_time_ms") or meta.get("execution_duration_ms") # Status: V2 has status as string like "OK", V3 has state status = meta.get("status") or meta.get("state", "OK") session.add( SqlTraceInfo( request_id=trace_id, experiment_id=exp_id, timestamp_ms=timestamp_ms, execution_time_ms=execution_time_ms, status=status, client_request_id=meta.get("client_request_id"), request_preview=meta.get("request_preview"), response_preview=meta.get("response_preview"), ) ) stats.traces += 1 # Trace tags for key, value in read_tag_files(trace_dir / FileStore.TRACE_TAGS_FOLDER_NAME).items(): session.add( SqlTraceTag( key=key, value=value, request_id=trace_id, ) ) stats.trace_tags += 1 # Trace request metadata for key, value in read_tag_files( trace_dir / FileStore.TRACE_TRACE_METADATA_FOLDER_NAME ).items(): session.add( SqlTraceMetadata( key=key, value=value, request_id=trace_id, ) ) stats.trace_metadata += 1 count += 1 if count % batch_size == 0: session.flush() session.expunge_all() def migrate_assessments(session: Session, mlruns: Path, stats: MigrationStats) -> None: for exp_dir, _exp_id in for_each_experiment(mlruns): _migrate_assessments_for_experiment(session, exp_dir, stats) def _migrate_assessments_for_experiment( session: Session, exp_dir: Path, stats: MigrationStats ) -> None: traces_dir = exp_dir / FileStore.TRACES_FOLDER_NAME if not traces_dir.is_dir(): return for trace_dir_name in list_subdirs(traces_dir): trace_dir = traces_dir / trace_dir_name assessments_dir = trace_dir / FileStore.ASSESSMENTS_FOLDER_NAME if not assessments_dir.is_dir(): continue trace_meta = safe_read_yaml(trace_dir, FileStore.TRACE_INFO_FILE_NAME) if trace_meta is None: continue trace_id = trace_meta.get("trace_id") or trace_meta.get("request_id") or trace_dir_name for filename in list_files(assessments_dir): if not filename.endswith(".yaml"): continue assessment_id = filename[:-5] # strip .yaml meta = safe_read_yaml(assessments_dir, filename) if meta is None: continue _migrate_one_assessment(session, meta, trace_id, assessment_id, stats) def _migrate_one_assessment( session: Session, meta: dict[str, Any], trace_id: str, assessment_id: str, stats: MigrationStats, ) -> None: feedback_data = meta.get("feedback") expectation_data = meta.get("expectation") if feedback_data is not None: assessment_type = "feedback" value_json = json.dumps(feedback_data.get("value")) error_data = feedback_data.get("error") error_json = json.dumps(error_data) if error_data else None elif expectation_data is not None: assessment_type = "expectation" value_json = json.dumps(expectation_data.get("value")) error_json = None else: return source = meta.get("source", {}) source_type = source.get("source_type", "CODE") source_id = source.get("source_id") create_time = meta.get("create_time_ms", 0) last_update_time = meta.get("last_update_time_ms", create_time) assessment_metadata = meta.get("metadata") metadata_json = json.dumps(assessment_metadata) if assessment_metadata else None session.add( SqlAssessments( assessment_id=meta.get("assessment_id") or assessment_id, trace_id=trace_id, name=meta.get("assessment_name", meta.get("name", "")), assessment_type=assessment_type, value=value_json, error=error_json, created_timestamp=create_time, last_updated_timestamp=last_update_time, source_type=source_type, source_id=source_id, run_id=meta.get("run_id"), span_id=meta.get("span_id"), rationale=meta.get("rationale"), overrides=meta.get("overrides"), valid=meta.get("valid", True), assessment_metadata=metadata_json, ) ) stats.assessments += 1 def migrate_logged_models(session: Session, mlruns: Path, stats: MigrationStats) -> None: for exp_dir, exp_id in for_each_experiment(mlruns): _migrate_logged_models_for_experiment(session, exp_dir, int(exp_id), stats) def _migrate_logged_models_for_experiment( session: Session, exp_dir: Path, exp_id: int, stats: MigrationStats ) -> None: models_dir = exp_dir / FileStore.MODELS_FOLDER_NAME if not models_dir.is_dir(): return for model_dir_name in list_subdirs(models_dir): model_dir = models_dir / model_dir_name meta = safe_read_yaml(model_dir, FileStore.META_DATA_FILE_NAME) if meta is None: continue model_id = meta.get("model_id", model_dir_name) # Status may be stored as an integer enum or string status_raw = meta.get("status", 1) # 1 = PENDING typically if isinstance(status_raw, str): try: status_raw = LoggedModelStatus[status_raw].value except (KeyError, AttributeError): status_raw = 1 status = int(status_raw) session.add( SqlLoggedModel( model_id=model_id, experiment_id=exp_id, name=meta.get("name", ""), artifact_location=meta.get("artifact_location", ""), creation_timestamp_ms=meta.get("creation_timestamp", 0), last_updated_timestamp_ms=meta.get("last_updated_timestamp", 0), status=status, lifecycle_stage=meta.get("lifecycle_stage", "active"), model_type=meta.get("model_type"), source_run_id=meta.get("source_run_id"), status_message=meta.get("status_message"), ) ) stats.logged_models += 1 # Logged model params for key, value in read_tag_files(model_dir / FileStore.PARAMS_FOLDER_NAME).items(): session.add( SqlLoggedModelParam( model_id=model_id, experiment_id=exp_id, param_key=key, param_value=value, ) ) stats.logged_model_params += 1 # Logged model tags for key, value in read_tag_files(model_dir / FileStore.TAGS_FOLDER_NAME).items(): session.add( SqlLoggedModelTag( model_id=model_id, experiment_id=exp_id, tag_key=key, tag_value=value, ) ) stats.logged_model_tags += 1 # Logged model metrics _migrate_logged_model_metrics( session, model_dir / FileStore.METRICS_FOLDER_NAME, model_id, exp_id, stats ) def _migrate_logged_model_metrics( session: Session, metrics_dir: Path, model_id: str, exp_id: int, stats: MigrationStats ) -> None: all_metrics = read_metric_lines(metrics_dir) for key, lines in all_metrics.items(): for line in lines: # Format: timestamp value step run_id [dataset_name dataset_digest] match line.strip().split(" "): case [ts, val, step, run_id]: dataset_name = None dataset_digest = None case [ts, val, step, run_id, dataset_name, dataset_digest]: pass case _: _logger.warning( "Skipping malformed logged model metric line in %s: %s", key, line ) continue session.add( SqlLoggedModelMetric( model_id=model_id, metric_name=key, metric_timestamp_ms=int(ts), metric_step=int(step), metric_value=float(val), experiment_id=exp_id, run_id=run_id, dataset_uuid=None, dataset_name=dataset_name, dataset_digest=dataset_digest, ) ) stats.logged_model_metrics += 1
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/fs2db/_tracking.py", "license": "Apache License 2.0", "lines": 610, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/fs2db/_utils.py
import logging from collections.abc import Iterator from dataclasses import dataclass, fields from pathlib import Path from typing import Any import yaml from mlflow.store.tracking.file_store import FileStore _logger = logging.getLogger(__name__) @dataclass class MigrationStats: experiments: int = 0 experiment_tags: int = 0 runs: int = 0 params: int = 0 tags: int = 0 metrics: int = 0 latest_metrics: int = 0 datasets: int = 0 inputs: int = 0 input_tags: int = 0 outputs: int = 0 traces: int = 0 trace_tags: int = 0 trace_metadata: int = 0 assessments: int = 0 logged_models: int = 0 logged_model_params: int = 0 logged_model_tags: int = 0 logged_model_metrics: int = 0 registered_models: int = 0 registered_model_tags: int = 0 registered_model_aliases: int = 0 model_versions: int = 0 model_version_tags: int = 0 def items(self) -> Iterator[tuple[str, int]]: for f in fields(self): val = getattr(self, f.name) if val > 0: yield f.name, val def summary(self, source: str, target_uri: str, db_counts: dict[str, int] | None = None) -> str: sep = "=" * 50 lines = [sep, "Migration summary:", sep] if db_counts: lines.append(f" {'entity':<25} {'migrated':>10} {'in DB':>10}") lines.append(f" {'-' * 25} {'-' * 10} {'-' * 10}") for key, count in self.items(): db_val = db_counts.get(key, "") lines.append(f" {key:<25} {count:>10} {db_val:>10}") else: for key, count in self.items(): lines.append(f" {key}: {count}") lines.append(sep) lines.append(f" source: {source}") lines.append(f" target: {target_uri}") lines.append(sep) lines.append("") lines.append("To start a server with the migrated data:") lines.append(f" mlflow server --backend-store-uri {target_uri}") return "\n".join(lines) def safe_read_yaml(root: Path, file_name: str) -> dict[str, Any] | None: try: return yaml.safe_load((root / file_name).read_text()) except Exception as e: _logger.warning("Failed to read %s: %s", root / file_name, e) return None def list_subdirs(path: Path) -> list[str]: if not path.is_dir(): return [] return sorted(d.name for d in path.iterdir() if d.is_dir()) def list_files(path: Path) -> list[str]: if not path.is_dir(): return [] return sorted(f.name for f in path.iterdir() if f.is_file()) def read_tag_files(tag_dir: Path) -> dict[str, str]: result = {} if not tag_dir.is_dir(): return result for p in tag_dir.rglob("*"): if not p.is_file(): continue key = p.relative_to(tag_dir).as_posix() result[key] = (tag_dir / key).read_text() return result def read_metric_lines(metrics_dir: Path) -> dict[str, list[str]]: result: dict[str, list[str]] = {} if not metrics_dir.is_dir(): return result for p in metrics_dir.rglob("*"): if not p.is_file(): continue key = p.relative_to(metrics_dir).as_posix() result[key] = (metrics_dir / key).read_text().splitlines() return result def list_experiment_ids(root: Path) -> list[str]: if not root.is_dir(): return [] result = [] for d in sorted(root.iterdir(), key=lambda p: p.name): if not d.is_dir(): continue try: int(d.name) except ValueError: continue result.append(d.name) return result def for_each_experiment(mlruns: Path) -> Iterator[tuple[Path, str]]: """Yield (exp_dir, exp_id) for all experiments in both mlruns and .trash.""" for exp_id in list_experiment_ids(mlruns): yield mlruns / exp_id, exp_id trash_dir = mlruns / FileStore.TRASH_FOLDER_NAME for exp_id in list_experiment_ids(trash_dir): yield trash_dir / exp_id, exp_id
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/fs2db/_utils.py", "license": "Apache License 2.0", "lines": 113, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/fs2db/cli.py
from pathlib import Path import click from mlflow.store.fs2db import migrate from mlflow.utils.uri import get_uri_scheme @click.command("migrate-filestore") @click.option( "--source", required=True, type=click.Path(exists=True, file_okay=False, resolve_path=True), help="Root directory containing mlruns/ FileStore data.", ) @click.option( "--target", required=True, help="SQLite URI (e.g. sqlite:///mlflow.db).", ) @click.option( "--progress/--no-progress", default=True, help="Show per-experiment progress messages during migration.", ) def migrate_filestore(source: str, target: str, progress: bool) -> None: """Migrate MLflow FileStore data to a SQLite database.""" if get_uri_scheme(target) != "sqlite": raise click.BadParameter( "Must be a SQLite URI starting with 'sqlite:///'", param_hint="'--target'", ) db_path = Path(target.removeprefix("sqlite:///")).resolve() if not db_path.parent.is_dir(): raise click.BadParameter( f"Parent directory does not exist: {db_path.parent}", param_hint="'--target'", ) if db_path.exists(): click.confirm(f"Database file already exists: {db_path}\nOverwrite?", abort=True) db_path.unlink() target = f"sqlite:///{db_path}" migrate(Path(source), target, progress=progress)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/fs2db/cli.py", "license": "Apache License 2.0", "lines": 39, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/store/fs2db/test_migration.py
import math from pathlib import Path from unittest import mock import pytest from sqlalchemy import create_engine, text from mlflow.entities import Experiment, Run, ViewType from mlflow.store.fs2db import migrate from mlflow.tracking import MlflowClient from mlflow.utils.file_utils import local_file_uri_to_path Clients = tuple[MlflowClient, MlflowClient] def _get_all_experiments(client: MlflowClient) -> list[Experiment]: return client.search_experiments(view_type=ViewType.ALL) def _get_all_runs(client: MlflowClient, experiment_ids: list[str]) -> list[Run]: runs = [] for exp_id in experiment_ids: runs.extend(client.search_runs(experiment_ids=[exp_id], run_view_type=ViewType.ALL)) return runs def test_experiments(clients: Clients) -> None: src, dst = clients src_exps = _get_all_experiments(src) dst_exps = _get_all_experiments(dst) # DB auto-creates a Default experiment (id=0) during _initialize_tables, # so filter to only source experiment IDs for comparison. src_by_id = {e.experiment_id: e for e in src_exps} dst_by_id = {e.experiment_id: e for e in dst_exps if e.experiment_id in src_by_id} assert any(e.lifecycle_stage == "deleted" for e in src_by_id.values()) for exp_id, src_exp in src_by_id.items(): dst_exp = dst_by_id[exp_id] assert dst_exp.name == src_exp.name assert dst_exp.lifecycle_stage == src_exp.lifecycle_stage assert dst_exp.creation_time == src_exp.creation_time assert dst_exp.last_update_time == src_exp.last_update_time assert dst_exp.artifact_location == src_exp.artifact_location src_tags = {k: v for k, v in src_exp.tags.items() if not k.startswith("mlflow.")} dst_tags = {k: v for k, v in dst_exp.tags.items() if not k.startswith("mlflow.")} assert dst_tags == src_tags def test_runs(clients: Clients) -> None: src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] src_runs = _get_all_runs(src, exp_ids) dst_runs = _get_all_runs(dst, exp_ids) assert len(dst_runs) == len(src_runs) src_by_id = {r.info.run_id: r for r in src_runs} dst_by_id = {r.info.run_id: r for r in dst_runs} assert set(dst_by_id) == set(src_by_id) assert any(r.info.lifecycle_stage == "deleted" for r in src_by_id.values()) for run_id, src_run in src_by_id.items(): dst_run = dst_by_id[run_id] assert dst_run.info.status == src_run.info.status assert dst_run.info.lifecycle_stage == src_run.info.lifecycle_stage assert dst_run.info.start_time == src_run.info.start_time assert dst_run.info.end_time == src_run.info.end_time assert dst_run.info.run_name == src_run.info.run_name assert dst_run.data.params == src_run.data.params assert set(dst_run.data.metrics) == set(src_run.data.metrics) for key, src_val in src_run.data.metrics.items(): dst_val = dst_run.data.metrics[key] if math.isnan(src_val): assert math.isnan(dst_val) elif math.isinf(src_val): # DB stores Inf as ±1.7976931348623157e308 assert math.copysign(1, dst_val) == math.copysign(1, src_val) else: assert dst_val == src_val src_tags = {k: v for k, v in src_run.data.tags.items() if not k.startswith("mlflow.")} dst_tags = {k: v for k, v in dst_run.data.tags.items() if not k.startswith("mlflow.")} assert dst_tags == src_tags def test_dataset_inputs(clients: Clients) -> None: src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] src_runs = _get_all_runs(src, exp_ids) dst_by_id = {r.info.run_id: r for r in _get_all_runs(dst, exp_ids)} for src_run in src_runs: dst_run = dst_by_id[src_run.info.run_id] src_ds = src_run.inputs.dataset_inputs if src_run.inputs else [] dst_ds = dst_run.inputs.dataset_inputs if dst_run.inputs else [] assert len(dst_ds) == len(src_ds) src_by_name = {d.dataset.name: d for d in src_ds} dst_by_name = {d.dataset.name: d for d in dst_ds} for name, src_di in src_by_name.items(): dst_di = dst_by_name[name] assert dst_di.dataset.digest == src_di.dataset.digest assert dst_di.dataset.source_type == src_di.dataset.source_type assert dst_di.dataset.source == src_di.dataset.source assert dst_di.dataset.schema == src_di.dataset.schema assert dst_di.dataset.profile == src_di.dataset.profile assert {t.key: t.value for t in dst_di.tags} == {t.key: t.value for t in src_di.tags} def test_model_inputs(clients: Clients) -> None: # search_runs doesn't populate model_inputs; use get_run instead src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] run_ids = [r.info.run_id for r in _get_all_runs(src, exp_ids)] all_src_model_inputs = [] for run_id in run_ids: src_run = src.get_run(run_id) dst_run = dst.get_run(run_id) src_mi = src_run.inputs.model_inputs if src_run.inputs else [] dst_mi = dst_run.inputs.model_inputs if dst_run.inputs else [] assert len(dst_mi) == len(src_mi) assert sorted(m.model_id for m in dst_mi) == sorted(m.model_id for m in src_mi) all_src_model_inputs.extend(src_mi) assert len(all_src_model_inputs) > 0 def test_traces(clients: Clients) -> None: src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] src_traces = src.search_traces(locations=exp_ids) dst_traces = dst.search_traces(locations=exp_ids) assert len(dst_traces) == len(src_traces) dst_by_id = {t.info.request_id: t for t in dst_traces} for src_trace in src_traces: dst_trace = dst_by_id[src_trace.info.request_id] assert dst_trace.info.status == src_trace.info.status assert dst_trace.info.request_time == src_trace.info.request_time assert dst_trace.info.execution_duration == src_trace.info.execution_duration assert set(dst_trace.info.tags) >= set(src_trace.info.tags) def test_assessments(clients: Clients) -> None: src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] src_traces = src.search_traces(locations=exp_ids) dst_traces = dst.search_traces(locations=exp_ids) dst_by_id = {t.info.request_id: t for t in dst_traces} for src_trace in src_traces: dst_trace = dst_by_id[src_trace.info.request_id] src_assessments = src_trace.search_assessments(all=True) dst_assessments = dst_trace.search_assessments(all=True) assert len(dst_assessments) == len(src_assessments) src_by_name = {a.name: a for a in src_assessments} dst_by_name = {a.name: a for a in dst_assessments} for name, src_a in src_by_name.items(): dst_a = dst_by_name[name] assert dst_a.source.source_type == src_a.source.source_type assert dst_a.source.source_id == src_a.source.source_id assert dst_a.rationale == src_a.rationale assert dst_a.metadata == src_a.metadata if src_a.feedback is not None: assert dst_a.feedback is not None assert dst_a.feedback.value == src_a.feedback.value if src_a.expectation is not None: assert dst_a.expectation is not None assert dst_a.expectation.value == src_a.expectation.value def test_logged_models(clients: Clients) -> None: src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] src_models = src.search_logged_models(experiment_ids=exp_ids) dst_models = dst.search_logged_models(experiment_ids=exp_ids) assert len(dst_models) == len(src_models) dst_by_id = {m.model_id: m for m in dst_models} for src_model in src_models: dst_model = dst_by_id[src_model.model_id] assert dst_model.name == src_model.name assert dst_model.creation_timestamp == src_model.creation_timestamp assert dst_model.last_updated_timestamp == src_model.last_updated_timestamp assert dst_model.status == src_model.status assert dst_model.model_type == src_model.model_type assert dst_model.source_run_id == src_model.source_run_id assert set(dst_model.tags) >= set(src_model.tags) def test_run_outputs(clients: Clients) -> None: src, dst = clients exp_ids = [e.experiment_id for e in _get_all_experiments(src)] src_runs = _get_all_runs(src, exp_ids) dst_by_id = {r.info.run_id: r for r in _get_all_runs(dst, exp_ids)} for src_run in src_runs: dst_run = dst_by_id[src_run.info.run_id] src_outputs = src_run.outputs.model_outputs if src_run.outputs else [] dst_outputs = dst_run.outputs.model_outputs if dst_run.outputs else [] assert len(dst_outputs) == len(src_outputs) assert sorted(o.model_id for o in dst_outputs) == sorted(o.model_id for o in src_outputs) def test_registered_models(clients: Clients) -> None: src, dst = clients src_models = src.search_registered_models() dst_models = dst.search_registered_models() assert len(dst_models) == len(src_models) dst_by_name = {m.name: m for m in dst_models} for src_model in src_models: dst_model = dst_by_name[src_model.name] assert dst_model.description == src_model.description assert dst_model.creation_timestamp == src_model.creation_timestamp assert dst_model.last_updated_timestamp == src_model.last_updated_timestamp assert set(dst_model.tags) >= set(src_model.tags) src_versions = src.search_model_versions(f"name='{src_model.name}'") dst_versions = dst.search_model_versions(f"name='{dst_model.name}'") assert len(dst_versions) == len(src_versions) def test_model_versions(clients: Clients) -> None: src, dst = clients src_models = src.search_registered_models() for src_rm in src_models: src_versions = src.search_model_versions(f"name='{src_rm.name}'") dst_versions = dst.search_model_versions(f"name='{src_rm.name}'") assert len(dst_versions) == len(src_versions) dst_by_ver = {v.version: v for v in dst_versions} for src_mv in src_versions: dst_mv = dst_by_ver[src_mv.version] assert dst_mv.description == src_mv.description assert dst_mv.creation_timestamp == src_mv.creation_timestamp assert dst_mv.status == src_mv.status assert dst_mv.source == src_mv.source assert dst_mv.run_id == src_mv.run_id assert set(dst_mv.tags) >= set(src_mv.tags) def test_prompts(clients: Clients) -> None: src, dst = clients # search_registered_models excludes prompts, so use search_prompts instead. src_prompts = src.search_prompts() dst_prompts = dst.search_prompts() assert len(dst_prompts) == len(src_prompts) assert len(src_prompts) > 0 for src_prompt in src_prompts: name = src_prompt.name dst_prompt = next(p for p in dst_prompts if p.name == name) assert dst_prompt is not None src_pv = src.get_prompt_version(name, 1) dst_pv = dst.get_prompt_version(name, 1) assert src_pv is not None assert dst_pv is not None assert dst_pv.template == src_pv.template def test_rollback_on_failure(clients: Clients, tmp_path: Path) -> None: src, _ = clients source = Path(local_file_uri_to_path(src.tracking_uri)) target_uri = f"sqlite:///{tmp_path / 'rollback.db'}" with mock.patch( "mlflow.store.fs2db._tracking._migrate_runs_in_dir", side_effect=RuntimeError("boom"), ): with pytest.raises(RuntimeError, match="boom"): migrate(source, target_uri, progress=False) engine = create_engine(target_uri) with engine.connect() as conn: for table in ("experiments", "runs", "registered_models"): count = conn.execute(text(f"SELECT COUNT(*) FROM {table}")).scalar() assert count == 0
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/fs2db/test_migration.py", "license": "Apache License 2.0", "lines": 229, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/mcp/decorator.py
""" Decorator for exposing MLflow CLI commands as MCP tools. Usage: from mlflow.mcp.decorator import mlflow_mcp @commands.command("search") @mlflow_mcp(tool_name="search_traces") @click.option(...) def search_traces(...): ... The decorator attaches MCP metadata to the Click command, which is then used by the MCP server to register the tool with the specified name. """ from typing import Callable, TypeVar import click # Attribute name used to store MCP metadata on Click commands MCP_METADATA_ATTR = "_mlflow_mcp_metadata" F = TypeVar("F", bound=Callable) def mlflow_mcp(tool_name: str) -> Callable[[F], F]: """ Decorator to expose a Click command as an MCP tool with a curated name. Args: tool_name: The name to use for the MCP tool. This should be a clear, agent-friendly name that describes what the tool does. Convention: action_entity (e.g., "search_traces", "get_experiment") Example: @commands.command("search") @mlflow_mcp(tool_name="search_traces") def search(...): '''Search for traces in the specified experiment.''' ... The decorator stores metadata on the function that the MCP server reads when registering tools. Commands without this decorator are not exposed as MCP tools. """ def decorator(fn: F) -> F: # Store MCP metadata on the function setattr(fn, MCP_METADATA_ATTR, {"tool_name": tool_name}) return fn return decorator def get_mcp_tool_name(cmd: click.Command) -> str | None: """ Get the MCP tool name from a Click command, if it has been decorated. Args: cmd: The Click command to check. Returns: The MCP tool name if the command has been decorated with @mlflow_mcp, None otherwise. """ if cmd.callback is None: return None metadata = getattr(cmd.callback, MCP_METADATA_ATTR, None) if metadata is None: return None return metadata.get("tool_name")
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/mcp/decorator.py", "license": "Apache License 2.0", "lines": 54, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:tests/store/test_workspace_model_coverage.py
"""Verify that every SQLAlchemy model with a ``workspace`` column is handled by at least one workspace store's ``_get_query`` method. If a new model is added with a ``workspace`` column but the developer forgets to add it to ``_get_query``, that model's queries will bypass workspace isolation. This test catches that gap. """ import ast from pathlib import Path # Explicitly import all dbmodel modules so that every ORM model is registered # with Base.registry before we inspect mappers. import mlflow.store.model_registry.dbmodels.models # noqa: F401 import mlflow.store.tracking.dbmodels.models # noqa: F401 import mlflow.store.workspace.dbmodels.models # noqa: F401 from mlflow.store.db.base_sql_model import Base # Locate the repository root relative to this test file so that workspace # store paths resolve correctly regardless of the pytest working directory. _REPO_ROOT = Path(__file__).resolve().parent.parent.parent # Every workspace store that provides a ``_get_query`` override. WORKSPACE_STORE_PATHS = [ "mlflow/store/tracking/sqlalchemy_workspace_store.py", "mlflow/store/model_registry/sqlalchemy_workspace_store.py", "mlflow/store/jobs/sqlalchemy_workspace_store.py", ] def _models_handled_by_get_query(ws_path: str) -> set[str]: """Parse a workspace store file and return the ``Sql*`` model names referenced in comparisons inside its ``_get_query`` method. """ tree = ast.parse((_REPO_ROOT / ws_path).read_text()) # Collect module-level variables holding Sql* names (e.g. tuples of models) var_models: dict[str, set[str]] = {} for node in ast.iter_child_nodes(tree): if isinstance(node, ast.Assign): for target in node.targets: if isinstance(target, ast.Name): names = { n.id for n in ast.walk(node.value) if isinstance(n, ast.Name) and n.id.startswith("Sql") } if names: var_models[target.id] = names # Extract model names from _get_query comparisons models: set[str] = set() for node in ast.walk(tree): if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): continue if node.name != "_get_query": continue for inner in ast.walk(node): if not isinstance(inner, ast.Compare): continue for comp in inner.comparators: models |= { n.id for n in ast.walk(comp) if isinstance(n, ast.Name) and n.id.startswith("Sql") } if isinstance(comp, ast.Name) and comp.id in var_models: models |= var_models[comp.id] return models def test_all_workspace_models_handled_in_get_query(): """Every model with a workspace column must appear in at least one workspace store's ``_get_query``. """ handled: set[str] = set() for ws_path in WORKSPACE_STORE_PATHS: handled |= _models_handled_by_get_query(ws_path) models_with_column: set[str] = set() for mapper in Base.registry.mappers: if "workspace" in {col.key for col in mapper.columns}: models_with_column.add(mapper.class_.__name__) missing = models_with_column - handled assert not missing, ( f"These models have a `workspace` column but are not handled by any " f"workspace store's _get_query: {sorted(missing)}. " f"Add handling in the appropriate workspace store's _get_query method." )
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/test_workspace_model_coverage.py", "license": "Apache License 2.0", "lines": 77, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:.github/workflows/triage.py
"""Triage GitHub issues: generate a comment requesting missing info.""" # ruff: noqa: T201 import argparse import concurrent.futures import json import os import re import sys import urllib.request from pathlib import Path from typing import Any PROMPT_TEMPLATE = """\ Triage the following GitHub issue and decide whether to request more information \ from the author. ## Issue Title {title} ## Issue Body {body} ## Instructions Evaluate the issue and return a JSON object with two fields: - `comment`: A polite comment to post on the issue requesting missing information, \ or null if no comment is needed. The comment should be concise and specific about \ what information would help. It may ask for any combination of: - Steps to reproduce the problem (for bug reports without clear repro steps) - Environment info such as OS, Python version, or MLflow version (for bug reports) - Full traceback (for bug reports that mention an error but don't include one) - A screenshot or screen recording (only for issues that would benefit from visual evidence \ to understand and reproduce, e.g., layout issues, rendering bugs, styling problems — \ do not request for backend, API, CLI, docs, or performance issues) - `reason`: A brief explanation of why you decided to return or not return a comment. \ This is for internal verification only and will not be shown to the user. Guidelines: - Only request information that is clearly missing and would help investigate the issue. - Do not request repro steps if the issue already contains numbered steps, a code snippet, \ or a clear description of how to trigger the bug. - Do not request environment info if OS, Python version, or MLflow version is already provided. - Do not request anything for feature requests. - When in doubt, return null — only return a comment when information is clearly missing.""" MAX_BODY_LENGTH = 10_000 def strip_html_comments(text: str) -> str: return re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL) def strip_empty_checkboxes(text: str) -> str: return re.sub(r"^\s{0,3}[-*]\s+\[\s*\]\s+.+\n?", "", text, flags=re.MULTILINE) def build_prompt(title: str, body: str) -> str: body = strip_html_comments(body) body = strip_empty_checkboxes(body) return PROMPT_TEMPLATE.format( title=title, body=body[:MAX_BODY_LENGTH], ) def call_anthropic_api(prompt: str) -> dict[str, Any]: api_key = os.environ["ANTHROPIC_API_KEY"] request_body = { "model": "claude-haiku-4-5-20251001", "max_tokens": 1024, "temperature": 0, "messages": [{"role": "user", "content": prompt}], "output_config": { "format": { "type": "json_schema", "schema": { "type": "object", "properties": { "comment": { "type": ["string", "null"], "description": ( "A comment to post requesting missing information, " "or null if no comment is needed." ), }, "reason": { "type": "string", "description": "Brief explanation for the decision.", }, }, "required": ["comment", "reason"], "additionalProperties": False, }, } }, } req = urllib.request.Request( "https://api.anthropic.com/v1/messages", data=json.dumps(request_body).encode(), headers={ "Content-Type": "application/json", "x-api-key": api_key, "anthropic-version": "2023-06-01", }, ) try: with urllib.request.urlopen(req) as resp: response = json.loads(resp.read().decode()) except urllib.error.HTTPError as e: error_body = e.read().decode() print(f"API Error {e.code}: {error_body}", file=sys.stderr) raise usage = response.get("usage", {}) result = json.loads(response["content"][0]["text"]) usage["cost_in_usd"] = compute_cost(usage) return { "comment": result["comment"], "reason": result["reason"], "usage": usage, } # https://docs.anthropic.com/en/docs/about-claude/models#model-comparison-table HAIKU_INPUT_COST_PER_MTOK = 1.00 HAIKU_OUTPUT_COST_PER_MTOK = 5.00 def compute_cost(usage: dict[str, int]) -> float: input_tokens = usage.get("input_tokens", 0) output_tokens = usage.get("output_tokens", 0) return ( input_tokens * HAIKU_INPUT_COST_PER_MTOK + output_tokens * HAIKU_OUTPUT_COST_PER_MTOK ) / 1_000_000 def triage_issue(title: str, body: str) -> dict[str, Any]: # Skip triage for security vulnerability issues if "security vulnerability" in title.lower(): return { "comment": None, "reason": "Skipped: Issue title contains 'Security Vulnerability'", "usage": {"input_tokens": 0, "output_tokens": 0, "cost_in_usd": 0}, } prompt = build_prompt(title, body) return call_anthropic_api(prompt) GREEN = "\033[32m" RED = "\033[31m" RESET = "\033[0m" def parse_dataset(path: Path) -> list[dict[str, str]]: text = path.read_text() issues = [] for section in re.split(r"\n---\n", text): header_match = re.search(r"^## (.+)$", section, re.MULTILINE) title_match = re.search(r"\*\*Title:\*\*\s*(.+)$", section, re.MULTILINE) body_match = re.search(r"\*\*Body:\*\*\s*\n(.*)", section, re.DOTALL) if header_match and title_match and body_match: issues.append( { "header": header_match.group(1).strip(), "title": title_match.group(1).strip(), "body": body_match.group(1).strip(), } ) return issues def run_tests() -> None: dataset_path = Path(__file__).parent / "triage.md" issues = parse_dataset(dataset_path) with concurrent.futures.ThreadPoolExecutor() as executor: futures = { executor.submit(triage_issue, issue["title"], issue["body"]): issue for issue in issues } total_usage = {"input_tokens": 0, "output_tokens": 0, "cost_in_usd": 0.0} for future in futures: issue = futures[future] result = future.result() usage = result["usage"] total_usage["input_tokens"] += usage.get("input_tokens", 0) total_usage["output_tokens"] += usage.get("output_tokens", 0) total_usage["cost_in_usd"] += usage.get("cost_in_usd", 0.0) has_comment = result["comment"] is not None color = RED if has_comment else GREEN print(f"{color}{issue['header']}{RESET}") print(f" reason: {result['reason']}") if result["comment"]: print(f" comment: {result['comment'][:200]}") print(f"\nTotal usage: {json.dumps(total_usage, indent=2)}") def write_step_summary(result: dict[str, Any]) -> None: step_summary_path = os.environ.get("GITHUB_STEP_SUMMARY") if not step_summary_path: return comment = result.get("comment") reason = result.get("reason", "") usage = result.get("usage", {}) usage_json = json.dumps({"usage": usage}, indent=2) summary = f"""## Comment {comment or "None"} ## Reason {reason} ## Usage ```json {usage_json} ``` """ with open(step_summary_path, "a") as f: f.write(summary) def main() -> None: parser = argparse.ArgumentParser(description="Triage GitHub issues") subparsers = parser.add_subparsers(dest="command") triage_parser = subparsers.add_parser("triage") triage_parser.add_argument("--title", required=True) triage_parser.add_argument("--body", default="") subparsers.add_parser("test") args = parser.parse_args() match args.command: case "triage": result = triage_issue(args.title, args.body) write_step_summary(result) print(json.dumps(result)) case "test": run_tests() case _: parser.print_help() sys.exit(1) if __name__ == "__main__": main()
{ "repo_id": "mlflow/mlflow", "file_path": ".github/workflows/triage.py", "license": "Apache License 2.0", "lines": 204, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:fs2db/src/generate_synthetic_data.py
# ruff: noqa: T201 """ Generate synthetic MLflow FileStore data for testing the fs2db migration tool. Usage: uv run --with mlflow==3.6.0 --no-project python -I \ fs2db/src/generate_synthetic_data.py --output /tmp/fs2db/v3.6.0/ --size small This script uses the MLflow public API to create realistic on-disk data. It must only depend on mlflow + stdlib (no local imports). """ import argparse import enum import logging import math import os import uuid import warnings from dataclasses import dataclass from pathlib import Path from typing import Literal from packaging.version import Version import mlflow from mlflow.tracking import MlflowClient MLFLOW_VERSION = Version(mlflow.__version__) Size = Literal["small", "full"] @dataclass class ExperimentData: experiment_id: str run_ids: list[str] @dataclass(frozen=True) class SizeConfig: experiments: int runs_per_exp: int datasets_per_run: int traces_per_exp: int assessments_per_trace: int logged_models_per_exp: int registered_models: int prompts: int SIZES: dict[Size, SizeConfig] = { "small": SizeConfig( experiments=2, runs_per_exp=2, datasets_per_run=1, traces_per_exp=1, assessments_per_trace=1, logged_models_per_exp=1, registered_models=1, prompts=1, ), "full": SizeConfig( experiments=20, runs_per_exp=50, datasets_per_run=3, traces_per_exp=30, assessments_per_trace=5, logged_models_per_exp=10, registered_models=20, prompts=15, ), } class Feature(str, enum.Enum): DATASETS = "datasets" TRACES = "traces" ASSESSMENTS = "assessments" LOGGED_MODELS = "logged_models" PROMPTS = "prompts" def has_feature(feature: Feature) -> bool: match feature: case Feature.DATASETS: return MLFLOW_VERSION >= Version("2.4") case Feature.TRACES: return MLFLOW_VERSION >= Version("2.14") case Feature.ASSESSMENTS: return MLFLOW_VERSION >= Version("3.6") case Feature.LOGGED_MODELS: return MLFLOW_VERSION >= Version("3.5") case Feature.PROMPTS: return MLFLOW_VERSION >= Version("3.5") def generate_core(cfg: SizeConfig) -> list[ExperimentData]: """Create experiments and runs, including edge cases (unicode, NaN, deleted, etc.).""" client = MlflowClient() result: list[ExperimentData] = [] for exp_idx in range(cfg.experiments): exp_name = f"experiment_{exp_idx}" exp_id = client.create_experiment( exp_name, tags={"team": "ml-infra", "priority": str(exp_idx)}, ) run_ids: list[str] = [] for run_idx in range(cfg.runs_per_exp): with mlflow.start_run( experiment_id=exp_id, tags={"run_index": str(run_idx), "source": "synthetic"}, ) as run: run_ids.append(run.info.run_id) mlflow.log_params( { "learning_rate": "0.001", "batch_size": "32", "model_type": f"model_v{run_idx}", } ) mlflow.log_metrics( { "accuracy": 0.85 + run_idx * 0.01, "loss": 0.35 - run_idx * 0.01, "zero_metric": 0.0, "negative_metric": -1.5 + run_idx * 0.1, } ) for step in range(5): mlflow.log_metric("train_loss", 1.0 - step * 0.15, step=step) # Artifacts mlflow.log_text(f"Run {run_idx} of experiment {exp_idx}", "notes.txt") mlflow.log_dict({"lr": 0.001}, "config/params.json") result.append(ExperimentData(exp_id, run_ids)) # NaN / Inf metrics (on first run of first experiment) with mlflow.start_run(run_id=result[0].run_ids[0]): mlflow.log_metrics( {"nan_metric": math.nan, "inf_metric": math.inf, "neg_inf_metric": -math.inf} ) # Unicode experiment name and tag values unicode_exp_id = client.create_experiment( "実験_テスト_🚀", tags={"description": "日本語テスト 🎉", "emoji": "🔬🧪"}, ) with mlflow.start_run(experiment_id=unicode_exp_id): mlflow.log_params({"unicode_param": "パラメータ値", "long_param": "x" * 8000}) # Empty run (no metrics/params) with mlflow.start_run(experiment_id=result[0].experiment_id): pass # Deleted experiment with a run del_exp_id = client.create_experiment("to_be_deleted") with mlflow.start_run(experiment_id=del_exp_id): mlflow.log_param("param_in_deleted_exp", "value") client.delete_experiment(del_exp_id) # Deleted run with mlflow.start_run(experiment_id=result[0].experiment_id) as del_run: mlflow.log_param("param_in_deleted_run", "value") client.delete_run(del_run.info.run_id) # Failed run failed_run = client.create_run(experiment_id=result[0].experiment_id) client.log_param(failed_run.info.run_id, "failed_param", "value") client.set_terminated(failed_run.info.run_id, status="FAILED") # Killed run killed_run = client.create_run(experiment_id=result[0].experiment_id) client.log_metric(killed_run.info.run_id, "partial_metric", 0.5) client.set_terminated(killed_run.info.run_id, status="KILLED") return result def generate_datasets(cfg: SizeConfig, experiments: list[ExperimentData]) -> None: import pandas as pd for exp in experiments: for rid in exp.run_ids: for ds_idx in range(cfg.datasets_per_run): df = pd.DataFrame( {"feature": [1, 2, 3], "label": [0, 1, 0]}, ) dataset = mlflow.data.from_pandas( df, name=f"dataset_{ds_idx}", targets="label", ) with mlflow.start_run(run_id=rid): mlflow.log_input(dataset, context=f"training_{ds_idx}") @mlflow.trace def _retrieve(query: str) -> list[str]: return ["doc1", "doc2"] @mlflow.trace def _generate(docs: list[str]) -> str: return f"response for {len(docs)} docs" @mlflow.trace def _rag_pipeline(query: str) -> str: docs = _retrieve(query) return _generate(docs) def generate_traces(cfg: SizeConfig, experiments: list[ExperimentData]) -> list[str]: """Returns list of trace IDs.""" client = MlflowClient() trace_ids: list[str] = [] for exp in experiments: mlflow.set_experiment(experiment_id=exp.experiment_id) for t_idx in range(cfg.traces_per_exp): _rag_pipeline(f"test query {t_idx}") trace_id = mlflow.get_last_active_trace_id() client.set_trace_tag(trace_id, "trace_source", "synthetic") trace_ids.append(trace_id) return trace_ids def generate_assessments(cfg: SizeConfig, trace_ids: list[str]) -> None: from mlflow.entities import AssessmentSource, Expectation, Feedback human = AssessmentSource(source_type="HUMAN", source_id="test-user") ai_judge = AssessmentSource(source_type="AI_JUDGE", source_id="gpt-4o") for trace_id in trace_ids: for a_idx in range(cfg.assessments_per_trace): # Boolean feedback (thumbs up/down) mlflow.log_assessment( trace_id=trace_id, assessment=Feedback( name="correctness", value=a_idx % 2 == 0, source=human, rationale="Looks correct" if a_idx % 2 == 0 else "Has errors", ), ) # Numeric feedback (score) mlflow.log_assessment( trace_id=trace_id, assessment=Feedback( name="relevance_score", value=0.6 + a_idx * 0.08, source=ai_judge, rationale=f"Score based on semantic similarity (iteration {a_idx})", metadata={"model": "gpt-4o", "prompt_version": "v1"}, ), ) # Text feedback mlflow.log_assessment( trace_id=trace_id, assessment=Feedback( name="category", value="good" if a_idx % 3 != 0 else "needs_improvement", source=human, ), ) # Expectation (ground truth) mlflow.log_assessment( trace_id=trace_id, assessment=Expectation( name="expected_output", value=f"Expected response for query {a_idx}", source=human, ), ) def generate_logged_models(cfg: SizeConfig, experiments: list[ExperimentData]) -> list[str]: """Returns list of model artifact URIs.""" from mlflow.entities.logged_model_input import LoggedModelInput client = MlflowClient() model_uris: list[str] = [] for exp in experiments: model_ids: list[str] = [] for m_idx in range(cfg.logged_models_per_exp): with mlflow.start_run(experiment_id=exp.experiment_id): model_info = mlflow.pyfunc.log_model( name=f"logged_model_{m_idx}", python_model=lambda model_input: model_input, input_example="hello", pip_requirements=[], # skip dependency inference ) model_uris.append(model_info.model_uri) client.set_logged_model_tags( model_info.model_id, {"framework": "pytorch", "stage": "dev"} ) model_ids.append(model_info.model_id) # Log model inputs on existing runs in this experiment if model_ids and exp.run_ids: for i, model_id in enumerate(model_ids): run_id = exp.run_ids[i % len(exp.run_ids)] client.log_inputs(run_id, models=[LoggedModelInput(model_id=model_id)]) return model_uris def generate_model_registry(cfg: SizeConfig, model_uris: list[str]) -> None: client = MlflowClient() for rm_idx in range(cfg.registered_models): name = f"registered_model_{rm_idx}" client.create_registered_model(name, tags={"stage": "staging", "owner": "team-ml"}) client.update_registered_model( name, description=f"Registered model {rm_idx} for testing migration" ) for v_idx in range(1, 3): # Use a real logged model URI if available, otherwise fall back to a fake one uri_idx = rm_idx * 2 + (v_idx - 1) if uri_idx < len(model_uris): source = model_uris[uri_idx] else: source = f"runs:/{uuid.uuid4().hex}/model" mv = client.create_model_version( name=name, source=source, tags={"version_note": f"v{v_idx}"}, ) client.update_model_version(name, mv.version, description=f"Version {v_idx} of {name}") client.set_registered_model_alias(name, "champion", mv.version) def generate_prompts(cfg: SizeConfig) -> None: for p_idx in range(cfg.prompts): name = f"prompt_{p_idx}" # Version 1 mlflow.register_prompt( name=name, template=f"Hello {{{{name}}}}, this is prompt {p_idx}.", ) # Version 2 with updated template mlflow.register_prompt( name=name, template=f"Hi {{{{name}}}}, welcome to prompt {p_idx}. How can I help?", commit_message=f"Updated template for prompt {p_idx}", ) def main() -> None: parser = argparse.ArgumentParser(description="Generate synthetic MLflow FileStore data") parser.add_argument( "--output", required=True, help="Root directory for generated mlruns/ data", ) parser.add_argument( "--size", choices=["small", "full"], default="small", help="Data size preset (default: small)", ) args = parser.parse_args() output = os.path.abspath(args.output) os.makedirs(output, exist_ok=True) tracking_uri = Path(output).as_uri() mlflow.set_tracking_uri(tracking_uri) # Suppress noisy warnings and logs from mlflow internals warnings.filterwarnings("ignore") logging.getLogger("mlflow").setLevel(logging.ERROR) size: Size = args.size cfg = SIZES[size] print(f"Generating {size} synthetic data in {output}") print(f"MLflow version: {mlflow.__version__}") print() print("[1/7] Generating experiments, runs, params, metrics, tags, artifacts...") experiments = generate_core(cfg) if has_feature(Feature.DATASETS): print("[2/7] Generating datasets...") generate_datasets(cfg, experiments) else: print("[2/7] Skipping datasets (not available)") trace_ids: list[str] = [] if has_feature(Feature.TRACES): print("[3/7] Generating traces...") trace_ids = generate_traces(cfg, experiments) else: print("[3/7] Skipping traces (not available)") if trace_ids and has_feature(Feature.ASSESSMENTS): print("[4/7] Generating assessments...") generate_assessments(cfg, trace_ids) else: print("[4/7] Skipping assessments (not available)") model_uris: list[str] = [] if has_feature(Feature.LOGGED_MODELS): print("[5/7] Generating logged models...") model_uris = generate_logged_models(cfg, experiments) else: print("[5/7] Skipping logged models (not available)") print("[6/7] Generating model registry...") generate_model_registry(cfg, model_uris) if has_feature(Feature.PROMPTS): print("[7/7] Generating prompts...") generate_prompts(cfg) else: print("[7/7] Skipping prompts (not available)") print() print(f"Done. Data written to {output}") if __name__ == "__main__": main()
{ "repo_id": "mlflow/mlflow", "file_path": "fs2db/src/generate_synthetic_data.py", "license": "Apache License 2.0", "lines": 354, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/entities/workspace.py
"""Workspace entity shared between server and stores.""" from __future__ import annotations from dataclasses import dataclass from enum import Enum from typing import Any from mlflow.protos.service_pb2 import Workspace as ProtoWorkspace class WorkspaceDeletionMode(str, Enum): """Controls what happens to resources when a workspace is deleted.""" SET_DEFAULT = "SET_DEFAULT" """Reassign all resources in the workspace to the default workspace.""" CASCADE = "CASCADE" """Delete all resources in the workspace.""" RESTRICT = "RESTRICT" """Refuse to delete the workspace if it still contains resources.""" @dataclass(frozen=True, slots=True) class Workspace: """Minimal metadata describing a workspace.""" name: str description: str | None = None default_artifact_root: str | None = None def to_dict(self) -> dict[str, str | None]: return { "name": self.name, "description": self.description, "default_artifact_root": self.default_artifact_root, } @classmethod def from_dict(cls, payload: dict[str, Any]) -> "Workspace": return cls( name=payload["name"], description=payload.get("description"), default_artifact_root=payload.get("default_artifact_root"), ) def to_proto(self) -> ProtoWorkspace: workspace = ProtoWorkspace() workspace.name = self.name if self.description is not None: workspace.description = self.description if self.default_artifact_root is not None: workspace.default_artifact_root = self.default_artifact_root return workspace @classmethod def from_proto(cls, proto: ProtoWorkspace) -> "Workspace": description = proto.description if proto.HasField("description") else None default_artifact_root = ( proto.default_artifact_root if proto.HasField("default_artifact_root") else None ) return cls( name=proto.name, description=description, default_artifact_root=default_artifact_root, )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/entities/workspace.py", "license": "Apache License 2.0", "lines": 52, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/server/workspace_helpers.py
from __future__ import annotations import logging import os from flask import Response, request from mlflow.entities import Workspace from mlflow.environment_variables import ( MLFLOW_ENABLE_WORKSPACES, MLFLOW_WORKSPACE_STORE_URI, ) from mlflow.exceptions import MlflowException from mlflow.protos import databricks_pb2 from mlflow.store.workspace.abstract_store import WorkspaceNameValidator from mlflow.store.workspace.utils import get_default_workspace_optional from mlflow.tracking._workspace.registry import get_workspace_store from mlflow.utils import workspace_context, workspace_utils from mlflow.utils.workspace_utils import ( DEFAULT_WORKSPACE_NAME, WORKSPACE_HEADER_NAME, _normalize_workspace, ) _logger = logging.getLogger(__name__) _workspace_store = None def resolve_workspace_from_header(header_workspace: str | None) -> Workspace | None: """ Resolve (and validate) the active workspace given an optional header value. When ``header_workspace`` is None or empty, the default workspace is used (if configured). Returns None if no workspace can be resolved. """ store = _get_workspace_store() if header_workspace := _normalize_workspace(header_workspace): if header_workspace != DEFAULT_WORKSPACE_NAME: WorkspaceNameValidator.validate(header_workspace) return store.get_workspace(header_workspace) workspace, _ = get_default_workspace_optional(store) return workspace def _get_workspace_store(workspace_uri: str | None = None, tracking_uri: str | None = None): """ Resolve and cache the workspace store configured for this server process. The store is constructed on first invocation using the provided arguments (or their environment-derived defaults) and memoized for all subsequent calls, regardless of any new ``workspace_uri`` / ``tracking_uri`` values supplied later. """ if not MLFLOW_ENABLE_WORKSPACES.get(): raise MlflowException( "Workspace APIs are not available: workspaces are not enabled on this server", databricks_pb2.FEATURE_DISABLED, ) global _workspace_store if _workspace_store is not None: return _workspace_store from mlflow.server import BACKEND_STORE_URI_ENV_VAR resolved_tracking_uri = tracking_uri or os.environ.get(BACKEND_STORE_URI_ENV_VAR) resolved_workspace_uri = workspace_utils.resolve_workspace_store_uri( workspace_uri, tracking_uri=resolved_tracking_uri ) if resolved_workspace_uri is None: raise MlflowException.invalid_parameter_value( "Workspace URI could not be resolved. Provide --workspace-store-uri or set " f"{MLFLOW_WORKSPACE_STORE_URI.name}." ) _workspace_store = get_workspace_store(workspace_uri=resolved_workspace_uri) return _workspace_store def _workspace_error_response(exc: Exception) -> Response: if isinstance(exc, MlflowException): mlflow_exc = exc else: mlflow_exc = MlflowException( str(exc), error_code=databricks_pb2.INTERNAL_ERROR, ) # Preserve the original stack for debugging by chaining the exception. mlflow_exc.__cause__ = exc response = Response(mimetype="application/json") response.set_data(mlflow_exc.serialize_as_json()) response.status_code = mlflow_exc.get_http_status_code() return response def resolve_workspace_for_request_if_enabled( path: str, header_value: str | None, ) -> Workspace | None: if not MLFLOW_ENABLE_WORKSPACES.get(): if (header_value or "").strip(): raise MlflowException( "Workspace APIs are not available: workspaces are not enabled on this server", error_code=databricks_pb2.FEATURE_DISABLED, ) return None # The server-info endpoint must remain reachable even if the workspace header points to a # missing workspace, so skip workspace resolution entirely for this route. if path.rstrip("/").endswith("/mlflow/server-info"): return None try: return resolve_workspace_from_header(header_value) except MlflowException: raise except Exception as exc: _logger.exception("Unexpected error while resolving workspace") raise MlflowException( str(exc), error_code=databricks_pb2.INTERNAL_ERROR, ) from exc def workspace_before_request_handler(): # FastAPI middleware may have already resolved the workspace for this request, and the # server does not set the env var so this should reflect request-scoped state. if workspace_context.is_request_workspace_resolved(): return None header_value = request.headers.get(WORKSPACE_HEADER_NAME) try: workspace = resolve_workspace_for_request_if_enabled(request.path, header_value) except MlflowException as exc: return _workspace_error_response(exc) workspace_context.set_server_request_workspace(workspace.name if workspace else None) def workspace_teardown_request_handler(_exc): if MLFLOW_ENABLE_WORKSPACES.get(): workspace_context.clear_server_request_workspace() __all__ = [ "WORKSPACE_HEADER_NAME", "resolve_workspace_from_header", "resolve_workspace_for_request_if_enabled", "_get_workspace_store", "workspace_before_request_handler", "workspace_teardown_request_handler", ]
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/server/workspace_helpers.py", "license": "Apache License 2.0", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/db/workspace_migration.py
import sqlalchemy as sa from mlflow.store.workspace.sqlalchemy_store import _WORKSPACE_ROOT_MODELS from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME # Derive table names from the shared ORM model list and add child/tags tables that also carry # a workspace column but are not "root" tables (they are updated via FK cascades during # delete_workspace, but the migration script must handle them explicitly). _WORKSPACE_CHILD_TABLES = [ "model_versions", "registered_model_tags", "model_version_tags", "registered_model_aliases", ] _WORKSPACE_TABLES = [ model.__tablename__ for model in _WORKSPACE_ROOT_MODELS ] + _WORKSPACE_CHILD_TABLES _CONFLICT_SPECS = [ ("experiments", ("name",), "experiments with the same name"), ("registered_models", ("name",), "registered models with the same name"), ("evaluation_datasets", ("name",), "evaluation datasets with the same name"), ("model_versions", ("name", "version"), "model versions with the same model name and version"), ( "registered_model_tags", ("name", "key"), "registered model tags with the same model name and key", ), ( "model_version_tags", ("name", "version", "key"), "model version tags with the same model name, version, and key", ), ( "registered_model_aliases", ("name", "alias"), "registered model aliases with the same model name and alias", ), ("secrets", ("secret_name",), "secrets with the same name"), ("endpoints", ("name",), "endpoints with the same name"), ("model_definitions", ("name",), "model definitions with the same name"), ] def _format_conflicts( conflicts: list[tuple[object, ...]], columns: tuple[str, ...], *, max_rows: int | None, ) -> str: rows = conflicts if max_rows is None else conflicts[:max_rows] formatted_conflicts = "\n ".join( ", ".join(f"{column}={value!r}" for column, value in zip(columns, row)) for row in rows ) if formatted_conflicts: formatted_conflicts = f"\n {formatted_conflicts}" if max_rows is not None and len(conflicts) > max_rows: formatted_conflicts += f"\n ... ({len(conflicts) - max_rows} more)" return formatted_conflicts def _get_table(conn, table_name: str) -> sa.Table: table = sa.Table(table_name, sa.MetaData(), autoload_with=conn) if "workspace" not in table.c: raise RuntimeError( "Move aborted: the specified tracking server does not have workspaces enabled. " "This command is intended for a workspace-enabled tracking server. Please make sure " "the specified tracking URI is correct." ) return table def _assert_no_workspace_conflicts( conn, table_name: str, columns: tuple[str, ...], resource_description: str, *, verbose: bool, ) -> None: table = _get_table(conn, table_name) group_columns = [table.c[column] for column in columns] conflict_keys = ( sa.select(*group_columns).group_by(*group_columns).having(sa.func.count() > 1).subquery() ) join_conditions = [table.c[column] == conflict_keys.c[column] for column in columns] extra_columns = [] if table_name == "experiments" and "experiment_id" in table.c: extra_columns.append(table.c.experiment_id) conflict_rows_stmt = ( sa.select(*group_columns, table.c.workspace, *extra_columns) .select_from(table.join(conflict_keys, sa.and_(*join_conditions))) .order_by(*group_columns, table.c.workspace, *extra_columns) ) if conflicts := conn.execute(conflict_rows_stmt).fetchall(): formatted_conflicts = _format_conflicts( conflicts, (*columns, "workspace", *(column.name for column in extra_columns)), max_rows=None if verbose else 5, ) raise RuntimeError( "Move aborted: merging workspaces would create duplicate " f"{resource_description}. Resolve the following conflicts by renaming the affected " "resources (restore deleted ones first) or permanently deleting them, then retry: " f"{formatted_conflicts}" ) def migrate_to_default_workspace( engine: sa.Engine, dry_run: bool = False, *, verbose: bool = False, ) -> dict[str, int]: """ Move all workspace-scoped resources into the default workspace. Returns a mapping of table name -> number of rows moved (or that would be moved in dry-run). When verbose is True, conflict lists are not truncated. """ with engine.begin() as conn: for table_name, columns, description in _CONFLICT_SPECS: _assert_no_workspace_conflicts( conn, table_name, columns, description, verbose=verbose, ) counts = {} for table_name in _WORKSPACE_TABLES: table = _get_table(conn, table_name) stmt = ( sa.select(sa.func.count()) .select_from(table) .where(table.c.workspace != DEFAULT_WORKSPACE_NAME) ) counts[table_name] = conn.execute(stmt).scalar_one() if dry_run or counts[table_name] == 0: continue conn.execute( table.update() .where(table.c.workspace != DEFAULT_WORKSPACE_NAME) .values(workspace=DEFAULT_WORKSPACE_NAME) ) return counts
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/db/workspace_migration.py", "license": "Apache License 2.0", "lines": 134, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/jobs/sqlalchemy_workspace_store.py
""" Workspace-aware variant of the jobs SQLAlchemy store. """ from __future__ import annotations import logging from mlflow.store.jobs.sqlalchemy_store import SqlAlchemyJobStore from mlflow.store.tracking.dbmodels.models import SqlJob from mlflow.store.workspace_aware_mixin import WorkspaceAwareMixin _logger = logging.getLogger(__name__) class WorkspaceAwareSqlAlchemyJobStore(WorkspaceAwareMixin, SqlAlchemyJobStore): """ Workspace-aware variant of the jobs SQLAlchemy store. This store adds workspace filtering to all queries, ensuring data isolation between workspaces. """ def __init__(self, db_uri): super().__init__(db_uri) def _get_query(self, session, model): """ Return a query for ``model`` filtered by the active workspace. """ query = super()._get_query(session, model) workspace = self._get_active_workspace() if model is SqlJob: return query.filter(SqlJob.workspace == workspace) return query
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/jobs/sqlalchemy_workspace_store.py", "license": "Apache License 2.0", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/store/model_registry/sqlalchemy_workspace_store.py
""" Workspace-aware variant of the model registry SQLAlchemy store. """ from __future__ import annotations import logging from mlflow.store.model_registry.dbmodels.models import ( SqlModelVersion, SqlModelVersionTag, SqlRegisteredModel, SqlRegisteredModelAlias, SqlRegisteredModelTag, SqlWebhook, ) from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore from mlflow.store.workspace_aware_mixin import WorkspaceAwareMixin _logger = logging.getLogger(__name__) _WORKSPACE_ISOLATED_MODELS = ( SqlRegisteredModel, SqlModelVersion, SqlWebhook, SqlRegisteredModelTag, SqlModelVersionTag, SqlRegisteredModelAlias, ) class WorkspaceAwareSqlAlchemyStore(WorkspaceAwareMixin, SqlAlchemyStore): """ Workspace-aware variant of the model registry SQLAlchemy store. This store adds workspace filtering to all queries, ensuring data isolation between workspaces. """ def __init__(self, db_uri): super().__init__(db_uri) def _get_query(self, session, model): """ Return a query for ``model`` filtered by the active workspace. """ query = super()._get_query(session, model) workspace = self._get_active_workspace() if model in _WORKSPACE_ISOLATED_MODELS: return query.filter(model.workspace == workspace) return query def _initialize_store_state(self): """ Initialize store state for workspace-aware mode. In workspace mode, we skip the non-default workspace validation since having entries in different workspaces is expected and correct behavior. """ # No validation needed in workspace-aware mode - entries in different # workspaces are expected and correct behavior def _get_workspace_clauses(self, model): """ Return workspace filter clauses for the model. """ workspace = self._get_active_workspace() if model in _WORKSPACE_ISOLATED_MODELS: return [model.workspace == workspace] return []
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/model_registry/sqlalchemy_workspace_store.py", "license": "Apache License 2.0", "lines": 57, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/store/tracking/sqlalchemy_workspace_store.py
from __future__ import annotations import logging import sqlalchemy import sqlalchemy.sql.expression as sql from sqlalchemy.exc import IntegrityError from sqlalchemy.future import select from mlflow.entities import ( Experiment, ) from mlflow.entities.entity_type import EntityAssociationType from mlflow.entities.lifecycle_stage import LifecycleStage from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import ( INVALID_PARAMETER_VALUE, INVALID_STATE, RESOURCE_DOES_NOT_EXIST, ) from mlflow.store.tracking.dbmodels.models import ( SqlAssessments, SqlEvaluationDataset, SqlExperiment, SqlGatewayBudgetPolicy, SqlGatewayEndpoint, SqlGatewayEndpointBinding, SqlGatewayEndpointModelMapping, SqlGatewayModelDefinition, SqlGatewaySecret, SqlIssue, SqlLoggedModel, SqlOnlineScoringConfig, SqlRun, SqlTraceInfo, ) from mlflow.store.tracking.sqlalchemy_store import ( SqlAlchemyStore, ) from mlflow.store.workspace.utils import get_default_workspace_optional from mlflow.store.workspace_aware_mixin import WorkspaceAwareMixin from mlflow.tracking._workspace.registry import get_workspace_store from mlflow.utils import workspace_context, workspace_utils from mlflow.utils.time import get_current_time_millis from mlflow.utils.uri import ( append_to_uri_path, ) from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME, WORKSPACES_DIR_NAME _logger = logging.getLogger(__name__) class WorkspaceAwareSqlAlchemyStore(WorkspaceAwareMixin, SqlAlchemyStore): """ Workspace-aware variant of the SQLAlchemy tracking store. """ def __init__(self, db_uri, default_artifact_root): self._workspace_provider = None self._workspace_store_uri = workspace_utils.resolve_workspace_store_uri(tracking_uri=db_uri) super().__init__(db_uri, default_artifact_root) def _get_query(self, session, model): query = super()._get_query(session, model) workspace = self._get_active_workspace() if model is SqlExperiment: return query.filter(SqlExperiment.workspace == workspace) if model is SqlRun: return query.join( SqlExperiment, SqlExperiment.experiment_id == SqlRun.experiment_id ).filter(SqlExperiment.workspace == workspace) if model is SqlTraceInfo: return query.join( SqlExperiment, SqlTraceInfo.experiment_id == SqlExperiment.experiment_id ).filter(SqlExperiment.workspace == workspace) if model is SqlIssue: return query.join( SqlExperiment, SqlIssue.experiment_id == SqlExperiment.experiment_id ).filter(SqlExperiment.workspace == workspace) if model is SqlLoggedModel: workspace_experiment_ids = ( session.query(SqlExperiment.experiment_id) .filter(SqlExperiment.workspace == workspace) .subquery() ) return query.filter( SqlLoggedModel.experiment_id.in_(select(workspace_experiment_ids.c.experiment_id)) ) if model is SqlOnlineScoringConfig: return query.join( SqlExperiment, SqlOnlineScoringConfig.experiment_id == SqlExperiment.experiment_id ).filter(SqlExperiment.workspace == workspace) if model is SqlEvaluationDataset: return query.filter(SqlEvaluationDataset.workspace == workspace) if model in ( SqlGatewaySecret, SqlGatewayEndpoint, SqlGatewayModelDefinition, SqlGatewayBudgetPolicy, ): return query.filter(model.workspace == workspace) if model is SqlGatewayEndpointBinding: return self._filter_endpoint_binding_query(session, query) if model is SqlGatewayEndpointModelMapping: return query.join(SqlGatewayEndpoint).filter(SqlGatewayEndpoint.workspace == workspace) return query def _initialize_store_state(self): self._validate_artifact_root_configuration() self._ensure_default_workspace_experiment() def _validate_artifact_root_configuration(self) -> None: """ Validate the default artifact root is not configured with reserved path segments. This catches misconfiguration where the artifact root itself conflicts with the workspace artifact path structure (e.g., ends with 'workspaces' or is already scoped under 'workspaces/<name>'). """ if not self.artifact_root_uri: return segments = self._artifact_path_segments(self.artifact_root_uri.rstrip("/")) if segments and segments[-1] == WORKSPACES_DIR_NAME: raise MlflowException( f"Cannot enable workspace mode because the default artifact root " f"{self.artifact_root_uri} ends with the reserved '{WORKSPACES_DIR_NAME}' " f"segment. Choose a different artifact root before enabling workspaces.", error_code=INVALID_STATE, ) if len(segments) >= 2 and segments[-2] == WORKSPACES_DIR_NAME: raise MlflowException( f"Cannot enable workspace mode because the default artifact root " f"{self.artifact_root_uri} is already scoped under the reserved " f"'{WORKSPACES_DIR_NAME}/<name>' prefix. Configure a different artifact root " f"before enabling workspaces.", error_code=INVALID_STATE, ) def _trace_query(self, session, for_update_or_delete=False): if for_update_or_delete: workspace = self._get_active_workspace() workspace_experiment_ids = ( session.query(SqlExperiment.experiment_id) .filter(SqlExperiment.workspace == workspace) .subquery() ) return SqlAlchemyStore._get_query(self, session, SqlTraceInfo).filter( SqlTraceInfo.experiment_id.in_(select(workspace_experiment_ids.c.experiment_id)) ) return super()._trace_query(session, for_update_or_delete=False) def _experiment_where_clauses(self): return [SqlExperiment.workspace == self._get_active_workspace()] def _filter_experiment_ids(self, session, experiment_ids): workspace = self._get_active_workspace() rows = ( session.query(SqlExperiment.experiment_id) .filter( SqlExperiment.experiment_id.in_(experiment_ids), SqlExperiment.workspace == workspace, ) .all() ) return [row[0] for row in rows] def _filter_entity_ids( self, session, entity_type: EntityAssociationType, entity_ids: list[str] ): workspace = self._get_active_workspace() if not entity_ids: return [] def _rows_to_strings(rows): return [str(row[0]) for row in rows] if entity_type == EntityAssociationType.EXPERIMENT: rows = ( session.query(SqlExperiment.experiment_id) .filter( SqlExperiment.experiment_id.in_(entity_ids), SqlExperiment.workspace == workspace, ) .all() ) return _rows_to_strings(rows) if entity_type == EntityAssociationType.RUN: rows = ( session.query(SqlRun.run_uuid) .join(SqlExperiment, SqlRun.experiment_id == SqlExperiment.experiment_id) .filter(SqlRun.run_uuid.in_(entity_ids), SqlExperiment.workspace == workspace) .all() ) return _rows_to_strings(rows) if entity_type == EntityAssociationType.TRACE: rows = ( session.query(SqlTraceInfo.request_id) .join(SqlExperiment, SqlTraceInfo.experiment_id == SqlExperiment.experiment_id) .filter( SqlTraceInfo.request_id.in_(entity_ids), SqlExperiment.workspace == workspace, ) .all() ) return _rows_to_strings(rows) if entity_type == EntityAssociationType.EVALUATION_DATASET: rows = ( session.query(SqlEvaluationDataset.dataset_id) .filter( SqlEvaluationDataset.dataset_id.in_(entity_ids), SqlEvaluationDataset.workspace == workspace, ) .all() ) return _rows_to_strings(rows) return [] def _filter_association_query(self, session, query, target_type, id_column): """Filter entity associations to only include targets in the active workspace.""" workspace = self._get_active_workspace() if target_type == EntityAssociationType.EXPERIMENT: # Cast experiment_id to String to match the String type of # SqlEntityAssociation.destination_id. PostgreSQL requires explicit type # matching for IN comparisons. subquery = ( session.query( sql.cast(SqlExperiment.experiment_id, sqlalchemy.String).label("experiment_id") ) .filter(SqlExperiment.workspace == workspace) .subquery() ) id_source = subquery.c.experiment_id elif target_type == EntityAssociationType.RUN: subquery = ( session.query(SqlRun.run_uuid) .join(SqlExperiment, SqlRun.experiment_id == SqlExperiment.experiment_id) .filter(SqlExperiment.workspace == workspace) .subquery() ) id_source = subquery.c.run_uuid elif target_type == EntityAssociationType.TRACE: subquery = ( session.query(SqlTraceInfo.request_id) .join(SqlExperiment, SqlTraceInfo.experiment_id == SqlExperiment.experiment_id) .filter(SqlExperiment.workspace == workspace) .subquery() ) id_source = subquery.c.request_id elif target_type == EntityAssociationType.EVALUATION_DATASET: subquery = ( session.query(SqlEvaluationDataset.dataset_id) .filter(SqlEvaluationDataset.workspace == workspace) .subquery() ) id_source = subquery.c.dataset_id else: return query return query.filter(id_column.in_(select(id_source))) def _filter_endpoint_binding_query(self, session, query): endpoint_ids_subquery = ( self._get_query(session, SqlGatewayEndpoint) .with_entities(SqlGatewayEndpoint.endpoint_id) .subquery() ) return query.filter( SqlGatewayEndpointBinding.endpoint_id.in_(select(endpoint_ids_subquery.c.endpoint_id)) ) def _validate_run_accessible(self, session, run_id: str) -> None: workspace = self._get_active_workspace() exists_row = ( session.query(SqlRun.run_uuid) .filter(SqlRun.run_uuid == run_id) .filter( SqlRun.experiment_id.in_( session.query(SqlExperiment.experiment_id).filter( SqlExperiment.workspace == workspace ) ) ) .first() ) if exists_row is None: raise MlflowException( f"Run with id={run_id} not found", RESOURCE_DOES_NOT_EXIST, ) def _validate_trace_accessible(self, session, trace_id: str) -> None: workspace = self._get_active_workspace() exists_row = ( session.query(SqlTraceInfo.request_id) .filter(SqlTraceInfo.request_id == trace_id) .filter( SqlTraceInfo.experiment_id.in_( session.query(SqlExperiment.experiment_id).filter( SqlExperiment.workspace == workspace ) ) ) .first() ) if exists_row is None: raise MlflowException( f"Trace with ID '{trace_id}' not found.", RESOURCE_DOES_NOT_EXIST, ) def _validate_dataset_accessible(self, session, dataset_id: str) -> None: workspace = self._get_active_workspace() exists_row = ( session.query(SqlEvaluationDataset.dataset_id) .filter(SqlEvaluationDataset.dataset_id == dataset_id) .filter(SqlEvaluationDataset.workspace == workspace) .first() ) if exists_row is None: raise MlflowException( f"Dataset '{dataset_id}' not found.", RESOURCE_DOES_NOT_EXIST, ) def _get_sql_assessment(self, session, trace_id: str, assessment_id: str) -> SqlAssessments: trace_subquery = ( self._trace_query(session) .with_entities(SqlTraceInfo.request_id) .filter(SqlTraceInfo.request_id == trace_id) .subquery() ) sql_assessment = ( session.query(SqlAssessments) .join(trace_subquery, SqlAssessments.trace_id == trace_subquery.c.request_id) .filter(SqlAssessments.assessment_id == assessment_id) .one_or_none() ) if sql_assessment is None: trace_record = ( self._trace_query(session).filter(SqlTraceInfo.request_id == trace_id).one_or_none() ) if trace_record is None: raise MlflowException( f"Trace with ID '{trace_id}' not found.", RESOURCE_DOES_NOT_EXIST, ) raise MlflowException( f"Assessment with ID '{assessment_id}' not found for trace '{trace_id}'", RESOURCE_DOES_NOT_EXIST, ) return sql_assessment def _get_workspace_provider_instance(self): if self._workspace_provider is None: self._workspace_provider = get_workspace_store(workspace_uri=self._workspace_store_uri) return self._workspace_provider def _ensure_default_workspace_experiment(self) -> None: """ Ensure the default experiment exists in the provider's default workspace when enabled. """ provider = self._get_workspace_provider_instance() default_workspace, supports_default = get_default_workspace_optional(provider) if not supports_default: provider_name = ( type(self._workspace_provider).__name__ if self._workspace_provider else "unknown" ) _logger.warning( "Workspace provider %s does not expose a default workspace; " "skipping default experiment bootstrap.", provider_name, ) return if default_workspace is None: return with workspace_context.WorkspaceContext(default_workspace.name): if self.get_experiment_by_name(Experiment.DEFAULT_EXPERIMENT_NAME) is None: with self.ManagedSessionMaker() as session: self._create_default_experiment( session, workspace_override=default_workspace.name ) def _create_default_experiment(self, session, workspace_override: str | None = None): workspace = workspace_override or self._get_active_workspace() if workspace == DEFAULT_WORKSPACE_NAME: # Use the context to create the default experiment in the default workspace # in case the default workspace was a workspace override. It's important to keep the # default workspace experiment ID as 0 to allow a user to disable workspaces later. with workspace_context.WorkspaceContext(workspace): return super()._create_default_experiment(session) creation_time = get_current_time_millis() existing = ( session.query(SqlExperiment) .filter( SqlExperiment.name == Experiment.DEFAULT_EXPERIMENT_NAME, SqlExperiment.workspace == workspace, ) .one_or_none() ) if existing is not None: return experiment = SqlExperiment( name=Experiment.DEFAULT_EXPERIMENT_NAME, lifecycle_stage=LifecycleStage.ACTIVE, artifact_location=None, creation_time=creation_time, last_update_time=creation_time, workspace=workspace, ) session.add(experiment) try: session.flush() except IntegrityError as exc: session.rollback() _logger.debug( "Default experiment already exists for workspace '%s'; another worker likely " "created it. Swallowing IntegrityError: %s", workspace, exc, ) return if not experiment.artifact_location: experiment.artifact_location = self._get_artifact_location( experiment.experiment_id, workspace ) session.flush() def _get_artifact_location(self, experiment_id, workspace: str | None = None): workspace = workspace or self._get_active_workspace() provider = self._get_workspace_provider_instance() resolved_root, should_append = provider.resolve_artifact_root( self.artifact_root_uri, workspace ) if not resolved_root: raise MlflowException( f"Cannot determine an artifact root for workspace '{workspace}'. " "Set --default-artifact-root when starting the server or configure the " "workspace's default_artifact_root.", error_code=INVALID_PARAMETER_VALUE, ) if should_append: resolved_root = append_to_uri_path(resolved_root, WORKSPACES_DIR_NAME, workspace) return append_to_uri_path(resolved_root, str(experiment_id)) def create_experiment(self, name, artifact_location=None, tags=None): if artifact_location: raise MlflowException.invalid_parameter_value( "artifact_location cannot be specified when workspaces are enabled" ) return super().create_experiment(name, artifact_location=None, tags=tags)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/sqlalchemy_workspace_store.py", "license": "Apache License 2.0", "lines": 421, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/workspace/abstract_store.py
from __future__ import annotations import re from abc import ABC, abstractmethod from typing import Iterable from mlflow.entities import Workspace from mlflow.entities.workspace import WorkspaceDeletionMode from mlflow.exceptions import MlflowException # The workspace store can be backed by something other than the tracking store. For example, # Kubeflow integrations map MLflow workspaces onto Kubernetes namespaces and rely on a # workspace store plugin, so we keep this as a separate store rather than extending the # tracking store. class AbstractStore(ABC): """Interface for resolving and managing workspaces in the tracking server.""" @abstractmethod def list_workspaces(self) -> Iterable[Workspace]: """ Return the workspaces visible to the current request context. Implementations may inspect the request (e.g., for authN/Z context) to determine which workspaces to expose. """ @abstractmethod def get_workspace(self, workspace_name: str) -> Workspace: """ Gets a workspace by name and return its metadata. Implementations should raise ``MlflowException`` with ``RESOURCE_DOES_NOT_EXIST`` if the workspace cannot be found. """ def create_workspace(self, workspace: Workspace) -> Workspace: """Provision a new workspace. Raises ``NotImplementedError`` when the active provider is read-only. Implementations should raise ``MlflowException`` with ``RESOURCE_ALREADY_EXISTS`` when the workspace already exists or ``INVALID_PARAMETER_VALUE`` when validation fails. """ raise NotImplementedError def update_workspace(self, workspace: Workspace) -> Workspace: """Update metadata for an existing workspace.""" raise NotImplementedError def delete_workspace( self, workspace_name: str, mode: WorkspaceDeletionMode = WorkspaceDeletionMode.RESTRICT, ) -> None: """Delete an existing workspace. Args: workspace_name: Name of the workspace to delete. mode: Controls what happens to resources in the workspace: - SET_DEFAULT: Reassign resources to the default workspace. - CASCADE: Delete all resources in the workspace. - RESTRICT: Refuse if the workspace still contains resources. """ raise NotImplementedError def get_default_workspace(self) -> Workspace: """ Return the workspace to select when none is explicitly supplied. Implementations that require an explicit workspace should raise an ``MlflowException`` with ``INVALID_PARAMETER_VALUE``. """ raise NotImplementedError def resolve_artifact_root( self, default_artifact_root: str | None, workspace_name: str ) -> tuple[str | None, bool]: """ Allow a provider to customize artifact storage roots per workspace. Returns: A tuple ``(root, append_workspace_prefix)`` where ``root`` is the base artifact location to use for the workspace, and ``append_workspace_prefix`` controls whether MLflow should append the ``/workspaces/<workspace_name>`` suffix automatically. """ return default_artifact_root, True class WorkspaceNameValidator: """Validator for workspace names based on Kubernetes naming conventions.""" _PATTERN = r"^(?!.*--)[a-z0-9]([-a-z0-9]*[a-z0-9])?$" _MIN_LENGTH = 2 _MAX_LENGTH = 63 _RESERVED = {"workspaces", "api", "ajax-api", "static-files"} @classmethod def pattern(cls) -> str: return cls._PATTERN @classmethod def validate(cls, name: str) -> None: if not isinstance(name, str): raise MlflowException.invalid_parameter_value( f"Workspace name must be a string, got {type(name).__name__!s}." ) if not (cls._MIN_LENGTH <= len(name) <= cls._MAX_LENGTH): raise MlflowException.invalid_parameter_value( f"Workspace name '{name}' must be between {cls._MIN_LENGTH} and " f"{cls._MAX_LENGTH} characters." ) if not re.match(cls._PATTERN, name): raise MlflowException.invalid_parameter_value( f"Workspace name '{name}' must match the pattern {cls.pattern()} " "(lowercase alphanumeric with optional internal hyphens)." ) if name in cls._RESERVED: raise MlflowException.invalid_parameter_value( f"Workspace name '{name}' is reserved and cannot be used." )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace/abstract_store.py", "license": "Apache License 2.0", "lines": 99, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/workspace/dbmodels/models.py
from __future__ import annotations import sqlalchemy as sa from sqlalchemy import Column, String, Text from mlflow.entities.workspace import Workspace from mlflow.store.db.base_sql_model import Base class SqlWorkspace(Base): __tablename__ = "workspaces" # Workspace-aware tables intentionally do not declare SQL-level foreign keys to this model. # The WorkspaceProvider abstraction can source workspace metadata from providers outside this # database (e.g., Kubernetes namespaces), so referential integrity is enforced at the provider # layer rather than through the ORM schema. name = Column(String(63), nullable=False) description = Column(Text, nullable=True) default_artifact_root = Column(Text, nullable=True) __table_args__ = (sa.PrimaryKeyConstraint("name", name="workspaces_pk"),) def __repr__(self) -> str: # pragma: no cover return f"<SqlWorkspace ({self.name})>" def to_mlflow_entity(self) -> Workspace: return Workspace( name=self.name, description=self.description, default_artifact_root=self.default_artifact_root, )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace/dbmodels/models.py", "license": "Apache License 2.0", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/store/workspace/rest_store.py
from __future__ import annotations from urllib.parse import quote from mlflow.entities import Workspace from mlflow.entities.workspace import WorkspaceDeletionMode from mlflow.exceptions import MlflowException, RestException from mlflow.protos import databricks_pb2 from mlflow.protos.databricks_pb2 import INVALID_STATE, RESOURCE_ALREADY_EXISTS from mlflow.protos.service_pb2 import ( CreateWorkspace, DeleteWorkspace, GetWorkspace, ListWorkspaces, UpdateWorkspace, ) from mlflow.store.workspace.abstract_store import AbstractStore, WorkspaceNameValidator from mlflow.utils.proto_json_utils import message_to_json from mlflow.utils.rest_utils import call_endpoint from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME WORKSPACES_ENDPOINT = "/api/3.0/mlflow/workspaces" def _quote_workspace(workspace_name: str) -> str: if workspace_name != DEFAULT_WORKSPACE_NAME: WorkspaceNameValidator.validate(workspace_name) return quote(workspace_name, safe="") class RestWorkspaceStore(AbstractStore): """REST-backed workspace store implementation.""" def __init__(self, get_host_creds): self.get_host_creds = get_host_creds def _workspace_from_proto(self, proto) -> Workspace: if not proto.HasField("workspace"): raise MlflowException( "Workspace response payload was missing the 'workspace' field", INVALID_STATE, ) return Workspace.from_proto(proto.workspace) def list_workspaces(self) -> list[Workspace]: proto = call_endpoint( host_creds=self.get_host_creds(), endpoint=WORKSPACES_ENDPOINT, method="GET", json_body=None, response_proto=ListWorkspaces.Response(), ) return [Workspace.from_proto(ws) for ws in proto.workspaces] def get_workspace(self, workspace_name: str) -> Workspace: proto = call_endpoint( host_creds=self.get_host_creds(), endpoint=f"{WORKSPACES_ENDPOINT}/{_quote_workspace(workspace_name)}", method="GET", json_body=None, response_proto=GetWorkspace.Response(), ) return self._workspace_from_proto(proto) def create_workspace(self, workspace: Workspace) -> Workspace: WorkspaceNameValidator.validate(workspace.name) request_message = CreateWorkspace(name=workspace.name) if workspace.description is not None: request_message.description = workspace.description if workspace.default_artifact_root is not None: request_message.default_artifact_root = workspace.default_artifact_root try: proto = call_endpoint( host_creds=self.get_host_creds(), endpoint=WORKSPACES_ENDPOINT, method="POST", json_body=message_to_json(request_message), response_proto=CreateWorkspace.Response(), expected_status=201, ) except RestException as exc: if exc.error_code == databricks_pb2.ErrorCode.Name(RESOURCE_ALREADY_EXISTS): message = exc.message or f"Workspace '{workspace.name}' already exists." raise MlflowException(message, RESOURCE_ALREADY_EXISTS) from exc raise return self._workspace_from_proto(proto) def update_workspace(self, workspace: Workspace) -> Workspace: request_message = UpdateWorkspace() if workspace.description is not None: request_message.description = workspace.description if workspace.default_artifact_root is not None: request_message.default_artifact_root = workspace.default_artifact_root proto = call_endpoint( host_creds=self.get_host_creds(), endpoint=f"{WORKSPACES_ENDPOINT}/{_quote_workspace(workspace.name)}", method="PATCH", json_body=message_to_json(request_message), response_proto=UpdateWorkspace.Response(), ) return self._workspace_from_proto(proto) def delete_workspace( self, workspace_name: str, mode: WorkspaceDeletionMode = WorkspaceDeletionMode.RESTRICT, ) -> None: endpoint = f"{WORKSPACES_ENDPOINT}/{_quote_workspace(workspace_name)}" if mode != WorkspaceDeletionMode.RESTRICT: endpoint += f"?mode={mode.value}" call_endpoint( host_creds=self.get_host_creds(), endpoint=endpoint, method="DELETE", json_body=None, response_proto=DeleteWorkspace.Response(), expected_status=204, ) def get_default_workspace(self) -> Workspace: raise NotImplementedError( "REST workspace provider does not expose a default workspace; " "please specify a workspace explicitly or omit a workspace to leverage the server's " "configured default." )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace/rest_store.py", "license": "Apache License 2.0", "lines": 110, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/workspace/sqlalchemy_store.py
from __future__ import annotations import logging from threading import Lock from typing import Iterable from cachetools import TTLCache from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import sessionmaker from mlflow.entities.workspace import Workspace, WorkspaceDeletionMode from mlflow.environment_variables import ( MLFLOW_WORKSPACE_ARTIFACT_ROOT_CACHE_CAPACITY, MLFLOW_WORKSPACE_ARTIFACT_ROOT_CACHE_TTL_SECONDS, ) from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import ( INVALID_STATE, RESOURCE_ALREADY_EXISTS, RESOURCE_DOES_NOT_EXIST, ) from mlflow.store.model_registry.dbmodels.models import SqlRegisteredModel, SqlWebhook from mlflow.store.tracking.dbmodels.models import ( SqlEvaluationDataset, SqlExperiment, SqlGatewayBudgetPolicy, SqlGatewayEndpoint, SqlGatewayModelDefinition, SqlGatewaySecret, SqlJob, ) from mlflow.store.workspace.abstract_store import AbstractStore, WorkspaceNameValidator from mlflow.store.workspace.dbmodels import SqlWorkspace from mlflow.utils.uri import extract_db_type_from_uri from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME _logger = logging.getLogger(__name__) _CACHE_MISS = object() # Root workspace-aware ORM models whose workspace column must be handled before deleting a # workspace. SqlRegisteredModel is first because its onupdate="CASCADE" foreign keys # automatically propagate the change to model_versions, registered_model_tags, # model_version_tags, and registered_model_aliases. _WORKSPACE_ROOT_MODELS = [ SqlRegisteredModel, SqlExperiment, SqlEvaluationDataset, SqlWebhook, SqlGatewaySecret, SqlGatewayEndpoint, SqlGatewayModelDefinition, SqlGatewayBudgetPolicy, SqlJob, ] class SqlAlchemyStore(AbstractStore): """SQL-backed workspace store implementation.""" def __init__(self, db_uri: str): from mlflow.store.db import utils as db_utils self._workspace_uri = db_uri self._db_type = extract_db_type_from_uri(db_uri) self._engine = db_utils.create_sqlalchemy_engine_with_retry(db_uri) db_utils._safe_initialize_tables(self._engine) session_factory = sessionmaker(bind=self._engine) self.ManagedSessionMaker = db_utils._get_managed_session_maker( session_factory, self._db_type ) # Use a per-process TTL cache to reduce DB lookups; values converge via TTL expiration. self._artifact_root_cache: TTLCache[str, str | None] = TTLCache( maxsize=MLFLOW_WORKSPACE_ARTIFACT_ROOT_CACHE_CAPACITY.get(), ttl=MLFLOW_WORKSPACE_ARTIFACT_ROOT_CACHE_TTL_SECONDS.get(), ) self._artifact_root_cache_lock = Lock() def list_workspaces(self) -> Iterable[Workspace]: with self.ManagedSessionMaker() as session: rows = session.query(SqlWorkspace).order_by(SqlWorkspace.name.asc()).all() return [row.to_mlflow_entity() for row in rows] def get_workspace(self, workspace_name: str) -> Workspace: with self.ManagedSessionMaker() as session: workspace = self._get_workspace(session, workspace_name) return workspace.to_mlflow_entity() def create_workspace(self, workspace: Workspace) -> Workspace: WorkspaceNameValidator.validate(workspace.name) with self.ManagedSessionMaker() as session: try: entity = SqlWorkspace( name=workspace.name, description=workspace.description, default_artifact_root=workspace.default_artifact_root or None, ) session.add(entity) session.flush() workspace_entity = entity.to_mlflow_entity() except IntegrityError as exc: raise MlflowException( f"Workspace '{workspace.name}' already exists. Error: {exc}", RESOURCE_ALREADY_EXISTS, ) from exc # Only update cache after the transaction has successfully committed. with self._artifact_root_cache_lock: self._artifact_root_cache[workspace.name] = workspace_entity.default_artifact_root _logger.info("Created workspace '%s'", workspace.name) return workspace_entity def update_workspace(self, workspace: Workspace) -> Workspace: with self.ManagedSessionMaker() as session: entity = self._get_workspace(session, workspace.name) if workspace.description is not None: entity.description = workspace.description if workspace.default_artifact_root is not None: # If the default_artifact_root is an empty string, set it to None to "clear" the # value entity.default_artifact_root = workspace.default_artifact_root or None session.flush() _logger.info("Updated workspace '%s'", workspace.name) workspace_entity = entity.to_mlflow_entity() # Only update cache after the transaction has successfully committed. with self._artifact_root_cache_lock: self._artifact_root_cache[workspace.name] = workspace_entity.default_artifact_root return workspace_entity def delete_workspace( self, workspace_name: str, mode: WorkspaceDeletionMode = WorkspaceDeletionMode.RESTRICT, ) -> None: if workspace_name == DEFAULT_WORKSPACE_NAME: raise MlflowException( f"Cannot delete the reserved '{DEFAULT_WORKSPACE_NAME}' workspace", INVALID_STATE, ) with self.ManagedSessionMaker() as session: entity = self._get_workspace(session, workspace_name) try: if mode == WorkspaceDeletionMode.RESTRICT: for model in _WORKSPACE_ROOT_MODELS: count = ( session.query(model).filter(model.workspace == workspace_name).count() ) if count: raise MlflowException( f"Cannot delete workspace '{workspace_name}': table " f"'{model.__tablename__}' still contains {count} resource(s). " "Remove or reassign them before deleting the workspace.", INVALID_STATE, ) elif mode == WorkspaceDeletionMode.CASCADE: for model in _WORKSPACE_ROOT_MODELS: instances = ( session.query(model).filter(model.workspace == workspace_name).all() ) for obj in instances: session.delete(obj) elif mode == WorkspaceDeletionMode.SET_DEFAULT: self._check_set_default_conflicts(session, workspace_name) for model in _WORKSPACE_ROOT_MODELS: session.query(model).filter(model.workspace == workspace_name).update( {model.workspace: DEFAULT_WORKSPACE_NAME}, synchronize_session=False, ) else: raise MlflowException.invalid_parameter_value( f"Invalid workspace deletion mode {mode!r}. " "Expected one of: RESTRICT, CASCADE, SET_DEFAULT." ) session.delete(entity) except IntegrityError as exc: if mode == WorkspaceDeletionMode.SET_DEFAULT: message = ( f"Cannot delete workspace '{workspace_name}': resources in this workspace " f"conflict with existing resources in the '{DEFAULT_WORKSPACE_NAME}' " f"workspace. Resolve naming conflicts before deleting. Error: {exc}" ) else: message = ( f"Cannot delete workspace '{workspace_name}': deletion failed due to " f"database integrity constraints while operating in '{mode.value}' mode. " "This often indicates that related resources still reference this " f"workspace. Error: {exc}" ) raise MlflowException(message, INVALID_STATE) from exc _logger.info("Deleted workspace '%s' (mode=%s)", workspace_name, mode.value) if mode == WorkspaceDeletionMode.CASCADE: _logger.info( "Run 'mlflow gc --backend-store-uri %s' to permanently clean up " "artifacts associated with deleted resources.", self._workspace_uri, ) with self._artifact_root_cache_lock: self._artifact_root_cache.pop(workspace_name, None) def get_default_workspace(self) -> Workspace: return self.get_workspace(DEFAULT_WORKSPACE_NAME) def resolve_artifact_root( self, default_artifact_root: str | None, workspace_name: str ) -> tuple[str | None, bool]: with self._artifact_root_cache_lock: cached_value = self._artifact_root_cache.get(workspace_name, _CACHE_MISS) if cached_value is not _CACHE_MISS: if cached_value: return cached_value, False return default_artifact_root, True with self.ManagedSessionMaker() as session: workspace = session.get(SqlWorkspace, workspace_name) workspace_root = workspace.default_artifact_root if workspace else None with self._artifact_root_cache_lock: self._artifact_root_cache[workspace_name] = workspace_root if workspace_root: return workspace_root, False return default_artifact_root, True @staticmethod def _check_set_default_conflicts(session, workspace_name: str) -> None: """Preflight check: report all name conflicts that would arise from reassigning resources in *workspace_name* to the default workspace. """ conflicts: list[str] = [] for model in _WORKSPACE_ROOT_MODELS: if not hasattr(model, "name"): continue overlapping = ( session.query(model.name) .filter(model.workspace == workspace_name) .filter( model.name.in_( session.query(model.name).filter(model.workspace == DEFAULT_WORKSPACE_NAME) ) ) .all() ) for (name,) in overlapping: conflicts.append(f" - {model.__tablename__}: {name!r}") if conflicts: details = "\n".join(conflicts) raise MlflowException( f"Cannot reassign resources from workspace '{workspace_name}' to " f"'{DEFAULT_WORKSPACE_NAME}': the following names already exist in the " f"default workspace and would cause conflicts:\n{details}\n" "Rename or remove the conflicting resources before retrying.", INVALID_STATE, ) def _get_workspace(self, session, workspace_name: str) -> SqlWorkspace: workspace = session.get(SqlWorkspace, workspace_name) if workspace is None: raise MlflowException( f"Workspace '{workspace_name}' not found", RESOURCE_DOES_NOT_EXIST, ) return workspace
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace/sqlalchemy_store.py", "license": "Apache License 2.0", "lines": 239, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/workspace/utils.py
from __future__ import annotations import logging from mlflow.entities import Workspace from mlflow.protos import databricks_pb2 _INVALID_PARAMETER_VALUE_CODE = databricks_pb2.INVALID_PARAMETER_VALUE _INVALID_PARAMETER_VALUE_NAME = databricks_pb2.ErrorCode.Name(_INVALID_PARAMETER_VALUE_CODE) _logger = logging.getLogger(__name__) def get_default_workspace_optional(workspace_store) -> tuple[Workspace | None, bool]: """ Attempt to resolve a default workspace from the provider without bubbling opt-out errors. Providers can signal that default workspace resolution is unsupported by raising ``NotImplementedError``. This helper normalizes that case and returns ``(None, False)`` so callers can decide how to proceed. Args: workspace_store: Workspace store exposing ``get_default_workspace``. Returns: Tuple of (workspace or None, supports_default_workspace flag). """ if workspace_store is None: return None, False provider_name = type(workspace_store).__name__ try: workspace = workspace_store.get_default_workspace() except NotImplementedError: _logger.debug( "Workspace provider %s does not implement default workspace resolution", provider_name, ) return None, False return workspace, True
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace/utils.py", "license": "Apache License 2.0", "lines": 30, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/store/workspace_aware_mixin.py
""" Mixin class providing common workspace functionality for stores. """ from __future__ import annotations from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.utils.workspace_context import get_request_workspace from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME class WorkspaceAwareMixin: """ Mixin providing common workspace-aware functionality for SQLAlchemy stores. Classes using this mixin must have a ManagedSessionMaker attribute. """ @property def supports_workspaces(self) -> bool: """Indicates this store supports workspace isolation.""" return True @staticmethod def _workspaces_enabled() -> bool: """Check if workspaces are enabled via environment variable.""" return MLFLOW_ENABLE_WORKSPACES.get() @classmethod def _get_active_workspace(cls) -> str: """ Get the active workspace name. When workspaces are disabled, returns DEFAULT_WORKSPACE_NAME for backward compatibility. When workspaces are enabled, requires an explicit workspace context to be set. Flask and FastAPI middlewares resolve (and set) the default workspace whenever the workspace provider supports it, so stores can rely on this check to enforce isolation. Returns: The active workspace name. Raises: MlflowException: If workspaces are enabled but no workspace context is set. """ if not cls._workspaces_enabled(): return DEFAULT_WORKSPACE_NAME if workspace := get_request_workspace(): return workspace raise MlflowException.invalid_parameter_value( "Active workspace is required. Configure a default workspace or call " "mlflow.set_workspace() before interacting with the store." ) def _with_workspace_field(self, instance): """ Populate workspace field from active workspace context. """ if hasattr(instance, "workspace"): instance.workspace = self._get_active_workspace() return instance
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace_aware_mixin.py", "license": "Apache License 2.0", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/store/workspace_rest_store_mixin.py
from __future__ import annotations from mlflow.exceptions import MlflowException from mlflow.protos import databricks_pb2 from mlflow.utils.rest_utils import http_request from mlflow.utils.uri import is_databricks_uri from mlflow.utils.workspace_context import get_request_workspace class WorkspaceRestStoreMixin: """ Shared workspace capability detection for REST-based stores. """ _SERVER_INFO_ENDPOINT = "/api/3.0/mlflow/server-info" _WORKSPACE_UNSUPPORTED_ERROR = ( "Active workspace '{workspace}' cannot be used because the remote server does not " "support workspaces. Restart the server with --enable-workspaces or unset the active " "workspace." ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._workspace_support: bool | None = None @property def supports_workspaces(self) -> bool: if self._workspace_support is not None: return self._workspace_support host_creds = self.get_host_creds() store_uri = getattr(host_creds, "host", None) if store_uri and is_databricks_uri(store_uri): self._workspace_support = False return False supported = self._probe_workspace_support() self._workspace_support = supported return supported def _validate_workspace_support_if_specified(self) -> None: """ Raise an error if a workspace is active but the server doesn't support workspaces. """ workspace = get_request_workspace() if workspace is None: return if not self.supports_workspaces: raise MlflowException( self._WORKSPACE_UNSUPPORTED_ERROR.format(workspace=workspace), error_code=databricks_pb2.FEATURE_DISABLED, ) def _probe_workspace_support(self) -> bool: host_creds = self.get_host_creds() try: response = http_request( host_creds=host_creds, endpoint=self._SERVER_INFO_ENDPOINT, method="GET", timeout=3, max_retries=0, raise_on_status=False, ) except Exception as exc: # pragma: no cover - network errors vary raise MlflowException( message=f"Failed to query {self._SERVER_INFO_ENDPOINT}: {exc}", error_code=databricks_pb2.INTERNAL_ERROR, ) from exc if response.status_code == 404: # This is expected for older servers that don't have the server-info endpoint. return False if response.status_code != 200: raise MlflowException( message=( f"Failed to query {self._SERVER_INFO_ENDPOINT}: " f"{response.status_code} {response.text}" ), error_code=databricks_pb2.TEMPORARILY_UNAVAILABLE, ) return response.json().get("workspaces_enabled", False)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/workspace_rest_store_mixin.py", "license": "Apache License 2.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/tracking/_workspace/client.py
from __future__ import annotations from mlflow.entities.workspace import Workspace, WorkspaceDeletionMode from mlflow.tracking._workspace.registry import get_workspace_store class WorkspaceProviderClient: """ Client that exposes workspace CRUD operations via the configured provider. The provider is resolved based on the workspace URI scheme (for example ``sqlite`` or ``mysql``). This mirrors the scheme-based resolution used by tracking and model registry stores. """ def __init__(self, workspace_uri: str): self._workspace_uri = workspace_uri self._store = None # Eagerly validate configuration to surface errors early. self.store @property def store(self): if self._store is None: self._store = get_workspace_store(workspace_uri=self._workspace_uri) return self._store def list_workspaces(self) -> list[Workspace]: return list(self.store.list_workspaces()) def create_workspace( self, name: str, description: str | None = None, default_artifact_root: str | None = None, ) -> Workspace: """Create a new workspace. Args: name: The workspace name (lowercase alphanumeric with optional internal hyphens). description: Optional description of the workspace. default_artifact_root: Optional artifact root URI; falls back to server default. Returns: The newly created workspace. """ return self.store.create_workspace( Workspace( name=name, description=description, default_artifact_root=default_artifact_root, ) ) def get_workspace(self, name: str) -> Workspace: return self.store.get_workspace(name) def update_workspace( self, name: str, description: str | None = None, default_artifact_root: str | None = None, ) -> Workspace: """Update metadata for an existing workspace. Args: name: The name of the workspace to update. description: New description, or ``None`` to leave unchanged. default_artifact_root: New artifact root URI, empty string to clear, or ``None``. Returns: The updated workspace. """ return self.store.update_workspace( Workspace( name=name, description=description, default_artifact_root=default_artifact_root, ) ) def delete_workspace( self, name: str, mode: WorkspaceDeletionMode = WorkspaceDeletionMode.RESTRICT, ) -> None: self.store.delete_workspace(name, mode=mode)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracking/_workspace/client.py", "license": "Apache License 2.0", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/tracking/_workspace/fluent.py
from __future__ import annotations import threading from typing import Callable, TypeVar from mlflow.entities.workspace import Workspace, WorkspaceDeletionMode from mlflow.exceptions import MlflowException, RestException from mlflow.protos import databricks_pb2 from mlflow.protos.databricks_pb2 import FEATURE_DISABLED from mlflow.store.workspace.abstract_store import WorkspaceNameValidator from mlflow.tracking.client import MlflowClient from mlflow.utils.annotations import experimental from mlflow.utils.workspace_context import set_workspace as set_context_workspace from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME T = TypeVar("T") _workspace_lock = threading.Lock() def _workspace_client_call(func: Callable[[MlflowClient], T]) -> T: client = MlflowClient() try: return func(client) except RestException as exc: if exc.error_code == databricks_pb2.ErrorCode.Name(databricks_pb2.ENDPOINT_NOT_FOUND): raise MlflowException( "The configured tracking server does not expose workspace APIs. " "Ensure workspace is enabled.", error_code=FEATURE_DISABLED, ) from exc raise @experimental(version="3.10.0") def set_workspace(workspace: str | None) -> None: """Set the active workspace for subsequent MLflow operations.""" with _workspace_lock: if workspace is None: set_context_workspace(None) return if workspace != DEFAULT_WORKSPACE_NAME: WorkspaceNameValidator.validate(workspace) set_context_workspace(workspace) @experimental(version="3.10.0") def list_workspaces() -> list[Workspace]: """Return the list of workspaces available to the current user.""" return _workspace_client_call(lambda client: client.list_workspaces()) @experimental(version="3.10.0") def get_workspace(name: str) -> Workspace: """Return metadata for the specified workspace.""" return _workspace_client_call(lambda client: client.get_workspace(name)) @experimental(version="3.10.0") def create_workspace( name: str, description: str | None = None, default_artifact_root: str | None = None ) -> Workspace: """Create a new workspace. Args: name: The workspace name (lowercase alphanumeric with optional internal hyphens). description: Optional description of the workspace. default_artifact_root: Optional artifact root URI; falls back to server default. Returns: The newly created workspace. Raises: MlflowException: If the name is invalid, already exists, or no artifact root available. """ WorkspaceNameValidator.validate(name) return _workspace_client_call( lambda client: client.create_workspace( name=name, description=description, default_artifact_root=default_artifact_root, ) ) @experimental(version="3.10.0") def update_workspace( name: str, description: str | None = None, default_artifact_root: str | None = None ) -> Workspace: """Update metadata for an existing workspace. Args: name: The name of the workspace to update. description: New description, or ``None`` to leave unchanged. default_artifact_root: New artifact root URI, empty string to clear, or ``None``. Returns: The updated workspace. Raises: MlflowException: If the workspace does not exist or no artifact root available. """ if name != DEFAULT_WORKSPACE_NAME: WorkspaceNameValidator.validate(name) return _workspace_client_call( lambda client: client.update_workspace( name=name, description=description, default_artifact_root=default_artifact_root, ) ) @experimental(version="3.10.0") def delete_workspace(name: str, *, mode: str = WorkspaceDeletionMode.RESTRICT) -> None: """Delete an existing workspace. Args: name: Name of the workspace to delete. mode: Deletion mode. One of SET_DEFAULT, CASCADE, or RESTRICT. """ try: deletion_mode = WorkspaceDeletionMode(mode) except ValueError: raise MlflowException.invalid_parameter_value( f"Invalid deletion mode '{mode}'. " f"Must be one of: {', '.join(m.value for m in WorkspaceDeletionMode)}" ) if name != DEFAULT_WORKSPACE_NAME: WorkspaceNameValidator.validate(name) _workspace_client_call(lambda client: client.delete_workspace(name=name, mode=deletion_mode)) __all__ = [ "Workspace", "set_workspace", "list_workspaces", "get_workspace", "create_workspace", "update_workspace", "delete_workspace", ]
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracking/_workspace/fluent.py", "license": "Apache License 2.0", "lines": 115, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/tracking/_workspace/registry.py
from __future__ import annotations import threading import warnings from functools import lru_cache, partial from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE from mlflow.store.db.db_types import DATABASE_ENGINES from mlflow.tracking.registry import StoreRegistry from mlflow.utils.credentials import get_default_host_creds from mlflow.utils.plugins import get_entry_points _WORKSPACE_PROVIDER_ENTRYPOINT_GROUP = "mlflow.workspace_provider" _building_workspace_store_lock = threading.RLock() class UnsupportedWorkspaceStoreURIException(MlflowException): """Exception thrown when building a workspace store with an unsupported URI.""" def __init__(self, unsupported_uri, supported_uri_schemes): message = ( "Workspace functionality is unavailable; got unsupported URI " f"'{unsupported_uri}' for the workspace backend. Supported URI schemes are: " f"{supported_uri_schemes}. See the workspace configuration guide for instructions." ) super().__init__(message, error_code=INVALID_PARAMETER_VALUE) self.supported_uri_schemes = supported_uri_schemes class WorkspaceStoreRegistry(StoreRegistry): """Scheme-based registry for workspace store implementations.""" def __init__(self): super().__init__(_WORKSPACE_PROVIDER_ENTRYPOINT_GROUP) def register_entrypoints(self): """Register workspace stores provided by other packages.""" for entrypoint in get_entry_points(self.group_name): try: self.register(entrypoint.name, entrypoint.load()) except (AttributeError, ImportError) as exc: warnings.warn( f"Failure attempting to register workspace provider '{entrypoint.name}': {exc}", stacklevel=2, ) def get_store(self, workspace_uri: str): """Return a workspace store instance for the provided URI.""" return self._get_store_with_resolved_uri(workspace_uri) @lru_cache(maxsize=100) def _get_store_with_resolved_uri(self, workspace_uri: str): with _building_workspace_store_lock: try: builder = self.get_store_builder(workspace_uri) except MlflowException as exc: raise UnsupportedWorkspaceStoreURIException( unsupported_uri=workspace_uri, supported_uri_schemes=list(self._registry.keys()), ) from exc return builder(workspace_uri=workspace_uri) _workspace_store_registry: WorkspaceStoreRegistry | None = None def _get_workspace_store_registry() -> WorkspaceStoreRegistry: global _workspace_store_registry if _workspace_store_registry is None: with _building_workspace_store_lock: # Double-check to avoid redundant initialization when multiple threads race. if _workspace_store_registry is None: registry = WorkspaceStoreRegistry() _register_default_workspace_stores(registry) registry.register_entrypoints() _workspace_store_registry = registry return _workspace_store_registry def _get_sqlalchemy_workspace_store(workspace_uri: str): from mlflow.store.workspace.sqlalchemy_store import SqlAlchemyStore return SqlAlchemyStore(workspace_uri) def _get_rest_workspace_store(workspace_uri: str): from mlflow.store.workspace.rest_store import RestWorkspaceStore return RestWorkspaceStore(partial(get_default_host_creds, workspace_uri)) def _register_default_workspace_stores(registry: WorkspaceStoreRegistry) -> None: # Register SQLAlchemy builder for common database schemes. for scheme in DATABASE_ENGINES: registry.register(scheme, _get_sqlalchemy_workspace_store) # Register REST-based workspace stores. for scheme in ["http", "https"]: registry.register(scheme, _get_rest_workspace_store) def get_workspace_store(workspace_uri: str): """ Return a workspace store for the specified workspace URI. Args: workspace_uri: Workspace backend URI. """ return _get_workspace_store_registry().get_store(workspace_uri=workspace_uri)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracking/_workspace/registry.py", "license": "Apache License 2.0", "lines": 82, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/utils/workspace_context.py
from __future__ import annotations from contextvars import ContextVar, Token from mlflow.environment_variables import MLFLOW_WORKSPACE from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME _WORKSPACE: ContextVar[str | None] = ContextVar("mlflow_active_workspace", default=None) _IS_WORKSPACE_RESOLVED: ContextVar[bool] = ContextVar("mlflow_workspace_resolved", default=False) def get_request_workspace() -> str | None: """ Return the active workspace for the current execution context. Resolution order: 1) Request-scoped ContextVar (set by server middleware or explicit setters). 2) ``MLFLOW_WORKSPACE`` environment variable (client-side fallback, including threads). """ if workspace := (_WORKSPACE.get() or "").strip(): return workspace if env_workspace := (MLFLOW_WORKSPACE.get() or "").strip(): return env_workspace return None def is_request_workspace_resolved() -> bool: """Return whether the server resolved the request workspace.""" return _IS_WORKSPACE_RESOLVED.get() def _validate_workspace(workspace: str | None) -> None: if workspace is not None and workspace != DEFAULT_WORKSPACE_NAME: from mlflow.store.workspace.abstract_store import WorkspaceNameValidator WorkspaceNameValidator.validate(workspace) def set_server_request_workspace(workspace: str | None) -> Token[str | None]: """ Server-only setter: bind the workspace to the request ContextVar without touching env. """ _validate_workspace(workspace) _IS_WORKSPACE_RESOLVED.set(True) return _WORKSPACE.set(workspace) def set_workspace(workspace: str | None) -> Token[str | None]: """ Client setter: binds the workspace to the current thread and persists to env so child threads inherit it. """ _validate_workspace(workspace) token = _WORKSPACE.set(workspace) if workspace is None: MLFLOW_WORKSPACE.unset() else: MLFLOW_WORKSPACE.set(workspace) return token def clear_server_request_workspace() -> None: """Clear the request-scoped ContextVar (does not touch the client env).""" _IS_WORKSPACE_RESOLVED.set(False) _WORKSPACE.set(None) class WorkspaceContext: """ Context manager that sets the client workspace (ContextVar + env) for the duration of the block. Restores the previous env value on exit. """ def __init__(self, workspace: str | None): self._workspace = workspace self._token: Token[str | None] | None = None self._prev_env_raw: str | None = None def __enter__(self) -> str | None: self._prev_env_raw = MLFLOW_WORKSPACE.get_raw() self._token = set_workspace(self._workspace) return self._workspace def __exit__(self, exc_type, exc, tb) -> None: if self._token is not None: _WORKSPACE.reset(self._token) self._token = None if self._prev_env_raw is None: MLFLOW_WORKSPACE.unset() else: MLFLOW_WORKSPACE.set(self._prev_env_raw) self._prev_env_raw = None
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/utils/workspace_context.py", "license": "Apache License 2.0", "lines": 70, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/utils/workspace_utils.py
from __future__ import annotations from mlflow.environment_variables import MLFLOW_WORKSPACE, MLFLOW_WORKSPACE_STORE_URI _workspace_store_uri: str | None = None DEFAULT_WORKSPACE_NAME = "default" WORKSPACES_DIR_NAME = "workspaces" WORKSPACE_HEADER_NAME = "X-MLFLOW-WORKSPACE" def _normalize_workspace(workspace: str | None) -> str | None: """Normalize a workspace identifier. Args: workspace: Raw workspace value, possibly ``None`` or whitespace padded. Returns: The trimmed workspace string or ``None`` when the input is empty. """ if workspace is None: return None value = workspace.strip() return value or None def resolve_entity_workspace_name(workspace: str | None) -> str: """Determine the workspace to associate with client-facing entities. Preference order: 1. Explicit ``workspace`` argument provided by the backend store. 2. Active workspace bound via ``mlflow.set_workspace``. 3. ``MLFLOW_WORKSPACE`` environment variable. 4. ``DEFAULT_WORKSPACE_NAME``. Args: workspace: Optional workspace name provided by the store layer. Returns: A normalized workspace name honoring the preference order. """ if candidate := _normalize_workspace(workspace): return candidate from mlflow.utils.workspace_context import get_request_workspace if candidate := _normalize_workspace(get_request_workspace()): return candidate if candidate := _normalize_workspace(MLFLOW_WORKSPACE.get()): return candidate return DEFAULT_WORKSPACE_NAME def set_workspace_store_uri(uri: str | None) -> None: """Set the global workspace provider URI override. Args: uri: URI of the workspace provider or ``None`` to clear the override. """ global _workspace_store_uri _workspace_store_uri = uri if uri is None: MLFLOW_WORKSPACE_STORE_URI.unset() else: MLFLOW_WORKSPACE_STORE_URI.set(uri) def resolve_workspace_store_uri( workspace_store_uri: str | None = None, tracking_uri: str | None = None ) -> str | None: """Resolve the workspace provider URI according to precedence rules. Args: workspace_store_uri: URI provided explicitly by the caller. tracking_uri: Tracking URI used as the final fallback. Returns: The workspace provider URI chosen in this order: 1. Explicit ``workspace_store_uri`` argument. 2. Value configured via :func:`set_workspace_store_uri` or ``MLFLOW_WORKSPACE_STORE_URI``. 3. The resolved tracking URI. """ if workspace_store_uri is not None: return workspace_store_uri if configured_uri := get_workspace_store_uri(): return configured_uri # Lazy import to avoid circular dependency during module import. from mlflow.tracking._tracking_service import utils as tracking_utils return tracking_utils._resolve_tracking_uri(tracking_uri) def get_workspace_store_uri() -> str | None: """Get the current workspace provider URI override, if any. Returns: The globally configured workspace provider URI, or ``None``. """ return _workspace_store_uri or MLFLOW_WORKSPACE_STORE_URI.get() __all__ = [ "DEFAULT_WORKSPACE_NAME", "WORKSPACES_DIR_NAME", "WORKSPACE_HEADER_NAME", "resolve_entity_workspace_name", "set_workspace_store_uri", "get_workspace_store_uri", ]
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/utils/workspace_utils.py", "license": "Apache License 2.0", "lines": 83, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:tests/db/test_workspace_migration.py
import os import re from contextlib import contextmanager import pytest import sqlalchemy as sa from alembic import command from mlflow.store.db.utils import _get_alembic_config from mlflow.store.tracking.dbmodels.initial_models import Base as InitialBase _LEGACY_REGISTERED_MODEL_TAGS = sa.table( "registered_model_tags", sa.column("key"), sa.column("value"), sa.column("name"), ) _LEGACY_MODEL_VERSION_TAGS = sa.table( "model_version_tags", sa.column("key"), sa.column("value"), sa.column("name"), sa.column("version"), ) _LEGACY_REGISTERED_MODEL_ALIASES = sa.table( "registered_model_aliases", sa.column("alias"), sa.column("version"), sa.column("name"), ) _LEGACY_EVALUATION_DATASETS = sa.table( "evaluation_datasets", sa.column("dataset_id"), sa.column("name"), sa.column("schema"), sa.column("profile"), sa.column("digest"), sa.column("created_time"), sa.column("last_update_time"), sa.column("created_by"), sa.column("last_updated_by"), ) _LEGACY_SECRETS = sa.table( "secrets", sa.column("secret_id"), sa.column("secret_name"), sa.column("encrypted_value"), sa.column("wrapped_dek"), sa.column("kek_version"), sa.column("masked_value"), sa.column("provider"), sa.column("auth_config"), sa.column("description"), sa.column("created_by"), sa.column("created_at"), sa.column("last_updated_by"), sa.column("last_updated_at"), ) _LEGACY_ENDPOINTS = sa.table( "endpoints", sa.column("endpoint_id"), sa.column("name"), sa.column("created_by"), sa.column("created_at"), sa.column("last_updated_by"), sa.column("last_updated_at"), ) _LEGACY_MODEL_DEFINITIONS = sa.table( "model_definitions", sa.column("model_definition_id"), sa.column("name"), sa.column("secret_id"), sa.column("provider"), sa.column("model_name"), sa.column("created_by"), sa.column("created_at"), sa.column("last_updated_by"), sa.column("last_updated_at"), ) _WORKSPACE_TABLES = ( "experiments", "registered_models", "model_versions", "registered_model_tags", "model_version_tags", "registered_model_aliases", "evaluation_datasets", "secrets", "endpoints", "model_definitions", "webhooks", "jobs", ) _REGISTERED_MODEL_TAGS = sa.table( "registered_model_tags", sa.column("workspace"), sa.column("key"), sa.column("value"), sa.column("name"), ) _MODEL_VERSION_TAGS = sa.table( "model_version_tags", sa.column("workspace"), sa.column("key"), sa.column("value"), sa.column("name"), sa.column("version"), ) _REGISTERED_MODEL_ALIASES = sa.table( "registered_model_aliases", sa.column("workspace"), sa.column("name"), sa.column("alias"), sa.column("version"), ) _EVALUATION_DATASETS = sa.table( "evaluation_datasets", sa.column("dataset_id"), sa.column("name"), sa.column("schema"), sa.column("profile"), sa.column("digest"), sa.column("created_time"), sa.column("last_update_time"), sa.column("created_by"), sa.column("last_updated_by"), sa.column("workspace"), ) REVISION = "1b5f0d9ad7c1" PREVIOUS_REVISION = "c8d9e0f1a2b3" DB_URI = os.environ.get("MLFLOW_TRACKING_URI") USE_EXTERNAL_DB = DB_URI is not None and not DB_URI.startswith("sqlite") @pytest.fixture(scope="session", autouse=True) def _upgrade_external_db_to_head_after_suite(): """ When running under Docker (i.e., with a shared external DB), make sure the DB ends up on the latest revision once this module finishes. Individual tests intentionally downgrade to the pre-workspace schema, so without this hook, the subsequent suites in the database workflow would run against an outdated schema after new migrations land. """ yield if USE_EXTERNAL_DB: config = _get_alembic_config(DB_URI) command.upgrade(config, "head") @contextmanager def _identity_insert(conn, table_name: str): if conn.dialect.name != "mssql": yield return conn.execute(sa.text(f"SET IDENTITY_INSERT {table_name} ON")) try: yield finally: conn.execute(sa.text(f"SET IDENTITY_INSERT {table_name} OFF")) def _insert_table_row(conn, table, **values): conn.execute(sa.insert(table).values(**values)) def _assert_workspace_column(inspector, table_name: str, expected_default: str): columns = inspector.get_columns(table_name) workspace = next((col for col in columns if col["name"] == "workspace"), None) assert workspace is not None, f"{table_name} lacks workspace column" assert not workspace.get("nullable", False) default_value = _get_workspace_default(workspace) assert default_value == expected_default def _assert_workspace_columns(inspector, expected_default: str = "default"): for table in _WORKSPACE_TABLES: _assert_workspace_column(inspector, table, expected_default) def _has_index(inspector, table: str, index_name: str, columns: list[str]): indexes = inspector.get_indexes(table) return any( index["name"] == index_name and index.get("column_names") == columns for index in indexes ) def _prepare_database(tmp_path): if USE_EXTERNAL_DB: engine = sa.create_engine(DB_URI) with engine.begin() as conn: metadata = sa.MetaData() metadata.reflect(bind=conn) metadata.drop_all(bind=conn) InitialBase.metadata.create_all(conn) config = _get_alembic_config(DB_URI) else: db_path = tmp_path / "workspace_migration.sqlite" url = f"sqlite:///{db_path}" engine = sa.create_engine(url) InitialBase.metadata.create_all(engine) config = _get_alembic_config(url) command.upgrade(config, PREVIOUS_REVISION) return engine, config def _seed_pre_workspace_entities(conn): # This intentionally uses raw SQL matching the legacy schema (no workspace columns) # so the migration under test is fully responsible for adding/backfilling the new # fields. The helper insert functions below operate on the post-migration schema # and therefore cannot be reused here. with _identity_insert(conn, "experiments"): conn.execute( sa.text( """ INSERT INTO experiments ( experiment_id, name, artifact_location, lifecycle_stage, creation_time, last_update_time ) VALUES ( :experiment_id, :name, :artifact_location, :lifecycle_stage, :creation_time, :last_update_time ) """ ), { "experiment_id": 1, "name": "exp-default", "artifact_location": "path", "lifecycle_stage": "active", "creation_time": 0, "last_update_time": 0, }, ) conn.execute( sa.text( """ INSERT INTO runs ( run_uuid, name, source_type, source_name, entry_point_name, user_id, status, start_time, end_time, source_version, lifecycle_stage, artifact_uri, experiment_id ) VALUES ( :run_uuid, :name, :source_type, :source_name, :entry_point_name, :user_id, :status, :start_time, :end_time, :source_version, :lifecycle_stage, :artifact_uri, :experiment_id ) """ ), { "run_uuid": "run-default", "name": "upgrade-validation-run", "source_type": "LOCAL", "source_name": "script.py", "entry_point_name": "main", "user_id": "user", "status": "FINISHED", "start_time": 0, "end_time": 1, "source_version": "abc123", "lifecycle_stage": "active", "artifact_uri": "path/artifacts", "experiment_id": 1, }, ) conn.execute( sa.text( """ INSERT INTO registered_models (name, creation_time, last_updated_time, description) VALUES (:name, :creation_time, :last_updated_time, :description) """ ), {"name": "rm-default", "creation_time": 0, "last_updated_time": 0, "description": "desc"}, ) conn.execute( sa.text( """ INSERT INTO model_versions ( name, version, creation_time, last_updated_time, user_id, current_stage, description, source, run_id, status, status_message, run_link, storage_location ) VALUES ( :name, :version, :creation_time, :last_updated_time, :user_id, :current_stage, :description, :source, :run_id, :status, :status_message, :run_link, :storage_location ) """ ), { "name": "rm-default", "version": 1, "creation_time": 0, "last_updated_time": 0, "user_id": "user", "current_stage": "None", "description": "desc", "source": "source", "run_id": "run-id", "status": "READY", "status_message": "message", "run_link": "link", "storage_location": "location", }, ) _insert_table_row( conn, _LEGACY_REGISTERED_MODEL_TAGS, key="tag", value="value", name="rm-default", ) _insert_table_row( conn, _LEGACY_MODEL_VERSION_TAGS, key="tag", value="value", name="rm-default", version=1, ) _insert_table_row( conn, _LEGACY_REGISTERED_MODEL_ALIASES, alias="alias", version=1, name="rm-default", ) _insert_table_row( conn, _LEGACY_EVALUATION_DATASETS, dataset_id="ds-default", name="Dataset", schema="schema", profile="profile", digest="digest", created_time=0, last_update_time=0, created_by="user", last_updated_by="user", ) def _get_workspace_default(column_info): default = column_info.get("default") or column_info.get("server_default") if default is None: return None value = str(default).strip() if value.startswith("(") and value.endswith(")"): value = value[1:-1] value = value.strip() value = value.strip("'\"") if "::" in value: value = value.split("::", 1)[0] return value.strip("'\"") def _add_workspace(conn, name: str, description: str): conn.execute( sa.text("INSERT INTO workspaces (name, description) VALUES (:name, :description)"), {"name": name, "description": description}, ) def _insert_experiment( conn, *, experiment_id: int, name: str, workspace: str, artifact_location: str = "path", lifecycle_stage: str = "active", ): with _identity_insert(conn, "experiments"): conn.execute( sa.text( """ INSERT INTO experiments ( experiment_id, name, artifact_location, lifecycle_stage, creation_time, last_update_time, workspace ) VALUES ( :experiment_id, :name, :artifact_location, :lifecycle_stage, :creation_time, :last_update_time, :workspace ) """ ), { "experiment_id": experiment_id, "name": name, "artifact_location": artifact_location, "lifecycle_stage": lifecycle_stage, "creation_time": 0, "last_update_time": 0, "workspace": workspace, }, ) def _insert_run( conn, *, run_uuid: str, experiment_id: int, name: str = "run", artifact_uri: str = "path/artifacts", ): conn.execute( sa.text( """ INSERT INTO runs ( run_uuid, name, source_type, source_name, entry_point_name, user_id, status, start_time, end_time, source_version, lifecycle_stage, artifact_uri, experiment_id ) VALUES ( :run_uuid, :name, :source_type, :source_name, :entry_point_name, :user_id, :status, :start_time, :end_time, :source_version, :lifecycle_stage, :artifact_uri, :experiment_id ) """ ), { "run_uuid": run_uuid, "name": name, "source_type": "LOCAL", "source_name": "script.py", "entry_point_name": "main", "user_id": "user", "status": "FINISHED", "start_time": 0, "end_time": 1, "source_version": "abc123", "lifecycle_stage": "active", "artifact_uri": artifact_uri, "experiment_id": experiment_id, }, ) def _insert_registered_model( conn, *, name: str, workspace: str, description: str = "desc", creation_time: int = 0, ): conn.execute( sa.text( """ INSERT INTO registered_models ( name, creation_time, last_updated_time, description, workspace ) VALUES ( :name, :creation_time, :last_updated_time, :description, :workspace ) """ ), { "name": name, "creation_time": creation_time, "last_updated_time": creation_time, "description": description, "workspace": workspace, }, ) def _insert_model_version( conn, *, name: str, version: int, workspace: str, run_id: str = "run-id", storage_location: str = "location", ): conn.execute( sa.text( """ INSERT INTO model_versions ( name, version, creation_time, last_updated_time, user_id, current_stage, description, source, run_id, status, status_message, run_link, storage_location, workspace ) VALUES ( :name, :version, :creation_time, :last_updated_time, :user_id, :current_stage, :description, :source, :run_id, :status, :status_message, :run_link, :storage_location, :workspace ) """ ), { "name": name, "version": version, "creation_time": 0, "last_updated_time": 0, "user_id": "user", "current_stage": "None", "description": "desc", "source": "source", "run_id": run_id, "status": "READY", "status_message": "message", "run_link": "link", "storage_location": storage_location, "workspace": workspace, }, ) def _insert_registered_model_tag( conn, *, workspace: str, name: str, key: str, value: str = "value", ): _insert_table_row( conn, _REGISTERED_MODEL_TAGS, workspace=workspace, key=key, value=value, name=name, ) def _insert_model_version_tag( conn, *, workspace: str, name: str, version: int, key: str, value: str = "value", ): _insert_table_row( conn, _MODEL_VERSION_TAGS, workspace=workspace, key=key, value=value, name=name, version=version, ) def _insert_registered_model_alias( conn, *, workspace: str, name: str, alias: str, version: int = 1, ): _insert_table_row( conn, _REGISTERED_MODEL_ALIASES, workspace=workspace, name=name, alias=alias, version=version, ) def _insert_evaluation_dataset( conn, *, dataset_id: str, workspace: str, name: str = "Dataset", digest: str = "digest", ): conn.execute( sa.insert(_EVALUATION_DATASETS).values( dataset_id=dataset_id, name=name, schema="schema", profile="profile", digest=digest, created_time=0, last_update_time=0, created_by="user", last_updated_by="user", workspace=workspace, ) ) _SECRETS = sa.table( "secrets", sa.column("secret_id"), sa.column("secret_name"), sa.column("encrypted_value"), sa.column("wrapped_dek"), sa.column("kek_version"), sa.column("masked_value"), sa.column("provider"), sa.column("auth_config"), sa.column("description"), sa.column("created_by"), sa.column("created_at"), sa.column("last_updated_by"), sa.column("last_updated_at"), sa.column("workspace"), ) def _insert_secret( conn, *, secret_id: str, secret_name: str, workspace: str, ): conn.execute( sa.insert(_SECRETS).values( secret_id=secret_id, secret_name=secret_name, encrypted_value=b"encrypted", wrapped_dek=b"dek", kek_version=1, masked_value="***", provider="openai", auth_config=None, description=None, created_by="user", created_at=0, last_updated_by="user", last_updated_at=0, workspace=workspace, ) ) _ENDPOINTS = sa.table( "endpoints", sa.column("endpoint_id"), sa.column("name"), sa.column("created_by"), sa.column("created_at"), sa.column("last_updated_by"), sa.column("last_updated_at"), sa.column("workspace"), ) def _insert_endpoint( conn, *, endpoint_id: str, name: str, workspace: str, ): conn.execute( sa.insert(_ENDPOINTS).values( endpoint_id=endpoint_id, name=name, created_by="user", created_at=0, last_updated_by="user", last_updated_at=0, workspace=workspace, ) ) _MODEL_DEFINITIONS = sa.table( "model_definitions", sa.column("model_definition_id"), sa.column("name"), sa.column("secret_id"), sa.column("provider"), sa.column("model_name"), sa.column("created_by"), sa.column("created_at"), sa.column("last_updated_by"), sa.column("last_updated_at"), sa.column("workspace"), ) def _insert_model_definition( conn, *, model_definition_id: str, name: str, workspace: str, ): conn.execute( sa.insert(_MODEL_DEFINITIONS).values( model_definition_id=model_definition_id, name=name, secret_id=None, provider="openai", model_name="gpt-4", created_by="user", created_at=0, last_updated_by="user", last_updated_at=0, workspace=workspace, ) ) def _fetch_conflicts(conn, table_name: str, columns: tuple[str, ...]): metadata = sa.MetaData() table = sa.Table(table_name, metadata, autoload_with=conn) group_columns = [table.c[column] for column in columns] stmt = sa.select(*group_columns).group_by(*group_columns).having(sa.func.count() > 1) return conn.execute(stmt).fetchall() def test_workspace_migration_upgrade_adds_columns_and_backfills(tmp_path): engine, config = _prepare_database(tmp_path) try: with engine.begin() as conn: _seed_pre_workspace_entities(conn) command.upgrade(config, REVISION) inspector = sa.inspect(engine) _assert_workspace_columns(inspector, "default") with engine.connect() as conn: assert conn.execute( sa.text( "SELECT experiment_id, name, workspace FROM experiments ORDER BY experiment_id" ) ).fetchall() == [(1, "exp-default", "default")] assert conn.execute( sa.text("SELECT run_uuid, experiment_id FROM runs ORDER BY run_uuid") ).fetchall() == [("run-default", 1)] assert conn.execute( sa.text("SELECT name, workspace FROM registered_models") ).fetchall() == [("rm-default", "default")] assert conn.execute( sa.text("SELECT name, version, workspace FROM model_versions") ).fetchall() == [("rm-default", 1, "default")] assert conn.execute( sa.select( _REGISTERED_MODEL_TAGS.c.workspace, _REGISTERED_MODEL_TAGS.c.name, _REGISTERED_MODEL_TAGS.c.key, ) ).fetchall() == [("default", "rm-default", "tag")] assert conn.execute( sa.select( _MODEL_VERSION_TAGS.c.workspace, _MODEL_VERSION_TAGS.c.name, _MODEL_VERSION_TAGS.c.version, _MODEL_VERSION_TAGS.c.key, ) ).fetchall() == [("default", "rm-default", 1, "tag")] assert conn.execute( sa.text("SELECT workspace, name, alias FROM registered_model_aliases") ).fetchall() == [("default", "rm-default", "alias")] assert conn.execute( sa.text("SELECT dataset_id, workspace FROM evaluation_datasets") ).fetchall() == [("ds-default", "default")] assert conn.execute( sa.text("SELECT name, description FROM workspaces ORDER BY name") ).fetchall() == [("default", "Default workspace for legacy resources")] pk_registered_models = inspector.get_pk_constraint("registered_models") assert pk_registered_models["constrained_columns"] == ["workspace", "name"] pk_model_versions = inspector.get_pk_constraint("model_versions") assert pk_model_versions["constrained_columns"] == [ "workspace", "name", "version", ] pk_registered_model_tags = inspector.get_pk_constraint("registered_model_tags") assert pk_registered_model_tags["constrained_columns"] == [ "workspace", "key", "name", ] pk_model_version_tags = inspector.get_pk_constraint("model_version_tags") assert pk_model_version_tags["constrained_columns"] == [ "workspace", "key", "name", "version", ] pk_model_aliases = inspector.get_pk_constraint("registered_model_aliases") assert pk_model_aliases["constrained_columns"] == [ "workspace", "name", "alias", ] try: unique_experiments = inspector.get_unique_constraints("experiments") except NotImplementedError: if inspector.bind.dialect.name == "mssql": unique_experiments = None else: raise if unique_experiments is not None: assert any( {"workspace", "name"} == set(constraint.get("column_names", [])) for constraint in unique_experiments ) fk_model_versions = inspector.get_foreign_keys("model_versions") assert any( fk.get("constrained_columns") == ["workspace", "name"] and fk.get("referred_table") == "registered_models" for fk in fk_model_versions ) assert _has_index(inspector, "experiments", "idx_experiments_workspace", ["workspace"]) assert _has_index( inspector, "experiments", "idx_experiments_workspace_creation_time", ["workspace", "creation_time"], ) assert _has_index( inspector, "registered_models", "idx_registered_models_workspace", ["workspace"] ) assert _has_index( inspector, "evaluation_datasets", "idx_evaluation_datasets_workspace", ["workspace"] ) finally: engine.dispose() def test_workspace_migration_downgrade_reverts_schema(tmp_path): engine, config = _prepare_database(tmp_path) try: command.upgrade(config, REVISION) with engine.begin() as conn: _add_workspace(conn, "team-a", "Team A") _insert_experiment(conn, experiment_id=1, name="exp-default", workspace="default") _insert_run( conn, run_uuid="run-default", experiment_id=1, name="downgrade-validation-run", ) _insert_experiment(conn, experiment_id=2, name="exp-team-a", workspace="team-a") command.downgrade(config, PREVIOUS_REVISION) inspector = sa.inspect(engine) tables = inspector.get_table_names() assert "workspaces" not in tables for table in ( "experiments", "registered_models", "model_versions", "registered_model_tags", "model_version_tags", "registered_model_aliases", "evaluation_datasets", "webhooks", "jobs", ): column_names = {col["name"] for col in inspector.get_columns(table)} assert "workspace" not in column_names with engine.connect() as conn: assert conn.execute( sa.text("SELECT experiment_id, name FROM experiments ORDER BY experiment_id") ).fetchall() == [(1, "exp-default"), (2, "exp-team-a")] assert conn.execute( sa.text("SELECT run_uuid, experiment_id FROM runs ORDER BY run_uuid") ).fetchall() == [("run-default", 1)] pk_registered_models = inspector.get_pk_constraint("registered_models") assert pk_registered_models["constrained_columns"] == ["name"] pk_model_versions = inspector.get_pk_constraint("model_versions") assert pk_model_versions["constrained_columns"] == ["name", "version"] pk_registered_model_tags = inspector.get_pk_constraint("registered_model_tags") assert pk_registered_model_tags["constrained_columns"] == ["key", "name"] pk_model_version_tags = inspector.get_pk_constraint("model_version_tags") assert pk_model_version_tags["constrained_columns"] == ["key", "name", "version"] pk_registered_model_aliases = inspector.get_pk_constraint("registered_model_aliases") assert pk_registered_model_aliases["constrained_columns"] == ["name", "alias"] try: unique_experiments = inspector.get_unique_constraints("experiments") except NotImplementedError: if inspector.bind.dialect.name == "mssql": unique_experiments = None else: raise if unique_experiments is not None: assert any( set(constraint.get("column_names", [])) == {"name"} for constraint in unique_experiments ) fk_model_versions = inspector.get_foreign_keys("model_versions") assert any( fk.get("constrained_columns") == ["name"] and fk.get("referred_table") == "registered_models" for fk in fk_model_versions ) fk_registered_model_tags = inspector.get_foreign_keys("registered_model_tags") assert any( fk.get("constrained_columns") == ["name"] and fk.get("referred_table") == "registered_models" for fk in fk_registered_model_tags ) fk_model_version_tags = inspector.get_foreign_keys("model_version_tags") assert any( fk.get("constrained_columns") == ["name", "version"] and fk.get("referred_table") == "model_versions" for fk in fk_model_version_tags ) finally: engine.dispose() def _setup_experiment_conflict(conn): _insert_experiment(conn, experiment_id=1, name="duplicate-exp", workspace="default") _insert_run(conn, run_uuid="run-exp-default", experiment_id=1) _insert_experiment(conn, experiment_id=2, name="duplicate-exp", workspace="team-a") def _setup_registered_model_conflict(conn): _insert_registered_model(conn, name="duplicate-model", workspace="default") _insert_registered_model(conn, name="duplicate-model", workspace="team-a") def _setup_model_version_conflict(conn): _insert_registered_model(conn, name="mv-model", workspace="default") _insert_registered_model(conn, name="mv-model", workspace="team-a") _insert_model_version(conn, name="mv-model", version=1, workspace="default") _insert_model_version(conn, name="mv-model", version=1, workspace="team-a") def _setup_registered_model_tag_conflict(conn): _insert_registered_model(conn, name="tag-model", workspace="default") _insert_registered_model(conn, name="tag-model", workspace="team-a") _insert_registered_model_tag(conn, workspace="default", name="tag-model", key="tag-key") _insert_registered_model_tag(conn, workspace="team-a", name="tag-model", key="tag-key") def _setup_model_version_tag_conflict(conn): _insert_registered_model(conn, name="mvt-model", workspace="default") _insert_registered_model(conn, name="mvt-model", workspace="team-a") _insert_model_version(conn, name="mvt-model", version=1, workspace="default") _insert_model_version(conn, name="mvt-model", version=1, workspace="team-a") _insert_model_version_tag( conn, workspace="default", name="mvt-model", version=1, key="mv-tag-key" ) _insert_model_version_tag( conn, workspace="team-a", name="mvt-model", version=1, key="mv-tag-key" ) def _setup_registered_model_alias_conflict(conn): _insert_registered_model(conn, name="alias-model", workspace="default") _insert_registered_model(conn, name="alias-model", workspace="team-a") _insert_registered_model_alias(conn, workspace="default", name="alias-model", alias="latest") _insert_registered_model_alias(conn, workspace="team-a", name="alias-model", alias="latest") def _setup_evaluation_dataset_conflict(conn): _insert_evaluation_dataset( conn, dataset_id="ds-default", name="duplicate-ds", workspace="default" ) _insert_evaluation_dataset( conn, dataset_id="ds-team-a", name="duplicate-ds", workspace="team-a" ) def _setup_secret_conflict(conn): _insert_secret(conn, secret_id="s-default", secret_name="duplicate-secret", workspace="default") _insert_secret(conn, secret_id="s-team-a", secret_name="duplicate-secret", workspace="team-a") def _setup_endpoint_conflict(conn): _insert_endpoint(conn, endpoint_id="e-default", name="duplicate-endpoint", workspace="default") _insert_endpoint(conn, endpoint_id="e-team-a", name="duplicate-endpoint", workspace="team-a") def _setup_model_definition_conflict(conn): _insert_model_definition( conn, model_definition_id="md-default", name="duplicate-def", workspace="default" ) _insert_model_definition( conn, model_definition_id="md-team-a", name="duplicate-def", workspace="team-a" ) @pytest.mark.parametrize( ("setup_conflict", "expected_fragment", "case_slug"), [ (_setup_experiment_conflict, "duplicate experiments with the same name", "experiments"), ( _setup_registered_model_conflict, "duplicate registered models with the same name", "models", ), ( _setup_evaluation_dataset_conflict, "duplicate evaluation datasets with the same name", "evaluation_datasets", ), ( _setup_secret_conflict, "duplicate secrets with the same name", "secrets", ), ( _setup_endpoint_conflict, "duplicate endpoints with the same name", "endpoints", ), ( _setup_model_definition_conflict, "duplicate model definitions with the same name", "model_definitions", ), ], ) def test_workspace_migration_downgrade_detects_conflicts( tmp_path, setup_conflict, expected_fragment, case_slug ): case_dir = tmp_path / f"conflict_{case_slug}" case_dir.mkdir() engine, config = _prepare_database(case_dir) try: command.upgrade(config, REVISION) with engine.begin() as conn: _add_workspace(conn, "team-a", "Team A") setup_conflict(conn) with pytest.raises( RuntimeError, match=re.escape(expected_fragment), ): command.downgrade(config, PREVIOUS_REVISION) finally: engine.dispose() @pytest.mark.parametrize( ("setup_conflict", "table_name", "columns", "case_slug"), [ ( _setup_model_version_conflict, "model_versions", ("name", "version"), "model_versions", ), ( _setup_registered_model_tag_conflict, "registered_model_tags", ("name", "key"), "registered_model_tags", ), ( _setup_model_version_tag_conflict, "model_version_tags", ("name", "version", "key"), "model_version_tags", ), ( _setup_registered_model_alias_conflict, "registered_model_aliases", ("name", "alias"), "registered_model_aliases", ), ], ) def test_workspace_migration_conflict_detection_queries( tmp_path, setup_conflict, table_name, columns, case_slug ): case_dir = tmp_path / f"conflict_query_{case_slug}" case_dir.mkdir() engine, config = _prepare_database(case_dir) try: command.upgrade(config, REVISION) with engine.begin() as conn: _add_workspace(conn, "team-a", "Team A") setup_conflict(conn) conflicts = _fetch_conflicts(conn, table_name, columns) assert conflicts, f"Expected conflicts for {table_name}, found none" finally: engine.dispose()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/db/test_workspace_migration.py", "license": "Apache License 2.0", "lines": 1099, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/helpers/db_mocks.py
from __future__ import annotations from contextlib import contextmanager from unittest import mock def mock_get_managed_session_maker(*args, **kwargs): @contextmanager def _manager(): session = mock.MagicMock() def _mock_query(*q_args, **q_kwargs): query = mock.MagicMock() query.filter.return_value = query query.order_by.return_value = query query.exists.return_value = query query.scalar.return_value = False query.first.return_value = None return query session.query.side_effect = _mock_query yield session return _manager
{ "repo_id": "mlflow/mlflow", "file_path": "tests/helpers/db_mocks.py", "license": "Apache License 2.0", "lines": 18, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/auth/test_auth_workspace.py
import json from contextlib import contextmanager from types import SimpleNamespace from unittest.mock import MagicMock, Mock import pytest from flask import Response, request from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST from mlflow.server import auth as auth_module from mlflow.server.auth.permissions import MANAGE, NO_PERMISSIONS, READ from mlflow.server.auth.routes import ( CREATE_PROMPTLAB_RUN, GET_ARTIFACT, GET_METRIC_HISTORY_BULK, GET_METRIC_HISTORY_BULK_INTERVAL, GET_MODEL_VERSION_ARTIFACT, GET_TRACE_ARTIFACT, SEARCH_DATASETS, UPLOAD_ARTIFACT, ) from mlflow.server.auth.sqlalchemy_store import SqlAlchemyStore from mlflow.utils import workspace_context from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME from tests.helper_functions import random_str def test_cleanup_workspace_permissions_handler(monkeypatch): mock_delete = Mock() monkeypatch.setattr( auth_module.store, "delete_workspace_permissions_for_workspace", mock_delete, raising=True, ) workspace_name = f"team-{random_str(10)}" with auth_module.app.test_request_context( f"/api/3.0/mlflow/workspaces/{workspace_name}", method="DELETE" ): request.view_args = {"workspace_name": workspace_name} response = Response(status=204) auth_module._after_request(response) mock_delete.assert_called_once_with(workspace_name) class _TrackingStore: def __init__( self, experiment_workspaces: dict[str, str], run_experiments: dict[str, str], trace_experiments: dict[str, str], experiment_names: dict[str, str] | None = None, logged_model_experiments: dict[str, str] | None = None, gateway_secret_workspaces: dict[str, str] | None = None, gateway_endpoint_workspaces: dict[str, str] | None = None, gateway_model_def_workspaces: dict[str, str] | None = None, engine=None, ManagedSessionMaker=None, ): self._experiment_workspaces = experiment_workspaces self._run_experiments = run_experiments self._trace_experiments = trace_experiments self._experiment_names = experiment_names or {} self._logged_model_experiments = logged_model_experiments or {} self._gateway_secret_workspaces = gateway_secret_workspaces or {} self._gateway_endpoint_workspaces = gateway_endpoint_workspaces or {} self._gateway_model_def_workspaces = gateway_model_def_workspaces or {} self.engine = engine self.ManagedSessionMaker = ManagedSessionMaker def get_experiment(self, experiment_id: str): return SimpleNamespace(workspace=self._experiment_workspaces[experiment_id]) def get_experiment_by_name(self, experiment_name: str): experiment_id = self._experiment_names.get(experiment_name) if experiment_id is None: return None return SimpleNamespace( experiment_id=experiment_id, workspace=self._experiment_workspaces[experiment_id], ) def get_run(self, run_id: str): return SimpleNamespace(info=SimpleNamespace(experiment_id=self._run_experiments[run_id])) def get_trace_info(self, request_id: str): return SimpleNamespace(experiment_id=self._trace_experiments[request_id]) def get_logged_model(self, model_id: str): experiment_id = self._logged_model_experiments[model_id] return SimpleNamespace(experiment_id=experiment_id) def get_secret_info(self, secret_id: str | None = None, secret_name: str | None = None): if secret_id: if secret_id not in self._gateway_secret_workspaces: raise MlflowException( f"GatewaySecret not found ({secret_id})", error_code=RESOURCE_DOES_NOT_EXIST, ) # Add workspace attribute so _get_resource_workspace can extract it return SimpleNamespace( secret_id=secret_id, workspace=self._gateway_secret_workspaces[secret_id] ) raise ValueError("Must provide secret_id or secret_name") def get_gateway_endpoint(self, endpoint_id: str | None = None, name: str | None = None): if endpoint_id: if endpoint_id not in self._gateway_endpoint_workspaces: raise MlflowException( f"GatewayEndpoint not found ({endpoint_id})", error_code=RESOURCE_DOES_NOT_EXIST, ) # Add workspace attribute so _get_resource_workspace can extract it return SimpleNamespace( endpoint_id=endpoint_id, workspace=self._gateway_endpoint_workspaces[endpoint_id] ) raise ValueError("Must provide endpoint_id or name") def get_gateway_model_definition( self, model_definition_id: str | None = None, name: str | None = None ): if model_definition_id: if model_definition_id not in self._gateway_model_def_workspaces: raise MlflowException( f"GatewayModelDefinition not found ({model_definition_id})", error_code=RESOURCE_DOES_NOT_EXIST, ) # Add workspace attribute so _get_resource_workspace can extract it return SimpleNamespace( model_definition_id=model_definition_id, workspace=self._gateway_model_def_workspaces[model_definition_id], ) raise ValueError("Must provide model_definition_id or name") def _create_mock_session(self): """Create a mock session that can query gateway SQL models.""" mock_session = MagicMock() def _filter_by_secret_id(secret_id): if secret_id in self._gateway_secret_workspaces: mock_result = MagicMock() mock_result.first.return_value = SimpleNamespace( workspace=self._gateway_secret_workspaces[secret_id] ) return mock_result mock_result = MagicMock() mock_result.first.return_value = None return mock_result def _filter_by_endpoint_id(endpoint_id): if endpoint_id in self._gateway_endpoint_workspaces: mock_result = MagicMock() mock_result.first.return_value = SimpleNamespace( workspace=self._gateway_endpoint_workspaces[endpoint_id] ) return mock_result mock_result = MagicMock() mock_result.first.return_value = None return mock_result def _filter_by_model_def_id(model_definition_id): if model_definition_id in self._gateway_model_def_workspaces: mock_result = MagicMock() mock_result.first.return_value = SimpleNamespace( workspace=self._gateway_model_def_workspaces[model_definition_id] ) return mock_result mock_result = MagicMock() mock_result.first.return_value = None return mock_result def _query(model_class): mock_query_result = MagicMock() # Mock the filter method to return different results based on the filter def _mock_filter(*args, **kwargs): if "secret_id" in kwargs: return _filter_by_secret_id(kwargs["secret_id"]) elif "endpoint_id" in kwargs: return _filter_by_endpoint_id(kwargs["endpoint_id"]) elif "model_definition_id" in kwargs: return _filter_by_model_def_id(kwargs["model_definition_id"]) return mock_query_result mock_query_result.filter = _mock_filter return mock_query_result mock_session.query = _query return mock_session def _create_mock_session_maker(self): """Create a mock ManagedSessionMaker context manager.""" @contextmanager def _mock_session_maker(): yield self._create_mock_session() return _mock_session_maker class _RegistryStore: def __init__(self, model_workspaces: dict[str, str]): self._model_workspaces = model_workspaces def get_registered_model(self, name: str): return SimpleNamespace(workspace=self._model_workspaces[name]) @pytest.fixture def workspace_permission_setup(tmp_path, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") monkeypatch.setattr( auth_module, "auth_config", auth_module.auth_config._replace(default_permission=NO_PERMISSIONS.name), ) db_uri = f"sqlite:///{tmp_path / 'auth-store.db'}" auth_store = SqlAlchemyStore() auth_store.init_db(db_uri) monkeypatch.setattr(auth_module, "store", auth_store, raising=False) username = "alice" auth_store.create_user(username, "supersecurepassword", is_admin=False) tracking_store = _TrackingStore( experiment_workspaces={"exp-1": "team-a", "exp-2": "team-a", "1": "team-a"}, run_experiments={"run-1": "exp-1", "run-2": "exp-2"}, trace_experiments={"trace-1": "exp-1"}, experiment_names={"Primary Experiment": "exp-1"}, logged_model_experiments={"model-1": "exp-1"}, gateway_secret_workspaces={"secret-1": "team-a", "secret-2": "team-a"}, gateway_endpoint_workspaces={"endpoint-1": "team-a", "endpoint-2": "team-a"}, gateway_model_def_workspaces={"model-def-1": "team-a", "model-def-2": "team-a"}, engine=MagicMock(), # Mock engine for SQL model queries ) # Set ManagedSessionMaker after creating the store tracking_store.ManagedSessionMaker = tracking_store._create_mock_session_maker() monkeypatch.setattr(auth_module, "_get_tracking_store", lambda: tracking_store) registry_store = _RegistryStore({"model-xyz": "team-a"}) monkeypatch.setattr(auth_module, "_get_model_registry_store", lambda: registry_store) monkeypatch.setattr( auth_module, "authenticate_request", lambda: SimpleNamespace(username=username), ) auth_store.set_workspace_permission("team-a", username, MANAGE.name) with workspace_context.WorkspaceContext("team-a"): yield {"store": auth_store, "username": username} auth_store.engine.dispose() def _set_workspace_permission(store: SqlAlchemyStore, username: str, permission: str): store.set_workspace_permission("team-a", username, permission) def test_workspace_permission_grants_default_access(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") default_permission = MANAGE.name monkeypatch.setattr( auth_module, "auth_config", auth_module.auth_config._replace( default_permission=default_permission, grant_default_workspace_access=True, ), raising=False, ) class DummyStore: def get_workspace_permission(self, workspace_name, username): return None def list_accessible_workspace_names(self, username): return [] dummy_store = DummyStore() monkeypatch.setattr(auth_module, "store", dummy_store, raising=False) default_workspace = DEFAULT_WORKSPACE_NAME monkeypatch.setattr(auth_module, "_get_workspace_store", lambda: None, raising=False) monkeypatch.setattr( auth_module, "get_default_workspace_optional", lambda *args, **kwargs: (SimpleNamespace(name=default_workspace), True), raising=False, ) auth = SimpleNamespace(username="alice") permission = auth_module._workspace_permission(auth.username, default_workspace) assert permission is not None assert permission.can_manage with workspace_context.WorkspaceContext(default_workspace): monkeypatch.setattr(auth_module, "authenticate_request", lambda: auth) assert auth_module.validate_can_create_experiment() def test_filter_list_workspaces_includes_default_when_autogrant(monkeypatch): monkeypatch.setattr(auth_module, "sender_is_admin", lambda: False) auth = SimpleNamespace(username="alice") monkeypatch.setattr(auth_module, "authenticate_request", lambda: auth) monkeypatch.setattr( auth_module, "auth_config", auth_module.auth_config._replace( grant_default_workspace_access=True, default_permission=READ.name, ), raising=False, ) default_workspace = "team-default" monkeypatch.setattr(auth_module, "_get_workspace_store", lambda: None, raising=False) monkeypatch.setattr( auth_module, "get_default_workspace_optional", lambda *args, **kwargs: (SimpleNamespace(name=default_workspace), True), raising=False, ) class DummyStore: def list_accessible_workspace_names(self, username): return [] monkeypatch.setattr(auth_module, "store", DummyStore(), raising=False) response = Response( json.dumps( { "workspaces": [ {"name": default_workspace}, {"name": "other-workspace"}, ] } ), mimetype="application/json", ) auth_module.filter_list_workspaces(response) payload = json.loads(response.get_data(as_text=True)) assert payload["workspaces"] == [{"name": default_workspace}] def test_filter_list_workspaces_filters_to_allowed(monkeypatch): monkeypatch.setattr(auth_module, "sender_is_admin", lambda: False) auth = SimpleNamespace(username="alice") monkeypatch.setattr(auth_module, "authenticate_request", lambda: auth) monkeypatch.setattr( auth_module, "auth_config", auth_module.auth_config._replace( grant_default_workspace_access=False, ), raising=False, ) class DummyStore: def list_accessible_workspace_names(self, username): return ["team-a"] monkeypatch.setattr(auth_module, "store", DummyStore(), raising=False) response = Response( json.dumps({"workspaces": [{"name": "team-a"}, {"name": "team-b"}]}), mimetype="application/json", ) auth_module.filter_list_workspaces(response) payload = json.loads(response.get_data(as_text=True)) assert [ws["name"] for ws in payload["workspaces"]] == ["team-a"] def test_validate_can_view_workspace_allows_default_autogrant(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") monkeypatch.setattr(auth_module, "sender_is_admin", lambda: False) auth = SimpleNamespace(username="alice") monkeypatch.setattr(auth_module, "authenticate_request", lambda: auth) monkeypatch.setattr( auth_module, "auth_config", auth_module.auth_config._replace( grant_default_workspace_access=True, default_permission=READ.name, ), raising=False, ) default_workspace = "team-default" monkeypatch.setattr(auth_module, "_get_workspace_store", lambda: None, raising=False) monkeypatch.setattr( auth_module, "get_default_workspace_optional", lambda *args, **kwargs: (SimpleNamespace(name=default_workspace), True), raising=False, ) class DummyStore: def list_accessible_workspace_names(self, username): return [] monkeypatch.setattr(auth_module, "store", DummyStore(), raising=False) with auth_module.app.test_request_context( f"/api/3.0/mlflow/workspaces/{default_workspace}", method="GET" ): request.view_args = {"workspace_name": default_workspace} assert auth_module.validate_can_view_workspace() with auth_module.app.test_request_context( "/api/3.0/mlflow/workspaces/other-team", method="GET" ): request.view_args = {"workspace_name": "other-team"} assert not auth_module.validate_can_view_workspace() def test_experiment_validators_allow_manage_permission(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get", method="GET", query_string={"experiment_id": "exp-1"} ): assert auth_module.validate_can_read_experiment() assert auth_module.validate_can_update_experiment() assert auth_module.validate_can_delete_experiment() assert auth_module.validate_can_manage_experiment() with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get-by-name", method="GET", query_string={"experiment_name": "Primary Experiment"}, ): assert auth_module.validate_can_read_experiment_by_name() with workspace_context.WorkspaceContext("team-a"): assert auth_module.validate_can_create_experiment() def test_experiment_validators_read_permission_blocks_writes(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get", method="GET", query_string={"experiment_id": "exp-1"} ): assert auth_module.validate_can_read_experiment() assert not auth_module.validate_can_update_experiment() assert not auth_module.validate_can_delete_experiment() assert not auth_module.validate_can_manage_experiment() with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get-by-name", method="GET", query_string={"experiment_name": "Primary Experiment"}, ): assert auth_module.validate_can_read_experiment_by_name() with workspace_context.WorkspaceContext("team-a"): assert not auth_module.validate_can_create_experiment() def test_experiment_artifact_proxy_validators_respect_permissions(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/ajax-api/2.0/mlflow-artifacts/artifacts/1/path", method="GET", ): request.view_args = {"artifact_path": "1/path"} assert auth_module.validate_can_read_experiment_artifact_proxy() assert auth_module.validate_can_update_experiment_artifact_proxy() assert auth_module.validate_can_delete_experiment_artifact_proxy() _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/ajax-api/2.0/mlflow-artifacts/artifacts/1/path", method="GET", ): request.view_args = {"artifact_path": "1/path"} assert auth_module.validate_can_read_experiment_artifact_proxy() assert not auth_module.validate_can_update_experiment_artifact_proxy() assert not auth_module.validate_can_delete_experiment_artifact_proxy() def test_experiment_artifact_proxy_without_experiment_id_uses_workspace_permissions( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/ajax-api/2.0/mlflow-artifacts/artifacts/uploads/path", method="GET", ): request.view_args = {"artifact_path": "uploads/path"} assert auth_module.validate_can_read_experiment_artifact_proxy() assert not auth_module.validate_can_update_experiment_artifact_proxy() def test_experiment_artifact_proxy_without_experiment_id_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( "/ajax-api/2.0/mlflow-artifacts/artifacts/uploads/path", method="GET", ): request.view_args = {"artifact_path": "uploads/path"} assert not auth_module.validate_can_read_experiment_artifact_proxy() def test_filter_experiment_ids_respects_workspace_permissions( workspace_permission_setup, monkeypatch ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] monkeypatch.setattr(auth_module, "sender_is_admin", lambda: False) experiment_ids = ["exp-1", "exp-2"] assert auth_module.filter_experiment_ids(experiment_ids) == experiment_ids _set_workspace_permission(store, username, NO_PERMISSIONS.name) assert auth_module.filter_experiment_ids(experiment_ids) == [] def test_run_validators_allow_manage_permission(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/runs/get", method="GET", query_string={"run_id": "run-1"} ): assert auth_module.validate_can_read_run() assert auth_module.validate_can_update_run() assert auth_module.validate_can_delete_run() assert auth_module.validate_can_manage_run() def test_run_validators_read_permission_blocks_writes(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/runs/get", method="GET", query_string={"run_id": "run-1"} ): assert auth_module.validate_can_read_run() assert not auth_module.validate_can_update_run() assert not auth_module.validate_can_delete_run() assert not auth_module.validate_can_manage_run() def test_logged_model_validators_respect_permissions(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/logged-models/get", method="GET", query_string={"model_id": "model-1"}, ): assert auth_module.validate_can_read_logged_model() assert auth_module.validate_can_update_logged_model() assert auth_module.validate_can_delete_logged_model() assert auth_module.validate_can_manage_logged_model() _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/logged-models/get", method="GET", query_string={"model_id": "model-1"}, ): assert auth_module.validate_can_read_logged_model() assert not auth_module.validate_can_update_logged_model() assert not auth_module.validate_can_delete_logged_model() assert not auth_module.validate_can_manage_logged_model() def test_scorer_validators_use_workspace_permissions(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/scorers/get", method="GET", query_string={"experiment_id": "exp-1", "name": "score-1"}, ): assert auth_module.validate_can_read_scorer() assert auth_module.validate_can_update_scorer() assert auth_module.validate_can_delete_scorer() assert auth_module.validate_can_manage_scorer() with auth_module.app.test_request_context( "/api/3.0/mlflow/scorers/permissions/create", method="POST", json={ "experiment_id": "exp-1", "scorer_name": "score-1", "username": "bob", "permission": "READ", }, ): assert auth_module.validate_can_manage_scorer_permission() def test_scorer_validators_read_permission_blocks_writes(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/scorers/get", method="GET", query_string={"experiment_id": "exp-1", "name": "score-1"}, ): assert auth_module.validate_can_read_scorer() assert not auth_module.validate_can_update_scorer() assert not auth_module.validate_can_delete_scorer() assert not auth_module.validate_can_manage_scorer() with auth_module.app.test_request_context( "/api/3.0/mlflow/scorers/permissions/create", method="POST", json={ "experiment_id": "exp-1", "scorer_name": "score-1", "username": "bob", "permission": "READ", }, ): assert not auth_module.validate_can_manage_scorer_permission() def test_registered_model_validators_require_manage_for_writes(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] with workspace_context.WorkspaceContext("team-a"): _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/registered-models/get", method="GET", query_string={"name": "model-xyz"}, ): assert auth_module.validate_can_read_registered_model() assert auth_module.validate_can_update_registered_model() assert auth_module.validate_can_delete_registered_model() assert auth_module.validate_can_manage_registered_model() perm = auth_module._workspace_permission( auth_module.authenticate_request().username, "team-a" ) assert perm is not None assert perm.can_manage assert workspace_context.get_request_workspace() == "team-a" assert auth_module.validate_can_create_registered_model() _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/registered-models/get", method="GET", query_string={"name": "model-xyz"}, ): assert auth_module.validate_can_read_registered_model() assert not auth_module.validate_can_update_registered_model() assert not auth_module.validate_can_delete_registered_model() assert not auth_module.validate_can_manage_registered_model() assert not auth_module.validate_can_create_registered_model() def test_validate_can_view_workspace_requires_access(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] with auth_module.app.test_request_context( "/api/3.0/mlflow/workspaces/team-a", method="GET", ): request.view_args = {"workspace_name": "team-a"} assert auth_module.validate_can_view_workspace() store.delete_workspace_permission("team-a", username) with auth_module.app.test_request_context( "/api/3.0/mlflow/workspaces/team-a", method="GET", ): request.view_args = {"workspace_name": "team-a"} assert not auth_module.validate_can_view_workspace() def test_run_artifact_validators_use_workspace_permissions(workspace_permission_setup): with auth_module.app.test_request_context( GET_ARTIFACT, method="GET", query_string={"run_id": "run-1"}, ): assert auth_module.validate_can_read_run_artifact() with auth_module.app.test_request_context( UPLOAD_ARTIFACT, method="POST", query_string={"run_id": "run-1"}, ): assert auth_module.validate_can_update_run_artifact() def test_model_version_artifact_validator_uses_workspace_permissions(workspace_permission_setup): with auth_module.app.test_request_context( GET_MODEL_VERSION_ARTIFACT, method="GET", query_string={"name": "model-xyz"}, ): assert auth_module.validate_can_read_model_version_artifact() def test_metric_history_bulk_validator_uses_workspace_permissions(workspace_permission_setup): with auth_module.app.test_request_context( GET_METRIC_HISTORY_BULK, method="GET", query_string=[("run_id", "run-1"), ("run_id", "run-2")], ): assert auth_module.validate_can_read_metric_history_bulk() def test_metric_history_bulk_interval_validator_uses_workspace_permissions( workspace_permission_setup, ): with auth_module.app.test_request_context( GET_METRIC_HISTORY_BULK_INTERVAL, method="GET", query_string=[ ("run_ids", "run-1"), ("run_ids", "run-2"), ("metric_key", "loss"), ], ): assert auth_module.validate_can_read_metric_history_bulk_interval() def test_search_datasets_validator_uses_workspace_permissions(workspace_permission_setup): with auth_module.app.test_request_context( SEARCH_DATASETS, method="POST", json={"experiment_ids": ["exp-1", "exp-2"]}, ): assert auth_module.validate_can_search_datasets() def test_create_promptlab_run_validator_uses_workspace_permissions(workspace_permission_setup): with auth_module.app.test_request_context( CREATE_PROMPTLAB_RUN, method="POST", json={"experiment_id": "exp-2"}, ): assert auth_module.validate_can_create_promptlab_run() def test_trace_artifact_validator_uses_workspace_permissions(workspace_permission_setup): with auth_module.app.test_request_context( GET_TRACE_ARTIFACT, method="GET", query_string={"request_id": "trace-1"}, ): assert auth_module.validate_can_read_trace_artifact() def test_experiment_artifact_proxy_without_workspaces_falls_back_to_default(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") monkeypatch.setattr( auth_module, "auth_config", auth_module.auth_config._replace(default_permission=READ.name), raising=False, ) monkeypatch.setattr( auth_module, "authenticate_request", lambda: SimpleNamespace(username="carol"), ) with auth_module.app.test_request_context( "/ajax-api/2.0/mlflow-artifacts/artifacts/uploads/path", method="GET", ): request.view_args = {"artifact_path": "uploads/path"} assert auth_module.validate_can_read_experiment_artifact_proxy() def test_run_artifact_validators_denied_without_workspace_permission(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( GET_ARTIFACT, method="GET", query_string={"run_id": "run-1"}, ): assert not auth_module.validate_can_read_run_artifact() with auth_module.app.test_request_context( UPLOAD_ARTIFACT, method="POST", query_string={"run_id": "run-1"}, ): assert not auth_module.validate_can_update_run_artifact() def test_model_version_artifact_validator_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( GET_MODEL_VERSION_ARTIFACT, method="GET", query_string={"name": "model-xyz"}, ): assert not auth_module.validate_can_read_model_version_artifact() def test_metric_history_bulk_validator_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( GET_METRIC_HISTORY_BULK, method="GET", query_string=[("run_id", "run-1"), ("run_id", "run-2")], ): assert not auth_module.validate_can_read_metric_history_bulk() def test_metric_history_bulk_interval_validator_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( GET_METRIC_HISTORY_BULK_INTERVAL, method="GET", query_string=[ ("run_ids", "run-1"), ("run_ids", "run-2"), ("metric_key", "loss"), ], ): assert not auth_module.validate_can_read_metric_history_bulk_interval() def test_search_datasets_validator_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( SEARCH_DATASETS, method="POST", json={"experiment_ids": ["exp-1", "exp-2"]}, ): assert not auth_module.validate_can_search_datasets() def test_create_promptlab_run_validator_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( CREATE_PROMPTLAB_RUN, method="POST", json={"experiment_id": "exp-2"}, ): assert not auth_module.validate_can_create_promptlab_run() def test_trace_artifact_validator_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( GET_TRACE_ARTIFACT, method="GET", query_string={"request_id": "trace-1"}, ): assert not auth_module.validate_can_read_trace_artifact() def test_cross_workspace_access_denied(workspace_permission_setup, monkeypatch): tracking_store = _TrackingStore( experiment_workspaces={"exp-other-ws": "team-b"}, run_experiments={"run-other-ws": "exp-other-ws"}, trace_experiments={}, ) monkeypatch.setattr(auth_module, "_get_tracking_store", lambda: tracking_store) with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get", method="GET", query_string={"experiment_id": "exp-other-ws"}, ): assert not auth_module.validate_can_read_experiment() assert not auth_module.validate_can_update_experiment() assert not auth_module.validate_can_delete_experiment() with auth_module.app.test_request_context( "/api/2.0/mlflow/runs/get", method="GET", query_string={"run_id": "run-other-ws"}, ): assert not auth_module.validate_can_read_run() assert not auth_module.validate_can_update_run() def test_cross_workspace_registered_model_access_denied(workspace_permission_setup, monkeypatch): registry_store = _RegistryStore({"model-other-ws": "team-b"}) monkeypatch.setattr(auth_module, "_get_model_registry_store", lambda: registry_store) with auth_module.app.test_request_context( "/api/2.0/mlflow/registered-models/get", method="GET", query_string={"name": "model-other-ws"}, ): assert not auth_module.validate_can_read_registered_model() assert not auth_module.validate_can_update_registered_model() assert not auth_module.validate_can_delete_registered_model() def test_explicit_experiment_permission_overrides_workspace( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) store.create_experiment_permission("exp-1", username, READ.name) with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get", method="GET", query_string={"experiment_id": "exp-1"}, ): assert auth_module.validate_can_read_experiment() assert not auth_module.validate_can_update_experiment() with auth_module.app.test_request_context( "/api/2.0/mlflow/experiments/get", method="GET", query_string={"experiment_id": "exp-2"}, ): assert not auth_module.validate_can_read_experiment() def test_cross_workspace_gateway_secret_access_denied(workspace_permission_setup, monkeypatch): tracking_store = _TrackingStore( experiment_workspaces={"exp-1": "team-a"}, run_experiments={}, trace_experiments={}, gateway_secret_workspaces={"secret-other-ws": "team-b"}, engine=MagicMock(), ) tracking_store.ManagedSessionMaker = tracking_store._create_mock_session_maker() monkeypatch.setattr(auth_module, "_get_tracking_store", lambda: tracking_store) with auth_module.app.test_request_context( "/api/3.0/mlflow/gateway/secrets/get", method="GET", query_string={"secret_id": "secret-other-ws"}, ): assert not auth_module.validate_can_read_gateway_secret() assert not auth_module.validate_can_update_gateway_secret() assert not auth_module.validate_can_delete_gateway_secret() def test_cross_workspace_gateway_endpoint_access_denied(workspace_permission_setup, monkeypatch): tracking_store = _TrackingStore( experiment_workspaces={"exp-1": "team-a"}, run_experiments={}, trace_experiments={}, gateway_endpoint_workspaces={"endpoint-other-ws": "team-b"}, engine=MagicMock(), ) tracking_store.ManagedSessionMaker = tracking_store._create_mock_session_maker() monkeypatch.setattr(auth_module, "_get_tracking_store", lambda: tracking_store) with auth_module.app.test_request_context( "/api/3.0/mlflow/gateway/endpoints/get", method="GET", query_string={"endpoint_id": "endpoint-other-ws"}, ): assert not auth_module.validate_can_read_gateway_endpoint() assert not auth_module.validate_can_update_gateway_endpoint() assert not auth_module.validate_can_delete_gateway_endpoint() def test_cross_workspace_gateway_model_definition_access_denied( workspace_permission_setup, monkeypatch ): tracking_store = _TrackingStore( experiment_workspaces={"exp-1": "team-a"}, run_experiments={}, trace_experiments={}, gateway_model_def_workspaces={"model-def-other-ws": "team-b"}, engine=MagicMock(), ) tracking_store.ManagedSessionMaker = tracking_store._create_mock_session_maker() monkeypatch.setattr(auth_module, "_get_tracking_store", lambda: tracking_store) with auth_module.app.test_request_context( "/api/3.0/mlflow/gateway/model-definitions/get", method="GET", query_string={"model_definition_id": "model-def-other-ws"}, ): assert not auth_module.validate_can_read_gateway_model_definition() assert not auth_module.validate_can_update_gateway_model_definition() assert not auth_module.validate_can_delete_gateway_model_definition() def test_workspace_permission_required_for_gateway_creation(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] # Remove workspace permission store.set_workspace_permission("team-a", username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/gateway/endpoints/create", method="POST", json={"name": "test-endpoint", "model_configs": []}, ): assert not auth_module.validate_can_create_gateway_endpoint() with auth_module.app.test_request_context( "/api/3.0/mlflow/gateway/model-definitions/create", method="POST", json={ "name": "test-model", "secret_id": "secret-1", "provider": "openai", "model_name": "gpt-4", }, ): assert not auth_module.validate_can_create_gateway_model_definition() # Restore workspace permission store.set_workspace_permission("team-a", username, MANAGE.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/gateway/endpoints/create", method="POST", json={"name": "test-endpoint", "model_configs": []}, ): assert auth_module.validate_can_create_gateway_endpoint() def test_prompt_optimization_job_validators_use_workspace_permissions( workspace_permission_setup, monkeypatch ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] # Mock get_job to return a job associated with exp-1 (in team-a) mock_job = SimpleNamespace(params='{"experiment_id": "exp-1"}') monkeypatch.setattr(auth_module, "get_job", lambda job_id: mock_job) _set_workspace_permission(store, username, MANAGE.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/prompt-optimization/jobs/get", method="GET", query_string={"job_id": "job-1"}, ): assert auth_module.validate_can_read_prompt_optimization_job() assert auth_module.validate_can_update_prompt_optimization_job() assert auth_module.validate_can_delete_prompt_optimization_job() def test_prompt_optimization_job_validators_read_permission_blocks_writes( workspace_permission_setup, monkeypatch ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] # Mock get_job to return a job associated with exp-1 (in team-a) mock_job = SimpleNamespace(params='{"experiment_id": "exp-1"}') monkeypatch.setattr(auth_module, "get_job", lambda job_id: mock_job) _set_workspace_permission(store, username, READ.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/prompt-optimization/jobs/get", method="GET", query_string={"job_id": "job-1"}, ): assert auth_module.validate_can_read_prompt_optimization_job() assert not auth_module.validate_can_update_prompt_optimization_job() assert not auth_module.validate_can_delete_prompt_optimization_job() def test_prompt_optimization_job_validators_denied_without_workspace_permission( workspace_permission_setup, monkeypatch ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] # Mock get_job to return a job associated with exp-1 (in team-a) mock_job = SimpleNamespace(params='{"experiment_id": "exp-1"}') monkeypatch.setattr(auth_module, "get_job", lambda job_id: mock_job) _set_workspace_permission(store, username, NO_PERMISSIONS.name) with auth_module.app.test_request_context( "/api/3.0/mlflow/prompt-optimization/jobs/get", method="GET", query_string={"job_id": "job-1"}, ): assert not auth_module.validate_can_read_prompt_optimization_job() assert not auth_module.validate_can_update_prompt_optimization_job() assert not auth_module.validate_can_delete_prompt_optimization_job() def test_graphql_permission_functions_use_workspace_permissions(workspace_permission_setup): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, MANAGE.name) # Test experiment permission assert auth_module._graphql_can_read_experiment("exp-1", username) # Test run permission (inherits from experiment) assert auth_module._graphql_can_read_run("run-1", username) # Test registered model permission assert auth_module._graphql_can_read_model("model-xyz", username) def test_graphql_permission_functions_denied_without_workspace_permission( workspace_permission_setup, ): store = workspace_permission_setup["store"] username = workspace_permission_setup["username"] _set_workspace_permission(store, username, NO_PERMISSIONS.name) # Test experiment permission denied assert not auth_module._graphql_can_read_experiment("exp-1", username) # Test run permission denied (inherits from experiment) assert not auth_module._graphql_can_read_run("run-1", username) # Test registered model permission denied assert not auth_module._graphql_can_read_model("model-xyz", username) def test_cross_workspace_graphql_access_denied(workspace_permission_setup, monkeypatch): # User has MANAGE in team-a but tries to access resources in team-b tracking_store = _TrackingStore( experiment_workspaces={"exp-other-ws": "team-b"}, run_experiments={"run-other-ws": "exp-other-ws"}, trace_experiments={}, ) monkeypatch.setattr(auth_module, "_get_tracking_store", lambda: tracking_store) registry_store = _RegistryStore({"model-other-ws": "team-b"}) monkeypatch.setattr(auth_module, "_get_model_registry_store", lambda: registry_store) username = workspace_permission_setup["username"] # Should be denied access to resources in team-b assert not auth_module._graphql_can_read_experiment("exp-other-ws", username) assert not auth_module._graphql_can_read_run("run-other-ws", username) assert not auth_module._graphql_can_read_model("model-other-ws", username)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/auth/test_auth_workspace.py", "license": "Apache License 2.0", "lines": 987, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/auth/test_client_workspace.py
from contextlib import contextmanager import pytest import requests from mlflow import MlflowException from mlflow.environment_variables import ( MLFLOW_ENABLE_WORKSPACES, MLFLOW_FLASK_SERVER_SECRET_KEY, MLFLOW_TRACKING_PASSWORD, MLFLOW_TRACKING_USERNAME, MLFLOW_WORKSPACE_STORE_URI, ) from mlflow.protos.databricks_pb2 import PERMISSION_DENIED, UNAUTHENTICATED, ErrorCode from mlflow.server.auth.client import AuthServiceClient from mlflow.utils.os import is_windows from mlflow.utils.workspace_utils import WORKSPACE_HEADER_NAME from tests.helper_functions import random_str from tests.server.auth.auth_test_utils import ( ADMIN_PASSWORD, ADMIN_USERNAME, User, create_user, ) from tests.tracking.integration_test_utils import _init_server @pytest.fixture(autouse=True) def clear_credentials(monkeypatch): monkeypatch.delenv(MLFLOW_TRACKING_USERNAME.name, raising=False) monkeypatch.delenv(MLFLOW_TRACKING_PASSWORD.name, raising=False) @pytest.fixture def workspace_client(tmp_path): path = tmp_path.joinpath("sqlalchemy.db").as_uri() backend_uri = ("sqlite://" if is_windows() else "sqlite:////") + path[len("file://") :] with _init_server( backend_uri=backend_uri, root_artifact_uri=tmp_path.joinpath("artifacts").as_uri(), app="mlflow.server.auth:create_app", extra_env={ MLFLOW_FLASK_SERVER_SECRET_KEY.name: "my-secret-key", MLFLOW_ENABLE_WORKSPACES.name: "true", MLFLOW_WORKSPACE_STORE_URI.name: backend_uri, }, server_type="flask", ) as url: yield AuthServiceClient(url), url @contextmanager def assert_unauthenticated(): with pytest.raises(MlflowException, match=r"You are not authenticated.") as exception_context: yield assert exception_context.value.error_code == ErrorCode.Name(UNAUTHENTICATED) @contextmanager def assert_unauthorized(): with pytest.raises(MlflowException, match=r"Permission denied.") as exception_context: yield assert exception_context.value.error_code == ErrorCode.Name(PERMISSION_DENIED) def _create_workspace(tracking_uri: str, workspace_name: str): response = requests.post( f"{tracking_uri}/api/3.0/mlflow/workspaces", json={"name": workspace_name}, auth=(ADMIN_USERNAME, ADMIN_PASSWORD), ) response.raise_for_status() @pytest.fixture def workspace_setup(workspace_client): client, tracking_uri = workspace_client workspace_name = f"team-{random_str()}" _create_workspace(tracking_uri, workspace_name) username, password = create_user(tracking_uri) return client, tracking_uri, workspace_name, username, password def _create_experiment( tracking_uri: str, workspace_name: str, auth: tuple[str, str] = (ADMIN_USERNAME, ADMIN_PASSWORD) ) -> str: resp = requests.post( f"{tracking_uri}/api/2.0/mlflow/experiments/create", json={"name": f"exp-{random_str()}"}, auth=auth, headers={WORKSPACE_HEADER_NAME: workspace_name}, ) assert resp.ok, f"create_experiment failed with {resp.status_code}: {resp.text}" return resp.json()["experiment_id"] def _create_run( tracking_uri: str, workspace_name: str, experiment_id: str, auth: tuple[str, str] = (ADMIN_USERNAME, ADMIN_PASSWORD), ) -> str: resp = requests.post( f"{tracking_uri}/api/2.0/mlflow/runs/create", json={"experiment_id": experiment_id}, auth=auth, headers={WORKSPACE_HEADER_NAME: workspace_name}, ) assert resp.ok, f"create_run failed with {resp.status_code}: {resp.text}" return resp.json()["run"]["info"]["run_id"] def _create_registered_model( tracking_uri: str, workspace_name: str, model_name: str, auth: tuple[str, str] = (ADMIN_USERNAME, ADMIN_PASSWORD), ): resp = requests.post( f"{tracking_uri}/api/2.0/mlflow/registered-models/create", json={"name": model_name}, auth=auth, headers={WORKSPACE_HEADER_NAME: workspace_name}, ) assert resp.ok, f"create_registered_model failed with {resp.status_code}: {resp.text}" def _create_model_version( tracking_uri: str, workspace_name: str, model_name: str, run_id: str, auth: tuple[str, str] = (ADMIN_USERNAME, ADMIN_PASSWORD), ) -> str: resp = requests.post( f"{tracking_uri}/api/2.0/mlflow/model-versions/create", json={"name": model_name, "source": f"runs:/{run_id}/model", "run_id": run_id}, auth=auth, headers={WORKSPACE_HEADER_NAME: workspace_name}, ) assert resp.ok, f"create_model_version failed with {resp.status_code}: {resp.text}" return resp.json()["model_version"]["version"] def _graphql_search_runs( tracking_uri: str, workspace_name: str, auth: tuple[str, str], experiment_ids: list[str] ): query = """ query SearchRuns($input: MlflowSearchRunsInput){ mlflowSearchRuns(input: $input){ runs { info { runId experimentId } } } } """ variables = {"input": {"experimentIds": experiment_ids, "maxResults": 50}} resp = requests.post( f"{tracking_uri}/graphql", json={"query": query, "variables": variables}, auth=auth, headers={WORKSPACE_HEADER_NAME: workspace_name}, ) resp.raise_for_status() payload = resp.json() assert payload.get("errors") in (None, []) search_runs = payload["data"]["mlflowSearchRuns"] if search_runs is None: return [] return search_runs["runs"] def _graphql_search_model_versions( tracking_uri: str, workspace_name: str, auth: tuple[str, str], filter_string: str | None = None, ): query = """ query SearchModelVersions($input: MlflowSearchModelVersionsInput){ mlflowSearchModelVersions(input: $input){ modelVersions { name version runId } } } """ variables = {"input": {"filter": filter_string}} resp = requests.post( f"{tracking_uri}/graphql", json={"query": query, "variables": variables}, auth=auth, headers={WORKSPACE_HEADER_NAME: workspace_name}, ) resp.raise_for_status() payload = resp.json() assert payload.get("errors") in (None, []) return payload["data"]["mlflowSearchModelVersions"]["modelVersions"] def test_workspace_permission_set_and_list(workspace_setup, monkeypatch): client, _tracking_uri, workspace_name, username, _password = workspace_setup with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): perm = client.set_workspace_permission(workspace_name, username, "MANAGE") user = client.get_user(username) assert perm.workspace == workspace_name assert perm.user_id == user.id assert perm.permission == "MANAGE" with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): perms = client.list_workspace_permissions(workspace_name) assert any(p.user_id == user.id for p in perms) user_perms = client.list_user_workspace_permissions(username) assert any(p.workspace == workspace_name for p in user_perms) client.delete_workspace_permission(workspace_name, username) assert client.list_workspace_permissions(workspace_name) == [] def test_workspace_permission_list_requires_authentication(workspace_setup): client, _tracking_uri, workspace_name, _username, _password = workspace_setup with assert_unauthenticated(): client.list_workspace_permissions(workspace_name) def test_workspace_permission_list_requires_manage_permission(workspace_setup, monkeypatch): client, tracking_uri, workspace_name, manager_username, manager_password = workspace_setup target_username, _ = create_user(tracking_uri) other_username, other_password = create_user(tracking_uri) with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): client.set_workspace_permission(workspace_name, manager_username, "MANAGE") client.set_workspace_permission(workspace_name, target_username, "READ") with User(other_username, other_password, monkeypatch), assert_unauthorized(): client.list_workspace_permissions(workspace_name) with User(manager_username, manager_password, monkeypatch): perms = client.list_workspace_permissions(workspace_name) assert perms # manager should see permissions they can manage assert all(p.workspace == workspace_name for p in perms) def test_workspace_permission_set_requires_manage_permission(workspace_client, monkeypatch): client, tracking_uri = workspace_client workspace_name = "team-b" _create_workspace(tracking_uri, workspace_name) manager_username, manager_password = create_user(tracking_uri) target_username, target_password = create_user(tracking_uri) with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): client.set_workspace_permission(workspace_name, manager_username, "MANAGE") with User(manager_username, manager_password, monkeypatch): perm = client.set_workspace_permission(workspace_name, target_username, "READ") assert perm.permission == "READ" client.delete_workspace_permission(workspace_name, target_username) with User(manager_username, manager_password, monkeypatch): perm = client.set_workspace_permission(workspace_name, target_username, "READ") assert perm.permission == "READ" with User(target_username, target_password, monkeypatch), assert_unauthorized(): client.set_workspace_permission(workspace_name, manager_username, "READ") with User(target_username, target_password, monkeypatch), assert_unauthorized(): client.delete_workspace_permission(workspace_name, manager_username) def test_run_access_controls_across_workspaces(workspace_setup, monkeypatch): client, tracking_uri, workspace_a, username, password = workspace_setup workspace_b = f"team-{random_str()}" _create_workspace(tracking_uri, workspace_b) # Allow the regular user to create resources in both workspaces for setup. with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): client.set_workspace_permission(workspace_a, username, "MANAGE") client.set_workspace_permission(workspace_b, username, "MANAGE") exp_a = _create_experiment(tracking_uri, workspace_a, auth=(username, password)) run_a = _create_run(tracking_uri, workspace_a, exp_a, auth=(username, password)) exp_b = _create_experiment(tracking_uri, workspace_b, auth=(username, password)) run_b = _create_run(tracking_uri, workspace_b, exp_b, auth=(username, password)) # Use a separate limited user who only has access to workspace A. limited_user, limited_password = create_user(tracking_uri) with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): client.set_workspace_permission(workspace_a, limited_user, "READ") # Positive: limited user can read run in workspace A. resp_ok = requests.get( f"{tracking_uri}/api/2.0/mlflow/runs/get", params={"run_id": run_a}, auth=(limited_user, limited_password), headers={WORKSPACE_HEADER_NAME: workspace_a}, ) assert resp_ok.status_code == 200 # REST: run in workspace B should be forbidden for limited user. resp = requests.get( f"{tracking_uri}/api/2.0/mlflow/runs/get", params={"run_id": run_b}, auth=(limited_user, limited_password), headers={WORKSPACE_HEADER_NAME: workspace_b}, ) assert resp.status_code == 403 assert "Permission denied" in resp.text # GraphQL: only runs from authorized workspace should appear. runs = _graphql_search_runs( tracking_uri, workspace_a, auth=(limited_user, limited_password), experiment_ids=[exp_a, exp_b], ) returned_ids = {run["info"]["runId"] for run in runs} assert returned_ids == {run_a} # Switching to an unauthorized workspace should yield no readable runs. runs_in_b = _graphql_search_runs( tracking_uri, workspace_b, auth=(limited_user, limited_password), experiment_ids=[exp_b] ) assert runs_in_b == [] def test_registered_model_access_controls_across_workspaces(workspace_setup, monkeypatch): client, tracking_uri, workspace_a, username, password = workspace_setup workspace_b = f"team-{random_str()}" _create_workspace(tracking_uri, workspace_b) with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): client.set_workspace_permission(workspace_a, username, "MANAGE") client.set_workspace_permission(workspace_b, username, "MANAGE") # Create resources in both workspaces as the regular user. exp_a = _create_experiment(tracking_uri, workspace_a, auth=(username, password)) run_a = _create_run(tracking_uri, workspace_a, exp_a, auth=(username, password)) model_a = f"model-a-{random_str()}" _create_registered_model(tracking_uri, workspace_a, model_a, auth=(username, password)) _create_model_version(tracking_uri, workspace_a, model_a, run_a, auth=(username, password)) exp_b = _create_experiment(tracking_uri, workspace_b, auth=(username, password)) run_b = _create_run(tracking_uri, workspace_b, exp_b, auth=(username, password)) model_b = f"model-b-{random_str()}" _create_registered_model(tracking_uri, workspace_b, model_b, auth=(username, password)) _create_model_version(tracking_uri, workspace_b, model_b, run_b, auth=(username, password)) limited_user, limited_password = create_user(tracking_uri) with User(ADMIN_USERNAME, ADMIN_PASSWORD, monkeypatch): client.set_workspace_permission(workspace_a, limited_user, "READ") # Positive: limited user can read model in authorized workspace. resp_ok = requests.get( f"{tracking_uri}/api/2.0/mlflow/registered-models/get", params={"name": model_a}, auth=(limited_user, limited_password), headers={WORKSPACE_HEADER_NAME: workspace_a}, ) assert resp_ok.status_code == 200 # GraphQL: only model versions from the permitted workspace should appear. versions = _graphql_search_model_versions( tracking_uri, workspace_a, auth=(limited_user, limited_password), filter_string=None ) assert {v["name"] for v in versions} == {model_a} # REST: direct model get in another workspace should be forbidden. resp = requests.get( f"{tracking_uri}/api/2.0/mlflow/registered-models/get", params={"name": model_b}, auth=(limited_user, limited_password), headers={WORKSPACE_HEADER_NAME: workspace_b}, ) assert resp.status_code == 403 assert "Permission denied" in resp.text
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/auth/test_client_workspace.py", "license": "Apache License 2.0", "lines": 309, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/auth/test_sqlalchemy_store_workspace.py
import pytest from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.server.auth.entities import WorkspacePermission from mlflow.server.auth.permissions import EDIT, MANAGE, NO_PERMISSIONS, READ from mlflow.server.auth.sqlalchemy_store import SqlAlchemyStore from mlflow.utils.workspace_context import WorkspaceContext from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME from tests.helper_functions import random_str from tests.server.auth.test_sqlalchemy_store import _rmp_maker, _user_maker pytest_plugins = ["tests.server.auth.test_sqlalchemy_store"] pytestmark = pytest.mark.notrackingurimock @pytest.fixture def store(tmp_sqlite_uri): store = SqlAlchemyStore() store.init_db(tmp_sqlite_uri) return store def test_set_workspace_permission_creates_and_updates(store): workspace = "team-alpha" username = random_str() user = store.create_user(username, random_str()) perm = store.set_workspace_permission(workspace, username, READ.name) assert isinstance(perm, WorkspacePermission) assert perm.workspace == workspace assert perm.user_id == user.id assert perm.permission == READ.name updated = store.set_workspace_permission(workspace, username, MANAGE.name) assert updated.permission == MANAGE.name def test_get_workspace_permission_precedence(store): workspace = "team-beta" username = random_str() store.create_user(username, random_str()) assert store.get_workspace_permission(workspace, username) is None store.set_workspace_permission(workspace, username, READ.name) perm = store.get_workspace_permission(workspace, username) assert perm == READ def test_list_workspace_permissions(store): workspace = "team-gamma" other_workspace = "team-delta" username = random_str() other_username = random_str() user = store.create_user(username, random_str()) other_user = store.create_user(other_username, random_str()) p1 = store.set_workspace_permission(workspace, username, READ.name) p2 = store.set_workspace_permission(workspace, other_username, EDIT.name) p3 = store.set_workspace_permission(other_workspace, username, MANAGE.name) perms = store.list_workspace_permissions(workspace) actual = {(perm.workspace, perm.user_id, perm.permission) for perm in perms} expected = { (p1.workspace, user.id, p1.permission), (p2.workspace, other_user.id, p2.permission), } assert actual == expected perms_other = store.list_workspace_permissions(other_workspace) assert {(perm.workspace, perm.user_id, perm.permission) for perm in perms_other} == { (p3.workspace, user.id, p3.permission) } def test_delete_workspace_permission(store): workspace = "workspace-delete" username = random_str() store.create_user(username, random_str()) store.set_workspace_permission(workspace, username, READ.name) store.delete_workspace_permission(workspace, username) assert store.get_workspace_permission(workspace, username) is None with pytest.raises( MlflowException, match=( "Workspace permission does not exist for " f"workspace='{workspace}', username='{username}'" ), ): store.delete_workspace_permission(workspace, username) def test_delete_workspace_permissions_for_workspace(store): workspace = "workspace-delete-all" other_workspace = "workspace-keep" username = random_str() store.create_user(username, random_str()) store.set_workspace_permission(workspace, username, READ.name) store.set_workspace_permission(other_workspace, username, EDIT.name) store.delete_workspace_permissions_for_workspace(workspace) assert store.list_workspace_permissions(workspace) == [] remaining = store.list_workspace_permissions(other_workspace) assert len(remaining) == 1 assert remaining[0].workspace == other_workspace def test_list_accessible_workspace_names(store): username = random_str() other_user = random_str() store.create_user(username, random_str()) store.create_user(other_user, random_str()) store.set_workspace_permission("workspace-read", username, READ.name) store.set_workspace_permission("workspace-edit", username, EDIT.name) store.set_workspace_permission("workspace-no-access", username, NO_PERMISSIONS.name) store.set_workspace_permission("workspace-other", other_user, READ.name) accessible = store.list_accessible_workspace_names(username) assert accessible == {"workspace-read", "workspace-edit"} assert store.list_accessible_workspace_names(other_user) == { "workspace-other", } assert store.list_accessible_workspace_names(None) == set() def test_rename_registered_model_permissions_scoped_by_workspace(store, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") username = random_str() password = random_str() _user_maker(store, username, password) with WorkspaceContext("workspace-a"): _rmp_maker(store, "model", username, READ.name) with WorkspaceContext("workspace-b"): _rmp_maker(store, "model", username, READ.name) with WorkspaceContext("workspace-a"): store.rename_registered_model_permissions("model", "model-renamed") renamed = store.get_registered_model_permission("model-renamed", username) assert renamed.name == "model-renamed" assert renamed.workspace == "workspace-a" with pytest.raises( MlflowException, match=( "Registered model permission with workspace=workspace-a, name=model and username=" ), ): store.get_registered_model_permission("model", username) with WorkspaceContext("workspace-b"): still_original = store.get_registered_model_permission("model", username) assert still_original.name == "model" assert still_original.workspace == "workspace-b" def test_registered_model_permissions_are_workspace_scoped(store, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") username = random_str() password = random_str() _user_maker(store, username, password) model_name = random_str() workspace_alt = f"workspace-{random_str()}" with WorkspaceContext(DEFAULT_WORKSPACE_NAME): store.create_registered_model_permission(model_name, username, READ.name) with WorkspaceContext(workspace_alt): perm_alt = store.create_registered_model_permission(model_name, username, EDIT.name) assert perm_alt.workspace == workspace_alt with WorkspaceContext(DEFAULT_WORKSPACE_NAME): perm_default = store.get_registered_model_permission(model_name, username) assert perm_default.permission == READ.name assert perm_default.workspace == DEFAULT_WORKSPACE_NAME perms_default = store.list_registered_model_permissions(username) assert [p.permission for p in perms_default] == [READ.name] with WorkspaceContext(workspace_alt): perm_alt_lookup = store.get_registered_model_permission(model_name, username) assert perm_alt_lookup.permission == EDIT.name assert perm_alt_lookup.workspace == workspace_alt perms_alt = store.list_registered_model_permissions(username) assert [p.permission for p in perms_alt] == [EDIT.name] # Switching back to default workspace should not affect alternate workspace permission with WorkspaceContext(DEFAULT_WORKSPACE_NAME): updated = store.update_registered_model_permission(model_name, username, MANAGE.name) assert updated.permission == MANAGE.name assert updated.workspace == DEFAULT_WORKSPACE_NAME with WorkspaceContext(workspace_alt): perm_alt_post_update = store.get_registered_model_permission(model_name, username) assert perm_alt_post_update.permission == EDIT.name assert perm_alt_post_update.workspace == workspace_alt def test_delete_registered_model_permissions_scoped_by_workspace(store, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") username1 = random_str() username2 = random_str() _user_maker(store, username1, random_str()) _user_maker(store, username2, random_str()) model_name = random_str() with WorkspaceContext("workspace-a"): _rmp_maker(store, model_name, username1, READ.name) _rmp_maker(store, model_name, username2, EDIT.name) with WorkspaceContext("workspace-b"): _rmp_maker(store, model_name, username1, MANAGE.name) with WorkspaceContext("workspace-a"): store.delete_registered_model_permissions(model_name) with pytest.raises(MlflowException, match="Registered model permission .* not found"): store.get_registered_model_permission(model_name, username1) with pytest.raises(MlflowException, match="Registered model permission .* not found"): store.get_registered_model_permission(model_name, username2) with WorkspaceContext("workspace-b"): remaining = store.get_registered_model_permission(model_name, username1) assert remaining.permission == MANAGE.name assert remaining.workspace == "workspace-b"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/auth/test_sqlalchemy_store_workspace.py", "license": "Apache License 2.0", "lines": 179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/jobs/test_jobs_workspace.py
import json import uuid from pathlib import Path from unittest.mock import MagicMock, patch import pytest from mlflow.entities import Workspace from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.genai.scorers.builtin_scorers import Completeness from mlflow.genai.scorers.job import run_online_scoring_scheduler from mlflow.genai.scorers.online.entities import OnlineScorer, OnlineScoringConfig from mlflow.store.jobs.sqlalchemy_store import SqlAlchemyJobStore from mlflow.store.jobs.sqlalchemy_workspace_store import WorkspaceAwareSqlAlchemyJobStore from mlflow.utils.workspace_context import WorkspaceContext, get_request_workspace from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME def test_sqlalchemy_job_store_defaults_to_legacy_workspace(tmp_path: Path): backend_store_uri = f"sqlite:///{tmp_path / 'workspace-default.db'}" store = SqlAlchemyJobStore(backend_store_uri) job = store.create_job("tests.server.jobs.test_jobs.basic_job_fun", '{"value": 1}') assert job.workspace == DEFAULT_WORKSPACE_NAME stored = store.get_job(job.job_id) assert stored.workspace == DEFAULT_WORKSPACE_NAME def test_sqlalchemy_job_store_isolates_workspaces(monkeypatch: pytest.MonkeyPatch, tmp_path: Path): monkeypatch.setenv("MLFLOW_ENABLE_WORKSPACES", "true") backend_store_uri = f"sqlite:///{tmp_path / 'workspace-aware.db'}" store = WorkspaceAwareSqlAlchemyJobStore(backend_store_uri) with WorkspaceContext("team-a"): job_team_a = store.create_job("tests.server.jobs.test_jobs.basic_job_fun", '{"value": 1}') with WorkspaceContext("team-b"): job_team_b = store.create_job("tests.server.jobs.test_jobs.basic_job_fun", '{"value": 2}') with WorkspaceContext("team-a"): fetched_a = store.get_job(job_team_a.job_id) assert fetched_a.workspace == "team-a" with pytest.raises(MlflowException, match="not found"): store.get_job(job_team_b.job_id) assert {job.job_id for job in store.list_jobs()} == {job_team_a.job_id} with WorkspaceContext("team-b"): fetched_b = store.get_job(job_team_b.job_id) assert fetched_b.workspace == "team-b" assert {job.job_id for job in store.list_jobs()} == {job_team_b.job_id} def test_scheduler_runs_per_workspace(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") mock_scorer = OnlineScorer( name="completeness", serialized_scorer=json.dumps(Completeness().model_dump()), online_config=OnlineScoringConfig( online_scoring_config_id=uuid.uuid4().hex, scorer_id=uuid.uuid4().hex, sample_rate=1.0, experiment_id="exp1", filter_string=None, ), ) mock_tracking_store = MagicMock() workspace_calls = [] def _get_active_online_scorers(): workspace_calls.append(get_request_workspace()) return [mock_scorer] mock_tracking_store.get_active_online_scorers.side_effect = _get_active_online_scorers mock_workspace_store = MagicMock() mock_workspace_store.list_workspaces.return_value = [ Workspace(name="team-a"), Workspace(name="team-b"), ] with ( patch("mlflow.genai.scorers.job._get_tracking_store", return_value=mock_tracking_store), patch( "mlflow.server.workspace_helpers._get_workspace_store", return_value=mock_workspace_store, ), patch("mlflow.genai.scorers.job.submit_job") as mock_submit_job, ): run_online_scoring_scheduler() assert workspace_calls == ["team-a", "team-b"] assert mock_submit_job.call_count == 2
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/jobs/test_jobs_workspace.py", "license": "Apache License 2.0", "lines": 75, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/test_otel_api.py
from unittest import mock from fastapi import FastAPI from fastapi.testclient import TestClient from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ExportTraceServiceRequest from mlflow.entities import Workspace from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.server.fastapi_app import add_fastapi_workspace_middleware from mlflow.server.otel_api import otel_router from mlflow.tracing.utils.otlp import OTLP_TRACES_PATH from mlflow.utils import workspace_context from mlflow.utils.workspace_utils import WORKSPACE_HEADER_NAME def _build_otlp_payload(): request = ExportTraceServiceRequest() span = request.resource_spans.add().scope_spans.add().spans.add() span.trace_id = b"\x00" * 16 span.span_id = b"\x01" * 8 span.name = "span" return request.SerializeToString() def _make_test_client(): app = FastAPI() add_fastapi_workspace_middleware(app) app.include_router(otel_router) return TestClient(app) def test_workspace_scoped_otlp_endpoint_sets_workspace(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") class DummyTrackingStore: def __init__(self): self.calls = [] def log_spans(self, experiment_id, spans): self.calls.append((workspace_context.get_request_workspace(), experiment_id, spans)) tracking_store = DummyTrackingStore() captured = {} def fake_resolve(_path, header_workspace): captured["requested"] = header_workspace return Workspace(name=header_workspace) monkeypatch.setattr( "mlflow.server.fastapi_app.resolve_workspace_for_request_if_enabled", fake_resolve, ) monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: tracking_store, ) client = _make_test_client() response = client.post( OTLP_TRACES_PATH, data=_build_otlp_payload(), headers={ "Content-Type": "application/x-protobuf", "X-MLflow-Experiment-Id": "42", WORKSPACE_HEADER_NAME: "team-a", }, ) assert response.status_code == 200 assert captured["requested"].strip() == "team-a" assert tracking_store.calls[0][0] == "team-a" # Workspace context should be cleared after the request assert workspace_context.get_request_workspace() is None def test_default_otlp_endpoint_uses_default_workspace(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") class DummyTrackingStore: def __init__(self): self.calls = [] def log_spans(self, experiment_id, spans): self.calls.append((workspace_context.get_request_workspace(), experiment_id, spans)) tracking_store = DummyTrackingStore() captured = {} def fake_resolve(_path, header_workspace): captured["requested"] = header_workspace return Workspace(name="default") monkeypatch.setattr( "mlflow.server.fastapi_app.resolve_workspace_for_request_if_enabled", fake_resolve, ) monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: tracking_store, ) client = _make_test_client() response = client.post( OTLP_TRACES_PATH, data=_build_otlp_payload(), headers={ "Content-Type": "application/x-protobuf", "X-MLflow-Experiment-Id": "7", }, ) assert response.status_code == 200 assert captured["requested"] is None assert tracking_store.calls[0][0] == "default" assert workspace_context.get_request_workspace() is None def test_otlp_endpoint_without_default_workspace_raises_error(monkeypatch): from mlflow.store.workspace_aware_mixin import WorkspaceAwareMixin monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") class DummyWorkspaceAwareStore(WorkspaceAwareMixin): """A dummy store that raises MlflowException when workspace is not set.""" def log_spans(self, experiment_id, spans): # This will raise MlflowException if workspace context is not set self._get_active_workspace() def fake_resolve(_path, _header_workspace): return None monkeypatch.setattr( "mlflow.server.fastapi_app.resolve_workspace_for_request_if_enabled", fake_resolve, ) monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: DummyWorkspaceAwareStore(), ) client = _make_test_client() response = client.post( OTLP_TRACES_PATH, data=_build_otlp_payload(), headers={ "Content-Type": "application/x-protobuf", "X-MLflow-Experiment-Id": "42", }, ) assert response.status_code == 400 assert "Active workspace is required" in response.json()["message"] def test_otlp_invalid_content_type(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: mock.Mock(), ) client = _make_test_client() # Test with wrong content type response = client.post( OTLP_TRACES_PATH, data=_build_otlp_payload(), headers={ "Content-Type": "application/json", "X-MLflow-Experiment-Id": "42", }, ) assert response.status_code == 400 assert "Invalid Content-Type" in response.json()["detail"] # Test with missing content type response = client.post( OTLP_TRACES_PATH, data=_build_otlp_payload(), headers={ "X-MLflow-Experiment-Id": "42", }, ) assert response.status_code == 400 assert "Invalid Content-Type" in response.json()["detail"] def test_otlp_invalid_protobuf_data(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: mock.Mock(), ) client = _make_test_client() # Test with invalid protobuf data response = client.post( OTLP_TRACES_PATH, data=b"this is not valid protobuf data", headers={ "Content-Type": "application/x-protobuf", "X-MLflow-Experiment-Id": "42", }, ) assert response.status_code == 400 assert "Invalid OpenTelemetry protobuf format" in response.json()["detail"] def test_otlp_empty_resource_spans(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: mock.Mock(), ) client = _make_test_client() # Create request with no resource spans request = ExportTraceServiceRequest() response = client.post( OTLP_TRACES_PATH, data=request.SerializeToString(), headers={ "Content-Type": "application/x-protobuf", "X-MLflow-Experiment-Id": "42", }, ) assert response.status_code == 400 assert "no spans found" in response.json()["detail"] def test_otlp_conversion_error(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") monkeypatch.setattr( "mlflow.server.otel_api._get_tracking_store", lambda: mock.Mock(), ) # Mock Span.from_otel_proto to raise exception def mock_from_otel_proto(proto_span): raise Exception("Cannot convert span") monkeypatch.setattr( "mlflow.entities.span.Span.from_otel_proto", mock_from_otel_proto, ) client = _make_test_client() response = client.post( OTLP_TRACES_PATH, data=_build_otlp_payload(), headers={ "Content-Type": "application/x-protobuf", "X-MLflow-Experiment-Id": "42", }, ) assert response.status_code == 422 assert "Cannot convert OpenTelemetry span" in response.json()["detail"]
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/test_otel_api.py", "license": "Apache License 2.0", "lines": 209, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/test_workspace_endpoints.py
from __future__ import annotations import json from unittest import mock import pytest from flask import Flask from mlflow.entities.workspace import Workspace, WorkspaceDeletionMode from mlflow.server.handlers import get_endpoints @pytest.fixture(autouse=True) def enable_workspaces(monkeypatch): monkeypatch.setenv("MLFLOW_ENABLE_WORKSPACES", "true") @pytest.fixture def app(monkeypatch): flask_app = Flask(__name__) for rule, view_func, methods in get_endpoints(): flask_app.add_url_rule(rule, view_func=view_func, methods=methods) return flask_app @pytest.fixture def mock_workspace_store(monkeypatch): store = mock.Mock() monkeypatch.setattr( "mlflow.server.handlers._get_workspace_store", lambda *_, **__: store, ) return store @pytest.fixture def mock_tracking_store(monkeypatch): store = mock.Mock() store.artifact_root_uri = "/default/artifact/root" monkeypatch.setattr( "mlflow.server.handlers._get_tracking_store", lambda *_, **__: store, ) return store def _workspace_to_json(payload): return json.loads(payload) def test_list_workspaces_endpoint(app, mock_workspace_store): mock_workspace_store.list_workspaces.return_value = [ Workspace(name="default", description="Default"), Workspace(name="team-a", description=None), ] with app.test_client() as client: response = client.get("/api/3.0/mlflow/workspaces") assert response.status_code == 200 payload = _workspace_to_json(response.get_data(True)) assert payload["workspaces"][0] == {"name": "default", "description": "Default"} assert payload["workspaces"][1] == {"name": "team-a"} mock_workspace_store.list_workspaces.assert_called_once_with() def test_create_workspace_endpoint(app, mock_workspace_store, mock_tracking_store): created = Workspace(name="team-b", description="Team B") mock_workspace_store.create_workspace.return_value = created with app.test_client() as client: response = client.post( "/api/3.0/mlflow/workspaces", json={"name": "team-b", "description": "Team B"}, ) assert response.status_code == 201 payload = _workspace_to_json(response.get_data(True)) assert payload == {"workspace": {"name": "team-b", "description": "Team B"}} mock_workspace_store.create_workspace.assert_called_once() mock_tracking_store.get_experiment_by_name.assert_not_called() mock_tracking_store.create_experiment.assert_not_called() def test_get_workspace_endpoint(app, mock_workspace_store): mock_workspace_store.get_workspace.return_value = Workspace(name="team-c", description="Team C") with app.test_client() as client: response = client.get("/api/3.0/mlflow/workspaces/team-c") assert response.status_code == 200 payload = _workspace_to_json(response.get_data(True)) assert payload == {"workspace": {"name": "team-c", "description": "Team C"}} mock_workspace_store.get_workspace.assert_called_once_with("team-c") def test_update_workspace_endpoint(app, mock_workspace_store): updated = Workspace(name="team-d", description="Updated") mock_workspace_store.update_workspace.return_value = updated with app.test_client() as client: response = client.patch( "/api/3.0/mlflow/workspaces/team-d", json={"description": "Updated"}, ) assert response.status_code == 200 payload = _workspace_to_json(response.get_data(True)) assert payload == {"workspace": {"name": "team-d", "description": "Updated"}} mock_workspace_store.update_workspace.assert_called_once() def test_update_default_workspace_allows_reserved_name(app, mock_workspace_store): updated = Workspace(name="default", default_artifact_root="s3://bucket/root") mock_workspace_store.update_workspace.return_value = updated with app.test_client() as client: response = client.patch( "/api/3.0/mlflow/workspaces/default", json={"default_artifact_root": "s3://bucket/root"}, ) assert response.status_code == 200 payload = _workspace_to_json(response.get_data(True)) assert payload == { "workspace": {"name": "default", "default_artifact_root": "s3://bucket/root"} } args, _ = mock_workspace_store.update_workspace.call_args assert args[0].name == "default" assert args[0].default_artifact_root == "s3://bucket/root" def test_update_workspace_can_clear_default_artifact_root( app, mock_workspace_store, mock_tracking_store ): cleared = Workspace(name="team-clear", description=None, default_artifact_root=None) mock_workspace_store.update_workspace.return_value = cleared with app.test_client() as client: response = client.patch( "/api/3.0/mlflow/workspaces/team-clear", json={"default_artifact_root": " "}, ) assert response.status_code == 200 payload = _workspace_to_json(response.get_data(True)) assert payload == {"workspace": {"name": "team-clear"}} args, _ = mock_workspace_store.update_workspace.call_args assert isinstance(args[0], Workspace) assert args[0].name == "team-clear" # Handler passes "" to indicate "clear"; the store converts "" to None assert args[0].default_artifact_root == "" def test_delete_workspace_endpoint(app, mock_workspace_store): with app.test_client() as client: response = client.delete("/api/3.0/mlflow/workspaces/team-e") assert response.status_code == 204 mock_workspace_store.delete_workspace.assert_called_once_with( "team-e", mode=WorkspaceDeletionMode.RESTRICT ) def test_delete_default_workspace_rejected_by_validation(app, mock_workspace_store): with app.test_client() as client: response = client.delete("/api/3.0/mlflow/workspaces/default") assert response.status_code == 400 payload = _workspace_to_json(response.get_data(True)) assert "cannot be deleted" in payload["message"] mock_workspace_store.delete_workspace.assert_not_called() def test_create_workspace_fails_without_artifact_root(app, mock_workspace_store, monkeypatch): tracking_store = mock.Mock() tracking_store.artifact_root_uri = None monkeypatch.setattr( "mlflow.server.handlers._get_tracking_store", lambda *_, **__: tracking_store, ) with app.test_client() as client: response = client.post( "/api/3.0/mlflow/workspaces", json={"name": "team-no-root"}, ) assert response.status_code == 400 payload = _workspace_to_json(response.get_data(True)) assert "artifact root" in payload["message"].lower() def test_create_workspace_with_artifact_root_succeeds_without_server_default( app, mock_workspace_store, monkeypatch ): tracking_store = mock.Mock() tracking_store.artifact_root_uri = None monkeypatch.setattr( "mlflow.server.handlers._get_tracking_store", lambda *_, **__: tracking_store, ) created = Workspace(name="team-with-root", default_artifact_root="s3://bucket/path") mock_workspace_store.create_workspace.return_value = created with app.test_client() as client: response = client.post( "/api/3.0/mlflow/workspaces", json={"name": "team-with-root", "default_artifact_root": "s3://bucket/path"}, ) assert response.status_code == 201 def test_create_default_workspace_rejected(app, mock_workspace_store, mock_tracking_store): with app.test_client() as client: response = client.post( "/api/3.0/mlflow/workspaces", json={"name": "default"}, ) assert response.status_code == 400 payload = _workspace_to_json(response.get_data(True)) assert "reserved" in payload["message"] mock_workspace_store.create_workspace.assert_not_called() def test_update_workspace_clear_artifact_root_fails_without_server_default( app, mock_workspace_store, monkeypatch ): tracking_store = mock.Mock() tracking_store.artifact_root_uri = None monkeypatch.setattr( "mlflow.server.handlers._get_tracking_store", lambda *_, **__: tracking_store, ) with app.test_client() as client: response = client.patch( "/api/3.0/mlflow/workspaces/team-clear", json={"default_artifact_root": ""}, ) assert response.status_code == 400 payload = _workspace_to_json(response.get_data(True)) assert "artifact root" in payload["message"].lower()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/test_workspace_endpoints.py", "license": "Apache License 2.0", "lines": 188, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/server/test_workspace_middleware.py
from __future__ import annotations import pytest import werkzeug from fastapi import FastAPI from fastapi.testclient import TestClient from flask import Flask from mlflow.entities import Workspace from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.server import app as flask_app from mlflow.server.fastapi_app import add_fastapi_workspace_middleware from mlflow.server.job_api import job_api_router from mlflow.server.workspace_helpers import ( WORKSPACE_HEADER_NAME, workspace_before_request_handler, workspace_teardown_request_handler, ) from mlflow.utils import workspace_context @pytest.fixture def flask_workspace_app(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") if not hasattr(werkzeug, "__version__"): werkzeug.__version__ = "tests" app = Flask(__name__) app.before_request(workspace_before_request_handler) app.teardown_request(workspace_teardown_request_handler) @app.route("/ping") def _ping(): return workspace_context.get_request_workspace() or "none" return app def test_flask_workspace_middleware_sets_context(flask_workspace_app, monkeypatch): class DummyWorkspaceStore: def get_workspace(self, name): return Workspace(name=name) store = DummyWorkspaceStore() monkeypatch.setattr( "mlflow.server.workspace_helpers._get_workspace_store", lambda workspace_uri=None, tracking_uri=None: store, ) client = flask_workspace_app.test_client() resp = client.get("/ping", headers={WORKSPACE_HEADER_NAME: "team-a"}) assert resp.data.decode() == "team-a" assert workspace_context.get_request_workspace() is None def test_flask_workspace_middleware_requires_header(flask_workspace_app, monkeypatch): class DefaultlessWorkspaceStore: def get_default_workspace(self): raise MlflowException.invalid_parameter_value("Active workspace is required.") store = DefaultlessWorkspaceStore() monkeypatch.setattr( "mlflow.server.workspace_helpers._get_workspace_store", lambda workspace_uri=None, tracking_uri=None: store, ) client = flask_workspace_app.test_client() resp = client.get("/ping") assert resp.status_code == 400 assert "Active workspace is required" in resp.json["message"] assert workspace_context.get_request_workspace() is None def _fastapi_workspace_app(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") app = FastAPI() add_fastapi_workspace_middleware(app) ping_path = f"{job_api_router.prefix}/ping" @app.get(ping_path) async def ping(): return {"workspace": workspace_context.get_request_workspace()} return app, ping_path def test_fastapi_workspace_middleware_sets_context(monkeypatch): app, ping_path = _fastapi_workspace_app(monkeypatch) monkeypatch.setattr( "mlflow.server.fastapi_app.resolve_workspace_for_request_if_enabled", lambda _path, header: Workspace(name=header), ) client = TestClient(app) resp = client.get(ping_path, headers={WORKSPACE_HEADER_NAME: "team-fast"}) assert resp.status_code == 200 assert resp.json() == {"workspace": "team-fast"} assert workspace_context.get_request_workspace() is None def test_fastapi_workspace_middleware_handles_missing_header(monkeypatch): app, ping_path = _fastapi_workspace_app(monkeypatch) monkeypatch.setattr( "mlflow.server.fastapi_app.resolve_workspace_for_request_if_enabled", lambda _path, _header: None, ) client = TestClient(app) resp = client.get(ping_path) assert resp.status_code == 200 assert resp.json() == {"workspace": None} assert workspace_context.get_request_workspace() is None def test_server_info_workspaces_enabled(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") client = flask_app.test_client() resp = client.get("/api/3.0/mlflow/server-info") assert resp.status_code == 200 data = resp.get_json() assert data["workspaces_enabled"] is True # Disable workspaces and ensure the endpoint reflects the change. monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") resp = client.get("/api/3.0/mlflow/server-info") assert resp.status_code == 200 data = resp.get_json() assert data["workspaces_enabled"] is False def test_server_info_skips_workspace_resolution(monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") def _raise_if_called(_header_workspace): raise AssertionError("workspace resolution should not run for server-info") monkeypatch.setattr( "mlflow.server.workspace_helpers.resolve_workspace_from_header", _raise_if_called ) client = flask_app.test_client() resp = client.get("/api/3.0/mlflow/server-info", headers={WORKSPACE_HEADER_NAME: "missing"}) assert resp.status_code == 200 data = resp.get_json() assert data["workspaces_enabled"] is True def test_fastapi_wsgi_flask_workspace_propagation(monkeypatch): from fastapi.middleware.wsgi import WSGIMiddleware monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") if not hasattr(werkzeug, "__version__"): werkzeug.__version__ = "tests" flask_resolution_count = [] original_resolve = workspace_context.is_request_workspace_resolved def tracking_is_resolved(): result = original_resolve() flask_resolution_count.append(result) return result monkeypatch.setattr( "mlflow.server.workspace_helpers.workspace_context.is_request_workspace_resolved", tracking_is_resolved, ) test_flask_app = Flask(__name__) test_flask_app.before_request(workspace_before_request_handler) test_flask_app.teardown_request(workspace_teardown_request_handler) @test_flask_app.route("/flask-ping") def _flask_ping(): return workspace_context.get_request_workspace() or "none" fastapi_app = FastAPI() add_fastapi_workspace_middleware(fastapi_app) fastapi_app.mount("/", WSGIMiddleware(test_flask_app)) monkeypatch.setattr( "mlflow.server.fastapi_app.resolve_workspace_for_request_if_enabled", lambda _path, header: Workspace(name=header) if header else None, ) client = TestClient(fastapi_app) resp = client.get("/flask-ping", headers={WORKSPACE_HEADER_NAME: "team-wsgi"}) assert resp.status_code == 200 assert resp.text == "team-wsgi" assert len(flask_resolution_count) == 1 assert flask_resolution_count[0] is True
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/test_workspace_middleware.py", "license": "Apache License 2.0", "lines": 147, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/model_registry/test_rest_store_workspace.py
from unittest import mock import pytest from mlflow.exceptions import MlflowException from mlflow.store.model_registry.rest_store import RestStore from mlflow.utils.rest_utils import MlflowHostCreds ACTIVE_WORKSPACE = "team-a" def test_model_registry_rest_store_workspace_guard(): creds = MlflowHostCreds("https://hello") store = RestStore(lambda: creds) store._workspace_support = False with mock.patch( "mlflow.store.workspace_rest_store_mixin.get_request_workspace", return_value=ACTIVE_WORKSPACE, ): with pytest.raises( MlflowException, match="Active workspace 'team-a' cannot be used because the remote server does not", ): store.search_registered_models()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/model_registry/test_rest_store_workspace.py", "license": "Apache License 2.0", "lines": 19, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/model_registry/test_sqlalchemy_workspace_store.py
import shutil import uuid import pytest from mlflow.entities.model_registry import ModelVersionTag, RegisteredModelTag from mlflow.entities.webhook import WebhookAction, WebhookEntity, WebhookEvent from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore from mlflow.store.model_registry.sqlalchemy_workspace_store import WorkspaceAwareSqlAlchemyStore from mlflow.utils.workspace_context import WorkspaceContext, clear_server_request_workspace from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME @pytest.fixture def workspace_registry_store(db_uri, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") store = WorkspaceAwareSqlAlchemyStore(db_uri) try: yield store finally: store.engine.dispose() def _names_from_search(results): return {rm.name for rm in results} def test_registered_model_operations_are_workspace_scoped(workspace_registry_store): with WorkspaceContext("team-a"): workspace_registry_store.create_registered_model("alpha") workspace_registry_store.set_registered_model_tag( "alpha", RegisteredModelTag("owner", "team-a") ) rm = workspace_registry_store.get_registered_model("alpha") assert rm.tags == {"owner": "team-a"} with WorkspaceContext("team-b"): workspace_registry_store.create_registered_model("beta") with pytest.raises( MlflowException, match="Registered Model with name=alpha not found" ) as excinfo: workspace_registry_store.set_registered_model_tag( "alpha", RegisteredModelTag("owner", "team-b") ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises(MlflowException, match="Registered Model with name=alpha not found"): workspace_registry_store.rename_registered_model("alpha", "alpha-b") with pytest.raises(MlflowException, match="Registered Model with name=alpha not found"): workspace_registry_store.delete_registered_model("alpha") with WorkspaceContext("team-b"): names = _names_from_search(workspace_registry_store.search_registered_models()) assert names == {"beta"} with WorkspaceContext("team-b"): with pytest.raises( MlflowException, match="Registered Model with name=alpha-renamed not found" ) as excinfo: workspace_registry_store.set_registered_model_tag( "alpha-renamed", RegisteredModelTag("owner", "team-b") ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match="Registered Model with name=alpha-renamed not found" ): workspace_registry_store.get_registered_model("alpha-renamed") # Ensure team-b model remains accessible beta = workspace_registry_store.get_registered_model("beta") assert beta.name == "beta" with WorkspaceContext("team-a"): workspace_registry_store.rename_registered_model("alpha", "alpha-renamed") renamed = workspace_registry_store.get_registered_model("alpha-renamed") assert renamed.name == "alpha-renamed" assert renamed.tags == {"owner": "team-a"} with WorkspaceContext("team-b"): with pytest.raises( MlflowException, match="Registered Model with name=alpha-renamed not found" ) as excinfo: workspace_registry_store.set_registered_model_tag( "alpha-renamed", RegisteredModelTag("owner", "team-b") ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match="Registered Model with name=alpha-renamed not found" ): workspace_registry_store.get_registered_model("alpha-renamed") with WorkspaceContext("team-a"): workspace_registry_store.delete_registered_model("alpha-renamed") with pytest.raises( MlflowException, match="Registered Model with name=alpha-renamed not found" ) as excinfo: workspace_registry_store.get_registered_model("alpha-renamed") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_model_version_operations_are_workspace_scoped(workspace_registry_store): with WorkspaceContext("team-a"): workspace_registry_store.create_registered_model("alpha") mv_a = workspace_registry_store.create_model_version( "alpha", "s3://team-a/model", run_id=uuid.uuid4().hex ) assert mv_a.version == 1 workspace_registry_store.set_model_version_tag( "alpha", str(mv_a.version), ModelVersionTag("env", "prod") ) workspace_registry_store.transition_model_version_stage( "alpha", str(mv_a.version), "Production", archive_existing_versions=False ) workspace_registry_store.set_registered_model_alias( "alpha", "production", str(mv_a.version) ) mv_detail = workspace_registry_store.get_model_version("alpha", "1") assert mv_detail.current_stage == "Production" assert mv_detail.tags == {"env": "prod"} aliases = workspace_registry_store.get_registered_model("alpha").aliases assert aliases == {"production": 1} download_uri = workspace_registry_store.get_model_version_download_uri("alpha", "1") assert download_uri == "s3://team-a/model" with WorkspaceContext("team-b"): workspace_registry_store.create_registered_model("beta") with pytest.raises( MlflowException, match="Registered Model with name=alpha not found" ) as excinfo: workspace_registry_store.create_model_version( "alpha", "s3://team-b/model", run_id=uuid.uuid4().hex ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" version_scoped_calls = ( lambda: workspace_registry_store.transition_model_version_stage( "alpha", "1", "Archived", archive_existing_versions=False ), lambda: workspace_registry_store.set_model_version_tag( "alpha", "1", ModelVersionTag("env", "stage") ), lambda: workspace_registry_store.delete_model_version_tag("alpha", "1", "env"), lambda: workspace_registry_store.delete_model_version("alpha", "1"), lambda: workspace_registry_store.get_model_version_download_uri("alpha", "1"), ) for call in version_scoped_calls: with pytest.raises( MlflowException, match=r"Model Version \(name=alpha, version=1\) not found" ) as excinfo: call() assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" alias_scoped_calls = ( lambda: workspace_registry_store.set_registered_model_alias("alpha", "shadow", "1"), lambda: workspace_registry_store.delete_registered_model_alias("alpha", "production"), ) for call in alias_scoped_calls: with pytest.raises( MlflowException, match=( r"(Model Version \(name=alpha, version=1\) not found|" r"Registered Model with name=alpha not found)" ), ) as excinfo: call() assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with WorkspaceContext("team-a"): workspace_registry_store.delete_model_version_tag("alpha", "1", "env") mv_detail = workspace_registry_store.get_model_version("alpha", "1") assert mv_detail.tags == {} workspace_registry_store.delete_registered_model_alias("alpha", "production") assert workspace_registry_store.get_registered_model("alpha").aliases == {} workspace_registry_store.delete_model_version("alpha", "1") with pytest.raises( MlflowException, match=r"Model Version \(name=alpha, version=1\) not found" ) as excinfo: workspace_registry_store.get_model_version("alpha", "1") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_model_version_read_helpers_are_workspace_scoped(workspace_registry_store): with WorkspaceContext("team-a"): workspace_registry_store.create_registered_model("alpha") workspace_registry_store.create_model_version( "alpha", "s3://team-a/model", run_id=uuid.uuid4().hex ) versions = workspace_registry_store.search_model_versions("name='alpha'") assert [mv.version for mv in versions] == [1] latest_versions = workspace_registry_store.get_latest_versions("alpha") assert [mv.version for mv in latest_versions] == [1] fetched = workspace_registry_store.get_model_version("alpha", "1") assert fetched.version == 1 with WorkspaceContext("team-b"): assert workspace_registry_store.search_model_versions("name='alpha'") == [] with pytest.raises( MlflowException, match=r"Model Version \(name=alpha, version=1\) not found" ) as excinfo: workspace_registry_store.get_model_version("alpha", "1") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match="Registered Model with name=alpha not found" ) as excinfo: workspace_registry_store.get_latest_versions("alpha") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_same_model_name_allowed_in_different_workspaces(workspace_registry_store): with WorkspaceContext("team-a"): workspace_registry_store.create_registered_model("shared-name") with WorkspaceContext("team-b"): workspace_registry_store.create_registered_model("shared-name") names = _names_from_search(workspace_registry_store.search_registered_models()) assert names == {"shared-name"} with WorkspaceContext("team-a"): names = _names_from_search(workspace_registry_store.search_registered_models()) assert names == {"shared-name"} def test_update_and_delete_registered_model_metadata_are_workspace_scoped( workspace_registry_store, ): with WorkspaceContext("team-a"): workspace_registry_store.create_registered_model("alpha") workspace_registry_store.set_registered_model_tag( "alpha", RegisteredModelTag("owner", "team-a") ) updated = workspace_registry_store.update_registered_model("alpha", "updated desc") assert updated.description == "updated desc" workspace_registry_store.delete_registered_model_tag("alpha", "owner") assert workspace_registry_store.get_registered_model("alpha").tags == {} with WorkspaceContext("team-b"): with pytest.raises( MlflowException, match="Registered Model with name=alpha not found" ) as excinfo: workspace_registry_store.update_registered_model("alpha", "hijacked") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match="Registered Model with name=alpha not found" ) as excinfo: workspace_registry_store.delete_registered_model_tag("alpha", "owner") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_model_version_allows_workspace_scoped_proxied_artifacts( workspace_registry_store, monkeypatch ): monkeypatch.setenv("_MLFLOW_SERVER_SERVE_ARTIFACTS", "true") with WorkspaceContext("team-a"): workspace_registry_store.create_registered_model("alpha") mv = workspace_registry_store.create_model_version( "alpha", "mlflow-artifacts:/workspaces/team-a/models/model-a", run_id=uuid.uuid4().hex, ) assert mv.version == 1 def test_webhook_operations_are_workspace_scoped(workspace_registry_store): event = WebhookEvent(WebhookEntity.MODEL_VERSION, WebhookAction.CREATED) with WorkspaceContext("team-a"): webhook = workspace_registry_store.create_webhook( name="team-a-hook", url="https://example.com/hook", events=[event], description="Team A hook", ) assert webhook.workspace == "team-a" owned_hooks = workspace_registry_store.list_webhooks() assert len(owned_hooks) == 1 assert owned_hooks[0].webhook_id == webhook.webhook_id assert owned_hooks[0].workspace == "team-a" with WorkspaceContext("team-b"): assert len(workspace_registry_store.list_webhooks()) == 0 assert ( len( workspace_registry_store.list_webhooks_by_event( event, max_results=10, page_token=None ) ) == 0 ) with pytest.raises( MlflowException, match=f"Webhook with ID {webhook.webhook_id} not found" ) as excinfo: workspace_registry_store.get_webhook(webhook.webhook_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match=f"Webhook with ID {webhook.webhook_id} not found" ) as excinfo: workspace_registry_store.update_webhook(webhook.webhook_id, name="should-fail") assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match=f"Webhook with ID {webhook.webhook_id} not found" ) as excinfo: workspace_registry_store.delete_webhook(webhook.webhook_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with WorkspaceContext("team-a"): fetched = workspace_registry_store.get_webhook(webhook.webhook_id) assert fetched.webhook_id == webhook.webhook_id assert fetched.workspace == "team-a" workspace_registry_store.delete_webhook(webhook.webhook_id) with pytest.raises( MlflowException, match=f"Webhook with ID {webhook.webhook_id} not found" ) as excinfo: workspace_registry_store.get_webhook(webhook.webhook_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_default_workspace_behavior_when_workspaces_disabled(db_uri, monkeypatch): monkeypatch.delenv(MLFLOW_ENABLE_WORKSPACES.name, raising=False) clear_server_request_workspace() store = SqlAlchemyStore(db_uri) try: rm = store.create_registered_model("legacy-model") assert rm.name == "legacy-model" fetched = store.get_registered_model("legacy-model") assert fetched.name == "legacy-model" finally: store.engine.dispose() def test_default_workspace_context_allows_operations(workspace_registry_store): with WorkspaceContext(DEFAULT_WORKSPACE_NAME): workspace_registry_store.create_registered_model("default-model") fetched = workspace_registry_store.get_registered_model("default-model") assert fetched.name == "default-model" def test_single_tenant_registry_startup_rejects_non_default_workspace_models( tmp_path, db_uri, cached_db, monkeypatch ): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") workspace_store = WorkspaceAwareSqlAlchemyStore(db_uri) with WorkspaceContext("team-startup"): workspace_store.create_registered_model("team-model") workspace_store.engine.dispose() monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") with pytest.raises( MlflowException, match="Cannot disable workspaces because registered models exist outside the default " + "workspace", ) as excinfo: SqlAlchemyStore(db_uri) assert excinfo.value.error_code == "INVALID_STATE" monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") webhook_db_path = tmp_path / "registry_webhook.db" shutil.copy2(cached_db, webhook_db_path) webhook_db_uri = f"sqlite:///{webhook_db_path}" webhook_store = WorkspaceAwareSqlAlchemyStore(webhook_db_uri) webhook_event = WebhookEvent(WebhookEntity.REGISTERED_MODEL, WebhookAction.CREATED) with WorkspaceContext("team-webhook"): webhook_store.create_webhook( name="team-webhook", url="https://example.com/webhook", events=[webhook_event], description="non-default webhook", ) webhook_store.engine.dispose() monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") with pytest.raises( MlflowException, match="Cannot disable workspaces because webhooks exist outside the default workspace", ) as excinfo: SqlAlchemyStore(webhook_db_uri) assert excinfo.value.error_code == "INVALID_STATE"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/model_registry/test_sqlalchemy_workspace_store.py", "license": "Apache License 2.0", "lines": 330, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/tracking/test_rest_store_workspace.py
from unittest import mock import pytest from mlflow.exceptions import MlflowException from mlflow.store.tracking.rest_store import RestStore from mlflow.utils.rest_utils import MlflowHostCreds ACTIVE_WORKSPACE = "team-a" def test_supports_workspaces_queries_endpoint(): creds = MlflowHostCreds("https://example") store = RestStore(lambda: creds) response = mock.MagicMock() response.status_code = 200 response.json.return_value = {"workspaces_enabled": True} with mock.patch( "mlflow.store.workspace_rest_store_mixin.http_request", return_value=response ) as mock_http: assert store.supports_workspaces is True # Cached result prevents additional requests assert store.supports_workspaces is True mock_http.assert_called_once() _, kwargs = mock_http.call_args assert kwargs["host_creds"] is creds assert kwargs["endpoint"] == "/api/3.0/mlflow/server-info" assert kwargs["method"] == "GET" assert kwargs["timeout"] == 3 assert kwargs["max_retries"] == 0 assert kwargs["raise_on_status"] is False def test_supports_workspaces_returns_false_on_failure(): creds = MlflowHostCreds("https://example") store = RestStore(lambda: creds) response = mock.MagicMock() response.status_code = 404 response.text = "not found" with mock.patch("mlflow.store.workspace_rest_store_mixin.http_request", return_value=response): assert store.supports_workspaces is False def test_supports_workspaces_handles_missing_json_keys(): creds = MlflowHostCreds("https://example") store = RestStore(lambda: creds) response = mock.MagicMock() response.status_code = 200 response.json.return_value = {} with mock.patch("mlflow.store.workspace_rest_store_mixin.http_request", return_value=response): assert store.supports_workspaces is False def test_supports_workspaces_returns_false_for_databricks_uri(): creds = MlflowHostCreds("databricks") store = RestStore(lambda: creds) with mock.patch("mlflow.store.workspace_rest_store_mixin.http_request") as mock_http: assert store.supports_workspaces is False # Should not probe the server for Databricks URIs mock_http.assert_not_called() def test_supports_workspaces_raises_on_server_error(): creds = MlflowHostCreds("https://example") store = RestStore(lambda: creds) response = mock.MagicMock() response.status_code = 500 response.text = "Internal Server Error" with mock.patch("mlflow.store.workspace_rest_store_mixin.http_request", return_value=response): with pytest.raises(MlflowException, match="Failed to query.*500"): store.supports_workspaces def test_rest_store_workspace_guard(): creds = MlflowHostCreds("https://example") store = RestStore(lambda: creds) store._workspace_support = False with ( mock.patch( "mlflow.store.workspace_rest_store_mixin.get_request_workspace", return_value=ACTIVE_WORKSPACE, ), mock.patch.object(RestStore, "supports_workspaces", property(lambda self: False)), ): with pytest.raises( MlflowException, match="Active workspace 'team-a' cannot be used because the remote server does not", ): store.search_experiments() def test_workspace_guard_blocks_log_spans(monkeypatch): store = RestStore(lambda: MlflowHostCreds("https://workspace-host")) spans = [mock.MagicMock()] monkeypatch.setattr( "mlflow.store.workspace_rest_store_mixin.get_request_workspace", lambda: ACTIVE_WORKSPACE, ) monkeypatch.setattr(RestStore, "supports_workspaces", property(lambda self: False)) with pytest.raises(MlflowException, match="does not support workspaces"): store.log_spans("exp-1", spans)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/tracking/test_rest_store_workspace.py", "license": "Apache License 2.0", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/tracking/test_sqlalchemy_workspace_store.py
import math import time import uuid from pathlib import Path from unittest import mock import pytest from mlflow.entities import ( Dataset, DatasetInput, Experiment, ExperimentTag, GatewayEndpointModelConfig, GatewayModelLinkageType, GatewayResourceType, InputTag, LoggedModelParameter, LoggedModelStatus, LoggedModelTag, Metric, Param, RunStatus, RunTag, ViewType, ) from mlflow.entities.entity_type import EntityAssociationType from mlflow.entities.lifecycle_stage import LifecycleStage from mlflow.environment_variables import MLFLOW_ENABLE_WORKSPACES from mlflow.exceptions import MlflowException from mlflow.store.tracking.dbmodels.models import ( SqlEntityAssociation, SqlExperiment, SqlTraceInfo, SqlTraceTag, ) from mlflow.store.tracking.gateway.config_resolver import ( get_endpoint_config, get_resource_endpoint_configs, ) from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore from mlflow.store.tracking.sqlalchemy_workspace_store import WorkspaceAwareSqlAlchemyStore from mlflow.tracing.constant import TraceMetadataKey from mlflow.tracing.utils import generate_request_id_v2 from mlflow.tracking._tracking_service import utils as tracking_utils from mlflow.tracking._tracking_service.client import TrackingServiceClient from mlflow.utils.mlflow_tags import MLFLOW_DATASET_CONTEXT from mlflow.utils.uri import append_to_uri_path from mlflow.utils.workspace_context import WorkspaceContext from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME from tests.store.tracking.test_sqlalchemy_store import ( _create_trace, _gateway_model_scorer_json, _mock_gateway_endpoint, create_test_span, ) def _now_ms() -> int: return int(time.time() * 1000) def _create_run( store: SqlAlchemyStore, workspace: str, experiment_name: str, run_name: str, user: str = "user", ): with WorkspaceContext(workspace): exp_id = store.create_experiment(experiment_name) run = store.create_run( exp_id, user_id=user, start_time=_now_ms(), tags=[], run_name=run_name, ) return exp_id, run @pytest.fixture def workspace_tracking_store(tmp_path, db_uri, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") artifact_dir = tmp_path / "artifacts" artifact_dir.mkdir() store = tracking_utils._get_sqlalchemy_store(db_uri, artifact_dir.as_uri()) store.tracking_uri = db_uri store.artifact_root_uri = artifact_dir.as_uri() try: yield store finally: store._dispose_engine() def test_sqlalchemy_store_returns_workspace_aware_when_enabled(tmp_path, db_uri, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") artifact_dir = tmp_path / "artifacts" artifact_dir.mkdir() store = tracking_utils._get_sqlalchemy_store(db_uri, artifact_dir.as_uri()) try: assert isinstance(store, WorkspaceAwareSqlAlchemyStore) assert store.supports_workspaces is True finally: store._dispose_engine() def test_sqlalchemy_store_is_single_tenant_when_disabled(tmp_path, db_uri, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") artifact_dir = tmp_path / "artifacts" artifact_dir.mkdir() store = tracking_utils._get_sqlalchemy_store(db_uri, artifact_dir.as_uri()) try: assert not isinstance(store, WorkspaceAwareSqlAlchemyStore) assert store.supports_workspaces is False finally: store._dispose_engine() def test_experiments_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_a_id = workspace_tracking_store.create_experiment("exp-in-a") duplicate_a_id = workspace_tracking_store.create_experiment("shared-name") with WorkspaceContext("team-b"): exp_b_id = workspace_tracking_store.create_experiment("exp-in-b") duplicate_b_id = workspace_tracking_store.create_experiment("shared-name") with WorkspaceContext("team-a"): exp_a = workspace_tracking_store.get_experiment(exp_a_id) assert exp_a.name == "exp-in-a" assert exp_a.workspace == "team-a" experiments = workspace_tracking_store.search_experiments(ViewType.ACTIVE_ONLY) assert {exp.name for exp in experiments} == {"exp-in-a", "shared-name"} assert workspace_tracking_store.get_experiment_by_name("exp-in-b") is None with pytest.raises( MlflowException, match=f"No Experiment with id={exp_b_id} exists" ) as excinfo: workspace_tracking_store.get_experiment(exp_b_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" duplicate_a = workspace_tracking_store.get_experiment(duplicate_a_id) assert duplicate_a.name == "shared-name" assert duplicate_a.workspace == "team-a" with WorkspaceContext("team-b"): experiments = workspace_tracking_store.search_experiments(ViewType.ACTIVE_ONLY) assert {exp.name for exp in experiments} == {"exp-in-b", "shared-name"} assert workspace_tracking_store.get_experiment_by_name("exp-in-a") is None duplicate_b = workspace_tracking_store.get_experiment(duplicate_b_id) assert duplicate_b.name == "shared-name" assert duplicate_b.workspace == "team-b" def test_runs_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_a_id = workspace_tracking_store.create_experiment("exp-a") run_a = workspace_tracking_store.create_run( exp_a_id, user_id="alice", start_time=_now_ms(), tags=[], run_name="run-a", ) with WorkspaceContext("team-b"): exp_b_id = workspace_tracking_store.create_experiment("exp-b") run_b = workspace_tracking_store.create_run( exp_b_id, user_id="bob", start_time=_now_ms(), tags=[], run_name="run-b", ) with pytest.raises( MlflowException, match=f"No Experiment with id={exp_a_id} exists" ) as excinfo: workspace_tracking_store.create_run( exp_a_id, user_id="bob", start_time=_now_ms(), tags=[], run_name=None, ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with pytest.raises( MlflowException, match=f"Run with id={run_a.info.run_id} not found" ) as excinfo: workspace_tracking_store.get_run(run_a.info.run_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" runs_b = workspace_tracking_store.search_runs([exp_b_id], None, ViewType.ALL) assert {run.info.run_id for run in runs_b} == {run_b.info.run_id} runs_from_other_workspace = workspace_tracking_store.search_runs( [exp_a_id], None, ViewType.ALL ) assert runs_from_other_workspace == [] with WorkspaceContext("team-a"): fetched = workspace_tracking_store.get_run(run_a.info.run_id) assert fetched.info.experiment_id == exp_a_id def test_search_datasets_is_workspace_scoped(workspace_tracking_store): exp_a_id, run_a = _create_run(workspace_tracking_store, "team-a", "exp-a", "run-a") exp_b_id, run_b = _create_run(workspace_tracking_store, "team-b", "exp-b", "run-b") dataset_a = Dataset( name="dataset-a", digest="digest-a", source_type="delta", source="source-a", ) dataset_b = Dataset( name="dataset-b", digest="digest-b", source_type="delta", source="source-b", ) with WorkspaceContext("team-a"): workspace_tracking_store.log_inputs( run_a.info.run_id, [DatasetInput(dataset_a, [InputTag(MLFLOW_DATASET_CONTEXT, "train")])], ) with WorkspaceContext("team-b"): workspace_tracking_store.log_inputs( run_b.info.run_id, [DatasetInput(dataset_b, [InputTag(MLFLOW_DATASET_CONTEXT, "train")])], ) summaries = workspace_tracking_store._search_datasets([exp_b_id, exp_a_id]) assert { (summary.experiment_id, summary.name, summary.digest, summary.context) for summary in summaries } == {(str(exp_b_id), dataset_b.name, dataset_b.digest, "train")} assert workspace_tracking_store._search_datasets([exp_a_id]) == [] with WorkspaceContext("team-a"): summaries = workspace_tracking_store._search_datasets([exp_a_id, exp_b_id]) assert { (summary.experiment_id, summary.name, summary.digest, summary.context) for summary in summaries } == {(str(exp_a_id), dataset_a.name, dataset_a.digest, "train")} assert workspace_tracking_store._search_datasets([exp_b_id]) == [] def test_search_datasets_public_api_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_a_id = workspace_tracking_store.create_experiment("search-exp-a") dataset_a = workspace_tracking_store.create_dataset( name="dataset-a", experiment_ids=[exp_a_id] ) workspace_tracking_store.upsert_dataset_records( dataset_a.dataset_id, [{"inputs": {"x": 1}, "outputs": {"y": "a"}}], ) with WorkspaceContext("team-b"): exp_b_id = workspace_tracking_store.create_experiment("search-exp-b") dataset_b = workspace_tracking_store.create_dataset( name="dataset-b", experiment_ids=[exp_b_id] ) workspace_tracking_store.upsert_dataset_records( dataset_b.dataset_id, [{"inputs": {"x": 2}, "outputs": {"y": "b"}}], ) with WorkspaceContext("team-a"): results = workspace_tracking_store.search_datasets() assert {d.name for d in results} == {"dataset-a"} results = workspace_tracking_store.search_datasets(experiment_ids=[exp_a_id, exp_b_id]) assert {d.name for d in results} == {"dataset-a"} records, _ = workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) assert len(records) == 1 assert records[0].inputs == {"x": 1} assert records[0].outputs == {"y": "a"} with WorkspaceContext("team-b"): results = workspace_tracking_store.search_datasets() assert {d.name for d in results} == {"dataset-b"} results = workspace_tracking_store.search_datasets(experiment_ids=[exp_a_id]) assert results == [] records, _ = workspace_tracking_store._load_dataset_records(dataset_b.dataset_id) assert len(records) == 1 assert records[0].inputs == {"x": 2} assert records[0].outputs == {"y": "b"} def test_entity_associations_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_a_id = workspace_tracking_store.create_experiment("assoc-exp-a") dataset = workspace_tracking_store.create_dataset( name="dataset-a", experiment_ids=[exp_a_id], ) with WorkspaceContext("team-a"): forward = workspace_tracking_store.search_entities_by_source( source_ids=dataset.dataset_id, source_type=EntityAssociationType.EVALUATION_DATASET, destination_type=EntityAssociationType.EXPERIMENT, ) assert forward.to_list() == [exp_a_id] with WorkspaceContext("team-b"): forward = workspace_tracking_store.search_entities_by_source( source_ids=dataset.dataset_id, source_type=EntityAssociationType.EVALUATION_DATASET, destination_type=EntityAssociationType.EXPERIMENT, ) assert forward.to_list() == [] reverse = workspace_tracking_store.search_entities_by_destination( destination_ids=exp_a_id, destination_type=EntityAssociationType.EXPERIMENT, source_type=EntityAssociationType.EVALUATION_DATASET, ) assert reverse.to_list() == [] def test_artifact_locations_are_scoped_to_workspace(workspace_tracking_store): with WorkspaceContext("team-alpha"): exp_id = workspace_tracking_store.create_experiment("alpha-exp") experiment = workspace_tracking_store.get_experiment(exp_id) assert "/workspaces/team-alpha/" in experiment.artifact_location with WorkspaceContext(DEFAULT_WORKSPACE_NAME): default_exp_id = workspace_tracking_store.create_experiment("default-exp") default_experiment = workspace_tracking_store.get_experiment(default_exp_id) assert f"/workspaces/{DEFAULT_WORKSPACE_NAME}/" in default_experiment.artifact_location def test_serving_artifacts_auto_scopes_workspace_paths(workspace_tracking_store, monkeypatch): monkeypatch.setenv("_MLFLOW_SERVER_SERVE_ARTIFACTS", "true") workspace_tracking_store.artifact_root_uri = "mlflow-artifacts:/artifacts" calls = [] class TrackingProvider: def resolve_artifact_root(self, default_root, workspace_name): calls.append((default_root, workspace_name)) return default_root, True provider = TrackingProvider() monkeypatch.setattr( WorkspaceAwareSqlAlchemyStore, "_get_workspace_provider_instance", lambda self, provider=provider: provider, ) with WorkspaceContext("team-prefix"): exp_id = workspace_tracking_store.create_experiment("auto-scoped") experiment = workspace_tracking_store.get_experiment(exp_id) assert f"/workspaces/team-prefix/{exp_id}" in experiment.artifact_location assert calls == [("mlflow-artifacts:/artifacts", "team-prefix")] def test_serving_artifacts_allows_pre_scoped_roots(workspace_tracking_store, monkeypatch): monkeypatch.delenv("_MLFLOW_SERVER_SERVE_ARTIFACTS", raising=False) workspace_tracking_store.artifact_root_uri = "mlflow-artifacts:/artifacts" class PrefixedProvider: def __init__(self, store): self.store = store def resolve_artifact_root(self, artifact_root, workspace_name): scoped = append_to_uri_path(artifact_root, f"workspaces/{workspace_name}") return scoped, False provider = PrefixedProvider(workspace_tracking_store) monkeypatch.setattr( WorkspaceAwareSqlAlchemyStore, "_get_workspace_provider_instance", lambda self, provider=provider: provider, ) with WorkspaceContext("team-ready"): exp_id = workspace_tracking_store.create_experiment("with-prefix") experiment = workspace_tracking_store.get_experiment(exp_id) assert "/workspaces/team-ready/" in experiment.artifact_location def test_serving_artifacts_honors_workspace_override(workspace_tracking_store, monkeypatch): monkeypatch.setenv("_MLFLOW_SERVER_SERVE_ARTIFACTS", "true") workspace_tracking_store.artifact_root_uri = "mlflow-artifacts:/artifacts" class OverrideProvider: def resolve_artifact_root(self, _default_root, workspace_name): return f"s3://{workspace_name}-bucket/root", False provider = OverrideProvider() monkeypatch.setattr( WorkspaceAwareSqlAlchemyStore, "_get_workspace_provider_instance", lambda self, provider=provider: provider, ) with WorkspaceContext("team-override"): exp_id = workspace_tracking_store.create_experiment("custom-root") experiment = workspace_tracking_store.get_experiment(exp_id) assert experiment.artifact_location.startswith("s3://team-override-bucket/root/") assert "/workspaces/" not in experiment.artifact_location def test_create_experiment_requires_effective_artifact_root(workspace_tracking_store, monkeypatch): monkeypatch.delenv("_MLFLOW_SERVER_SERVE_ARTIFACTS", raising=False) workspace_tracking_store.artifact_root_uri = None class EmptyProvider: def resolve_artifact_root(self, *_args, **_kwargs): return None, False provider = EmptyProvider() monkeypatch.setattr( WorkspaceAwareSqlAlchemyStore, "_get_workspace_provider_instance", lambda self, provider=provider: provider, ) with WorkspaceContext("team-misconfigured"): with pytest.raises(MlflowException, match="Cannot determine an artifact root"): workspace_tracking_store.create_experiment("should-fail") def test_default_workspace_experiment_uses_zero_id(workspace_tracking_store): with WorkspaceContext(DEFAULT_WORKSPACE_NAME): default_experiment = workspace_tracking_store.get_experiment_by_name( Experiment.DEFAULT_EXPERIMENT_NAME ) assert default_experiment is not None assert default_experiment.experiment_id == SqlAlchemyStore.DEFAULT_EXPERIMENT_ID def test_default_workspace_experiment_allows_single_tenant_fallback(tmp_path, db_uri, monkeypatch): artifact_dir = tmp_path / "artifacts" artifact_dir.mkdir() monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") workspace_store = SqlAlchemyStore(db_uri, artifact_dir.as_uri()) try: with WorkspaceContext(DEFAULT_WORKSPACE_NAME): default_ws_experiment = workspace_store.get_experiment_by_name( Experiment.DEFAULT_EXPERIMENT_NAME ) assert default_ws_experiment is not None assert default_ws_experiment.experiment_id == SqlAlchemyStore.DEFAULT_EXPERIMENT_ID finally: workspace_store._dispose_engine() monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") single_tenant_store = SqlAlchemyStore(db_uri, artifact_dir.as_uri()) try: fallback_experiment = single_tenant_store.get_experiment( SqlAlchemyStore.DEFAULT_EXPERIMENT_ID ) assert fallback_experiment.name == Experiment.DEFAULT_EXPERIMENT_NAME finally: single_tenant_store._dispose_engine() def test_custom_artifact_location_rejected_in_workspace(workspace_tracking_store): with WorkspaceContext("team-delta"): with pytest.raises( MlflowException, match="artifact_location cannot be specified when workspaces are enabled", ) as excinfo: workspace_tracking_store.create_experiment( "delta-exp", artifact_location="file:///tmp/custom" ) assert excinfo.value.error_code == "INVALID_PARAMETER_VALUE" def test_experiment_lifecycle_operations_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_a_id = workspace_tracking_store.create_experiment("lifecycle-exp") with WorkspaceContext("team-b"): exp_b_id = workspace_tracking_store.create_experiment("other-exp") with WorkspaceContext("team-a"): workspace_tracking_store.rename_experiment(exp_a_id, "renamed-exp") experiment = workspace_tracking_store.get_experiment(exp_a_id) assert experiment.name == "renamed-exp" with WorkspaceContext("team-b"): with pytest.raises(MlflowException, match=f"No Experiment with id={exp_a_id} exists"): workspace_tracking_store.rename_experiment(exp_a_id, "fail") with pytest.raises(MlflowException, match=f"No Experiment with id={exp_a_id} exists"): workspace_tracking_store.delete_experiment(exp_a_id) with WorkspaceContext("team-a"): workspace_tracking_store.delete_experiment(exp_a_id) deleted = workspace_tracking_store.get_experiment(exp_a_id) assert deleted.lifecycle_stage == LifecycleStage.DELETED with WorkspaceContext("team-b"): with pytest.raises(MlflowException, match=f"No Experiment with id={exp_a_id} exists"): workspace_tracking_store.restore_experiment(exp_a_id) with WorkspaceContext("team-a"): workspace_tracking_store.restore_experiment(exp_a_id) restored = workspace_tracking_store.get_experiment(exp_a_id) assert restored.lifecycle_stage == LifecycleStage.ACTIVE with WorkspaceContext("team-b"): assert workspace_tracking_store.get_experiment(exp_b_id).name == "other-exp" def test_experiment_tags_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_a_id = workspace_tracking_store.create_experiment("tagged-exp") workspace_tracking_store.set_experiment_tag(exp_a_id, ExperimentTag("owner", "team-a")) with WorkspaceContext("team-b"): exp_b_id = workspace_tracking_store.create_experiment("other-exp") with pytest.raises(MlflowException, match=f"No Experiment with id={exp_a_id} exists"): workspace_tracking_store.set_experiment_tag(exp_a_id, ExperimentTag("owner", "team-b")) with pytest.raises(MlflowException, match=f"No Experiment with id={exp_a_id} exists"): workspace_tracking_store.delete_experiment_tag(exp_a_id, "owner") with WorkspaceContext("team-a"): workspace_tracking_store.delete_experiment_tag(exp_a_id, "owner") experiment = workspace_tracking_store.get_experiment(exp_a_id) assert "owner" not in experiment.tags with WorkspaceContext("team-b"): workspace_tracking_store.set_experiment_tag(exp_b_id, ExperimentTag("owner", "team-b")) experiment = workspace_tracking_store.get_experiment(exp_b_id) assert experiment.tags["owner"] == "team-b" def test_run_data_logging_enforces_workspaces(workspace_tracking_store): exp_a_id, run_a = _create_run( workspace_tracking_store, "team-a", "data-exp-a", "run-a", user="alice" ) with WorkspaceContext("team-a"): workspace_tracking_store.log_param(run_a.info.run_id, Param("p", "1")) workspace_tracking_store.log_metric(run_a.info.run_id, Metric("m", 2.0, _now_ms(), 0)) workspace_tracking_store.set_tag(run_a.info.run_id, RunTag("t", "team-a")) workspace_tracking_store.log_batch( run_a.info.run_id, metrics=[Metric("m2", 3.0, _now_ms(), 0)], params=[Param("p2", "v")], tags=[RunTag("t2", "v2")], ) run = workspace_tracking_store.get_run(run_a.info.run_id) assert run.data.params["p"] == "1" assert run.data.metrics["m"] == 2.0 assert run.data.tags["t"] == "team-a" with WorkspaceContext("team-b"): exp_b_id, run_b = _create_run( workspace_tracking_store, "team-b", "data-exp-b", "run-b", user="bob" ) for call in ( lambda: workspace_tracking_store.log_param(run_a.info.run_id, Param("cross", "fail")), lambda: workspace_tracking_store.log_metric( run_a.info.run_id, Metric("cross", 1.0, _now_ms(), 0) ), lambda: workspace_tracking_store.set_tag(run_a.info.run_id, RunTag("cross", "fail")), lambda: workspace_tracking_store.delete_tag(run_a.info.run_id, "t"), lambda: workspace_tracking_store.log_batch( run_a.info.run_id, metrics=[Metric("cross", 1.0, _now_ms(), 0)], params=[Param("cross", "1")], tags=[RunTag("cross", "1")], ), ): with pytest.raises( MlflowException, match=f"Run with id={run_a.info.run_id} not found" ) as excinfo: call() assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" workspace_tracking_store.log_param(run_b.info.run_id, Param("pb", "b")) assert workspace_tracking_store.get_run(run_b.info.run_id).data.params["pb"] == "b" with WorkspaceContext("team-a"): run = workspace_tracking_store.get_run(run_a.info.run_id) assert "cross" not in run.data.params assert run.data.tags["t2"] == "v2" def test_run_lifecycle_operations_workspace_isolation(workspace_tracking_store): _, run_a = _create_run( workspace_tracking_store, "team-a", "lifecycle-exp", "run-a", user="alice" ) with WorkspaceContext("team-b"): with pytest.raises(MlflowException, match=f"Run with id={run_a.info.run_id} not found"): workspace_tracking_store.delete_run(run_a.info.run_id) with WorkspaceContext("team-a"): workspace_tracking_store.delete_run(run_a.info.run_id) deleted = workspace_tracking_store.get_run(run_a.info.run_id) assert deleted.info.lifecycle_stage == LifecycleStage.DELETED with WorkspaceContext("team-b"): for call in ( lambda: workspace_tracking_store.restore_run(run_a.info.run_id), lambda: workspace_tracking_store.update_run_info( run_a.info.run_id, RunStatus.FAILED, end_time=_now_ms(), run_name=None ), ): with pytest.raises(MlflowException, match=f"Run with id={run_a.info.run_id} not found"): call() with WorkspaceContext("team-a"): workspace_tracking_store.restore_run(run_a.info.run_id) restored = workspace_tracking_store.get_run(run_a.info.run_id) assert restored.info.lifecycle_stage == LifecycleStage.ACTIVE updated = workspace_tracking_store.update_run_info( run_a.info.run_id, RunStatus.FINISHED, end_time=_now_ms(), run_name=None ) assert updated.status == RunStatus.to_string(RunStatus.FINISHED) def test_search_and_history_calls_are_workspace_scoped(workspace_tracking_store): exp_a_id, run_a = _create_run( workspace_tracking_store, "team-a", "search-exp-a", "run-a", user="alice" ) exp_b_id, run_b = _create_run( workspace_tracking_store, "team-b", "search-exp-b", "run-b", user="bob" ) with WorkspaceContext("team-a"): workspace_tracking_store.log_metric(run_a.info.run_id, Metric("metric", 1.0, _now_ms(), 0)) runs = workspace_tracking_store.search_runs([exp_a_id, exp_b_id], None, ViewType.ALL) assert {r.info.run_id for r in runs} == {run_a.info.run_id} with WorkspaceContext("team-b"): runs = workspace_tracking_store.search_runs([exp_a_id, exp_b_id], None, ViewType.ALL) assert {r.info.run_id for r in runs} == {run_b.info.run_id} with pytest.raises(MlflowException, match=f"Run with id={run_a.info.run_id} not found"): workspace_tracking_store.get_metric_history(run_a.info.run_id, "metric") with WorkspaceContext("team-a"): history = workspace_tracking_store.get_metric_history(run_a.info.run_id, "metric") assert [m.value for m in history] == [1.0] with workspace_tracking_store.ManagedSessionMaker() as session: infos = workspace_tracking_store._list_run_infos(session, exp_a_id) assert len(infos) == 1 with WorkspaceContext("team-b"): with workspace_tracking_store.ManagedSessionMaker() as session: infos = workspace_tracking_store._list_run_infos(session, exp_a_id) assert infos == [] def test_run_artifact_uris_are_workspace_scoped(workspace_tracking_store): _, run_a = _create_run( workspace_tracking_store, "team-a", "artifact-exp", "run-a", user="alice" ) with WorkspaceContext("team-a"): run = workspace_tracking_store.get_run(run_a.info.run_id) artifact_uri = Path(run.info.artifact_uri.replace("file://", "")) parts = artifact_uri.parts assert "workspaces" in parts workspace_index = parts.index("workspaces") assert parts[workspace_index + 1] == "team-a" with WorkspaceContext(DEFAULT_WORKSPACE_NAME): _, default_run = _create_run( workspace_tracking_store, DEFAULT_WORKSPACE_NAME, "default-artifact-exp", "run-default", user="carol", ) default_artifact_uri = Path( workspace_tracking_store.get_run(default_run.info.run_id).info.artifact_uri.replace( "file://", "" ) ) assert "workspaces" in default_artifact_uri.parts workspace_index = default_artifact_uri.parts.index("workspaces") assert default_artifact_uri.parts[workspace_index + 1] == DEFAULT_WORKSPACE_NAME with WorkspaceContext("team-b"): with pytest.raises(MlflowException, match=f"Run with id={run_a.info.run_id} not found"): workspace_tracking_store.get_run(run_a.info.run_id) def test_artifact_operations_enforce_workspace_isolation(workspace_tracking_store, tmp_path): client = TrackingServiceClient(workspace_tracking_store.tracking_uri) _, run_a = _create_run( workspace_tracking_store, "team-a", "artifact-exp-client", "run-a", user="alice" ) artifact_file = tmp_path / "artifact.txt" artifact_file.write_text("hello") with WorkspaceContext("team-a"): client.log_artifact(run_a.info.run_id, str(artifact_file)) artifacts = client.list_artifacts(run_a.info.run_id) assert any(info.path == "artifact.txt" for info in artifacts) download_dir = tmp_path / "downloaded" download_dir.mkdir() downloaded_path = client.download_artifacts( run_a.info.run_id, "artifact.txt", dst_path=str(download_dir) ) assert Path(downloaded_path).read_text() == "hello" with WorkspaceContext("team-b"): other_client = TrackingServiceClient(workspace_tracking_store.tracking_uri) other_download_dir = tmp_path / "other" other_download_dir.mkdir() for call in ( lambda: other_client.log_artifact(run_a.info.run_id, str(artifact_file)), lambda: other_client.list_artifacts(run_a.info.run_id), lambda: other_client.download_artifacts( run_a.info.run_id, "artifact.txt", dst_path=str(other_download_dir) ), ): tracking_utils._artifact_repos_cache.clear() with pytest.raises( MlflowException, match=f"Run with id={run_a.info.run_id} not found" ) as excinfo: call() assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_get_trace_is_workspace_scoped(workspace_tracking_store): trace_id = f"tr-{uuid.uuid4().hex}" span = create_test_span(trace_id=trace_id) with WorkspaceContext("team-a"): exp_id = workspace_tracking_store.create_experiment("trace-exp-a") workspace_tracking_store.log_spans(exp_id, [span]) trace = workspace_tracking_store.get_trace(trace_id) assert trace.info.trace_id == trace_id with WorkspaceContext("team-b"): with pytest.raises( MlflowException, match=f"Trace with ID {trace_id} is not found." ) as excinfo: workspace_tracking_store.get_trace(trace_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_log_spans_update_is_workspace_scoped(workspace_tracking_store): trace_id = f"tr-{uuid.uuid4().hex}" initial_span = create_test_span( trace_id=trace_id, start_ns=2_000_000_000, end_ns=3_000_000_000, ) earlier_span = create_test_span( trace_id=trace_id, span_id=222, start_ns=1_000_000_000, end_ns=4_000_000_000, ) with WorkspaceContext("team-a"): exp_id = workspace_tracking_store.create_experiment("trace-exp-workspace-guard") workspace_tracking_store.log_spans(exp_id, [initial_span]) original_trace = workspace_tracking_store.get_trace(trace_id) call_state = {"count": 0} def workspace_side_effect(*_args, **_kwargs): call_state["count"] += 1 return "team-a" if call_state["count"] == 1 else "team-b" with mock.patch.object( WorkspaceAwareSqlAlchemyStore, "_get_active_workspace", side_effect=workspace_side_effect, ): workspace_tracking_store.log_spans(exp_id, [earlier_span]) updated_trace = workspace_tracking_store.get_trace(trace_id) assert updated_trace.info.request_time == original_trace.info.request_time assert updated_trace.info.execution_duration == original_trace.info.execution_duration assert len(updated_trace.data.spans) == 2 def test_validate_artifact_root_allows_missing_global_root(workspace_tracking_store): workspace_tracking_store.artifact_root_uri = None workspace_tracking_store._validate_artifact_root_configuration() def test_workspace_startup_rejects_root_ending_with_workspaces(tmp_path, db_uri, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") bad_root = tmp_path / "base" / "workspaces" bad_root.mkdir(parents=True) with pytest.raises( MlflowException, match="ends with the reserved 'workspaces' segment", ) as excinfo: tracking_utils._get_sqlalchemy_store(db_uri, bad_root.as_uri()) assert excinfo.value.error_code == "INVALID_STATE" SqlAlchemyStore._engine_map.pop(db_uri, None) def test_workspace_startup_rejects_root_already_scoped(tmp_path, db_uri, monkeypatch): monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") bad_root = tmp_path / "base" / "workspaces" / "team" bad_root.mkdir(parents=True) with pytest.raises( MlflowException, match="is already scoped under the reserved 'workspaces/<name>' prefix", ) as excinfo: tracking_utils._get_sqlalchemy_store(db_uri, bad_root.as_uri()) assert excinfo.value.error_code == "INVALID_STATE" SqlAlchemyStore._engine_map.pop(db_uri, None) def test_workspace_startup_ignores_default_experiment_reserved_location( tmp_path, db_uri, monkeypatch ): base_root = tmp_path / "base" base_root.mkdir() monkeypatch.delenv(MLFLOW_ENABLE_WORKSPACES.name, raising=False) legacy_store = tracking_utils._get_sqlalchemy_store(db_uri, base_root.as_uri()) legacy_store.tracking_uri = db_uri legacy_store.artifact_root_uri = base_root.as_uri() with legacy_store.ManagedSessionMaker() as session: default_exp = ( session.query(SqlExperiment) .filter(SqlExperiment.experiment_id == SqlAlchemyStore.DEFAULT_EXPERIMENT_ID) .one() ) default_exp.artifact_location = append_to_uri_path( base_root.as_uri(), "workspaces/default/0" ) session.flush() legacy_store._dispose_engine() monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") workspace_store = tracking_utils._get_sqlalchemy_store(db_uri, base_root.as_uri()) workspace_store._dispose_engine() SqlAlchemyStore._engine_map.pop(db_uri, None) def test_single_tenant_startup_rejects_non_default_workspace_experiments( tmp_path, db_uri, monkeypatch ): artifact_root = tmp_path / "artifacts" artifact_root.mkdir() monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "true") workspace_store = tracking_utils._get_sqlalchemy_store(db_uri, artifact_root.as_uri()) with WorkspaceContext("team-startup"): workspace_store.create_experiment("team-exp") workspace_store._dispose_engine() SqlAlchemyStore._engine_map.pop(db_uri, None) monkeypatch.setenv(MLFLOW_ENABLE_WORKSPACES.name, "false") with pytest.raises( MlflowException, match="Cannot disable workspaces because experiments exist outside the default workspace", ) as excinfo: SqlAlchemyStore(db_uri, artifact_root.as_uri()) assert excinfo.value.error_code == "INVALID_STATE" SqlAlchemyStore._engine_map.pop(db_uri, None) def test_metric_bulk_operations_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-metrics-a"): exp_a = workspace_tracking_store.create_experiment("exp-metrics-a") run_a = workspace_tracking_store.create_run(exp_a, "alice", _now_ms(), [], "run-a") workspace_tracking_store.log_metric( run_a.info.run_id, Metric("secret_metric", 42.0, _now_ms(), 0) ) with WorkspaceContext("team-metrics-b"): exp_b = workspace_tracking_store.create_experiment("exp-metrics-b") run_b = workspace_tracking_store.create_run(exp_b, "bob", _now_ms(), [], "run-b") workspace_tracking_store.log_metric( run_b.info.run_id, Metric("other_metric", 10.0, _now_ms(), 0) ) result = workspace_tracking_store.get_metric_history_bulk( [run_a.info.run_id], "secret_metric", 100 ) assert result == [] with pytest.raises(MlflowException, match="Run with id=.* not found"): workspace_tracking_store.get_max_step_for_metric(run_a.info.run_id, "secret_metric") with pytest.raises(MlflowException, match="Run with id=.* not found"): workspace_tracking_store.get_metric_history_bulk_interval_from_steps( run_a.info.run_id, "secret_metric", [0], 100 ) def test_logged_model_operations_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-model-a"): exp_a = workspace_tracking_store.create_experiment("exp-model-a") run_a = workspace_tracking_store.create_run(exp_a, "alice", _now_ms(), [], "run-a") model_a = workspace_tracking_store.create_logged_model(exp_a, "model-a", run_a.info.run_id) with WorkspaceContext("team-model-b"): workspace_tracking_store.create_experiment("exp-model-b") with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.get_logged_model(model_a.model_id) with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.delete_logged_model(model_a.model_id) with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.finalize_logged_model( model_a.model_id, LoggedModelStatus.READY ) with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.log_logged_model_params( model_a.model_id, [LoggedModelParameter("key", "value")] ) with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.set_logged_model_tags( model_a.model_id, [LoggedModelTag("key", "value")] ) with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.delete_logged_model_tag(model_a.model_id, "key") def test_trace_tag_operations_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-trace-a"): exp_a = workspace_tracking_store.create_experiment("exp-trace-a") trace_id_a = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_a, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=0, status="OK", ) ) with WorkspaceContext("team-trace-b"): workspace_tracking_store.create_experiment("exp-trace-b") with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.set_trace_tag(trace_id_a, "key", "value") with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.delete_trace_tag(trace_id_a, "key") def test_search_traces_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-search-a"): exp_a = workspace_tracking_store.create_experiment("exp-search-a") trace_id_a = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_a, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=0, status="OK", ) ) with WorkspaceContext("team-search-b"): exp_b = workspace_tracking_store.create_experiment("exp-search-b") trace_id_b = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_b, experiment_id=int(exp_b), timestamp_ms=_now_ms(), execution_time_ms=0, status="OK", ) ) # Cross-workspace search returns nothing results, _ = workspace_tracking_store.search_traces(locations=[exp_a]) assert results == [] # Same-workspace search works results, _ = workspace_tracking_store.search_traces(locations=[exp_b]) assert len(results) == 1 assert results[0].trace_id == trace_id_b def test_link_traces_to_run_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-link-a"): exp_a = workspace_tracking_store.create_experiment("exp-link-a") run_a = workspace_tracking_store.create_run(exp_a, "alice", _now_ms(), [], "run-a") trace_id_a = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_a, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=0, status="OK", ) ) with WorkspaceContext("team-link-b"): exp_b = workspace_tracking_store.create_experiment("exp-link-b") run_b = workspace_tracking_store.create_run(exp_b, "bob", _now_ms(), [], "run-b") trace_id_b = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_b, experiment_id=int(exp_b), timestamp_ms=_now_ms(), execution_time_ms=0, status="OK", ) ) # Cross-workspace trace is silently filtered out workspace_tracking_store.link_traces_to_run([trace_id_a], run_b.info.run_id) with workspace_tracking_store.ManagedSessionMaker() as session: count = ( session.query(SqlEntityAssociation) .filter(SqlEntityAssociation.source_id == trace_id_a) .count() ) assert count == 0 # Cross-workspace run raises error with pytest.raises(MlflowException, match="Run with id=.* not found"): workspace_tracking_store.link_traces_to_run([trace_id_b], run_a.info.run_id) # Same-workspace link works workspace_tracking_store.link_traces_to_run([trace_id_b], run_b.info.run_id) with workspace_tracking_store.ManagedSessionMaker() as session: count = ( session.query(SqlEntityAssociation) .filter(SqlEntityAssociation.source_id == trace_id_b) .count() ) assert count == 1 def test_assessment_operations_are_workspace_scoped(workspace_tracking_store): from mlflow.entities.assessment import AssessmentSource, AssessmentSourceType, Feedback with WorkspaceContext("team-assessment-a"): exp_a = workspace_tracking_store.create_experiment("exp-assessment-a") trace_id_a = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_a, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) feedback_a = Feedback( trace_id=trace_id_a, name="quality", value=True, source=AssessmentSource( source_type=AssessmentSourceType.HUMAN, source_id="user@example.com" ), ) created_assessment_a = workspace_tracking_store.create_assessment(feedback_a) assessment_id_a = created_assessment_a.assessment_id retrieved = workspace_tracking_store.get_assessment(trace_id_a, assessment_id_a) assert retrieved.assessment_id == assessment_id_a with WorkspaceContext("team-assessment-b"): exp_b = workspace_tracking_store.create_experiment("exp-assessment-b") trace_id_b = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_b, experiment_id=int(exp_b), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) with pytest.raises(MlflowException, match=r"Trace with ID .* not found"): workspace_tracking_store.get_assessment(trace_id_a, assessment_id_a) with pytest.raises(MlflowException, match=r"Trace with ID .* not found"): workspace_tracking_store.update_assessment( trace_id_a, assessment_id_a, name="updated_quality", ) with pytest.raises(MlflowException, match=r"Trace with ID .* not found"): workspace_tracking_store.delete_assessment(trace_id_a, assessment_id_a) with WorkspaceContext("team-assessment-a"): retrieved = workspace_tracking_store.get_assessment(trace_id_a, assessment_id_a) assert retrieved.name == "quality" assert retrieved.value is True def test_create_assessment_validates_trace_workspace(workspace_tracking_store): from mlflow.entities.assessment import AssessmentSource, AssessmentSourceType, Feedback with WorkspaceContext("team-create-a"): exp_a = workspace_tracking_store.create_experiment("exp-create-a") trace_id_a = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_a, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) with WorkspaceContext("team-create-b"): workspace_tracking_store.create_experiment("exp-create-b") feedback = Feedback( trace_id=trace_id_a, name="cross_workspace_attempt", value=False, source=AssessmentSource( source_type=AssessmentSourceType.HUMAN, source_id="attacker@example.com" ), ) with pytest.raises(MlflowException, match=r"Trace with ID .* not found"): workspace_tracking_store.create_assessment(feedback) def test_calculate_trace_filter_correlation_filters_experiment_ids(workspace_tracking_store): with WorkspaceContext("team-corr-a"): exp_a = workspace_tracking_store.create_experiment("exp-corr-a") for _ in range(5): trace_id = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) session.merge( SqlTraceTag( request_id=trace_id, key="test_tag", value="value_a", ) ) with WorkspaceContext("team-corr-b"): exp_b = workspace_tracking_store.create_experiment("exp-corr-b") for _ in range(3): trace_id = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id, experiment_id=int(exp_b), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) session.merge( SqlTraceTag( request_id=trace_id, key="test_tag", value="value_b", ) ) result = workspace_tracking_store.calculate_trace_filter_correlation( experiment_ids=[exp_a], filter_string1='tags.test_tag = "value_a"', filter_string2='tags.test_tag = "value_a"', ) assert result.total_count == 0 assert result.filter1_count == 0 assert result.filter2_count == 0 assert result.joint_count == 0 result = workspace_tracking_store.calculate_trace_filter_correlation( experiment_ids=[exp_b], filter_string1='tags.test_tag = "value_b"', filter_string2='tags.test_tag = "value_b"', ) assert result.total_count == 3 assert result.filter1_count == 3 def test_calculate_trace_filter_correlation_cross_workspace_ids_filtered(workspace_tracking_store): with WorkspaceContext("team-xcorr-a"): exp_a = workspace_tracking_store.create_experiment("exp-xcorr-a") trace_id = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) with WorkspaceContext("team-xcorr-b"): workspace_tracking_store.create_experiment("exp-xcorr-b") result = workspace_tracking_store.calculate_trace_filter_correlation( experiment_ids=[exp_a], filter_string1="status = 'OK'", filter_string2="status = 'OK'", ) assert result.total_count == 0 assert result.filter1_count == 0 assert result.filter2_count == 0 assert result.joint_count == 0 assert math.isnan(result.npmi) def test_dataset_tag_operations_are_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-tag-a"): exp_a = workspace_tracking_store.create_experiment("exp-tag-a") dataset_a = workspace_tracking_store.create_dataset( name="dataset-a", experiment_ids=[exp_a], tags={"initial": "value"}, ) # Same workspace tag operations work workspace_tracking_store.set_dataset_tags(dataset_a.dataset_id, {"key1": "value1"}) updated = workspace_tracking_store.get_dataset(dataset_a.dataset_id) assert updated.tags["key1"] == "value1" workspace_tracking_store.delete_dataset_tag(dataset_a.dataset_id, "initial") updated = workspace_tracking_store.get_dataset(dataset_a.dataset_id) assert "initial" not in updated.tags with WorkspaceContext("team-tag-b"): workspace_tracking_store.create_experiment("exp-tag-b") # Cross-workspace set_dataset_tags fails with pytest.raises( MlflowException, match=f"Could not find evaluation dataset with ID {dataset_a.dataset_id}", ) as excinfo: workspace_tracking_store.set_dataset_tags(dataset_a.dataset_id, {"cross": "fail"}) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" # Cross-workspace delete_dataset_tag is a no-op (idempotent, doesn't affect other workspace) workspace_tracking_store.delete_dataset_tag(dataset_a.dataset_id, "key1") # Verify tags are unchanged after cross-workspace attempts with WorkspaceContext("team-tag-a"): dataset = workspace_tracking_store.get_dataset(dataset_a.dataset_id) assert dataset.tags["key1"] == "value1" assert "cross" not in dataset.tags def test_upsert_dataset_records_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-records-a"): exp_a = workspace_tracking_store.create_experiment("exp-records-a") dataset_a = workspace_tracking_store.create_dataset( name="dataset-records-a", experiment_ids=[exp_a], ) # Same workspace upsert works result = workspace_tracking_store.upsert_dataset_records( dataset_a.dataset_id, [{"inputs": {"x": 1}, "outputs": {"y": 2}}], ) assert result["inserted"] == 1 assert result["updated"] == 0 with WorkspaceContext("team-records-b"): workspace_tracking_store.create_experiment("exp-records-b") # Cross-workspace upsert fails with pytest.raises( MlflowException, match=f"Dataset '{dataset_a.dataset_id}' not found", ) as excinfo: workspace_tracking_store.upsert_dataset_records( dataset_a.dataset_id, [{"inputs": {"x": 2}, "outputs": {"y": 3}}], ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" # Verify records are unchanged after cross-workspace attempt with WorkspaceContext("team-records-a"): records, _ = workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) assert len(records) == 1 assert records[0].inputs == {"x": 1} def test_load_dataset_records_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-load-a"): exp_a = workspace_tracking_store.create_experiment("exp-load-a") dataset_a = workspace_tracking_store.create_dataset( name="dataset-load-a", experiment_ids=[exp_a], ) workspace_tracking_store.upsert_dataset_records( dataset_a.dataset_id, [{"inputs": {"x": 1}, "outputs": {"y": 2}}], ) # Same workspace load works records, _ = workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) assert len(records) == 1 assert records[0].inputs == {"x": 1} with WorkspaceContext("team-load-b"): workspace_tracking_store.create_experiment("exp-load-b") # Cross-workspace load fails with pytest.raises( MlflowException, match=f"Dataset '{dataset_a.dataset_id}' not found", ) as excinfo: workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_link_prompts_to_trace_is_workspace_scoped(workspace_tracking_store): from mlflow.entities.model_registry import PromptVersion with WorkspaceContext("team-prompt-a"): exp_a = workspace_tracking_store.create_experiment("exp-prompt-a") trace_id_a = generate_request_id_v2() with workspace_tracking_store.ManagedSessionMaker() as session: session.add( SqlTraceInfo( request_id=trace_id_a, experiment_id=int(exp_a), timestamp_ms=_now_ms(), execution_time_ms=100, status="OK", ) ) prompt_version = PromptVersion( name="test-prompt", version=1, template="Hello {{name}}", creation_timestamp=_now_ms(), ) workspace_tracking_store.link_prompts_to_trace(trace_id_a, [prompt_version]) with workspace_tracking_store.ManagedSessionMaker() as session: count = ( session.query(SqlEntityAssociation) .filter( SqlEntityAssociation.source_id == trace_id_a, SqlEntityAssociation.destination_id == "test-prompt/1", ) .count() ) assert count == 1 with WorkspaceContext("team-prompt-b"): workspace_tracking_store.create_experiment("exp-prompt-b") prompt_version_b = PromptVersion( name="other-prompt", version=1, template="Goodbye {{name}}", creation_timestamp=_now_ms(), ) with pytest.raises(MlflowException, match=r"Trace with ID .* not found") as excinfo: workspace_tracking_store.link_prompts_to_trace(trace_id_a, [prompt_version_b]) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" with workspace_tracking_store.ManagedSessionMaker() as session: count = ( session.query(SqlEntityAssociation) .filter(SqlEntityAssociation.source_id == trace_id_a) .count() ) assert count == 1 @pytest.fixture def gateway_workspace_store(workspace_tracking_store, monkeypatch): monkeypatch.setenv("MLFLOW_CRYPTO_KEK_PASSPHRASE", "test-passphrase") return workspace_tracking_store def test_secrets_are_workspace_scoped(gateway_workspace_store): with WorkspaceContext("team-secret-a"): secret_a = gateway_workspace_store.create_gateway_secret( secret_name="my-secret", secret_value={"api_key": "secret-a-value"}, provider="openai", ) with WorkspaceContext("team-secret-b"): secret_b = gateway_workspace_store.create_gateway_secret( secret_name="my-secret", secret_value={"api_key": "secret-b-value"}, provider="anthropic", ) secrets = gateway_workspace_store.list_secret_infos() assert len(secrets) == 1 assert secrets[0].secret_id == secret_b.secret_id with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.get_secret_info(secret_id=secret_a.secret_id) with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.update_gateway_secret( secret_id=secret_a.secret_id, secret_value={"api_key": "new"} ) with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.delete_gateway_secret(secret_id=secret_a.secret_id) with WorkspaceContext("team-secret-a"): secrets = gateway_workspace_store.list_secret_infos() assert len(secrets) == 1 assert secrets[0].secret_id == secret_a.secret_id assert secrets[0].provider == "openai" def test_endpoints_are_workspace_scoped(gateway_workspace_store): with WorkspaceContext("team-endpoint-a"): secret_a = gateway_workspace_store.create_gateway_secret( secret_name="secret-a", secret_value={"api_key": "val-a"} ) def_a = gateway_workspace_store.create_gateway_model_definition( name="def-a", secret_id=secret_a.secret_id, provider="openai", model_name="gpt-4", ) endpoint_a = gateway_workspace_store.create_gateway_endpoint( name="my-endpoint", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_a.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], created_by="user-a", ) with WorkspaceContext("team-endpoint-b"): secret_b = gateway_workspace_store.create_gateway_secret( secret_name="secret-b", secret_value={"api_key": "val-b"} ) def_b = gateway_workspace_store.create_gateway_model_definition( name="def-b", secret_id=secret_b.secret_id, provider="anthropic", model_name="claude-3", ) endpoint_b = gateway_workspace_store.create_gateway_endpoint( name="my-endpoint", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_b.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], created_by="user-b", ) endpoints = gateway_workspace_store.list_gateway_endpoints() assert len(endpoints) == 1 assert endpoints[0].endpoint_id == endpoint_b.endpoint_id with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.get_gateway_endpoint(endpoint_id=endpoint_a.endpoint_id) with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.update_gateway_endpoint( endpoint_id=endpoint_a.endpoint_id, name="renamed" ) with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.delete_gateway_endpoint(endpoint_id=endpoint_a.endpoint_id) with WorkspaceContext("team-endpoint-a"): endpoints = gateway_workspace_store.list_gateway_endpoints() assert len(endpoints) == 1 assert endpoints[0].endpoint_id == endpoint_a.endpoint_id def test_model_definitions_are_workspace_scoped(gateway_workspace_store): with WorkspaceContext("team-def-a"): secret_a = gateway_workspace_store.create_gateway_secret( secret_name="secret-a", secret_value={"api_key": "val-a"} ) definition_a = gateway_workspace_store.create_gateway_model_definition( name="my-model", secret_id=secret_a.secret_id, provider="openai", model_name="gpt-4", created_by="user-a", ) with WorkspaceContext("team-def-b"): secret_b = gateway_workspace_store.create_gateway_secret( secret_name="secret-b", secret_value={"api_key": "val-b"} ) definition_b = gateway_workspace_store.create_gateway_model_definition( name="my-model", secret_id=secret_b.secret_id, provider="anthropic", model_name="claude-3", created_by="user-b", ) definitions = gateway_workspace_store.list_gateway_model_definitions() assert len(definitions) == 1 assert definitions[0].model_definition_id == definition_b.model_definition_id with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.get_gateway_model_definition( model_definition_id=definition_a.model_definition_id ) with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.update_gateway_model_definition( model_definition_id=definition_a.model_definition_id, name="renamed", ) with pytest.raises(MlflowException, match="not found"): gateway_workspace_store.delete_gateway_model_definition( model_definition_id=definition_a.model_definition_id ) with WorkspaceContext("team-def-a"): definitions = gateway_workspace_store.list_gateway_model_definitions() assert len(definitions) == 1 assert definitions[0].model_definition_id == definition_a.model_definition_id assert definitions[0].provider == "openai" def test_endpoint_bindings_are_workspace_scoped(gateway_workspace_store): with WorkspaceContext("team-bind-a"): gateway_workspace_store.create_experiment("exp-bind-a") secret_a = gateway_workspace_store.create_gateway_secret( secret_name="secret-a", secret_value={"api_key": "val-a"} ) def_a = gateway_workspace_store.create_gateway_model_definition( name="def-a", secret_id=secret_a.secret_id, provider="openai", model_name="gpt-4", ) endpoint_a = gateway_workspace_store.create_gateway_endpoint( name="bound-endpoint", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_a.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], created_by="a", ) gateway_workspace_store.create_endpoint_binding( endpoint_id=endpoint_a.endpoint_id, resource_type=GatewayResourceType.SCORER.value, resource_id="model-a", created_by="user-a", ) with WorkspaceContext("team-bind-b"): gateway_workspace_store.create_experiment("exp-bind-b") secret_b = gateway_workspace_store.create_gateway_secret( secret_name="secret-b", secret_value={"api_key": "val-b"} ) def_b = gateway_workspace_store.create_gateway_model_definition( name="def-b", secret_id=secret_b.secret_id, provider="anthropic", model_name="claude-3", ) endpoint_b = gateway_workspace_store.create_gateway_endpoint( name="bound-endpoint", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_b.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], created_by="b", ) gateway_workspace_store.create_endpoint_binding( endpoint_id=endpoint_b.endpoint_id, resource_type=GatewayResourceType.SCORER.value, resource_id="model-b", created_by="user-b", ) bindings = gateway_workspace_store.list_endpoint_bindings() assert len(bindings) == 1 assert bindings[0].endpoint_id == endpoint_b.endpoint_id with WorkspaceContext("team-bind-a"): bindings = gateway_workspace_store.list_endpoint_bindings() assert len(bindings) == 1 assert bindings[0].endpoint_id == endpoint_a.endpoint_id def test_detach_model_from_endpoint_workspace_scoped(gateway_workspace_store): # Create endpoint and model definition in workspace-a with WorkspaceContext("workspace-a"): secret_a = gateway_workspace_store.create_gateway_secret( secret_name="secret-a", secret_value={"api_key": "sk-test123"}, provider="openai", ) model_def_a = gateway_workspace_store.create_gateway_model_definition( name="model-def-a", secret_id=secret_a.secret_id, provider="openai", model_name="gpt-4", ) endpoint_a = gateway_workspace_store.create_gateway_endpoint( name="endpoint-a", model_configs=[ GatewayEndpointModelConfig( model_definition_id=model_def_a.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], ) # Create endpoint and model definition in workspace-b with WorkspaceContext("workspace-b"): secret_b = gateway_workspace_store.create_gateway_secret( secret_name="secret-b", secret_value={"api_key": "sk-test456"}, provider="openai", ) model_def_b = gateway_workspace_store.create_gateway_model_definition( name="model-def-b", secret_id=secret_b.secret_id, provider="openai", model_name="gpt-4", ) endpoint_b = gateway_workspace_store.create_gateway_endpoint( name="endpoint-b", model_configs=[ GatewayEndpointModelConfig( model_definition_id=model_def_b.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], ) # Try to detach model from endpoint-b while in workspace-a - should fail with WorkspaceContext("workspace-a"): with pytest.raises(MlflowException, match="GatewayEndpoint not found"): gateway_workspace_store.detach_model_from_endpoint( endpoint_id=endpoint_b.endpoint_id, model_definition_id=model_def_b.model_definition_id, ) # Detach model from endpoint-a while in workspace-a - should succeed with WorkspaceContext("workspace-a"): gateway_workspace_store.detach_model_from_endpoint( endpoint_id=endpoint_a.endpoint_id, model_definition_id=model_def_a.model_definition_id, ) # Verify the mapping was removed endpoint = gateway_workspace_store.get_gateway_endpoint(endpoint_id=endpoint_a.endpoint_id) assert len(endpoint.model_mappings) == 0 # Detach model from endpoint-b while in workspace-b - should succeed with WorkspaceContext("workspace-b"): gateway_workspace_store.detach_model_from_endpoint( endpoint_id=endpoint_b.endpoint_id, model_definition_id=model_def_b.model_definition_id, ) # Verify the mapping was removed endpoint = gateway_workspace_store.get_gateway_endpoint(endpoint_id=endpoint_b.endpoint_id) assert len(endpoint.model_mappings) == 0 def test_get_online_scoring_configs_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-online-a"): exp_a = workspace_tracking_store.create_experiment("exp-online-a") with mock.patch.object( workspace_tracking_store, "get_gateway_endpoint", return_value=_mock_gateway_endpoint(), ): workspace_tracking_store.register_scorer( exp_a, "scorer-a", _gateway_model_scorer_json() ) config_a = workspace_tracking_store.upsert_online_scoring_config( experiment_id=exp_a, scorer_name="scorer-a", sample_rate=0.2, ) with WorkspaceContext("team-online-b"): exp_b = workspace_tracking_store.create_experiment("exp-online-b") with mock.patch.object( workspace_tracking_store, "get_gateway_endpoint", return_value=_mock_gateway_endpoint(), ): workspace_tracking_store.register_scorer( exp_b, "scorer-b", _gateway_model_scorer_json() ) config_b = workspace_tracking_store.upsert_online_scoring_config( experiment_id=exp_b, scorer_name="scorer-b", sample_rate=0.4, ) configs = workspace_tracking_store.get_online_scoring_configs( [config_a.scorer_id, config_b.scorer_id] ) assert len(configs) == 1 assert configs[0].scorer_id == config_b.scorer_id assert configs[0].sample_rate == 0.4 with WorkspaceContext("team-online-a"): configs = workspace_tracking_store.get_online_scoring_configs( [config_a.scorer_id, config_b.scorer_id] ) assert len(configs) == 1 assert configs[0].scorer_id == config_a.scorer_id assert configs[0].sample_rate == 0.2 def test_get_active_online_scorers_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-active-a"): exp_a = workspace_tracking_store.create_experiment("exp-active-a") with mock.patch.object( workspace_tracking_store, "get_gateway_endpoint", return_value=_mock_gateway_endpoint(), ): workspace_tracking_store.register_scorer( exp_a, "scorer-a", _gateway_model_scorer_json() ) workspace_tracking_store.upsert_online_scoring_config( experiment_id=exp_a, scorer_name="scorer-a", sample_rate=0.25, ) with WorkspaceContext("team-active-b"): exp_b = workspace_tracking_store.create_experiment("exp-active-b") with mock.patch.object( workspace_tracking_store, "get_gateway_endpoint", return_value=_mock_gateway_endpoint(), ): workspace_tracking_store.register_scorer( exp_b, "scorer-b", _gateway_model_scorer_json() ) workspace_tracking_store.upsert_online_scoring_config( experiment_id=exp_b, scorer_name="scorer-b", sample_rate=0.5, ) active_scorers = workspace_tracking_store.get_active_online_scorers() assert len(active_scorers) == 1 assert active_scorers[0].name == "scorer-b" assert active_scorers[0].online_config.experiment_id == exp_b with WorkspaceContext("team-active-a"): active_scorers = workspace_tracking_store.get_active_online_scorers() assert len(active_scorers) == 1 assert active_scorers[0].name == "scorer-a" assert active_scorers[0].online_config.experiment_id == exp_a def test_find_completed_sessions_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-sessions-a"): exp_a = workspace_tracking_store.create_experiment("exp-sessions-a") _create_trace( workspace_tracking_store, "trace-a1", exp_a, request_time=1000, trace_metadata={TraceMetadataKey.TRACE_SESSION: "session-a"}, ) _create_trace( workspace_tracking_store, "trace-a2", exp_a, request_time=2000, trace_metadata={TraceMetadataKey.TRACE_SESSION: "session-a"}, ) with WorkspaceContext("team-sessions-b"): exp_b = workspace_tracking_store.create_experiment("exp-sessions-b") _create_trace( workspace_tracking_store, "trace-b1", exp_b, request_time=1500, trace_metadata={TraceMetadataKey.TRACE_SESSION: "session-b"}, ) _create_trace( workspace_tracking_store, "trace-b2", exp_b, request_time=2500, trace_metadata={TraceMetadataKey.TRACE_SESSION: "session-b"}, ) completed = workspace_tracking_store.find_completed_sessions( experiment_id=exp_a, min_last_trace_timestamp_ms=0, max_last_trace_timestamp_ms=3000, ) assert completed == [] with WorkspaceContext("team-sessions-a"): completed = workspace_tracking_store.find_completed_sessions( experiment_id=exp_a, min_last_trace_timestamp_ms=0, max_last_trace_timestamp_ms=3000, ) assert len(completed) == 1 assert completed[0].session_id == "session-a" with WorkspaceContext("team-sessions-b"): completed = workspace_tracking_store.find_completed_sessions( experiment_id=exp_b, min_last_trace_timestamp_ms=0, max_last_trace_timestamp_ms=3000, ) assert len(completed) == 1 assert completed[0].session_id == "session-b" def test_delete_scorer_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-del-scorer-a"): exp_a = workspace_tracking_store.create_experiment("exp-del-scorer-a") with mock.patch.object( workspace_tracking_store, "get_gateway_endpoint", return_value=_mock_gateway_endpoint(), ): workspace_tracking_store.register_scorer( exp_a, "scorer-a", _gateway_model_scorer_json() ) with WorkspaceContext("team-del-scorer-b"): exp_b = workspace_tracking_store.create_experiment("exp-del-scorer-b") with mock.patch.object( workspace_tracking_store, "get_gateway_endpoint", return_value=_mock_gateway_endpoint(), ): workspace_tracking_store.register_scorer( exp_b, "scorer-b", _gateway_model_scorer_json() ) # Cross-workspace delete_scorer should fail (experiment not visible) with pytest.raises(MlflowException, match="exists"): workspace_tracking_store.delete_scorer(exp_a, "scorer-a") # Scorer in workspace A should still exist with WorkspaceContext("team-del-scorer-a"): versions = workspace_tracking_store.list_scorer_versions(exp_a, "scorer-a") assert len(versions) == 1 # Same-workspace delete should succeed workspace_tracking_store.delete_scorer(exp_a, "scorer-a") with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.list_scorer_versions(exp_a, "scorer-a") def test_delete_dataset_records_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-del-rec-a"): exp_a = workspace_tracking_store.create_experiment("exp-del-rec-a") dataset_a = workspace_tracking_store.create_dataset( name="dataset-del-rec-a", experiment_ids=[exp_a], ) workspace_tracking_store.upsert_dataset_records( dataset_a.dataset_id, [ {"inputs": {"x": 1}, "outputs": {"y": 1}}, {"inputs": {"x": 2}, "outputs": {"y": 2}}, ], ) with WorkspaceContext("team-del-rec-b"): workspace_tracking_store.create_experiment("exp-del-rec-b") # Cross-workspace access to the dataset should be blocked with pytest.raises(MlflowException, match="not found"): workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) # Cross-workspace delete should also be blocked with pytest.raises(MlflowException, match="not found"): workspace_tracking_store.delete_dataset_records(dataset_a.dataset_id, ["any-record-id"]) # Records in workspace A should still exist with WorkspaceContext("team-del-rec-a"): records, _ = workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) assert len(records) == 2 record_ids = [r.dataset_record_id for r in records] # Same-workspace delete should succeed deleted = workspace_tracking_store.delete_dataset_records( dataset_a.dataset_id, record_ids[:1] ) assert deleted == 1 remaining, _ = workspace_tracking_store._load_dataset_records(dataset_a.dataset_id) assert len(remaining) == 1 def test_gateway_config_resolver_scopes_bindings(gateway_workspace_store): resource_type = GatewayResourceType.SCORER.value resource_id = "job-42" with WorkspaceContext("team-resolver-a"): secret_a = gateway_workspace_store.create_gateway_secret( secret_name="secret-a", secret_value={"api_key": "val-a"}, provider="openai" ) def_a = gateway_workspace_store.create_gateway_model_definition( name="def-a", secret_id=secret_a.secret_id, provider="openai", model_name="gpt-4", ) endpoint_a = gateway_workspace_store.create_gateway_endpoint( name="endpoint-a", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_a.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], ) gateway_workspace_store.create_endpoint_binding( endpoint_id=endpoint_a.endpoint_id, resource_type=resource_type, resource_id=resource_id, created_by="user-a", ) with WorkspaceContext("team-resolver-b"): secret_b = gateway_workspace_store.create_gateway_secret( secret_name="secret-b", secret_value={"api_key": "val-b"}, provider="anthropic" ) def_b = gateway_workspace_store.create_gateway_model_definition( name="def-b", secret_id=secret_b.secret_id, provider="anthropic", model_name="claude-3", ) endpoint_b = gateway_workspace_store.create_gateway_endpoint( name="endpoint-b", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_b.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], ) gateway_workspace_store.create_endpoint_binding( endpoint_id=endpoint_b.endpoint_id, resource_type=resource_type, resource_id=resource_id, created_by="user-b", ) configs = get_resource_endpoint_configs( resource_type=resource_type, resource_id=resource_id, store=gateway_workspace_store, ) assert len(configs) == 1 assert configs[0].endpoint_id == endpoint_b.endpoint_id assert configs[0].models[0].secret_value["api_key"] == "val-b" with WorkspaceContext("team-resolver-a"): configs = get_resource_endpoint_configs( resource_type=resource_type, resource_id=resource_id, store=gateway_workspace_store, ) assert len(configs) == 1 assert configs[0].endpoint_id == endpoint_a.endpoint_id assert configs[0].models[0].secret_value["api_key"] == "val-a" def test_gateway_config_resolver_scopes_endpoints(gateway_workspace_store): with WorkspaceContext("team-endpoint-a"): secret_a = gateway_workspace_store.create_gateway_secret( secret_name="secret-a", secret_value={"api_key": "val-a"}, provider="openai" ) def_a = gateway_workspace_store.create_gateway_model_definition( name="def-a", secret_id=secret_a.secret_id, provider="openai", model_name="gpt-4", ) endpoint_a = gateway_workspace_store.create_gateway_endpoint( name="endpoint-shared", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_a.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], ) with WorkspaceContext("team-endpoint-b"): secret_b = gateway_workspace_store.create_gateway_secret( secret_name="secret-b", secret_value={"api_key": "val-b"}, provider="anthropic" ) def_b = gateway_workspace_store.create_gateway_model_definition( name="def-b", secret_id=secret_b.secret_id, provider="anthropic", model_name="claude-3", ) endpoint_b = gateway_workspace_store.create_gateway_endpoint( name="endpoint-shared", model_configs=[ GatewayEndpointModelConfig( model_definition_id=def_b.model_definition_id, weight=1.0, linkage_type=GatewayModelLinkageType.PRIMARY, ) ], ) config_b = get_endpoint_config( endpoint_name="endpoint-shared", store=gateway_workspace_store ) assert config_b.endpoint_id == endpoint_b.endpoint_id assert config_b.models[0].secret_value["api_key"] == "val-b" with pytest.raises(MlflowException, match="GatewayEndpoint not found"): get_endpoint_config(endpoint_name="endpoint-a", store=gateway_workspace_store) with WorkspaceContext("team-endpoint-a"): config_a = get_endpoint_config( endpoint_name="endpoint-shared", store=gateway_workspace_store ) assert config_a.endpoint_id == endpoint_a.endpoint_id assert config_a.models[0].secret_value["api_key"] == "val-a" def test_get_issue_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_id_a = workspace_tracking_store.create_experiment("issue-exp-a") issue_a = workspace_tracking_store.create_issue( experiment_id=exp_id_a, name="Issue A", description="Test issue in workspace A", status="open", ) retrieved_issue = workspace_tracking_store.get_issue(issue_a.issue_id) assert retrieved_issue.issue_id == issue_a.issue_id assert retrieved_issue.name == "Issue A" with WorkspaceContext("team-b"): with pytest.raises( MlflowException, match=f"Issue with ID '{issue_a.issue_id}' not found" ) as excinfo: workspace_tracking_store.get_issue(issue_a.issue_id) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_create_issue_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_id_a = workspace_tracking_store.create_experiment("issue-exp-create-a") issue_a = workspace_tracking_store.create_issue( experiment_id=exp_id_a, name="Issue Create Test A", description="Test issue creation in workspace A", status="open", ) assert issue_a.name == "Issue Create Test A" with WorkspaceContext("team-b"): exp_id_b = workspace_tracking_store.create_experiment("issue-exp-create-b") issue_b = workspace_tracking_store.create_issue( experiment_id=exp_id_b, name="Issue Create Test B", description="Test issue creation in workspace B", status="open", ) assert issue_b.name == "Issue Create Test B" with pytest.raises(MlflowException, match=f"Issue with ID '{issue_a.issue_id}' not found"): workspace_tracking_store.get_issue(issue_a.issue_id) def test_update_issue_is_workspace_scoped(workspace_tracking_store): with WorkspaceContext("team-a"): exp_id_a = workspace_tracking_store.create_experiment("issue-exp-update-a") issue_a = workspace_tracking_store.create_issue( experiment_id=exp_id_a, name="Original Name", description="Original description", status="open", ) updated_issue = workspace_tracking_store.update_issue( issue_id=issue_a.issue_id, name="Updated Name A", status="in_progress", ) assert updated_issue.name == "Updated Name A" assert updated_issue.status == "in_progress" with WorkspaceContext("team-b"): with pytest.raises( MlflowException, match=f"Issue with ID '{issue_a.issue_id}' not found" ) as excinfo: workspace_tracking_store.update_issue( issue_id=issue_a.issue_id, name="Should not update", ) assert excinfo.value.error_code == "RESOURCE_DOES_NOT_EXIST"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/tracking/test_sqlalchemy_workspace_store.py", "license": "Apache License 2.0", "lines": 1842, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/workspace/test_rest_store.py
from __future__ import annotations import json from types import SimpleNamespace from unittest import mock import pytest from mlflow.entities.workspace import Workspace, WorkspaceDeletionMode from mlflow.exceptions import MlflowException, RestException from mlflow.protos.service_pb2 import ( CreateWorkspace, DeleteWorkspace, GetWorkspace, ListWorkspaces, UpdateWorkspace, ) from mlflow.store.workspace.rest_store import WORKSPACES_ENDPOINT, RestWorkspaceStore @pytest.fixture def host_creds(): return SimpleNamespace() @pytest.fixture def store(host_creds): return RestWorkspaceStore(lambda: host_creds) def test_list_workspaces_parses_response(store, host_creds): response = ListWorkspaces.Response() response.workspaces.add(name="default", description="Default workspace") response.workspaces.add(name="team-a", description="Team A") with mock.patch( "mlflow.store.workspace.rest_store.call_endpoint", return_value=response ) as call_endpoint: workspaces = store.list_workspaces() assert [ws.name for ws in workspaces] == ["default", "team-a"] call_endpoint.assert_called_once() kwargs = call_endpoint.call_args.kwargs assert kwargs["host_creds"] is host_creds assert kwargs["endpoint"] == WORKSPACES_ENDPOINT assert kwargs["method"] == "GET" assert kwargs["json_body"] is None assert kwargs.get("expected_status", 200) == 200 def test_get_workspace_returns_entity(store, host_creds): response = GetWorkspace.Response() response.workspace.name = "team-b" response.workspace.description = "Team B" with mock.patch( "mlflow.store.workspace.rest_store.call_endpoint", return_value=response ) as call_endpoint: workspace = store.get_workspace("team-b") assert workspace.name == "team-b" assert workspace.description == "Team B" call_endpoint.assert_called_once() kwargs = call_endpoint.call_args.kwargs assert kwargs["endpoint"] == f"{WORKSPACES_ENDPOINT}/team-b" assert kwargs["method"] == "GET" def test_create_workspace_sends_payload(store, host_creds): response = CreateWorkspace.Response() response.workspace.name = "team-c" response.workspace.description = "Team C" with mock.patch( "mlflow.store.workspace.rest_store.call_endpoint", return_value=response ) as call_endpoint: workspace = store.create_workspace(Workspace(name="team-c", description="Team C")) assert workspace.name == "team-c" assert workspace.description == "Team C" call_endpoint.assert_called_once() kwargs = call_endpoint.call_args.kwargs assert kwargs["endpoint"] == WORKSPACES_ENDPOINT assert kwargs["method"] == "POST" assert kwargs["expected_status"] == 201 assert json.loads(kwargs["json_body"]) == {"name": "team-c", "description": "Team C"} def test_create_workspace_conflict_raises_resource_exists(store, monkeypatch): exc = RestException({"error_code": "RESOURCE_ALREADY_EXISTS", "message": "already exists"}) monkeypatch.setattr( "mlflow.store.workspace.rest_store.call_endpoint", mock.Mock(side_effect=exc), ) with pytest.raises( MlflowException, match="already exists", ) as exc_info: store.create_workspace(Workspace(name="team-a")) assert exc_info.value.error_code == "RESOURCE_ALREADY_EXISTS" assert "already exists" in exc_info.value.message def test_update_workspace_returns_new_description(store, host_creds): response = UpdateWorkspace.Response() response.workspace.name = "team-e" response.workspace.description = "updated" with mock.patch( "mlflow.store.workspace.rest_store.call_endpoint", return_value=response ) as call_endpoint: workspace = store.update_workspace(Workspace(name="team-e", description="updated")) assert workspace.description == "updated" call_endpoint.assert_called_once() kwargs = call_endpoint.call_args.kwargs assert kwargs["endpoint"] == f"{WORKSPACES_ENDPOINT}/team-e" assert kwargs["method"] == "PATCH" assert json.loads(kwargs["json_body"]) == {"description": "updated"} def test_delete_workspace_returns_on_success(store, host_creds): response = DeleteWorkspace.Response() with mock.patch( "mlflow.store.workspace.rest_store.call_endpoint", return_value=response ) as call_endpoint: store.delete_workspace("team-f") call_endpoint.assert_called_once() kwargs = call_endpoint.call_args.kwargs assert kwargs["endpoint"] == f"{WORKSPACES_ENDPOINT}/team-f" assert kwargs["method"] == "DELETE" assert kwargs["expected_status"] == 204 assert kwargs["json_body"] is None @pytest.mark.parametrize( ("mode", "expected_suffix"), [ (WorkspaceDeletionMode.RESTRICT, ""), (WorkspaceDeletionMode.CASCADE, "?mode=CASCADE"), (WorkspaceDeletionMode.SET_DEFAULT, "?mode=SET_DEFAULT"), ], ) def test_delete_workspace_sends_mode_query_param(store, host_creds, mode, expected_suffix): response = DeleteWorkspace.Response() with mock.patch( "mlflow.store.workspace.rest_store.call_endpoint", return_value=response ) as call_endpoint: store.delete_workspace("team-f", mode=mode) kwargs = call_endpoint.call_args.kwargs assert kwargs["endpoint"] == f"{WORKSPACES_ENDPOINT}/team-f{expected_suffix}" def test_get_default_workspace_not_supported(store): with pytest.raises( NotImplementedError, match="REST workspace provider does not expose a default workspace", ): store.get_default_workspace() def test_rest_store_validates_workspace_names_before_http(monkeypatch, store): mock_call = mock.Mock() monkeypatch.setattr("mlflow.store.workspace.rest_store.call_endpoint", mock_call) with pytest.raises(MlflowException, match="must match the pattern"): store.get_workspace("Invalid") mock_call.assert_not_called()
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/workspace/test_rest_store.py", "license": "Apache License 2.0", "lines": 134, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/workspace/test_sqlalchemy_store.py
import pytest import sqlalchemy as sa from sqlalchemy.exc import IntegrityError from mlflow.entities.workspace import Workspace, WorkspaceDeletionMode from mlflow.exceptions import MlflowException from mlflow.store.workspace.dbmodels.models import SqlWorkspace from mlflow.store.workspace.sqlalchemy_store import SqlAlchemyStore from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME @pytest.fixture def workspace_store(db_uri, monkeypatch): monkeypatch.setenv("MLFLOW_ENABLE_WORKSPACES", "true") store = SqlAlchemyStore(db_uri) with store.ManagedSessionMaker() as session: try: session.add( SqlWorkspace( name=DEFAULT_WORKSPACE_NAME, description="Default workspace", ) ) session.commit() except IntegrityError: session.rollback() try: yield store finally: store._engine.dispose() def _workspace_rows(store): with store.ManagedSessionMaker() as session: return { (row.name, row.description) for row in session.query(SqlWorkspace).order_by(SqlWorkspace.name).all() } def test_list_workspaces_returns_all(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description="Team A")) workspace_store.create_workspace(Workspace(name="team-b", description=None)) workspaces = workspace_store.list_workspaces() rows = {(ws.name, ws.description) for ws in workspaces} default_description = next(desc for name, desc in rows if name == DEFAULT_WORKSPACE_NAME) assert rows == { (DEFAULT_WORKSPACE_NAME, default_description), ("team-a", "Team A"), ("team-b", None), } def test_get_workspace_success(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description="Team A")) workspace = workspace_store.get_workspace("team-a") assert workspace.name == "team-a" assert workspace.description == "Team A" def test_get_workspace_not_found(workspace_store): with pytest.raises(MlflowException, match="Workspace 'unknown' not found") as exc: workspace_store.get_workspace("unknown") assert exc.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_create_workspace_persists_record(workspace_store): created = workspace_store.create_workspace( Workspace(name="team-a", description="Team A", default_artifact_root="s3://root/team-a"), ) assert created.name == "team-a" assert created.description == "Team A" assert created.default_artifact_root == "s3://root/team-a" assert ("team-a", "Team A") in _workspace_rows(workspace_store) def test_create_workspace_duplicate_raises(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) with pytest.raises( MlflowException, match="Workspace 'team-a' already exists\\.", ) as exc: workspace_store.create_workspace(Workspace(name="team-a", description=None)) assert exc.value.error_code == "RESOURCE_ALREADY_EXISTS" def test_create_workspace_invalid_name_raises(workspace_store): with pytest.raises( MlflowException, match="Workspace name 'Team-A' must match the pattern", ) as exc: workspace_store.create_workspace(Workspace(name="Team-A", description=None)) assert exc.value.error_code == "INVALID_PARAMETER_VALUE" def test_update_workspace_changes_description(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description="old")) updated = workspace_store.update_workspace( Workspace(name="team-a", description="new description"), ) assert updated.description == "new description" assert ("team-a", "new description") in _workspace_rows(workspace_store) def test_update_workspace_sets_default_artifact_root(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description="old")) updated = workspace_store.update_workspace( Workspace(name="team-a", default_artifact_root="s3://bucket/team-a"), ) assert updated.default_artifact_root == "s3://bucket/team-a" fetched = workspace_store.get_workspace("team-a") assert fetched.default_artifact_root == "s3://bucket/team-a" def test_update_workspace_can_clear_default_artifact_root(workspace_store): workspace_store.create_workspace( Workspace(name="team-a", description="old", default_artifact_root="s3://bucket/team-a") ) # Empty string signals "clear this field" cleared = workspace_store.update_workspace( Workspace(name="team-a", default_artifact_root=""), ) assert cleared.default_artifact_root is None fetched = workspace_store.get_workspace("team-a") assert fetched.default_artifact_root is None def test_delete_workspace_removes_empty_workspace(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) workspace_store.delete_workspace("team-a") rows = _workspace_rows(workspace_store) assert ("team-a", None) not in rows default_ws = workspace_store.get_default_workspace() assert (DEFAULT_WORKSPACE_NAME, default_ws.description) in rows def test_delete_default_workspace_rejected(workspace_store): with pytest.raises( MlflowException, match=f"Cannot delete the reserved '{DEFAULT_WORKSPACE_NAME}' workspace", ) as exc: workspace_store.delete_workspace(DEFAULT_WORKSPACE_NAME) assert exc.value.error_code == "INVALID_STATE" def test_update_workspace_not_found(workspace_store): with pytest.raises( MlflowException, match="Workspace 'unknown' not found", ) as exc: workspace_store.update_workspace(Workspace(name="unknown", description="new description")) assert exc.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_delete_workspace_not_found(workspace_store): with pytest.raises( MlflowException, match="Workspace 'unknown' not found", ) as exc: workspace_store.delete_workspace("unknown") assert exc.value.error_code == "RESOURCE_DOES_NOT_EXIST" def test_resolve_artifact_root_returns_default(workspace_store): default_root = "/default/path" assert workspace_store.resolve_artifact_root(default_root, DEFAULT_WORKSPACE_NAME) == ( default_root, True, ) workspace_store.create_workspace(Workspace(name="team-a", description=None)) assert workspace_store.resolve_artifact_root(default_root, workspace_name="team-a") == ( default_root, True, ) def test_resolve_artifact_root_prefers_workspace_override(workspace_store): workspace_store.create_workspace( Workspace( name="team-a", description=None, default_artifact_root="s3://team-a-artifacts", ) ) resolved_root, should_append = workspace_store.resolve_artifact_root( "/default/path", workspace_name="team-a" ) assert resolved_root == "s3://team-a-artifacts" assert not should_append def test_resolve_artifact_root_cache_updates_on_override_change(workspace_store): default_root = "/default/path" workspace_store.create_workspace(Workspace(name="team-cache", description=None)) assert workspace_store.resolve_artifact_root(default_root, "team-cache") == ( default_root, True, ) workspace_store.update_workspace( Workspace(name="team-cache", default_artifact_root="s3://cache/team") ) assert workspace_store.resolve_artifact_root(default_root, "team-cache") == ( "s3://cache/team", False, ) def test_resolve_artifact_root_cache_handles_delete_and_recreate(workspace_store): default_root = "/default/path" workspace_store.create_workspace( Workspace(name="team-cache", description=None, default_artifact_root="s3://cache/a") ) assert workspace_store.resolve_artifact_root(default_root, "team-cache") == ( "s3://cache/a", False, ) workspace_store.delete_workspace("team-cache") workspace_store.create_workspace( Workspace(name="team-cache", description=None, default_artifact_root="s3://cache/b") ) assert workspace_store.resolve_artifact_root(default_root, "team-cache") == ( "s3://cache/b", False, ) def test_resolve_artifact_root_cache_clears_when_override_removed(workspace_store): default_root = "/default/path" workspace_store.create_workspace( Workspace(name="team-cache", description=None, default_artifact_root="s3://cache/a") ) assert workspace_store.resolve_artifact_root(default_root, "team-cache") == ( "s3://cache/a", False, ) workspace_store.update_workspace(Workspace(name="team-cache", default_artifact_root="")) assert workspace_store.resolve_artifact_root(default_root, "team-cache") == ( default_root, True, ) def test_get_default_workspace_returns_default(workspace_store): default_ws = workspace_store.get_default_workspace() assert default_ws.name == DEFAULT_WORKSPACE_NAME assert default_ws.description is not None def test_delete_workspace_reassigns_resources_to_default(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) with workspace_store.ManagedSessionMaker() as session: session.execute( sa.text( "INSERT INTO experiments (name, workspace, lifecycle_stage) " "VALUES (:name, :ws, 'active')" ), {"name": "exp-in-team-a", "ws": "team-a"}, ) workspace_store.delete_workspace("team-a", mode=WorkspaceDeletionMode.SET_DEFAULT) with workspace_store.ManagedSessionMaker() as session: row = session.execute( sa.text("SELECT workspace FROM experiments WHERE name = :name"), {"name": "exp-in-team-a"}, ).fetchone() assert row[0] == DEFAULT_WORKSPACE_NAME def test_delete_workspace_fails_on_naming_conflict(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) with workspace_store.ManagedSessionMaker() as session: session.execute( sa.text( "INSERT INTO experiments (name, workspace, lifecycle_stage) " "VALUES (:name, :ws, 'active')" ), {"name": "shared-exp", "ws": "team-a"}, ) session.execute( sa.text( "INSERT INTO experiments (name, workspace, lifecycle_stage) " "VALUES (:name, :ws, 'active')" ), {"name": "shared-exp", "ws": DEFAULT_WORKSPACE_NAME}, ) with pytest.raises(MlflowException, match="already exist in the default workspace") as exc: workspace_store.delete_workspace("team-a", mode=WorkspaceDeletionMode.SET_DEFAULT) assert exc.value.error_code == "INVALID_STATE" # Workspace should still exist (transaction rolled back) ws = workspace_store.get_workspace("team-a") assert ws.name == "team-a" def test_delete_workspace_cascade_removes_resources(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) with workspace_store.ManagedSessionMaker() as session: session.execute( sa.text( "INSERT INTO experiments (name, workspace, lifecycle_stage) " "VALUES (:name, :ws, 'active')" ), {"name": "exp-in-team-a", "ws": "team-a"}, ) workspace_store.delete_workspace("team-a", mode=WorkspaceDeletionMode.CASCADE) with workspace_store.ManagedSessionMaker() as session: row = session.execute( sa.text("SELECT count(*) FROM experiments WHERE name = :name"), {"name": "exp-in-team-a"}, ).scalar() assert row == 0 with pytest.raises(MlflowException, match="not found"): workspace_store.get_workspace("team-a") def test_delete_workspace_cascade_removes_experiment_with_runs(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) with workspace_store.ManagedSessionMaker() as session: session.execute( sa.text( "INSERT INTO experiments (experiment_id, name, workspace, lifecycle_stage) " "VALUES (:id, :name, :ws, 'active')" ), {"id": 999, "name": "exp-with-runs", "ws": "team-a"}, ) session.execute( sa.text( "INSERT INTO runs (run_uuid, name, experiment_id, lifecycle_stage, status, " "source_type, start_time, end_time) " "VALUES (:run_id, :name, :exp_id, 'active', 'FINISHED', 'LOCAL', 0, 0)" ), {"run_id": "run-in-team-a", "name": "test-run", "exp_id": 999}, ) workspace_store.delete_workspace("team-a", mode=WorkspaceDeletionMode.CASCADE) with workspace_store.ManagedSessionMaker() as session: exp_count = session.execute( sa.text("SELECT count(*) FROM experiments WHERE name = :name"), {"name": "exp-with-runs"}, ).scalar() assert exp_count == 0 run_count = session.execute( sa.text("SELECT count(*) FROM runs WHERE run_uuid = :run_id"), {"run_id": "run-in-team-a"}, ).scalar() assert run_count == 0 def test_delete_workspace_restrict_blocks_when_resources_exist(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) with workspace_store.ManagedSessionMaker() as session: session.execute( sa.text( "INSERT INTO experiments (name, workspace, lifecycle_stage) " "VALUES (:name, :ws, 'active')" ), {"name": "exp-in-team-a", "ws": "team-a"}, ) with pytest.raises(MlflowException, match="still contains") as exc: workspace_store.delete_workspace("team-a", mode=WorkspaceDeletionMode.RESTRICT) assert exc.value.error_code == "INVALID_STATE" # Workspace and resources should still exist ws = workspace_store.get_workspace("team-a") assert ws.name == "team-a" with workspace_store.ManagedSessionMaker() as session: row = session.execute( sa.text("SELECT workspace FROM experiments WHERE name = :name"), {"name": "exp-in-team-a"}, ).fetchone() assert row[0] == "team-a" def test_delete_workspace_restrict_allows_empty_workspace(workspace_store): workspace_store.create_workspace(Workspace(name="team-a", description=None)) workspace_store.delete_workspace("team-a", mode=WorkspaceDeletionMode.RESTRICT) with pytest.raises(MlflowException, match="not found"): workspace_store.get_workspace("team-a")
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/workspace/test_sqlalchemy_store.py", "license": "Apache License 2.0", "lines": 319, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/store/workspace/test_workspace_validator.py
import pytest from mlflow.exceptions import MlflowException from mlflow.store.workspace.abstract_store import WorkspaceNameValidator @pytest.mark.parametrize( "name", [ "team-a", "ab", "a" * 63, "123", "a1-b2", ], ) def test_workspace_name_validator_accepts_valid_names(name): WorkspaceNameValidator.validate(name) @pytest.mark.parametrize( ("name", "error_fragment"), [ (123, "must be a string"), ("t", "must be between"), ("a" * 64, "must be between"), ("a" * 256, "must be between"), ("Team-A", "must match the pattern"), ("team_a", "must match the pattern"), ("team--a", "must match the pattern"), ("-team", "must match the pattern"), ("team-", "must match the pattern"), ("workspaces", "is reserved"), ], ) def test_workspace_name_validator_validate_raises(name, error_fragment): with pytest.raises(MlflowException, match=error_fragment): WorkspaceNameValidator.validate(name)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/workspace/test_workspace_validator.py", "license": "Apache License 2.0", "lines": 33, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/tracking/test_client_workspace.py
from __future__ import annotations from mlflow import MlflowClient from mlflow.environment_variables import MLFLOW_TRACKING_URI, MLFLOW_WORKSPACE_STORE_URI from mlflow.tracking._workspace import fluent as workspace_fluent from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME def test_mlflow_client_resolves_workspace_uri(monkeypatch): recorded: dict[str, str | None] = {} class DummyTrackingClient: def __init__(self, tracking_uri: str): recorded["tracking_uri"] = tracking_uri self.tracking_uri = tracking_uri class DummyWorkspaceClient: def __init__(self, workspace_uri: str | None = None): recorded["workspace_uri"] = workspace_uri # Methods invoked downstream are irrelevant for this initialization test. def list_workspaces(self): return [] monkeypatch.setattr("mlflow.tracking.client.TrackingServiceClient", DummyTrackingClient) monkeypatch.setattr("mlflow.tracking.client.TracingClient", lambda _: None) monkeypatch.setattr("mlflow.tracking.client.WorkspaceProviderClient", DummyWorkspaceClient) monkeypatch.setattr( "mlflow.tracking.client.utils._resolve_tracking_uri", lambda uri: uri or "sqlite:///tracking.db", ) monkeypatch.setattr( "mlflow.tracking.client.registry_utils._resolve_registry_uri", lambda registry_uri, tracking_uri: "registry-resolved", ) monkeypatch.setenv(MLFLOW_TRACKING_URI.name, "sqlite:///tracking.db") monkeypatch.setenv(MLFLOW_WORKSPACE_STORE_URI.name, "sqlite:///workspace.db") client = MlflowClient() client.list_workspaces() assert recorded["workspace_uri"] == "sqlite:///workspace.db" recorded.clear() client = MlflowClient(workspace_store_uri="sqlite:///explicit.db") client.list_workspaces() assert recorded["workspace_uri"] == "sqlite:///explicit.db" recorded.clear() monkeypatch.delenv(MLFLOW_WORKSPACE_STORE_URI.name, raising=False) client = MlflowClient() client.list_workspaces() # Falls back to the tracking URI when workspace URI is unset. assert recorded["workspace_uri"] == "sqlite:///tracking.db" def test_set_workspace_sets_env_and_context(monkeypatch): calls: dict[str, list[str]] = {"set_context_workspace": []} env: dict[str, str | None] = {} monkeypatch.setattr( workspace_fluent, "WorkspaceNameValidator", type( "Validator", (), {"validate": lambda name: calls.setdefault("validate", []).append(name)}, ), ) monkeypatch.setattr( workspace_fluent, "set_context_workspace", lambda name: ( calls["set_context_workspace"].append(name), env.__setitem__("value", name), ), ) workspace_fluent.set_workspace("team-space") assert calls["validate"] == ["team-space"] assert calls["set_context_workspace"] == ["team-space"] assert env["value"] == "team-space" def test_set_workspace_clears_when_none(monkeypatch): env: dict[str, str | None] = {} calls = {"clear_workspace": 0, "set_context_workspace": [], "validate": []} monkeypatch.setattr( workspace_fluent, "WorkspaceNameValidator", type( "Validator", (), {"validate": lambda name: calls["validate"].append(name)}, ), ) monkeypatch.setattr( workspace_fluent, "set_context_workspace", lambda name: ( calls["set_context_workspace"].append(name), env.__setitem__("value", name), calls.__setitem__( "clear_workspace", calls["clear_workspace"] + (1 if name is None else 0) ), ), ) # Ensure default workspace does not trigger validation but does set env workspace_fluent.set_workspace(DEFAULT_WORKSPACE_NAME) assert calls["validate"] == [] assert env["value"] == DEFAULT_WORKSPACE_NAME workspace_fluent.set_workspace(None) assert calls["clear_workspace"] == 1 assert env.get("value") is None
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracking/test_client_workspace.py", "license": "Apache License 2.0", "lines": 97, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/tracking/test_workspace_registry.py
from __future__ import annotations import pytest from mlflow.store.workspace.rest_store import RestWorkspaceStore from mlflow.store.workspace.sqlalchemy_store import SqlAlchemyStore from mlflow.tracking._workspace.registry import ( UnsupportedWorkspaceStoreURIException, _get_workspace_store_registry, get_workspace_store, ) @pytest.fixture(autouse=True) def _clear_workspace_store_cache(): registry = _get_workspace_store_registry() registry._get_store_with_resolved_uri.cache_clear() yield registry._get_store_with_resolved_uri.cache_clear() def test_get_workspace_store_resolves_sqlalchemy(tmp_path): workspace_uri = f"sqlite:///{tmp_path / 'workspace.db'}" store = get_workspace_store(workspace_uri=workspace_uri) assert isinstance(store, SqlAlchemyStore) assert store._workspace_uri == workspace_uri store._engine.dispose() def test_get_workspace_store_resolves_rest(): store = get_workspace_store(workspace_uri="http://example.com") assert isinstance(store, RestWorkspaceStore) def test_get_workspace_store_unsupported_scheme(): with pytest.raises( UnsupportedWorkspaceStoreURIException, match="got unsupported URI 'foo://workspace'", ): get_workspace_store(workspace_uri="foo://workspace")
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracking/test_workspace_registry.py", "license": "Apache License 2.0", "lines": 30, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/tracking/test_workspace_utils.py
import pytest from mlflow.environment_variables import MLFLOW_TRACKING_URI, MLFLOW_WORKSPACE_STORE_URI from mlflow.utils.workspace_utils import resolve_workspace_store_uri, set_workspace_store_uri @pytest.fixture(autouse=True) def _reset_workspace_uri(monkeypatch): set_workspace_store_uri(None) monkeypatch.delenv(MLFLOW_WORKSPACE_STORE_URI.name, raising=False) monkeypatch.delenv(MLFLOW_TRACKING_URI.name, raising=False) yield set_workspace_store_uri(None) monkeypatch.delenv(MLFLOW_WORKSPACE_STORE_URI.name, raising=False) monkeypatch.delenv(MLFLOW_TRACKING_URI.name, raising=False) def test_resolve_workspace_uri_prefers_explicit_argument(monkeypatch): monkeypatch.setenv(MLFLOW_WORKSPACE_STORE_URI.name, "sqlite:///env-workspaces.db") result = resolve_workspace_store_uri("sqlite:///explicit.db") assert result == "sqlite:///explicit.db" def test_resolve_workspace_uri_uses_configured_value(monkeypatch): set_workspace_store_uri("sqlite:///configured.db") result = resolve_workspace_store_uri(tracking_uri="sqlite:///tracking.db") assert result == "sqlite:///configured.db" def test_resolve_workspace_uri_uses_environment(monkeypatch): monkeypatch.setenv(MLFLOW_WORKSPACE_STORE_URI.name, "sqlite:///env.db") result = resolve_workspace_store_uri(tracking_uri="file:///mlruns") assert result == "sqlite:///env.db" def test_resolve_workspace_uri_defaults_to_tracking(monkeypatch): result = resolve_workspace_store_uri(tracking_uri="sqlite:///tracking-default.db") assert result == "sqlite:///tracking-default.db"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracking/test_workspace_utils.py", "license": "Apache License 2.0", "lines": 27, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/utils/test_workspace_utils.py
from __future__ import annotations import os from mlflow.environment_variables import MLFLOW_WORKSPACE from mlflow.utils.workspace_context import WorkspaceContext, clear_server_request_workspace from mlflow.utils.workspace_utils import ( DEFAULT_WORKSPACE_NAME, resolve_entity_workspace_name, ) def teardown_function(): # Ensure the ContextVar does not leak between tests clear_server_request_workspace() os.environ.pop(MLFLOW_WORKSPACE.name, None) def test_resolve_entity_workspace_prefers_argument(): assert resolve_entity_workspace_name(" team-arg ") == "team-arg" def test_resolve_entity_workspace_uses_context_var(): with WorkspaceContext("ctx-workspace"): assert resolve_entity_workspace_name(None) == "ctx-workspace" def test_resolve_entity_workspace_falls_back_to_env(monkeypatch): monkeypatch.delenv(MLFLOW_WORKSPACE.name, raising=False) with WorkspaceContext(None): pass monkeypatch.setenv(MLFLOW_WORKSPACE.name, " env-workspace ") assert resolve_entity_workspace_name(None) == "env-workspace" def test_resolve_entity_workspace_defaults_when_unset(monkeypatch): monkeypatch.delenv(MLFLOW_WORKSPACE.name, raising=False) with WorkspaceContext(None): pass assert resolve_entity_workspace_name(None) == DEFAULT_WORKSPACE_NAME
{ "repo_id": "mlflow/mlflow", "file_path": "tests/utils/test_workspace_utils.py", "license": "Apache License 2.0", "lines": 28, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/clint/src/clint/rules/unused_disable_comment.py
from clint.rules.base import Rule class UnusedDisableComment(Rule): def __init__(self, rule_name: str) -> None: self.rule_name = rule_name def _message(self) -> str: return f"Unused disable comment for rule `{self.rule_name}`"
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/src/clint/rules/unused_disable_comment.py", "license": "Apache License 2.0", "lines": 6, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:dev/clint/tests/rules/test_unused_disable_comment.py
from pathlib import Path from clint.config import Config from clint.linter import Position, Range, lint_file from clint.rules import UnusedDisableComment def test_stale_disable_comment(index_path: Path) -> None: code = """ import os # clint: disable=lazy-import """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 1 assert isinstance(results[0].rule, UnusedDisableComment) assert results[0].rule.rule_name == "lazy-import" assert results[0].range == Range(Position(1, 13)) def test_active_disable_comment(index_path: Path) -> None: code = """ def f(): import os # clint: disable=lazy-import """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 0 def test_mixed_disable_comments(index_path: Path) -> None: code = """ import os # clint: disable=lazy-import def f(): import sys # clint: disable=lazy-import """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 1 assert isinstance(results[0].rule, UnusedDisableComment) assert results[0].rule.rule_name == "lazy-import" assert results[0].range == Range(Position(1, 13)) def test_unused_disable_comment_can_be_disabled(index_path: Path) -> None: code = """ import os # clint: disable=lazy-import,unused-disable-comment """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 0 def test_disable_next_suppresses_next_line(index_path: Path) -> None: code = """ def f(): # clint: disable-next=lazy-import import os """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 0 def test_disable_next_unused_reports_at_comment_line(index_path: Path) -> None: code = """ # clint: disable-next=lazy-import import os """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 1 assert isinstance(results[0].rule, UnusedDisableComment) assert results[0].rule.rule_name == "lazy-import" assert results[0].range == Range(Position(1, 2)) def test_disable_next_multi_rule_partial_used(index_path: Path) -> None: code = """ def f(): # clint: disable-next=lazy-import,unused-disable-comment import os """ config = Config(select={UnusedDisableComment.name, "lazy-import"}) results = lint_file(Path("test.py"), code, config, index_path) assert len(results) == 1 assert isinstance(results[0].rule, UnusedDisableComment) assert results[0].rule.rule_name == "unused-disable-comment"
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/rules/test_unused_disable_comment.py", "license": "Apache License 2.0", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/clint/tests/test_ignore_map.py
from clint.linter import DisableComment, parse_disable_comments def test_single_rule() -> None: code = """ x = 1 # clint: disable=rule-a y = 2 """ assert parse_disable_comments(code) == [DisableComment("rule-a", 1, 9, 1)] def test_multiple_rules() -> None: code = """ x = 1 # clint: disable=rule-a,rule-b y = 2 """ assert parse_disable_comments(code) == [ DisableComment("rule-a", 1, 9, 1), DisableComment("rule-b", 1, 9, 1), ] def test_multiple_rules_with_spaces() -> None: code = """ x = 1 # clint: disable=rule-a, rule-b, rule-c y = 2 """ assert parse_disable_comments(code) == [ DisableComment("rule-a", 1, 9, 1), DisableComment("rule-b", 1, 9, 1), DisableComment("rule-c", 1, 9, 1), ] def test_multiple_lines() -> None: code = """ x = 1 # clint: disable=rule-a y = 2 # clint: disable=rule-b z = 3 # clint: disable=rule-a,rule-b """ assert parse_disable_comments(code) == [ DisableComment("rule-a", 1, 9, 1), DisableComment("rule-b", 2, 9, 2), DisableComment("rule-a", 3, 9, 3), DisableComment("rule-b", 3, 9, 3), ] def test_no_disable_comments() -> None: code = """ x = 1 y = 2 """ assert parse_disable_comments(code) == [] def test_various_spacing_around_commas() -> None: code = """ a = 1 # clint: disable=rule-a,rule-b b = 2 # clint: disable=rule-c, rule-d c = 3 # clint: disable=rule-e ,rule-f d = 4 # clint: disable=rule-g , rule-h """ assert parse_disable_comments(code) == [ DisableComment("rule-a", 1, 9, 1), DisableComment("rule-b", 1, 9, 1), DisableComment("rule-c", 2, 9, 2), DisableComment("rule-d", 2, 9, 2), DisableComment("rule-e", 3, 9, 3), DisableComment("rule-f", 3, 9, 3), DisableComment("rule-g", 4, 9, 4), DisableComment("rule-h", 4, 9, 4), ]
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/test_ignore_map.py", "license": "Apache License 2.0", "lines": 61, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/sampling.py
import contextvars from opentelemetry.sdk.trace.sampling import ( Sampler, SamplingResult, TraceIdRatioBased, ) # Context variable to override the sampling ratio for a specific trace. # When set, the sampler uses this ratio instead of the default. _SAMPLING_RATIO_OVERRIDE = contextvars.ContextVar("sampling_ratio_override", default=None) class _MlflowSampler(Sampler): """ A custom OTel sampler that delegates to TraceIdRatioBased but allows per-trace overrides via a ContextVar. When _SAMPLING_RATIO_OVERRIDE is set, uses that ratio instead of the default. Otherwise, falls back to the default ratio (from MLFLOW_TRACE_SAMPLING_RATIO). """ def __init__(self, default_ratio: float = 1.0): self._default_ratio = default_ratio self._default_sampler = TraceIdRatioBased(default_ratio) def should_sample( self, parent_context, trace_id, name, kind=None, attributes=None, links=None, trace_state=None, ) -> SamplingResult: override = _SAMPLING_RATIO_OVERRIDE.get() if override is not None: sampler = TraceIdRatioBased(override) return sampler.should_sample( parent_context, trace_id, name, kind, attributes, links, trace_state ) return self._default_sampler.should_sample( parent_context, trace_id, name, kind, attributes, links, trace_state ) def get_description(self) -> str: return f"MlflowSampler(default_ratio={self._default_ratio})"
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/sampling.py", "license": "Apache License 2.0", "lines": 40, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/gateway/test_tracing_utils.py
import pytest import mlflow from mlflow.entities import SpanType from mlflow.gateway.schemas.chat import StreamResponsePayload from mlflow.gateway.tracing_utils import ( _get_model_span_info, aggregate_chat_stream_chunks, maybe_traced_gateway_call, ) from mlflow.store.tracking.gateway.entities import GatewayEndpointConfig from mlflow.tracing.client import TracingClient from mlflow.tracing.constant import SpanAttributeKey, TraceMetadataKey from mlflow.tracing.distributed import get_tracing_context_headers_for_http_request from mlflow.tracking.fluent import _get_experiment_id from mlflow.types.chat import ChatChoiceDelta, ChatChunkChoice, ChatUsage, Function, ToolCallDelta @pytest.fixture def gateway_experiment_id(): experiment_name = "gateway-test-endpoint" experiment = mlflow.get_experiment_by_name(experiment_name) if experiment is not None: return experiment.experiment_id return mlflow.create_experiment(experiment_name) def get_traces(): return TracingClient().search_traces(locations=[_get_experiment_id()]) @pytest.fixture def endpoint_config(): return GatewayEndpointConfig( endpoint_id="test-endpoint-id", endpoint_name="test-endpoint", experiment_id=_get_experiment_id(), models=[], ) @pytest.fixture def endpoint_config_no_experiment(): return GatewayEndpointConfig( endpoint_id="test-endpoint-id", endpoint_name="test-endpoint", experiment_id=None, models=[], ) async def mock_async_func(payload): return {"result": "success", "payload": payload} def _make_chunk( content=None, finish_reason=None, id="chunk-1", model="test-model", created=1700000000, usage=None, tool_calls=None, role="assistant", choice_index=0, ): delta = ChatChoiceDelta(role=role, content=content, tool_calls=tool_calls) choice = ChatChunkChoice(index=choice_index, finish_reason=finish_reason, delta=delta) return StreamResponsePayload( id=id, created=created, model=model, choices=[choice], usage=usage, ) def test_aggregate_chat_stream_chunks_aggregates_content(): chunks = [ _make_chunk(content="Hello"), _make_chunk(content=" "), _make_chunk(content="world"), _make_chunk(content=None, finish_reason="stop"), ] result = aggregate_chat_stream_chunks(chunks) assert result["object"] == "chat.completion" assert result["model"] == "test-model" assert result["choices"][0]["message"]["role"] == "assistant" assert result["choices"][0]["message"]["content"] == "Hello world" assert result["choices"][0]["finish_reason"] == "stop" def test_aggregate_chat_stream_chunks_with_usage(): usage = ChatUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15) chunks = [ _make_chunk(content="Hi"), _make_chunk(content=None, finish_reason="stop", usage=usage), ] result = aggregate_chat_stream_chunks(chunks) assert result["choices"][0]["message"]["content"] == "Hi" assert result["usage"] == { "prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15, } def test_aggregate_chat_stream_chunks_empty(): assert aggregate_chat_stream_chunks([]) is None def test_aggregate_chat_stream_chunks_defaults_finish_reason(): chunks = [_make_chunk(content="Hi")] result = aggregate_chat_stream_chunks(chunks) assert result["choices"][0]["finish_reason"] == "stop" def test_reduce_chat_stream_chunks_aggregates_tool_calls(): chunks = [ # First chunk: tool call id, type, and function name _make_chunk( tool_calls=[ ToolCallDelta( index=0, id="call_abc", type="function", function=Function(name="get_weather", arguments=""), ), ], ), # Subsequent chunks: argument fragments _make_chunk( tool_calls=[ ToolCallDelta(index=0, function=Function(arguments='{"loc')), ], ), _make_chunk( tool_calls=[ ToolCallDelta(index=0, function=Function(arguments='ation": "SF"}')), ], ), _make_chunk(finish_reason="tool_calls"), ] result = aggregate_chat_stream_chunks(chunks) assert result["choices"][0]["message"]["content"] is None assert result["choices"][0]["finish_reason"] == "tool_calls" tool_calls = result["choices"][0]["message"]["tool_calls"] assert len(tool_calls) == 1 assert tool_calls[0]["id"] == "call_abc" assert tool_calls[0]["type"] == "function" assert tool_calls[0]["function"]["name"] == "get_weather" assert tool_calls[0]["function"]["arguments"] == '{"location": "SF"}' def test_reduce_chat_stream_chunks_derives_role_from_delta(): chunks = [ _make_chunk(role="developer", content="Hello"), _make_chunk(role=None, content=" world"), _make_chunk(role=None, finish_reason="stop"), ] result = aggregate_chat_stream_chunks(chunks) assert result["choices"][0]["message"]["role"] == "developer" def test_reduce_chat_stream_chunks_multiple_choice_indices(): chunks = [ _make_chunk(content="Hi", choice_index=0), _make_chunk(content="Hey", choice_index=1), _make_chunk(content=" there", choice_index=0), _make_chunk(content=" you", choice_index=1), _make_chunk(finish_reason="stop", choice_index=0), _make_chunk(finish_reason="stop", choice_index=1), ] result = aggregate_chat_stream_chunks(chunks) assert len(result["choices"]) == 2 assert result["choices"][0]["index"] == 0 assert result["choices"][0]["message"]["content"] == "Hi there" assert result["choices"][1]["index"] == 1 assert result["choices"][1]["message"]["content"] == "Hey you" @pytest.mark.asyncio async def test_maybe_traced_gateway_call_basic(endpoint_config): traced_func = maybe_traced_gateway_call(mock_async_func, endpoint_config) result = await traced_func({"input": "test"}) assert result == {"result": "success", "payload": {"input": "test"}} traces = get_traces() assert len(traces) == 1 trace = traces[0] # Find the gateway span span_name_to_span = {span.name: span for span in trace.data.spans} assert f"gateway/{endpoint_config.endpoint_name}" in span_name_to_span gateway_span = span_name_to_span[f"gateway/{endpoint_config.endpoint_name}"] assert gateway_span.attributes.get("endpoint_id") == "test-endpoint-id" assert gateway_span.attributes.get("endpoint_name") == "test-endpoint" # Input should be unwrapped (not nested under "payload" key) assert gateway_span.inputs == {"input": "test"} # No user metadata should be present in trace assert trace.info.request_metadata.get(TraceMetadataKey.AUTH_USERNAME) is None assert trace.info.request_metadata.get(TraceMetadataKey.AUTH_USER_ID) is None @pytest.mark.asyncio async def test_maybe_traced_gateway_call_with_user_metadata(endpoint_config): traced_func = maybe_traced_gateway_call( mock_async_func, endpoint_config, metadata={ TraceMetadataKey.AUTH_USERNAME: "alice", TraceMetadataKey.AUTH_USER_ID: "123", }, ) result = await traced_func({"input": "test"}) assert result == {"result": "success", "payload": {"input": "test"}} traces = get_traces() assert len(traces) == 1 trace = traces[0] span_name_to_span = {span.name: span for span in trace.data.spans} gateway_span = span_name_to_span[f"gateway/{endpoint_config.endpoint_name}"] assert gateway_span.attributes.get("endpoint_id") == "test-endpoint-id" assert gateway_span.attributes.get("endpoint_name") == "test-endpoint" # Input should be unwrapped (not nested under "payload" key) assert gateway_span.inputs == {"input": "test"} # User metadata should be in trace info, not span attributes assert trace.info.request_metadata.get(TraceMetadataKey.AUTH_USERNAME) == "alice" assert trace.info.request_metadata.get(TraceMetadataKey.AUTH_USER_ID) == "123" @pytest.mark.asyncio async def test_maybe_traced_gateway_call_without_experiment_id(endpoint_config_no_experiment): traced_func = maybe_traced_gateway_call( mock_async_func, endpoint_config_no_experiment, metadata={ TraceMetadataKey.AUTH_USERNAME: "alice", TraceMetadataKey.AUTH_USER_ID: "123", }, ) # When experiment_id is None, maybe_traced_gateway_call returns the original function assert traced_func is mock_async_func result = await traced_func({"input": "test"}) assert result == {"result": "success", "payload": {"input": "test"}} # No traces should be created traces = get_traces() assert len(traces) == 0 @pytest.mark.asyncio async def test_maybe_traced_gateway_call_with_output_reducer(endpoint_config): async def mock_async_stream(payload): yield _make_chunk(content="Hello") yield _make_chunk(content=" world") yield _make_chunk( content=None, finish_reason="stop", usage=ChatUsage(prompt_tokens=5, completion_tokens=2, total_tokens=7), ) traced_func = maybe_traced_gateway_call( mock_async_stream, endpoint_config, output_reducer=aggregate_chat_stream_chunks, ) # Consume the stream chunks = [ chunk async for chunk in traced_func({"messages": [{"role": "user", "content": "hi"}]}) ] assert len(chunks) == 3 traces = get_traces() assert len(traces) == 1 trace = traces[0] span_name_to_span = {span.name: span for span in trace.data.spans} gateway_span = span_name_to_span[f"gateway/{endpoint_config.endpoint_name}"] # Input should be unwrapped (not nested under "payload" key) assert gateway_span.inputs == {"messages": [{"role": "user", "content": "hi"}]} # The output should be the reduced aggregated response, not raw chunks output = gateway_span.outputs assert output["object"] == "chat.completion" assert output["choices"][0]["message"]["content"] == "Hello world" assert output["choices"][0]["finish_reason"] == "stop" assert output["usage"]["total_tokens"] == 7 @pytest.mark.asyncio async def test_maybe_traced_gateway_call_with_payload_kwarg(endpoint_config): async def mock_passthrough_func(action, payload, headers=None): return {"result": "success", "action": action, "payload": payload} traced_func = maybe_traced_gateway_call(mock_passthrough_func, endpoint_config) result = await traced_func( action="test_action", payload={"messages": [{"role": "user", "content": "hi"}]}, headers={} ) assert result["result"] == "success" traces = get_traces() assert len(traces) == 1 trace = traces[0] span_name_to_span = {span.name: span for span in trace.data.spans} gateway_span = span_name_to_span[f"gateway/{endpoint_config.endpoint_name}"] # Input should be unwrapped to just the payload dict assert gateway_span.inputs == {"messages": [{"role": "user", "content": "hi"}]} # --------------------------------------------------------------------------- # Tests for distributed tracing helpers # --------------------------------------------------------------------------- @pytest.mark.asyncio async def test_get_model_span_info_reads_child_span(endpoint_config): async def func_with_child_span(payload): with mlflow.start_span("provider/openai/gpt-4", span_type=SpanType.LLM) as child: child.set_attributes( { SpanAttributeKey.CHAT_USAGE: { "input_tokens": 10, "output_tokens": 5, "total_tokens": 15, }, SpanAttributeKey.MODEL: "gpt-4", SpanAttributeKey.MODEL_PROVIDER: "openai", } ) return {"result": "ok"} traced = maybe_traced_gateway_call(func_with_child_span, endpoint_config) await traced({"input": "test"}) traces = get_traces() assert len(traces) == 1 gateway_trace_id = traces[0].info.trace_id # After the trace is exported, spans are removed from InMemoryTraceManager, # so we expect empty here. The actual reading happens inside the wrapper # while the trace is still in memory. assert _get_model_span_info(gateway_trace_id) == [] # --------------------------------------------------------------------------- # Integration tests for distributed tracing via traceparent # --------------------------------------------------------------------------- @pytest.mark.asyncio async def test_maybe_traced_gateway_call_with_traceparent(gateway_experiment_id): ep_config = GatewayEndpointConfig( endpoint_id="test-endpoint-id", endpoint_name="test-endpoint", experiment_id=gateway_experiment_id, models=[], ) async def func_with_usage(payload): with mlflow.start_span("provider/openai/gpt-4", span_type=SpanType.LLM) as child: child.set_attributes( { SpanAttributeKey.CHAT_USAGE: { "input_tokens": 10, "output_tokens": 5, "total_tokens": 15, }, SpanAttributeKey.MODEL: "gpt-4", SpanAttributeKey.MODEL_PROVIDER: "openai", } ) return {"result": "ok"} # Step 1: Agent creates span and generates traceparent headers with mlflow.start_span("agent-root") as agent_span: headers = get_tracing_context_headers_for_http_request() agent_trace_id = agent_span.trace_id agent_span_id = agent_span.span_id # Step 2: Gateway processes request (no active agent span, simulating separate server) traced = maybe_traced_gateway_call(func_with_usage, ep_config, request_headers=headers) result = await traced({"input": "test"}) assert result == {"result": "ok"} # Gateway trace should exist in the gateway experiment gateway_traces = TracingClient().search_traces(locations=[gateway_experiment_id]) assert len(gateway_traces) == 1 gateway_trace_id = gateway_traces[0].info.trace_id # The gateway trace should be separate from the agent trace assert gateway_trace_id != agent_trace_id # Agent trace should contain two distributed spans (gateway + provider) mlflow.flush_trace_async_logging() agent_trace = mlflow.get_trace(agent_trace_id) assert agent_trace is not None spans_by_name = {s.name: s for s in agent_trace.data.spans} assert "agent-root" in spans_by_name assert f"gateway/{ep_config.endpoint_name}" in spans_by_name assert "provider/openai/gpt-4" in spans_by_name # Gateway span: child of agent root, has endpoint attrs + link gw_span = spans_by_name[f"gateway/{ep_config.endpoint_name}"] assert gw_span.parent_id == agent_span_id assert gw_span.attributes.get("endpoint_id") == ep_config.endpoint_id assert gw_span.attributes.get("endpoint_name") == ep_config.endpoint_name assert gw_span.attributes.get(SpanAttributeKey.LINKED_GATEWAY_TRACE_ID) == gateway_trace_id # Provider span: child of gateway span, has provider attrs provider_span = spans_by_name["provider/openai/gpt-4"] assert provider_span.parent_id == gw_span.span_id assert provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) == { "input_tokens": 10, "output_tokens": 5, "total_tokens": 15, } assert provider_span.attributes.get(SpanAttributeKey.MODEL) == "gpt-4" assert provider_span.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "openai" # Provider span should preserve timing from the gateway trace gateway_provider_span = next( s for s in gateway_traces[0].data.spans if s.name == "provider/openai/gpt-4" ) assert provider_span.start_time_ns == gateway_provider_span.start_time_ns assert provider_span.end_time_ns == gateway_provider_span.end_time_ns # Neither span should have request/response payloads assert gw_span.inputs is None assert gw_span.outputs is None assert provider_span.inputs is None assert provider_span.outputs is None @pytest.mark.asyncio async def test_maybe_traced_gateway_call_streaming_with_traceparent(gateway_experiment_id): ep_config = GatewayEndpointConfig( endpoint_id="test-endpoint-id", endpoint_name="test-endpoint", experiment_id=gateway_experiment_id, models=[], ) async def mock_stream_with_usage(payload): with mlflow.start_span("provider/openai/gpt-4", span_type=SpanType.LLM) as child: child.set_attributes( { SpanAttributeKey.CHAT_USAGE: { "input_tokens": 20, "output_tokens": 10, "total_tokens": 30, }, SpanAttributeKey.MODEL: "gpt-4", SpanAttributeKey.MODEL_PROVIDER: "openai", } ) yield _make_chunk(content="Hello") yield _make_chunk(content=" world", finish_reason="stop") # Agent creates headers with mlflow.start_span("agent-root") as agent_span: headers = get_tracing_context_headers_for_http_request() agent_trace_id = agent_span.trace_id agent_span_id = agent_span.span_id # Gateway processes request (separate context) traced = maybe_traced_gateway_call( mock_stream_with_usage, ep_config, output_reducer=aggregate_chat_stream_chunks, request_headers=headers, ) chunks = [chunk async for chunk in traced({"input": "test"})] assert len(chunks) == 2 # Gateway trace should exist gateway_traces = TracingClient().search_traces(locations=[gateway_experiment_id]) assert len(gateway_traces) == 1 gateway_trace_id = gateway_traces[0].info.trace_id assert gateway_trace_id != agent_trace_id # Agent trace should contain two distributed spans (gateway + provider) mlflow.flush_trace_async_logging() agent_trace = mlflow.get_trace(agent_trace_id) assert agent_trace is not None spans_by_name = {s.name: s for s in agent_trace.data.spans} assert "agent-root" in spans_by_name assert f"gateway/{ep_config.endpoint_name}" in spans_by_name assert "provider/openai/gpt-4" in spans_by_name # Gateway span: child of agent root, has endpoint attrs + link gw_span = spans_by_name[f"gateway/{ep_config.endpoint_name}"] assert gw_span.parent_id == agent_span_id assert gw_span.attributes.get("endpoint_id") == ep_config.endpoint_id assert gw_span.attributes.get("endpoint_name") == ep_config.endpoint_name assert gw_span.attributes.get(SpanAttributeKey.LINKED_GATEWAY_TRACE_ID) == gateway_trace_id # Provider span: child of gateway span, has provider attrs provider_span = spans_by_name["provider/openai/gpt-4"] assert provider_span.parent_id == gw_span.span_id assert provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) == { "input_tokens": 20, "output_tokens": 10, "total_tokens": 30, } assert provider_span.attributes.get(SpanAttributeKey.MODEL) == "gpt-4" assert provider_span.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "openai" # Provider span should preserve timing from the gateway trace gateway_provider_span = next( s for s in gateway_traces[0].data.spans if s.name == "provider/openai/gpt-4" ) assert provider_span.start_time_ns == gateway_provider_span.start_time_ns assert provider_span.end_time_ns == gateway_provider_span.end_time_ns # Neither span should have request/response payloads assert gw_span.inputs is None assert gw_span.outputs is None assert provider_span.inputs is None assert provider_span.outputs is None @pytest.mark.asyncio async def test_maybe_traced_gateway_call_with_traceparent_multiple_providers(gateway_experiment_id): ep_config = GatewayEndpointConfig( endpoint_id="test-endpoint-id", endpoint_name="test-endpoint", experiment_id=gateway_experiment_id, models=[], ) async def func_with_multiple_providers(payload): with mlflow.start_span("provider/openai/gpt-4", span_type=SpanType.LLM) as child: child.set_attributes( { SpanAttributeKey.CHAT_USAGE: { "input_tokens": 10, "output_tokens": 5, "total_tokens": 15, }, SpanAttributeKey.MODEL: "gpt-4", SpanAttributeKey.MODEL_PROVIDER: "openai", } ) with mlflow.start_span("provider/anthropic/claude-3", span_type=SpanType.LLM) as child: child.set_attributes( { SpanAttributeKey.CHAT_USAGE: { "input_tokens": 20, "output_tokens": 10, "total_tokens": 30, }, SpanAttributeKey.MODEL: "claude-3", SpanAttributeKey.MODEL_PROVIDER: "anthropic", } ) return {"result": "ok"} with mlflow.start_span("agent-root") as agent_span: headers = get_tracing_context_headers_for_http_request() agent_trace_id = agent_span.trace_id traced = maybe_traced_gateway_call( func_with_multiple_providers, ep_config, request_headers=headers ) await traced({"input": "test"}) mlflow.flush_trace_async_logging() agent_trace = mlflow.get_trace(agent_trace_id) assert agent_trace is not None spans_by_name = {s.name: s for s in agent_trace.data.spans} gw_span = spans_by_name[f"gateway/{ep_config.endpoint_name}"] # Both provider spans should be children of the gateway span provider_openai = spans_by_name["provider/openai/gpt-4"] assert provider_openai.parent_id == gw_span.span_id assert provider_openai.attributes.get(SpanAttributeKey.MODEL) == "gpt-4" assert provider_openai.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "openai" assert provider_openai.attributes.get(SpanAttributeKey.CHAT_USAGE) == { "input_tokens": 10, "output_tokens": 5, "total_tokens": 15, } provider_anthropic = spans_by_name["provider/anthropic/claude-3"] assert provider_anthropic.parent_id == gw_span.span_id assert provider_anthropic.attributes.get(SpanAttributeKey.MODEL) == "claude-3" assert provider_anthropic.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "anthropic" assert provider_anthropic.attributes.get(SpanAttributeKey.CHAT_USAGE) == { "input_tokens": 20, "output_tokens": 10, "total_tokens": 30, } # Provider spans should preserve timing from the gateway trace gateway_traces = TracingClient().search_traces(locations=[gateway_experiment_id]) assert len(gateway_traces) == 1 gw_spans_by_name = {s.name: s for s in gateway_traces[0].data.spans} gw_openai = gw_spans_by_name["provider/openai/gpt-4"] assert provider_openai.start_time_ns == gw_openai.start_time_ns assert provider_openai.end_time_ns == gw_openai.end_time_ns gw_anthropic = gw_spans_by_name["provider/anthropic/claude-3"] assert provider_anthropic.start_time_ns == gw_anthropic.start_time_ns assert provider_anthropic.end_time_ns == gw_anthropic.end_time_ns
{ "repo_id": "mlflow/mlflow", "file_path": "tests/gateway/test_tracing_utils.py", "license": "Apache License 2.0", "lines": 516, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/gateway/tracing_utils.py
import dataclasses import functools import inspect import logging from collections.abc import Callable from typing import Any import mlflow from mlflow.entities import SpanStatus, SpanType from mlflow.entities.trace_location import MlflowExperimentLocation from mlflow.gateway.config import GatewayRequestType from mlflow.gateway.schemas.chat import StreamResponsePayload from mlflow.store.tracking.gateway.entities import GatewayEndpointConfig from mlflow.tracing.constant import SpanAttributeKey, TraceMetadataKey from mlflow.tracing.distributed import set_tracing_context_from_http_request_headers from mlflow.tracing.trace_manager import InMemoryTraceManager _logger = logging.getLogger(__name__) @dataclasses.dataclass class _ModelSpanInfo: name: str attributes: dict[str, Any] status: SpanStatus | None = None start_time_ns: int | None = None end_time_ns: int | None = None def _maybe_unwrap_single_arg_input(args: tuple[Any], kwargs: dict[str, Any]): """Unwrap inputs so trace shows the request body directly. Extracts the payload kwarg if present, otherwise unwraps single-argument inputs. """ span = mlflow.get_current_active_span() if not span: return # For passthrough endpoints with kwargs, extract the payload key # This takes precedence to handle cases where functions are called with # keyword arguments (e.g., action=..., payload=..., headers=...) if "payload" in kwargs: span.set_inputs(kwargs["payload"]) # For other endpoints with a single positional argument elif len(args) == 1 and not kwargs: span.set_inputs(args[0]) def _has_traceparent(headers: dict[str, str]) -> bool: return "traceparent" in headers or "Traceparent" in headers def _gateway_span_name(endpoint_config: GatewayEndpointConfig) -> str: return f"gateway/{endpoint_config.endpoint_name}" def _gateway_span_attributes( endpoint_config: GatewayEndpointConfig, request_headers: dict[str, str] | None = None, ) -> dict[str, str]: attrs = { "endpoint_id": endpoint_config.endpoint_id, "endpoint_name": endpoint_config.endpoint_name, } if request_headers: if host := request_headers.get("host") or request_headers.get("Host"): attrs["server_url"] = host return attrs _MODEL_SPAN_ATTRIBUTE_KEYS = [ SpanAttributeKey.CHAT_USAGE, SpanAttributeKey.LLM_COST, SpanAttributeKey.MODEL, SpanAttributeKey.MODEL_PROVIDER, ] def _get_model_span_info(gateway_trace_id: str) -> list[_ModelSpanInfo]: """Read name and attributes from non-root model spans within a gateway trace.""" trace_manager = InMemoryTraceManager.get_instance() results: list[_ModelSpanInfo] = [] with trace_manager.get_trace(gateway_trace_id) as trace: if trace is None: return results for span in trace.span_dict.values(): if span.parent_id is None: continue attrs = {} for key in _MODEL_SPAN_ATTRIBUTE_KEYS: if value := span.get_attribute(key): attrs[key] = value if attrs: results.append( _ModelSpanInfo( name=span.name, attributes=attrs, status=span.status, start_time_ns=span.start_time_ns, end_time_ns=span.end_time_ns, ) ) return results def _maybe_create_distributed_span( request_headers: dict[str, str] | None, endpoint_config: GatewayEndpointConfig, ) -> None: """Create lightweight mirror spans under the caller's distributed trace. When a ``traceparent`` header is present the gateway already records a full trace (with payloads) in its own experiment. This helper attaches a *summary* to the caller's trace so that the caller can see gateway activity without duplicating large request/response bodies. The resulting shape in the caller's trace looks like:: [caller span] └── gateway/<endpoint> # attributes: endpoint info + linked trace id ├── model/<provider>/<m> # attributes: usage, model, status └── model/<provider>/<m> # (one per non-root gateway span) """ if not request_headers or not _has_traceparent(request_headers): return gateway_trace_id = None if span := mlflow.get_current_active_span(): gateway_trace_id = span.trace_id model_infos = _get_model_span_info(gateway_trace_id) if gateway_trace_id else [] try: with set_tracing_context_from_http_request_headers(request_headers): with mlflow.start_span( name=_gateway_span_name(endpoint_config), span_type=SpanType.LLM, ) as gw_span: attrs = _gateway_span_attributes(endpoint_config, request_headers) if gateway_trace_id: attrs[SpanAttributeKey.LINKED_GATEWAY_TRACE_ID] = gateway_trace_id gw_span.set_attributes(attrs) for info in model_infos: model_span = mlflow.start_span_no_context( name=info.name, span_type=SpanType.LLM, parent_span=gw_span, attributes=info.attributes, start_time_ns=info.start_time_ns, ) model_span.end( status=info.status, end_time_ns=info.end_time_ns, ) except Exception: _logger.debug("Failed to create distributed trace span for gateway call", exc_info=True) def maybe_traced_gateway_call( func: Callable[..., Any], endpoint_config: GatewayEndpointConfig, metadata: dict[str, Any] | None = None, output_reducer: Callable[[list[Any]], Any] | None = None, request_headers: dict[str, str] | None = None, request_type: GatewayRequestType | None = None, on_complete: Callable[[], None] | None = None, ) -> Callable[..., Any]: """ Wrap a gateway function with tracing. Args: func: The function to trace. endpoint_config: The gateway endpoint configuration. metadata: Additional metadata to include in the trace (e.g., auth user info). output_reducer: A function to aggregate streaming chunks into a single output. request_headers: HTTP request headers; if they contain a traceparent header, a span will also be created under the agent's distributed trace. request_type: The type of gateway request (e.g., GatewayRequestType.CHAT). on_complete: A no-arg callback invoked inside the trace context after the provider call completes (in ``finally``). Returns: A traced version of the function. Usage: result = await traced_gateway_call(provider.chat, endpoint_config)(payload) """ if not endpoint_config.experiment_id: return func trace_kwargs = { "name": _gateway_span_name(endpoint_config), "attributes": _gateway_span_attributes(endpoint_config, request_headers), "output_reducer": output_reducer, "trace_destination": MlflowExperimentLocation(endpoint_config.experiment_id), } # Build combined metadata with gateway-specific fields combined_metadata = metadata.copy() if metadata else {} combined_metadata[TraceMetadataKey.GATEWAY_ENDPOINT_ID] = endpoint_config.endpoint_id if request_type: combined_metadata[TraceMetadataKey.GATEWAY_REQUEST_TYPE] = request_type # Wrap function to set metadata inside the trace context if inspect.isasyncgenfunction(func): @functools.wraps(func) async def wrapper(*args, **kwargs): mlflow.update_current_trace(metadata=combined_metadata) _maybe_unwrap_single_arg_input(args, kwargs) try: async for item in func(*args, **kwargs): yield item finally: if on_complete: try: on_complete() except Exception: _logger.debug("on_complete callback failed", exc_info=True) _maybe_create_distributed_span(request_headers, endpoint_config) elif inspect.iscoroutinefunction(func): @functools.wraps(func) async def wrapper(*args, **kwargs): mlflow.update_current_trace(metadata=combined_metadata) _maybe_unwrap_single_arg_input(args, kwargs) try: result = await func(*args, **kwargs) finally: if on_complete: try: on_complete() except Exception: _logger.debug("on_complete callback failed", exc_info=True) _maybe_create_distributed_span(request_headers, endpoint_config) return result else: @functools.wraps(func) def wrapper(*args, **kwargs): mlflow.update_current_trace(metadata=combined_metadata) _maybe_unwrap_single_arg_input(args, kwargs) try: result = func(*args, **kwargs) finally: if on_complete: try: on_complete() except Exception: _logger.debug("on_complete callback failed", exc_info=True) _maybe_create_distributed_span(request_headers, endpoint_config) return result return mlflow.trace(wrapper, **trace_kwargs) def aggregate_chat_stream_chunks(chunks: list[StreamResponsePayload]) -> dict[str, Any] | None: """ Aggregate streaming chat completion chunks into a single ChatCompletion-like response. Returns: A ChatCompletion-like response. """ if not chunks: return None # Group state per choice index choices_state: dict[int, dict[str, Any]] = {} for chunk in chunks: for choice in chunk.choices: state = choices_state.setdefault( choice.index, { "role": None, "content_parts": [], "tool_calls_by_index": {}, "finish_reason": None, }, ) delta = choice.delta if delta.role and state["role"] is None: state["role"] = delta.role if delta.content: state["content_parts"].append(delta.content) if choice.finish_reason: state["finish_reason"] = choice.finish_reason if delta.tool_calls: for tc_delta in delta.tool_calls: tc = state["tool_calls_by_index"].setdefault( tc_delta.index, {"id": None, "type": "function", "name": "", "arguments": ""}, ) if tc_delta.id: tc["id"] = tc_delta.id if tc_delta.type: tc["type"] = tc_delta.type if tc_delta.function: if tc_delta.function.name: tc["name"] += tc_delta.function.name if tc_delta.function.arguments: tc["arguments"] += tc_delta.function.arguments aggregated_choices = [] for choice_index, state in sorted(choices_state.items()): message: dict[str, Any] = { "role": state["role"] or "assistant", "content": "".join(state["content_parts"]) or None, } if state["tool_calls_by_index"]: message["tool_calls"] = [ { "id": tc["id"], "type": tc["type"], "function": {"name": tc["name"], "arguments": tc["arguments"]}, } for tc in state["tool_calls_by_index"].values() ] aggregated_choices.append( { "index": choice_index, "message": message, "finish_reason": state["finish_reason"] or "stop", } ) last_chunk = chunks[-1] result = { "id": last_chunk.id, "object": "chat.completion", "created": last_chunk.created, "model": last_chunk.model, "choices": aggregated_choices, } if last_chunk.usage: result["usage"] = { "prompt_tokens": last_chunk.usage.prompt_tokens, "completion_tokens": last_chunk.usage.completion_tokens, "total_tokens": last_chunk.usage.total_tokens, } return result
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/gateway/tracing_utils.py", "license": "Apache License 2.0", "lines": 296, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/cli/datasets.py
import json from typing import Any, Literal import click from mlflow import MlflowClient from mlflow.environment_variables import MLFLOW_EXPERIMENT_ID from mlflow.utils.string_utils import _create_table from mlflow.utils.time import conv_longdate_to_str EXPERIMENT_ID = click.option( "--experiment-id", "-x", envvar=MLFLOW_EXPERIMENT_ID.name, type=click.STRING, required=True, help="Experiment ID to list datasets for. Can be set via MLFLOW_EXPERIMENT_ID env var.", ) def _format_datasets_as_json(datasets) -> dict[str, Any]: """Format datasets as a JSON-serializable dictionary.""" return { "datasets": [ { "dataset_id": ds.dataset_id, "name": ds.name, "digest": ds.digest, "created_time": ds.created_time, "last_update_time": ds.last_update_time, "created_by": ds.created_by, "last_updated_by": ds.last_updated_by, "tags": ds.tags, } for ds in datasets ], "next_page_token": datasets.token, } def _format_datasets_as_table(datasets) -> tuple[list[list[str]], list[str]]: """Format datasets as table rows with headers.""" headers = ["Dataset ID", "Name", "Created", "Last Updated", "Created By"] rows = [] for ds in datasets: created = conv_longdate_to_str(ds.created_time) if ds.created_time else "" updated = conv_longdate_to_str(ds.last_update_time) if ds.last_update_time else "" rows.append([ds.dataset_id, ds.name, created, updated, ds.created_by or ""]) return rows, headers @click.group("datasets") def commands(): """Manage GenAI evaluation datasets.""" @commands.command("list") @EXPERIMENT_ID @click.option( "--filter-string", type=click.STRING, help="Filter string (e.g., \"name LIKE 'qa_%'\").", ) @click.option( "--max-results", type=click.INT, default=50, help="Maximum results (default: 50).", ) @click.option( "--order-by", type=click.STRING, help="Columns to order by (e.g., 'last_update_time DESC').", ) @click.option( "--page-token", type=click.STRING, help="Pagination token.", ) @click.option( "--output", type=click.Choice(["table", "json"]), default="table", help="Output format.", ) def list_datasets( experiment_id: str, filter_string: str | None = None, max_results: int = 50, order_by: str | None = None, page_token: str | None = None, output: Literal["table", "json"] = "table", ) -> None: """ List GenAI evaluation datasets associated with an experiment. \b Examples: # List datasets in experiment 1 mlflow datasets list --experiment-id 1 \b # Using environment variable export MLFLOW_EXPERIMENT_ID=1 mlflow datasets list --max-results 10 \b # Filter datasets by name pattern mlflow datasets list --experiment-id 1 --filter-string "name LIKE 'qa_%'" \b # Order results by last update time mlflow datasets list --experiment-id 1 --order-by "last_update_time DESC" \b # Output as JSON mlflow datasets list --experiment-id 1 --output json """ client = MlflowClient() order_by_list = [o.strip() for o in order_by.split(",")] if order_by else None datasets = client.search_datasets( experiment_ids=[experiment_id], filter_string=filter_string, max_results=max_results, order_by=order_by_list, page_token=page_token, ) if output == "json": result = _format_datasets_as_json(datasets) click.echo(json.dumps(result, indent=2)) else: rows, headers = _format_datasets_as_table(datasets) click.echo(_create_table(rows, headers=headers)) if datasets.token: click.echo(f"\nNext page token: {datasets.token}")
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/cli/datasets.py", "license": "Apache License 2.0", "lines": 119, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/cli/test_datasets.py
import json import pytest from click.testing import CliRunner import mlflow from mlflow.cli.datasets import commands from mlflow.genai.datasets import create_dataset @pytest.fixture def runner(): return CliRunner(catch_exceptions=False) @pytest.fixture def experiment(): exp_id = mlflow.create_experiment("test_datasets_cli") yield exp_id mlflow.delete_experiment(exp_id) @pytest.fixture def dataset_a(experiment): return create_dataset( name="dataset_a", experiment_id=experiment, tags={"env": "production"}, ) @pytest.fixture def dataset_b(experiment): return create_dataset( name="dataset_b", experiment_id=experiment, tags={"env": "staging"}, ) def test_commands_group_exists(): assert commands.name == "datasets" assert commands.help is not None def test_list_command_params(): list_cmd = next((cmd for cmd in commands.commands.values() if cmd.name == "list"), None) assert list_cmd is not None param_names = {p.name for p in list_cmd.params} expected_params = { "experiment_id", "filter_string", "max_results", "order_by", "page_token", "output", } assert param_names == expected_params def test_list_datasets_table_output(runner: CliRunner, experiment: str, dataset_a): result = runner.invoke(commands, ["list", "--experiment-id", experiment]) assert result.exit_code == 0 assert dataset_a.dataset_id in result.output assert "dataset_a" in result.output def test_list_datasets_json_output(runner: CliRunner, experiment: str, dataset_a): result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--output", "json"]) assert result.exit_code == 0 expected = { "datasets": [ { "dataset_id": dataset_a.dataset_id, "name": "dataset_a", "digest": dataset_a.digest, "created_time": dataset_a.created_time, "last_update_time": dataset_a.last_update_time, "created_by": dataset_a.created_by, "last_updated_by": dataset_a.last_updated_by, "tags": dataset_a.tags, } ], "next_page_token": None, } assert json.loads(result.output) == expected def test_list_datasets_empty_results(runner: CliRunner, experiment: str): result = runner.invoke(commands, ["list", "--experiment-id", experiment]) assert result.exit_code == 0 def test_list_datasets_json_empty_results(runner: CliRunner, experiment: str): result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--output", "json"]) assert result.exit_code == 0 output_json = json.loads(result.output) assert output_json == {"datasets": [], "next_page_token": None} def test_list_datasets_with_experiment_id_env_var(runner: CliRunner, experiment: str, dataset_a): result = runner.invoke(commands, ["list"], env={"MLFLOW_EXPERIMENT_ID": experiment}) assert result.exit_code == 0 assert dataset_a.dataset_id in result.output def test_list_datasets_missing_experiment_id(runner: CliRunner): result = runner.invoke(commands, ["list"]) assert result.exit_code != 0 assert "Missing option '--experiment-id' / '-x'" in result.output def test_list_datasets_invalid_output_format(runner: CliRunner, experiment: str): result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--output", "invalid"]) assert result.exit_code != 0 assert "'invalid' is not one of 'table', 'json'" in result.output def test_list_datasets_with_filter_string(runner: CliRunner, experiment: str, dataset_a, dataset_b): result = runner.invoke( commands, ["list", "--experiment-id", experiment, "--filter-string", "name = 'dataset_a'"], ) assert result.exit_code == 0 assert "dataset_a" in result.output assert "dataset_b" not in result.output def test_list_datasets_with_max_results(runner: CliRunner, experiment: str, dataset_a, dataset_b): result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--max-results", "1"]) assert result.exit_code == 0 output_lines = [line for line in result.output.split("\n") if "dataset_" in line] assert len(output_lines) == 1 def test_list_datasets_with_order_by(runner: CliRunner, experiment: str, dataset_a, dataset_b): result = runner.invoke( commands, ["list", "--experiment-id", experiment, "--order-by", "name ASC"] ) assert result.exit_code == 0 a_pos = result.output.find("dataset_a") b_pos = result.output.find("dataset_b") assert a_pos < b_pos def test_list_datasets_short_option_x(runner: CliRunner, experiment: str, dataset_a): result = runner.invoke(commands, ["list", "-x", experiment]) assert result.exit_code == 0 assert dataset_a.dataset_id in result.output def test_list_datasets_multiple_datasets(runner: CliRunner, experiment: str, dataset_a, dataset_b): result = runner.invoke(commands, ["list", "--experiment-id", experiment]) assert result.exit_code == 0 assert dataset_a.dataset_id in result.output assert "dataset_a" in result.output assert dataset_b.dataset_id in result.output assert "dataset_b" in result.output def test_list_datasets_json_multiple_datasets( runner: CliRunner, experiment: str, dataset_a, dataset_b ): result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--output", "json"]) assert result.exit_code == 0 output_json = json.loads(result.output) assert len(output_json["datasets"]) == 2 names = {d["name"] for d in output_json["datasets"]} assert names == {"dataset_a", "dataset_b"}
{ "repo_id": "mlflow/mlflow", "file_path": "tests/cli/test_datasets.py", "license": "Apache License 2.0", "lines": 129, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/gateway/providers/test_tracing.py
import time from typing import Any from unittest import mock import pytest import mlflow from mlflow.entities.trace_state import TraceState from mlflow.gateway.providers.base import BaseProvider, PassthroughAction from mlflow.gateway.schemas import chat, embeddings from mlflow.tracing.client import TracingClient from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey from mlflow.tracking.fluent import _get_experiment_id def get_traces(): return TracingClient().search_traces(locations=[_get_experiment_id()]) class MockProvider(BaseProvider): """Mock provider for testing tracing functionality built into BaseProvider.""" NAME = "MockProvider" class MockConfig: pass CONFIG_TYPE = MockConfig def __init__(self, enable_tracing: bool = False): self.config = mock.MagicMock() self.config.model.name = "mock-model" self._enable_tracing = enable_tracing # These will be set by tests to control behavior self._chat_response = None self._chat_stream_chunks = None self._chat_error = None self._embeddings_response = None self._passthrough_response = None self._passthrough_error = None async def _chat(self, payload: chat.RequestPayload) -> chat.ResponsePayload: if self._chat_error: raise self._chat_error return self._chat_response async def _chat_stream(self, payload: chat.RequestPayload): for chunk in self._chat_stream_chunks: if isinstance(chunk, Exception): raise chunk yield chunk async def _embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload: return self._embeddings_response async def _passthrough( self, action: PassthroughAction, payload: dict[str, Any], headers: dict[str, Any] | None = None, ) -> dict[str, Any]: if self._passthrough_error: raise self._passthrough_error return self._passthrough_response @pytest.fixture def mock_provider(): return MockProvider(enable_tracing=True) async def _collect_chunks(async_gen): return [chunk async for chunk in async_gen] @pytest.mark.asyncio async def test_chat_stream_captures_usage_from_final_chunk(mock_provider): # Set up mock chunks - final chunk has usage mock_provider._chat_stream_chunks = [ chat.StreamResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="Hello"))], ), chat.StreamResponsePayload( id="2", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content=" world"))], ), chat.StreamResponsePayload( id="3", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="!"))], usage=chat.ChatUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15), ), ] # Create a parent trace context so the provider creates spans @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await _collect_chunks(mock_provider.chat_stream(payload)) chunks = await traced_operation() # Verify all chunks were yielded assert len(chunks) == 3 # Get traces and verify traces = get_traces() assert len(traces) == 1 trace = traces[0] assert trace.info.state == TraceState.OK # Find the provider span (child of the root span) span_name_to_span = {span.name: span for span in trace.data.spans} assert "traced_operation" in span_name_to_span assert "provider/MockProvider/mock-model" in span_name_to_span provider_span = span_name_to_span["provider/MockProvider/mock-model"] assert provider_span.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "MockProvider" assert provider_span.attributes.get(SpanAttributeKey.MODEL) == "mock-model" assert provider_span.attributes.get("method") == "chat_stream" assert provider_span.attributes.get("streaming") is True # Verify usage was captured token_usage = provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) assert token_usage is not None assert token_usage[TokenUsageKey.INPUT_TOKENS] == 10 assert token_usage[TokenUsageKey.OUTPUT_TOKENS] == 5 assert token_usage[TokenUsageKey.TOTAL_TOKENS] == 15 @pytest.mark.asyncio async def test_chat_stream_captures_cached_tokens(mock_provider): usage = chat.ChatUsage( prompt_tokens=50, completion_tokens=20, total_tokens=70, prompt_tokens_details=chat.PromptTokensDetails(cached_tokens=30), cache_creation_input_tokens=10, ) mock_provider._chat_stream_chunks = [ chat.StreamResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="Hello"))], usage=usage, ), ] @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await _collect_chunks(mock_provider.chat_stream(payload)) await traced_operation() traces = get_traces() provider_span = {s.name: s for s in traces[0].data.spans}["provider/MockProvider/mock-model"] token_usage = provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) assert token_usage[TokenUsageKey.INPUT_TOKENS] == 50 assert token_usage[TokenUsageKey.OUTPUT_TOKENS] == 20 assert token_usage[TokenUsageKey.TOTAL_TOKENS] == 70 assert token_usage[TokenUsageKey.CACHE_READ_INPUT_TOKENS] == 30 assert token_usage[TokenUsageKey.CACHE_CREATION_INPUT_TOKENS] == 10 @pytest.mark.asyncio async def test_chat_non_streaming_captures_cached_tokens(mock_provider): mock_provider._chat_response = chat.ResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[ chat.Choice( index=0, message=chat.ResponseMessage(role="assistant", content="Hello!"), finish_reason="stop", ) ], usage=chat.ChatUsage( prompt_tokens=50, completion_tokens=20, total_tokens=70, prompt_tokens_details=chat.PromptTokensDetails(cached_tokens=30), ), ) @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await mock_provider.chat(payload) await traced_operation() traces = get_traces() provider_span = {s.name: s for s in traces[0].data.spans}["provider/MockProvider/mock-model"] token_usage = provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) assert token_usage[TokenUsageKey.INPUT_TOKENS] == 50 assert token_usage[TokenUsageKey.OUTPUT_TOKENS] == 20 assert token_usage[TokenUsageKey.TOTAL_TOKENS] == 70 assert token_usage[TokenUsageKey.CACHE_READ_INPUT_TOKENS] == 30 @pytest.mark.asyncio async def test_chat_stream_without_usage(mock_provider): mock_provider._chat_stream_chunks = [ chat.StreamResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="Hello"))], ), chat.StreamResponsePayload( id="2", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content=" world"))], ), ] @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await _collect_chunks(mock_provider.chat_stream(payload)) chunks = await traced_operation() assert len(chunks) == 2 traces = get_traces() assert len(traces) == 1 trace = traces[0] assert trace.info.state == TraceState.OK span_name_to_span = {span.name: span for span in trace.data.spans} provider_span = span_name_to_span["provider/MockProvider/mock-model"] # Verify no usage attributes were set assert provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) is None @pytest.mark.asyncio async def test_chat_stream_no_active_span(mock_provider): mock_provider._chat_stream_chunks = [ chat.StreamResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="Hello"))], ), chat.StreamResponsePayload( id="2", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content=" world"))], ), ] # Call without a parent trace context payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) chunks = await _collect_chunks(mock_provider.chat_stream(payload)) assert len(chunks) == 2 # No traces should be created traces = get_traces() assert len(traces) == 0 @pytest.mark.asyncio async def test_chat_stream_handles_error(mock_provider): mock_provider._chat_stream_chunks = [ chat.StreamResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="Hello"))], ), ValueError("Stream error"), # Error will be raised ] @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await _collect_chunks(mock_provider.chat_stream(payload)) with pytest.raises(ValueError, match="Stream error"): await traced_operation() traces = get_traces() assert len(traces) == 1 trace = traces[0] # The root span should have error status because the exception propagated assert trace.info.state == TraceState.ERROR span_name_to_span = {span.name: span for span in trace.data.spans} provider_span = span_name_to_span["provider/MockProvider/mock-model"] # Verify error was captured as an exception event exception_events = [e for e in provider_span.events if e.name == "exception"] assert len(exception_events) == 1 assert exception_events[0].attributes["exception.message"] == "Stream error" assert exception_events[0].attributes["exception.type"] == "ValueError" @pytest.mark.asyncio async def test_chat_stream_partial_usage(mock_provider): mock_provider._chat_stream_chunks = [ chat.StreamResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[chat.StreamChoice(index=0, delta=chat.StreamDelta(content="!"))], usage=chat.ChatUsage(prompt_tokens=10, completion_tokens=None, total_tokens=None), ), ] @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await _collect_chunks(mock_provider.chat_stream(payload)) await traced_operation() traces = get_traces() assert len(traces) == 1 trace = traces[0] span_name_to_span = {span.name: span for span in trace.data.spans} provider_span = span_name_to_span["provider/MockProvider/mock-model"] # Verify only input_tokens was set (partial usage) token_usage = provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) assert token_usage is not None assert token_usage[TokenUsageKey.INPUT_TOKENS] == 10 assert TokenUsageKey.OUTPUT_TOKENS not in token_usage assert TokenUsageKey.TOTAL_TOKENS not in token_usage @pytest.mark.asyncio async def test_chat_non_streaming(mock_provider): mock_provider._chat_response = chat.ResponsePayload( id="1", created=int(time.time()), model="mock-model", choices=[ chat.Choice( index=0, message=chat.ResponseMessage(role="assistant", content="Hello!"), finish_reason="stop", ) ], usage=chat.ChatUsage(prompt_tokens=5, completion_tokens=3, total_tokens=8), ) @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await mock_provider.chat(payload) result = await traced_operation() assert result.choices[0].message.content == "Hello!" traces = get_traces() assert len(traces) == 1 trace = traces[0] assert trace.info.state == TraceState.OK span_name_to_span = {span.name: span for span in trace.data.spans} provider_span = span_name_to_span["provider/MockProvider/mock-model"] assert provider_span.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "MockProvider" assert provider_span.attributes.get(SpanAttributeKey.MODEL) == "mock-model" assert provider_span.attributes.get("method") == "chat" # Non-streaming should not have streaming attribute assert provider_span.attributes.get("streaming") is None # Verify usage was captured token_usage = provider_span.attributes.get(SpanAttributeKey.CHAT_USAGE) assert token_usage is not None assert token_usage[TokenUsageKey.INPUT_TOKENS] == 5 assert token_usage[TokenUsageKey.OUTPUT_TOKENS] == 3 assert token_usage[TokenUsageKey.TOTAL_TOKENS] == 8 @pytest.mark.asyncio async def test_chat_non_streaming_error(mock_provider): mock_provider._chat_error = RuntimeError("Method failed") @mlflow.trace async def traced_operation(): payload = chat.RequestPayload(messages=[chat.RequestMessage(role="user", content="Hi")]) return await mock_provider.chat(payload) with pytest.raises(RuntimeError, match="Method failed"): await traced_operation() traces = get_traces() assert len(traces) == 1 trace = traces[0] span_name_to_span = {span.name: span for span in trace.data.spans} provider_span = span_name_to_span["provider/MockProvider/mock-model"] # Verify error was captured as an exception event exception_events = [e for e in provider_span.events if e.name == "exception"] assert len(exception_events) == 1 assert exception_events[0].attributes["exception.message"] == "Method failed" assert exception_events[0].attributes["exception.type"] == "RuntimeError" @pytest.mark.asyncio async def test_embeddings(mock_provider): mock_provider._embeddings_response = embeddings.ResponsePayload( data=[embeddings.EmbeddingObject(embedding=[0.1, 0.2, 0.3], index=0)], model="mock-model", usage=embeddings.EmbeddingsUsage(prompt_tokens=4, total_tokens=4), ) @mlflow.trace async def traced_operation(): payload = embeddings.RequestPayload(input="Hello") return await mock_provider.embeddings(payload) result = await traced_operation() assert len(result.data) == 1 assert result.data[0].embedding == [0.1, 0.2, 0.3] traces = get_traces() assert len(traces) == 1 trace = traces[0] assert trace.info.state == TraceState.OK span_name_to_span = {span.name: span for span in trace.data.spans} provider_span = span_name_to_span["provider/MockProvider/mock-model"] assert provider_span.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "MockProvider" assert provider_span.attributes.get(SpanAttributeKey.MODEL) == "mock-model" assert provider_span.attributes.get("method") == "embeddings" @pytest.mark.asyncio async def test_passthrough_with_tracing(mock_provider): mock_provider._passthrough_response = {"id": "1", "result": "success"} result = await mock_provider.passthrough( action=PassthroughAction.OPENAI_CHAT, payload={"messages": [{"role": "user", "content": "Hi"}]}, ) assert result == {"id": "1", "result": "success"} # Passthrough with @mlflow.trace creates its own trace traces = get_traces() assert len(traces) == 1 trace = traces[0] assert trace.info.state == TraceState.OK # The span should have provider attributes and action span_name_to_span = {span.name: span for span in trace.data.spans} assert "provider/MockProvider/mock-model" in span_name_to_span passthrough_span = span_name_to_span["provider/MockProvider/mock-model"] assert passthrough_span.attributes.get(SpanAttributeKey.MODEL_PROVIDER) == "MockProvider" assert passthrough_span.attributes.get(SpanAttributeKey.MODEL) == "mock-model" assert passthrough_span.attributes.get("action") == "openai_chat" @pytest.mark.asyncio async def test_passthrough_without_tracing(): provider = MockProvider(enable_tracing=False) provider._passthrough_response = {"id": "1", "result": "success"} result = await provider.passthrough( action=PassthroughAction.OPENAI_CHAT, payload={"messages": [{"role": "user", "content": "Hi"}]}, ) assert result == {"id": "1", "result": "success"} # No traces should be created when tracing is disabled traces = get_traces() assert len(traces) == 0 @pytest.mark.asyncio async def test_passthrough_error_with_tracing(mock_provider): mock_provider._passthrough_error = RuntimeError("Passthrough failed") with pytest.raises(RuntimeError, match="Passthrough failed"): await mock_provider.passthrough( action=PassthroughAction.OPENAI_CHAT, payload={"messages": [{"role": "user", "content": "Hi"}]}, ) traces = get_traces() assert len(traces) == 1 trace = traces[0] assert trace.info.state == TraceState.ERROR
{ "repo_id": "mlflow/mlflow", "file_path": "tests/gateway/providers/test_tracing.py", "license": "Apache License 2.0", "lines": 403, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/cli/demo.py
import contextlib import logging import os import threading import time import webbrowser from collections.abc import Generator from pathlib import Path from urllib.parse import urljoin import click NOISY_LOGGERS = [ "alembic", "mlflow.store", "mlflow.tracking", "mlflow.tracing", "mlflow.genai", "mlflow.server", "httpx", "httpcore", "urllib3", "uvicorn", "huey", ] @contextlib.contextmanager def _suppress_noisy_logs() -> Generator[None, None, None]: original_levels: dict[str, int] = {} try: for logger_name in NOISY_LOGGERS: logger = logging.getLogger(logger_name) original_levels[logger_name] = logger.level logger.setLevel(logging.WARNING) yield finally: for logger_name, level in original_levels.items(): logging.getLogger(logger_name).setLevel(level) def _set_quiet_logging() -> None: logging.getLogger().setLevel(logging.WARNING) for logger_name in NOISY_LOGGERS: logging.getLogger(logger_name).setLevel(logging.WARNING) # Set environment variable so MLflow configures logging in subprocesses # This affects mlflow, alembic, and huey loggers via _configure_mlflow_loggers os.environ["MLFLOW_LOGGING_LEVEL"] = "WARNING" def _check_server_connection(tracking_uri: str, max_retries: int = 3, timeout: int = 5) -> None: """Check if the MLflow tracking server is reachable. Args: tracking_uri: URL of the tracking server. max_retries: Maximum number of connection attempts. timeout: Timeout in seconds for each connection attempt. Raises: click.ClickException: If the server is not reachable after all retries. """ import requests from mlflow.utils.request_utils import _get_http_response_with_retries health_url = urljoin(tracking_uri.rstrip("/") + "/", "health") try: response = _get_http_response_with_retries( method="GET", url=health_url, max_retries=max_retries, backoff_factor=1, backoff_jitter=0.5, retry_codes=(408, 429, 500, 502, 503, 504), timeout=timeout, raise_on_status=False, ) response.close() except requests.exceptions.ConnectionError as e: raise click.ClickException( f"Cannot connect to MLflow server at {tracking_uri}\n" f"Error: {e}\n\n" f"Please verify:\n" f" 1. The server is running\n" f" 2. The URL is correct\n" f" 3. No firewall is blocking the connection" ) from None except requests.exceptions.Timeout: raise click.ClickException( f"Connection to MLflow server at {tracking_uri} timed out.\n\n" f"Please verify the server is running and responsive." ) from None except requests.exceptions.RequestException as e: raise click.ClickException( f"Failed to connect to MLflow server at {tracking_uri}\nError: {e}" ) from None @click.command() @click.option( "--port", default=None, type=int, help="Port to run demo server on (only used when starting a new server).", ) @click.option( "--tracking-uri", default=None, help="Tracking URI of an existing MLflow server to populate with demo data.", ) @click.option( "--no-browser", is_flag=True, default=False, help="Don't automatically open browser to demo experiment.", ) @click.option( "--debug", is_flag=True, default=False, help="Enable verbose logging output.", ) @click.option( "--refresh", is_flag=True, default=False, help="Force regenerate demo data by deleting existing data first.", ) def demo( port: int | None, tracking_uri: str | None, no_browser: bool, debug: bool, refresh: bool, ) -> None: """Launch MLflow with pre-populated demo data for exploring GenAI features. By default, creates a persistent environment in ./mlflow-demo/ with SQLite database and file-based artifacts, generates demo data, and opens the browser to the demo experiment. Data persists across restarts; use --refresh to regenerate. To populate an existing MLflow server with demo data, use --tracking-uri: mlflow demo # Launch new demo server mlflow demo --no-browser # Launch without opening browser mlflow demo --port 5001 # Use custom port mlflow demo --tracking-uri http://localhost:5000 # Use existing server """ if tracking_uri is None: tracking_uri = _get_tracking_uri_interactive(port) if tracking_uri is None: _run_with_new_server(port, no_browser, debug, refresh) else: _run_with_existing_server(tracking_uri, no_browser, debug, refresh) def _get_tracking_uri_interactive(port: int | None) -> str | None: click.echo() click.secho("MLflow Demo Setup", fg="cyan", bold=True) click.echo() use_existing = click.confirm( click.style("Do you have an MLflow server already running?", fg="bright_blue"), default=False, ) if use_existing: return click.prompt( click.style("Enter the tracking server URL", fg="bright_blue"), default="http://localhost:5000", ) return None def _run_with_existing_server( tracking_uri: str, no_browser: bool, debug: bool, refresh: bool ) -> None: import mlflow from mlflow.demo import generate_all_demos from mlflow.demo.base import DEMO_EXPERIMENT_NAME click.echo() click.echo(f"Connecting to MLflow server at {tracking_uri}... ", nl=False) _check_server_connection(tracking_uri) click.secho("connected!", fg="green") mlflow.set_tracking_uri(tracking_uri) click.echo("Generating demo data... ", nl=False) if debug: results = generate_all_demos(refresh=refresh) else: with _suppress_noisy_logs(): results = generate_all_demos(refresh=refresh) click.secho("done!", fg="green") if results: click.echo(f" Generated: {', '.join(r.feature for r in results)}") else: click.echo(" Demo data already exists (skipped generation).") experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is None: raise click.ClickException( f"Demo experiment '{DEMO_EXPERIMENT_NAME}' not found. " "This should not happen after generating demo data." ) experiment_url = f"{tracking_uri.rstrip('/')}/#/experiments/{experiment.experiment_id}/overview" click.echo() click.secho(f"View the demo at: {experiment_url}", fg="green", bold=True) if not no_browser: click.echo() click.echo("Opening the MLflow UI...") webbrowser.open(experiment_url) def _run_with_new_server(port: int | None, no_browser: bool, debug: bool, refresh: bool) -> None: import mlflow from mlflow.demo import generate_all_demos from mlflow.demo.base import DEMO_EXPERIMENT_NAME from mlflow.server import _run_server from mlflow.server.handlers import initialize_backend_stores from mlflow.utils import find_free_port, is_port_available # Suppress noisy logs early (before any initialization) unless debug mode if not debug: _set_quiet_logging() if port is None: port = find_free_port() elif not is_port_available(port): raise click.ClickException( f"Port {port} is already in use. " f"Either stop the process using that port, " f"or run: mlflow demo --port <DIFFERENT_PORT>" ) demo_dir = Path.cwd() / "mlflow-demo" demo_dir.mkdir(exist_ok=True) db_path = demo_dir / "mlflow.db" artifact_path = demo_dir / "artifacts" artifact_path.mkdir(exist_ok=True) backend_uri = f"sqlite:///{db_path}" artifact_uri = artifact_path.as_uri() os.environ["MLFLOW_TRACKING_URI"] = backend_uri click.echo() click.echo("Initializing demo environment... ", nl=False) initialize_backend_stores(backend_uri, backend_uri, artifact_uri) click.secho("done!", fg="green") click.echo("Generating demo data... ", nl=False) results = generate_all_demos(refresh=refresh) click.secho("done!", fg="green") if results: click.echo(f" Generated: {', '.join(r.feature for r in results)}") experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is None: raise click.ClickException( f"Demo experiment '{DEMO_EXPERIMENT_NAME}' not found. " "This should not happen after generating demo data." ) experiment_url = f"http://127.0.0.1:{port}/#/experiments/{experiment.experiment_id}/overview" if not no_browser: def open_browser(): time.sleep(1.5) webbrowser.open(experiment_url) threading.Thread(target=open_browser, daemon=True, name="DemoBrowserOpener").start() click.echo() click.secho(f"MLflow Tracking Server running at: http://127.0.0.1:{port}", fg="green") click.secho(f"View the demo at: {experiment_url}", fg="green", bold=True) click.echo() click.echo("Press Ctrl+C to stop the server.") click.echo() _run_server( file_store_path=backend_uri, registry_store_uri=backend_uri, default_artifact_root=artifact_uri, serve_artifacts=True, artifacts_only=False, artifacts_destination=None, host="127.0.0.1", port=port, workers=1, uvicorn_opts="--log-level warning" if not debug else None, )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/cli/demo.py", "license": "Apache License 2.0", "lines": 251, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/demo/test_api_routes.py
from pathlib import Path import pytest import requests import mlflow from mlflow.server import handlers from mlflow.server.fastapi_app import app from mlflow.server.handlers import initialize_backend_stores from tests.helper_functions import get_safe_port from tests.tracking.integration_test_utils import ServerThread @pytest.fixture def tracking_server(db_uri: str, tmp_path: Path): handlers._tracking_store = None handlers._model_registry_store = None initialize_backend_stores(db_uri, default_artifact_root=tmp_path.as_uri()) with ServerThread(app, get_safe_port()) as url: mlflow.set_tracking_uri(url) yield url mlflow.set_tracking_uri(None) def test_generate_demo_route_creates_data(tracking_server): response = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/generate") assert response.status_code == 200 data = response.json() assert data["status"] == "created" assert data["experiment_id"] is not None assert len(data["features_generated"]) > 0 assert data["navigation_url"] is not None def test_generate_demo_route_idempotent(tracking_server): response1 = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/generate") assert response1.status_code == 200 data1 = response1.json() assert data1["status"] == "created" response2 = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/generate") assert response2.status_code == 200 data2 = response2.json() assert data2["status"] == "exists" assert data2["features_generated"] == [] def test_delete_demo_route_removes_data(tracking_server): requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/generate") response = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/delete") assert response.status_code == 200 data = response.json() assert data["status"] == "deleted" assert len(data["features_deleted"]) > 0 def test_delete_demo_route_when_no_data(tracking_server): response = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/delete") assert response.status_code == 200 data = response.json() assert data["status"] == "deleted" assert data["features_deleted"] == [] def test_generate_after_delete_regenerates(tracking_server): response1 = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/generate") assert response1.json()["status"] == "created" requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/delete") response2 = requests.post(f"{tracking_server}/ajax-api/3.0/mlflow/demo/generate") assert response2.status_code == 200 data = response2.json() assert data["status"] == "created" assert len(data["features_generated"]) > 0 def test_get_demo_endpoints_returns_routes(): from mlflow.server.handlers import get_demo_endpoints endpoints = get_demo_endpoints() assert len(endpoints) == 2 paths = [path for path, _, _ in endpoints] assert any("/mlflow/demo/generate" in path for path in paths) assert any("/mlflow/demo/delete" in path for path in paths) def test_get_endpoints_includes_demo(): from mlflow.server.handlers import get_endpoints endpoints = get_endpoints() paths = [path for path, _, _ in endpoints] assert any("/mlflow/demo/generate" in path for path in paths) assert any("/mlflow/demo/delete" in path for path in paths)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/demo/test_api_routes.py", "license": "Apache License 2.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/demo/test_cli.py
"""Tests for the mlflow demo CLI command. Includes both quick help/registration tests and functional tests that invoke the actual CLI command with a mocked server. """ import socket import sys from unittest import mock import click import pytest from click.testing import CliRunner import mlflow from mlflow.cli import cli from mlflow.cli.demo import _check_server_connection, demo from mlflow.demo.base import DEMO_EXPERIMENT_NAME, DEMO_PROMPT_PREFIX from mlflow.demo.generators.traces import DEMO_VERSION_TAG from mlflow.demo.registry import demo_registry from mlflow.genai.datasets import search_datasets from mlflow.genai.prompts import search_prompts @pytest.fixture(autouse=True) def disable_quiet_logging(monkeypatch): """Prevent CLI from modifying logging state during tests.""" demo_module = sys.modules["mlflow.cli.demo"] monkeypatch.setattr(demo_module, "_set_quiet_logging", lambda: None) def test_demo_command_registered(): runner = CliRunner() result = runner.invoke(cli, ["demo", "--help"]) assert result.exit_code == 0 assert "Launch MLflow with pre-populated demo data" in result.output def test_demo_command_help_shows_options(): runner = CliRunner() result = runner.invoke(demo, ["--help"]) assert result.exit_code == 0 assert "--port" in result.output assert "--no-browser" in result.output def test_demo_command_port_option(): runner = CliRunner() result = runner.invoke(demo, ["--help"]) assert result.exit_code == 0 assert "Port to run demo server on" in result.output def test_cli_generates_all_registered_features(): runner = CliRunner() with mock.patch("mlflow.server._run_server"): result = runner.invoke(demo, ["--no-browser"], input="n\n") assert result.exit_code == 0 assert "Generated:" in result.output tracking_uri = mlflow.get_tracking_uri() mlflow.set_tracking_uri(tracking_uri) registered_names = set(demo_registry.list_generators()) for name in registered_names: assert name in result.output def test_cli_creates_experiment(): runner = CliRunner() with mock.patch("mlflow.server._run_server"): result = runner.invoke(demo, ["--no-browser"], input="n\n") assert result.exit_code == 0 experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) assert experiment is not None assert experiment.lifecycle_stage == "active" def test_cli_creates_traces(): runner = CliRunner() with mock.patch("mlflow.server._run_server"): result = runner.invoke(demo, ["--no-browser"], input="n\n") assert result.exit_code == 0 experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) client = mlflow.MlflowClient() all_traces = client.search_traces( locations=[experiment.experiment_id], max_results=200, ) # Filter for demo traces only (exclude evaluation traces created by evaluate()) demo_traces = [t for t in all_traces if t.info.trace_metadata.get(DEMO_VERSION_TAG)] assert len(demo_traces) == 34 def test_cli_creates_evaluation_datasets(): runner = CliRunner() with mock.patch("mlflow.server._run_server"): result = runner.invoke(demo, ["--no-browser"], input="n\n") assert result.exit_code == 0 experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) datasets = search_datasets( experiment_ids=[experiment.experiment_id], filter_string="name LIKE 'demo-%'", max_results=10, ) assert len(datasets) == 3 def test_cli_creates_prompts(): runner = CliRunner() with mock.patch("mlflow.server._run_server"): result = runner.invoke(demo, ["--no-browser"], input="n\n") assert result.exit_code == 0 prompts = search_prompts( filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'", max_results=100, ) assert len(prompts) == 3 def test_cli_shows_server_url(): runner = CliRunner() with mock.patch("mlflow.server._run_server"): result = runner.invoke(demo, ["--no-browser"], input="n\n") assert result.exit_code == 0 assert "MLflow Tracking Server running at:" in result.output assert "View the demo at:" in result.output def test_cli_respects_port_option(): runner = CliRunner() with mock.patch("mlflow.server._run_server") as mock_server: result = runner.invoke(demo, ["--no-browser", "--port", "5555"], input="n\n") assert result.exit_code == 0 assert "http://127.0.0.1:5555" in result.output mock_server.assert_called_once() assert mock_server.call_args.kwargs["port"] == 5555 def test_cli_port_in_use_error(): runner = CliRunner() with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(("127.0.0.1", 0)) bound_port = s.getsockname()[1] result = runner.invoke(demo, ["--port", str(bound_port)], input="n\n") assert result.exit_code != 0 assert "already in use" in result.output def test_cli_unreachable_server_error(): runner = CliRunner() # Use a URL that won't have a server running result = runner.invoke(demo, ["--tracking-uri", "http://localhost:59999"]) assert result.exit_code != 0 assert "Cannot connect to MLflow server" in result.output assert "Please verify" in result.output def test_check_server_connection_fails_for_bad_url(): with pytest.raises(click.ClickException, match="Cannot connect to MLflow server"): _check_server_connection("http://localhost:59999", max_retries=1, timeout=1)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/demo/test_cli.py", "license": "Apache License 2.0", "lines": 127, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/simulators/distillation.py
from __future__ import annotations import logging from concurrent.futures import ThreadPoolExecutor, as_completed from typing import TYPE_CHECKING import pydantic from mlflow.environment_variables import MLFLOW_GENAI_EVAL_MAX_WORKERS from mlflow.genai.simulators.prompts import DISTILL_GOAL_AND_PERSONA_PROMPT from mlflow.genai.simulators.simulator import _MODEL_API_DOC, PGBAR_FORMAT from mlflow.genai.simulators.utils import ( format_history, get_default_simulation_model, invoke_model_without_tracing, ) from mlflow.genai.utils.trace_utils import resolve_conversation_from_session from mlflow.utils.annotations import experimental from mlflow.utils.docstring_utils import format_docstring try: from tqdm.auto import tqdm except ImportError: tqdm = None if TYPE_CHECKING: from mlflow.entities import Trace from mlflow.entities.session import Session _logger = logging.getLogger(__name__) class _GoalAndPersona(pydantic.BaseModel): goal: str = pydantic.Field(description="The user's underlying goal in the conversation") persona: str | None = pydantic.Field( default=None, description="A description of the user's communication style and personality", ) simulation_guidelines: list[str] | None = pydantic.Field( default=None, description="List of guidelines for how a simulated user should conduct this conversation", ) def _distill_goal_and_persona( session: "Session | list[Trace]", model: str, ) -> dict[str, str] | None: from mlflow.entities.session import Session from mlflow.types.llm import ChatMessage traces = session.traces if isinstance(session, Session) else session messages = resolve_conversation_from_session(traces) if not messages: return None prompt = DISTILL_GOAL_AND_PERSONA_PROMPT.format(conversation=format_history(messages)) try: response = invoke_model_without_tracing( model_uri=model, messages=[ChatMessage(role="user", content=prompt)], response_format=_GoalAndPersona, ) result = _GoalAndPersona.model_validate_json(response) if not result.goal: _logger.debug(f"Empty goal extracted from response: {response}") return None test_case = {"goal": result.goal} if result.persona: test_case["persona"] = result.persona if result.simulation_guidelines: test_case["simulation_guidelines"] = result.simulation_guidelines return test_case except pydantic.ValidationError as e: _logger.debug(f"Failed to validate response: {e}") return None @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) def generate_test_cases( sessions: "list[Session] | list[list[Trace]]", *, model: str | None = None, ) -> list[dict[str, str]]: """ Generate seed test cases by distilling goals and personas from existing sessions. This function analyzes sessions and uses an LLM to infer the user's goal and persona from each session. This is useful for generating test cases from existing conversation data rather than manually writing goals and personas. .. note:: This task benefits from a powerful model. We recommend using ``openai:/gpt-5`` or a model of similar capability for best results. Args: sessions: A list of :py:class:`~mlflow.entities.session.Session` objects or a list of trace lists (where each inner list contains traces from one session). model: {{ model }} Returns: A list of dicts with "goal", "persona", and "simulation_guidelines" keys, suitable for use with :py:class:`~mlflow.genai.simulators.ConversationSimulator`. Example: .. code-block:: python import mlflow from mlflow.genai.simulators import generate_test_cases from mlflow.genai.simulators import ConversationSimulator # Get existing sessions sessions = mlflow.search_sessions(...) # Generate seed test cases test_cases = generate_test_cases(sessions) # Use the generated test cases with ConversationSimulator simulator = ConversationSimulator(test_cases=test_cases) To save test cases as an evaluation dataset for reuse: .. code-block:: python from mlflow.genai.datasets import create_dataset # Create a dataset and save the test cases dataset = create_dataset(name="my_test_cases") dataset.merge_records([{"inputs": tc} for tc in test_cases]) """ model = model or get_default_simulation_model() num_sessions = len(sessions) results: list[dict[str, str] | None] = [None] * num_sessions max_workers = min(num_sessions, MLFLOW_GENAI_EVAL_MAX_WORKERS.get()) progress_bar = ( tqdm( total=num_sessions, desc="Generating test cases", bar_format=PGBAR_FORMAT, ) if tqdm else None ) with ThreadPoolExecutor( max_workers=max_workers, thread_name_prefix="MlflowTestCaseGeneration", ) as executor: futures = { executor.submit(_distill_goal_and_persona, session, model): i for i, session in enumerate(sessions) } try: for future in as_completed(futures): idx = futures[future] try: results[idx] = future.result() except Exception as e: _logger.error(f"Failed to distill test case for session {idx}: {e}") if progress_bar: progress_bar.update(1) finally: if progress_bar: progress_bar.close() return [r for r in results if r is not None]
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/simulators/distillation.py", "license": "Apache License 2.0", "lines": 138, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/simulators/utils.py
from __future__ import annotations import json import logging from contextlib import contextmanager from typing import TYPE_CHECKING, Any import mlflow from mlflow.exceptions import MlflowException from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import ( call_chat_completions, create_litellm_message_from_databricks_response, serialize_messages_to_databricks_prompts, ) from mlflow.genai.judges.constants import ( _DATABRICKS_AGENTIC_JUDGE_MODEL, _DATABRICKS_DEFAULT_JUDGE_MODEL, ) from mlflow.genai.utils.gateway_utils import get_gateway_litellm_config from mlflow.tracking import get_tracking_uri from mlflow.utils.uri import is_databricks_uri if TYPE_CHECKING: from mlflow.types.llm import ChatMessage _logger = logging.getLogger(__name__) _DEFAULT_SIMULATION_MODEL = "openai:/gpt-5" def get_default_simulation_model() -> str: if is_databricks_uri(get_tracking_uri()): return _DATABRICKS_AGENTIC_JUDGE_MODEL return _DEFAULT_SIMULATION_MODEL @contextmanager def delete_trace_if_created(): """Delete at most one trace created within this context to avoid polluting user traces.""" trace_id_before = mlflow.get_last_active_trace_id(thread_local=True) try: yield finally: trace_id_after = mlflow.get_last_active_trace_id(thread_local=True) if trace_id_after and trace_id_after != trace_id_before: try: mlflow.delete_trace(trace_id_after) except Exception as e: _logger.debug(f"Failed to delete trace {trace_id_after}: {e}") def invoke_model_without_tracing( model_uri: str, messages: list[ChatMessage], num_retries: int = 3, inference_params: dict[str, Any] | None = None, response_format: type | None = None, ) -> str: """ Invoke a model without tracing. This method will delete the last trace created by the invocation, if any. """ import litellm from mlflow.metrics.genai.model_utils import _parse_model_uri with delete_trace_if_created(): if model_uri == _DATABRICKS_DEFAULT_JUDGE_MODEL: user_prompt, system_prompt = serialize_messages_to_databricks_prompts(messages) result = call_chat_completions( user_prompt=user_prompt, system_prompt=system_prompt or ".", model=_DATABRICKS_AGENTIC_JUDGE_MODEL, ) if getattr(result, "error_code", None): raise MlflowException( f"Failed to get chat completions result from Databricks managed endpoint: " f"[{result.error_code}] {result.error_message}" ) output_json = result.output_json if not output_json: raise MlflowException("Empty response from Databricks managed endpoint") parsed_json = json.loads(output_json) if isinstance(output_json, str) else output_json return create_litellm_message_from_databricks_response(parsed_json).content provider, model_name = _parse_model_uri(model_uri) litellm_messages = [litellm.Message(role=msg.role, content=msg.content) for msg in messages] kwargs = { "messages": litellm_messages, "max_retries": num_retries, "drop_params": True, } if provider == "gateway": config = get_gateway_litellm_config(model_name) kwargs["api_base"] = config.api_base kwargs["api_key"] = config.api_key kwargs["model"] = config.model if config.extra_headers: kwargs["extra_headers"] = config.extra_headers else: kwargs["model"] = f"{provider}/{model_name}" if inference_params: kwargs.update(inference_params) if response_format is not None: kwargs["response_format"] = response_format try: response = litellm.completion(**kwargs) return response.choices[0].message.content except Exception as e: error_str = str(e) if inference_params and "Unsupported value: 'temperature'" in error_str: kwargs.pop("temperature", None) response = litellm.completion(**kwargs) return response.choices[0].message.content else: raise def format_history(history: list[dict[str, Any]]) -> str | None: if not history: return None formatted = [] for msg in history: role = msg.get("role") or "unknown" content = msg.get("content") or "" formatted.append(f"{role}: {content}") return "\n".join(formatted)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/simulators/utils.py", "license": "Apache License 2.0", "lines": 111, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/simulators/test_distillation.py
from unittest import mock import pydantic import pytest from mlflow.entities.session import Session from mlflow.genai.simulators.distillation import ( _distill_goal_and_persona, _GoalAndPersona, generate_test_cases, ) @pytest.fixture def mock_session(): trace = mock.MagicMock() return Session([trace]) def test_goal_and_persona_model_goal_required(): with pytest.raises(pydantic.ValidationError, match="goal"): _GoalAndPersona.model_validate({}) @pytest.mark.parametrize( ("input_data", "expected_goal", "expected_persona", "expected_guidelines"), [ ({"goal": "Test goal"}, "Test goal", None, None), ({"goal": "Test goal", "persona": "Friendly user"}, "Test goal", "Friendly user", None), ( { "goal": "Test goal", "persona": "Friendly user", "simulation_guidelines": ["Be concise", "Ask follow-ups"], }, "Test goal", "Friendly user", ["Be concise", "Ask follow-ups"], ), ], ) def test_goal_and_persona_model_validation( input_data, expected_goal, expected_persona, expected_guidelines ): result = _GoalAndPersona.model_validate(input_data) assert result.goal == expected_goal assert result.persona == expected_persona assert result.simulation_guidelines == expected_guidelines def test_distill_returns_none_for_empty_conversation(mock_session): with mock.patch( "mlflow.genai.simulators.distillation.resolve_conversation_from_session", return_value=[], ): result = _distill_goal_and_persona(mock_session, model="openai:/gpt-4o") assert result is None @pytest.mark.parametrize( ("llm_response", "expected"), [ ( '{"goal": "Get help", "persona": "Data scientist"}', {"goal": "Get help", "persona": "Data scientist"}, ), ('{"goal": "Get help"}', {"goal": "Get help"}), ( '{"goal": "Get help", "persona": "Engineer", ' '"simulation_guidelines": ["Start with a vague request", "Be concise"]}', { "goal": "Get help", "persona": "Engineer", "simulation_guidelines": ["Start with a vague request", "Be concise"], }, ), ], ) def test_distill_extracts_goal_and_persona(mock_session, llm_response, expected): with ( mock.patch( "mlflow.genai.simulators.distillation.resolve_conversation_from_session", return_value=[{"role": "user", "content": "Hello"}], ), mock.patch( "mlflow.genai.simulators.distillation.invoke_model_without_tracing", return_value=llm_response, ), ): result = _distill_goal_and_persona(mock_session, model="openai:/gpt-4o") assert result == expected @pytest.mark.parametrize( "llm_response", [ '{"goal": "", "persona": "Test"}', # empty goal "invalid json", # validation error ], ) def test_distill_returns_none_for_invalid_response(mock_session, llm_response): with ( mock.patch( "mlflow.genai.simulators.distillation.resolve_conversation_from_session", return_value=[{"role": "user", "content": "Hello"}], ), mock.patch( "mlflow.genai.simulators.distillation.invoke_model_without_tracing", return_value=llm_response, ), ): result = _distill_goal_and_persona(mock_session, model="openai:/gpt-4o") assert result is None def test_generate_test_cases_processes_multiple_sessions(): sessions = [mock.MagicMock(), mock.MagicMock()] with mock.patch( "mlflow.genai.simulators.distillation._distill_goal_and_persona", side_effect=[ {"goal": "Goal 1", "persona": "Persona 1"}, {"goal": "Goal 2"}, ], ) as mock_distill: result = generate_test_cases(sessions, model="openai:/gpt-4o") assert len(result) == 2 assert result[0] == {"goal": "Goal 1", "persona": "Persona 1"} assert result[1] == {"goal": "Goal 2"} assert mock_distill.call_count == 2 def test_generate_test_cases_filters_out_none_results(): sessions = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()] with mock.patch( "mlflow.genai.simulators.distillation._distill_goal_and_persona", side_effect=[{"goal": "Goal 1"}, None, {"goal": "Goal 3"}], ): result = generate_test_cases(sessions, model="openai:/gpt-4o") assert len(result) == 2 assert result[0] == {"goal": "Goal 1"} assert result[1] == {"goal": "Goal 3"} def test_generate_test_cases_uses_default_model_when_not_specified(): sessions = [mock.MagicMock()] with ( mock.patch( "mlflow.genai.simulators.distillation.get_default_simulation_model", return_value="openai:/gpt-5", ) as mock_get_model, mock.patch( "mlflow.genai.simulators.distillation._distill_goal_and_persona", return_value={"goal": "Test"}, ) as mock_distill, ): generate_test_cases(sessions) mock_get_model.assert_called_once() mock_distill.assert_called_once_with(sessions[0], "openai:/gpt-5") def test_generate_test_cases_handles_exceptions_gracefully(): sessions = [mock.MagicMock(), mock.MagicMock()] with mock.patch( "mlflow.genai.simulators.distillation._distill_goal_and_persona", side_effect=[Exception("Test error"), {"goal": "Goal 2"}], ): result = generate_test_cases(sessions, model="openai:/gpt-4o") assert len(result) == 1 assert result[0] == {"goal": "Goal 2"} def test_distill_accepts_list_of_traces(): traces = [mock.MagicMock(), mock.MagicMock()] with ( mock.patch( "mlflow.genai.simulators.distillation.resolve_conversation_from_session", return_value=[{"role": "user", "content": "Hello"}], ) as mock_resolve, mock.patch( "mlflow.genai.simulators.distillation.invoke_model_without_tracing", return_value='{"goal": "Get help"}', ), ): result = _distill_goal_and_persona(traces, model="openai:/gpt-4o") assert result == {"goal": "Get help"} mock_resolve.assert_called_once_with(traces) def test_generate_test_cases_accepts_list_of_trace_lists(): trace1 = mock.MagicMock() trace2 = mock.MagicMock() sessions = [[trace1], [trace2]] with mock.patch( "mlflow.genai.simulators.distillation._distill_goal_and_persona", side_effect=[{"goal": "Goal 1"}, {"goal": "Goal 2"}], ) as mock_distill: result = generate_test_cases(sessions, model="openai:/gpt-4o") assert len(result) == 2 assert mock_distill.call_count == 2 mock_distill.assert_any_call([trace1], "openai:/gpt-4o") mock_distill.assert_any_call([trace2], "openai:/gpt-4o")
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/simulators/test_distillation.py", "license": "Apache License 2.0", "lines": 175, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/simulators/test_utils.py
from unittest import mock import pytest from mlflow.genai.simulators.utils import ( format_history, get_default_simulation_model, invoke_model_without_tracing, ) @pytest.mark.parametrize( ("history", "expected"), [ ([], None), ([{"role": "user", "content": "Hello"}], "user: Hello"), ( [ {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}, {"role": "user", "content": "How are you?"}, ], "user: Hello\nassistant: Hi there!\nuser: How are you?", ), ([{"content": "Hello"}], "unknown: Hello"), ([{"role": "user"}], "user: "), ([{"role": None, "content": None}], "unknown: "), ], ) def test_format_history(history, expected): assert format_history(history) == expected @pytest.mark.parametrize( "model_uri", [ "openai:/gpt-4o-mini", "anthropic:/claude-3-haiku", ], ) def test_invoke_model_without_tracing_with_litellm(model_uri): from mlflow.types.llm import ChatMessage messages = [ChatMessage(role="user", content="Hello")] with mock.patch("litellm.completion") as mock_completion: mock_completion.return_value = mock.MagicMock( choices=[mock.MagicMock(message=mock.MagicMock(content="Hi there!"))] ) result = invoke_model_without_tracing(model_uri=model_uri, messages=messages) assert result == "Hi there!" mock_completion.assert_called_once() def test_invoke_model_without_tracing_with_inference_params(): from mlflow.types.llm import ChatMessage messages = [ChatMessage(role="user", content="Hello")] with mock.patch("litellm.completion") as mock_completion: mock_completion.return_value = mock.MagicMock( choices=[mock.MagicMock(message=mock.MagicMock(content="Response"))] ) invoke_model_without_tracing( model_uri="openai:/gpt-4o-mini", messages=messages, inference_params={"temperature": 0.5}, ) call_kwargs = mock_completion.call_args[1] assert call_kwargs["temperature"] == 0.5 def test_invoke_model_without_tracing_with_databricks(): from mlflow.types.llm import ChatMessage messages = [ChatMessage(role="user", content="Hello")] with ( mock.patch("mlflow.genai.simulators.utils.call_chat_completions") as mock_call, mock.patch( "mlflow.genai.simulators.utils.create_litellm_message_from_databricks_response" ) as mock_create, ): mock_call.return_value = mock.MagicMock(error_code=None, output_json='{"content": "Hi"}') mock_create.return_value = mock.MagicMock(content="Hi from Databricks") result = invoke_model_without_tracing(model_uri="databricks", messages=messages) assert result == "Hi from Databricks" mock_call.assert_called_once() def test_get_default_simulation_model_non_databricks(): with mock.patch("mlflow.genai.simulators.utils.is_databricks_uri", return_value=False): model = get_default_simulation_model() assert model == "openai:/gpt-5" def test_get_default_simulation_model_databricks(): with mock.patch("mlflow.genai.simulators.utils.is_databricks_uri", return_value=True): model = get_default_simulation_model() assert model == "gpt-oss-120b"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/simulators/test_utils.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:examples/livekit/voice_agent.py
import logging import os from livekit.agents import JobContext, JobProcess, WorkerOptions, cli from livekit.agents.telemetry import set_tracer_provider from livekit.agents.voice import Agent, AgentSession from livekit.plugins import openai, silero from opentelemetry import trace from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor logging.basicConfig(level=logging.INFO) logger = logging.getLogger("voice-agent") def configure_mlflow_tracing(): """Configure OpenTelemetry to send traces to MLflow.""" if not os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT"): logger.warning("OTEL_EXPORTER_OTLP_ENDPOINT not set, tracing disabled") return None service_name = os.environ.get("OTEL_SERVICE_NAME", "livekit-voice-agent") resource = Resource.create({SERVICE_NAME: service_name}) provider = TracerProvider(resource=resource) provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter())) trace.set_tracer_provider(provider) set_tracer_provider(provider) logger.info("MLflow tracing configured successfully!") return provider async def entrypoint(ctx: JobContext) -> None: """Main entrypoint for the agent.""" logger.info(f"Agent starting for room: {ctx.room.name}") # Connect to the room await ctx.connect() # Create the voice agent with all components agent = Agent( instructions="""You are a helpful voice assistant. Keep your responses concise and conversational since you're speaking out loud. Be friendly and helpful.""", vad=silero.VAD.load(), # Voice Activity Detection stt=openai.STT(), # Speech-to-Text (Whisper) llm=openai.LLM(model="gpt-4o-mini"), # Language Model tts=openai.TTS(voice="alloy"), # Text-to-Speech ) # Create and start the agent session session = AgentSession() await session.start(agent, room=ctx.room) logger.info("Agent session started! Ready for conversation.") def prewarm(proc: JobProcess) -> None: """Prewarm function to load models before handling requests.""" # Configure tracing before anything else configure_mlflow_tracing() # Preload Silero VAD model for faster startup proc.userdata["vad"] = silero.VAD.load() logger.info("Prewarmed VAD model") if __name__ == "__main__": cli.run_app( WorkerOptions( entrypoint_fnc=entrypoint, prewarm_fnc=prewarm, ) )
{ "repo_id": "mlflow/mlflow", "file_path": "examples/livekit/voice_agent.py", "license": "Apache License 2.0", "lines": 59, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/tracing/otel/translation/livekit.py
""" Translation utilities for LiveKit Agents semantic conventions. LiveKit Agents (Python SDK) provides real-time AI voice agents with built-in OpenTelemetry support. This translator maps LiveKit's span attributes to MLflow's semantic conventions for optimal visualization. Reference: - https://docs.livekit.io/agents/observability/ - https://github.com/livekit/agents """ from typing import Any from mlflow.entities.span import SpanType from mlflow.tracing.otel.translation.genai_semconv import GenAiTranslator class LiveKitTranslator(GenAiTranslator): """ Translator for LiveKit Agents semantic conventions. LiveKit Agents generates spans for various AI agent operations including: - Agent sessions and turns - LLM inference calls - Speech-to-text (STT) processing - Text-to-speech (TTS) synthesis - Voice activity detection (VAD) - Function/tool calls This translator maps LiveKit's span attributes to MLflow's span types and extracts relevant metadata for visualization. Inherits from GenAiTranslator since LiveKit uses GenAI semantic conventions for token usage and event-based message extraction. """ # LiveKit-specific input/output attribute keys (in addition to GenAI semconv) INPUT_VALUE_KEYS = [ "lk.user_input", "lk.user_transcript", "lk.chat_ctx", "lk.input_text", ] OUTPUT_VALUE_KEYS = [ "lk.response.text", "lk.response.function_calls", ] # LiveKit-specific attribute keys for detection DETECTION_KEYS = [ "lk.agent_name", "lk.room_name", "lk.job_id", "lk.participant_identity", ] # Message format for chat UI rendering MESSAGE_FORMAT = "livekit" def translate_span_type(self, attributes: dict[str, Any]) -> str | None: """ Translate LiveKit span type to MLflow span type. LiveKit uses gen_ai.operation.name for LLM spans and can be inferred from attribute prefixes for other span types. Args: attributes: Dictionary of span attributes Returns: MLflow span type string or None if no mapping found """ if "gen_ai.request.model" in attributes: return SpanType.LLM if gen_ai_op := attributes.get("gen_ai.operation.name"): op_lower = str(self._decode_json_value(gen_ai_op)).lower() if mlflow_type := self.SPAN_KIND_TO_MLFLOW_TYPE.get(op_lower): return mlflow_type for key in attributes: if key == "lk.retry_count": return SpanType.LLM if key.startswith("lk.function_tool"): return SpanType.TOOL if key in ("lk.agent_name", "lk.instructions", "lk.generation_id"): return SpanType.AGENT if key.startswith("lk.tts") or key == "lk.input_text": return SpanType.UNKNOWN if key in ("lk.user_transcript", "lk.transcript_confidence", "lk.transcription_delay"): return SpanType.UNKNOWN return None def get_message_format(self, attributes: dict[str, Any]) -> str | None: """ Get message format identifier for LiveKit traces. Returns 'livekit' if LiveKit-specific attributes are detected, enabling proper chat UI rendering. Args: attributes: Dictionary of span attributes Returns: 'livekit' if LiveKit attributes detected, None otherwise """ for key in self.DETECTION_KEYS: if key in attributes: return self.MESSAGE_FORMAT if any(key.startswith("lk.") for key in attributes): return self.MESSAGE_FORMAT return None
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/livekit.py", "license": "Apache License 2.0", "lines": 91, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:tests/tracing/otel/test_livekit_translator.py
import json from unittest import mock import pytest from mlflow.entities.span import Span, SpanType from mlflow.tracing.constant import SpanAttributeKey from mlflow.tracing.otel.translation import ( translate_span_type_from_otel, translate_span_when_storing, ) from mlflow.tracing.otel.translation.livekit import LiveKitTranslator @pytest.mark.parametrize( ("attributes", "expected_type"), [ ({"gen_ai.request.model": "gpt-4o-mini"}, SpanType.LLM), ({"gen_ai.request.model": "gpt-4"}, SpanType.LLM), ({"gen_ai.operation.name": "chat"}, SpanType.CHAT_MODEL), ({"gen_ai.operation.name": "text_completion"}, SpanType.LLM), ({"gen_ai.operation.name": "generate_content"}, SpanType.LLM), ({"gen_ai.operation.name": '"chat"'}, SpanType.CHAT_MODEL), ], ) def test_livekit_span_type_from_genai_attributes(attributes, expected_type): translator = LiveKitTranslator() result = translator.translate_span_type(attributes) assert result == expected_type @pytest.mark.parametrize( ("attributes", "expected_type"), [ ({"lk.retry_count": 0}, SpanType.LLM), ({"lk.retry_count": 2}, SpanType.LLM), ({"lk.function_tool.name": "get_weather"}, SpanType.TOOL), ({"lk.function_tool.id": "call_123"}, SpanType.TOOL), ({"lk.function_tool.arguments": "{}"}, SpanType.TOOL), ({"lk.agent_name": "my_agent"}, SpanType.AGENT), ({"lk.instructions": "Be helpful"}, SpanType.AGENT), ({"lk.generation_id": "gen_123"}, SpanType.AGENT), ({"lk.tts.streaming": True}, SpanType.UNKNOWN), ({"lk.tts.label": "alloy"}, SpanType.UNKNOWN), ({"lk.input_text": "Hello world"}, SpanType.UNKNOWN), ({"lk.user_transcript": "Hello"}, SpanType.UNKNOWN), ({"lk.transcript_confidence": 0.95}, SpanType.UNKNOWN), ({"lk.transcription_delay": 100}, SpanType.UNKNOWN), ], ) def test_livekit_span_type_inferred_from_attributes(attributes, expected_type): translator = LiveKitTranslator() result = translator.translate_span_type(attributes) assert result == expected_type @pytest.mark.parametrize( "attributes", [ {"some.other.attribute": "value"}, {"random_key": "random_value"}, {}, ], ) def test_livekit_span_type_returns_none(attributes): translator = LiveKitTranslator() result = translator.translate_span_type(attributes) assert result is None @pytest.mark.parametrize( ("attributes", "expected_input", "expected_output"), [ ( { "gen_ai.usage.input_tokens": 100, "gen_ai.usage.output_tokens": 50, }, 100, 50, ), ( { "gen_ai.usage.input_tokens": 200, "gen_ai.usage.output_tokens": 100, }, 200, 100, ), ], ) def test_livekit_token_usage_extraction(attributes, expected_input, expected_output): translator = LiveKitTranslator() assert translator.get_input_tokens(attributes) == expected_input assert translator.get_output_tokens(attributes) == expected_output def test_livekit_token_usage_returns_none_when_missing(): translator = LiveKitTranslator() attributes = {"some.other.attribute": "value"} assert translator.get_input_tokens(attributes) is None assert translator.get_output_tokens(attributes) is None @pytest.mark.parametrize( "attributes", [ {"lk.agent_name": "voice_assistant"}, {"lk.room_name": "my_room"}, {"lk.job_id": "job_123"}, {"lk.participant_identity": "user_456"}, {"lk.retry_count": 0}, {"lk.user_input": "hello"}, {"lk.response.text": "hi there"}, ], ) def test_livekit_message_format_detection(attributes): translator = LiveKitTranslator() result = translator.get_message_format(attributes) assert result == "livekit" def test_livekit_message_format_returns_none_for_non_livekit(): translator = LiveKitTranslator() attributes = {"some.other.attribute": "value"} result = translator.get_message_format(attributes) assert result is None def test_livekit_get_input_from_events_with_system_and_user_messages(): translator = LiveKitTranslator() events = [ { "name": "gen_ai.system.message", "attributes": {"content": "You are a helpful assistant."}, }, { "name": "gen_ai.user.message", "attributes": {"content": "What is the capital of France?"}, }, ] result = translator.get_input_value_from_events(events) parsed = json.loads(result) assert len(parsed) == 2 assert parsed[0] == {"role": "system", "content": "You are a helpful assistant."} assert parsed[1] == {"role": "user", "content": "What is the capital of France?"} def test_livekit_get_input_from_events_with_assistant_context(): translator = LiveKitTranslator() events = [ { "name": "gen_ai.system.message", "attributes": {"content": "You are a helpful assistant."}, }, { "name": "gen_ai.user.message", "attributes": {"content": "What is the capital of France?"}, }, { "name": "gen_ai.assistant.message", "attributes": {"content": "The capital of France is Paris."}, }, { "name": "gen_ai.user.message", "attributes": {"content": "And what about Germany?"}, }, ] result = translator.get_input_value_from_events(events) parsed = json.loads(result) assert len(parsed) == 4 assert parsed[2] == {"role": "assistant", "content": "The capital of France is Paris."} assert parsed[3] == {"role": "user", "content": "And what about Germany?"} def test_livekit_get_output_from_events_with_choice(): translator = LiveKitTranslator() events = [ { "name": "gen_ai.choice", "attributes": { "role": "assistant", "content": "The capital of France is Paris.", }, }, ] result = translator.get_output_value_from_events(events) parsed = json.loads(result) assert len(parsed) == 1 assert parsed[0] == { "role": "assistant", "content": "The capital of France is Paris.", } def test_livekit_get_output_from_events_defaults_to_assistant_role(): translator = LiveKitTranslator() events = [ { "name": "gen_ai.choice", "attributes": {"content": "Hello there!"}, }, ] result = translator.get_output_value_from_events(events) parsed = json.loads(result) assert parsed[0]["role"] == "assistant" def test_livekit_empty_events_returns_none(): translator = LiveKitTranslator() assert translator.get_input_value_from_events([]) is None assert translator.get_output_value_from_events([]) is None def test_livekit_events_with_json_encoded_content(): translator = LiveKitTranslator() events = [ { "name": "gen_ai.user.message", "attributes": {"content": '"What is 2+2?"'}, }, ] result = translator.get_input_value_from_events(events) parsed = json.loads(result) # JSON-encoded content should be decoded assert parsed[0]["content"] == "What is 2+2?" def test_livekit_translate_span_with_genai_events(): span = mock.Mock(spec=Span) span.parent_id = "parent_123" span_dict = { "attributes": { "gen_ai.operation.name": "chat", "gen_ai.request.model": "gpt-4o-mini", "gen_ai.usage.input_tokens": 50, "gen_ai.usage.output_tokens": 100, "lk.agent_name": "voice_assistant", }, "events": [ { "name": "gen_ai.system.message", "attributes": {"content": "You are helpful."}, }, { "name": "gen_ai.user.message", "attributes": {"content": "Hello!"}, }, { "name": "gen_ai.choice", "attributes": {"content": "Hi there!"}, }, ], } span.to_dict.return_value = span_dict result = translate_span_when_storing(span) # Check inputs were extracted from events inputs = json.loads(result["attributes"][SpanAttributeKey.INPUTS]) assert len(inputs) == 2 assert inputs[0]["role"] == "system" assert inputs[1]["role"] == "user" # Check outputs were extracted from events outputs = json.loads(result["attributes"][SpanAttributeKey.OUTPUTS]) assert len(outputs) == 1 assert outputs[0]["content"] == "Hi there!" @pytest.mark.parametrize( ("attributes", "expected_type"), [ ({"gen_ai.operation.name": "chat"}, SpanType.CHAT_MODEL), ({"lk.retry_count": 0}, SpanType.LLM), ({"lk.function_tool.name": "weather"}, SpanType.TOOL), ], ) def test_livekit_span_type_from_otel(attributes, expected_type): result = translate_span_type_from_otel(attributes) assert result == expected_type def test_livekit_translator_detection_keys(): translator = LiveKitTranslator() assert "lk.agent_name" in translator.DETECTION_KEYS assert "lk.room_name" in translator.DETECTION_KEYS assert "lk.job_id" in translator.DETECTION_KEYS assert "lk.participant_identity" in translator.DETECTION_KEYS def test_livekit_translator_input_output_keys(): translator = LiveKitTranslator() assert "lk.user_input" in translator.INPUT_VALUE_KEYS assert "lk.user_transcript" in translator.INPUT_VALUE_KEYS assert "lk.chat_ctx" in translator.INPUT_VALUE_KEYS assert "lk.input_text" in translator.INPUT_VALUE_KEYS assert "lk.response.text" in translator.OUTPUT_VALUE_KEYS assert "lk.response.function_calls" in translator.OUTPUT_VALUE_KEYS def test_livekit_translator_message_format(): translator = LiveKitTranslator() assert translator.MESSAGE_FORMAT == "livekit"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/otel/test_livekit_translator.py", "license": "Apache License 2.0", "lines": 263, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/entities/session.py
from __future__ import annotations from typing import TYPE_CHECKING, Iterator from mlflow.tracing.constant import TraceMetadataKey if TYPE_CHECKING: from mlflow.entities import Trace class Session: """ A session object representing a group of traces that share the same session ID. Sessions typically represent multi-turn conversations or related interactions. This class provides convenient access to the session ID and allows iteration over the traces in the session. Args: traces: A list of Trace objects that belong to this session. """ def __init__(self, traces: list[Trace]): self._traces = traces @property def id(self) -> str | None: if not self._traces: return None return self._traces[0].info.request_metadata.get(TraceMetadataKey.TRACE_SESSION) @property def traces(self) -> list[Trace]: return self._traces def __iter__(self) -> Iterator[Trace]: return iter(self._traces) def __len__(self) -> int: return len(self._traces) def __getitem__(self, index: int) -> Trace: return self._traces[index] def __repr__(self) -> str: return f"Session(id={self.id!r})"
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/entities/session.py", "license": "Apache License 2.0", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/utils/gateway_utils.py
from __future__ import annotations import base64 import os from dataclasses import dataclass from mlflow.environment_variables import MLFLOW_GATEWAY_URI from mlflow.exceptions import MlflowException from mlflow.tracking import get_tracking_uri from mlflow.utils.uri import append_to_uri_path, is_http_uri @dataclass class GatewayLiteLLMConfig: api_base: str api_key: str model: str extra_headers: dict[str, str] | None def get_gateway_litellm_config(endpoint_name: str) -> GatewayLiteLLMConfig: """ Get the LiteLLM configuration for invoking an MLflow Gateway endpoint. Args: endpoint_name: The name of the gateway endpoint (e.g., "chat" from "gateway:/chat"). Returns: A GatewayLiteLLMConfig with api_base, api_key, model, and extra_headers configured for LiteLLM. Raises: MlflowException: If the gateway URI is not a valid HTTP(S) URL. """ # MLFLOW_GATEWAY_URI takes precedence over tracking URI for gateway routing. # This is needed for async job workers: the job infrastructure passes the HTTP # tracking URI (e.g., http://127.0.0.1:5000) to workers, but _get_tracking_store() # overwrites MLFLOW_TRACKING_URI with the backend store URI (e.g., sqlite://). # Job workers set MLFLOW_GATEWAY_URI to preserve the HTTP URI for gateway calls. gateway_uri = MLFLOW_GATEWAY_URI.get() or get_tracking_uri() if not is_http_uri(gateway_uri): raise MlflowException( f"Gateway provider requires an HTTP(S) tracking URI, but got: '{gateway_uri}'. " "The gateway provider routes requests through the MLflow tracking server. " "Please set MLFLOW_TRACKING_URI to a valid HTTP(S) URL " "(e.g., 'http://localhost:5000' or 'https://your-mlflow-server.com')." ) extra_headers = None username = os.environ.get("MLFLOW_TRACKING_USERNAME") password = os.environ.get("MLFLOW_TRACKING_PASSWORD") if username and password: encoded = base64.b64encode(f"{username}:{password}".encode()).decode("ascii") extra_headers = {"Authorization": f"Basic {encoded}"} return GatewayLiteLLMConfig( api_base=append_to_uri_path(gateway_uri, "gateway/mlflow/v1/"), # Static dummy key to satisfy LiteLLM's requirement for a non-empty api_key. api_key="mlflow-gateway-auth", # Use openai/ prefix for LiteLLM to use OpenAI-compatible format. # LiteLLM strips the prefix, so gateway receives endpoint_name as the model. model=f"openai/{endpoint_name}", extra_headers=extra_headers, )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/utils/gateway_utils.py", "license": "Apache License 2.0", "lines": 53, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/genai/utils/test_gateway_utils.py
import base64 from unittest import mock import pytest from mlflow.exceptions import MlflowException from mlflow.genai.utils.gateway_utils import GatewayLiteLLMConfig, get_gateway_litellm_config @pytest.mark.parametrize( ("gateway_uri", "tracking_uri", "endpoint_name", "expected_api_base"), [ # MLFLOW_GATEWAY_URI set ( "http://localhost:5000", "http://other:8000", "chat", "http://localhost:5000/gateway/mlflow/v1/", ), # MLFLOW_GATEWAY_URI not set, falls back to tracking URI ( None, "http://localhost:5000", "my-endpoint", "http://localhost:5000/gateway/mlflow/v1/", ), # HTTPS URI ( "https://mlflow.example.com", None, "chat", "https://mlflow.example.com/gateway/mlflow/v1/", ), ], ) def test_get_gateway_litellm_config( gateway_uri, tracking_uri, endpoint_name, expected_api_base, monkeypatch ): if gateway_uri: monkeypatch.setenv("MLFLOW_GATEWAY_URI", gateway_uri) else: monkeypatch.delenv("MLFLOW_GATEWAY_URI", raising=False) monkeypatch.delenv("MLFLOW_TRACKING_USERNAME", raising=False) monkeypatch.delenv("MLFLOW_TRACKING_PASSWORD", raising=False) with mock.patch( "mlflow.genai.utils.gateway_utils.get_tracking_uri", return_value=tracking_uri or "http://default:5000", ): config = get_gateway_litellm_config(endpoint_name) assert isinstance(config, GatewayLiteLLMConfig) assert config.api_base == expected_api_base assert config.api_key == "mlflow-gateway-auth" assert config.model == f"openai/{endpoint_name}" assert config.extra_headers is None def test_get_gateway_litellm_config_with_tracking_credentials(monkeypatch): monkeypatch.delenv("MLFLOW_GATEWAY_URI", raising=False) monkeypatch.setenv("MLFLOW_TRACKING_USERNAME", "alice") monkeypatch.setenv("MLFLOW_TRACKING_PASSWORD", "secret123") with mock.patch( "mlflow.genai.utils.gateway_utils.get_tracking_uri", return_value="http://localhost:5000", ): config = get_gateway_litellm_config("chat") expected_encoded = base64.b64encode(b"alice:secret123").decode("ascii") assert config.extra_headers == {"Authorization": f"Basic {expected_encoded}"} assert config.api_key == "mlflow-gateway-auth" def test_get_gateway_litellm_config_without_tracking_credentials(monkeypatch): monkeypatch.delenv("MLFLOW_GATEWAY_URI", raising=False) monkeypatch.delenv("MLFLOW_TRACKING_USERNAME", raising=False) monkeypatch.delenv("MLFLOW_TRACKING_PASSWORD", raising=False) with mock.patch( "mlflow.genai.utils.gateway_utils.get_tracking_uri", return_value="http://localhost:5000", ): config = get_gateway_litellm_config("chat") assert config.extra_headers is None assert config.api_key == "mlflow-gateway-auth" def test_get_gateway_litellm_config_username_only_no_headers(monkeypatch): monkeypatch.delenv("MLFLOW_GATEWAY_URI", raising=False) monkeypatch.setenv("MLFLOW_TRACKING_USERNAME", "alice") monkeypatch.delenv("MLFLOW_TRACKING_PASSWORD", raising=False) with mock.patch( "mlflow.genai.utils.gateway_utils.get_tracking_uri", return_value="http://localhost:5000", ): config = get_gateway_litellm_config("chat") assert config.extra_headers is None @pytest.mark.parametrize( "tracking_uri", [ "sqlite:///mlflow.db", "/path/to/mlflow", "databricks", ], ) def test_get_gateway_litellm_config_invalid_uri(tracking_uri, monkeypatch): monkeypatch.delenv("MLFLOW_GATEWAY_URI", raising=False) with mock.patch("mlflow.genai.utils.gateway_utils.get_tracking_uri", return_value=tracking_uri): with pytest.raises(MlflowException, match="Gateway provider requires an HTTP"): get_gateway_litellm_config("chat")
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/utils/test_gateway_utils.py", "license": "Apache License 2.0", "lines": 96, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/scorers/trulens/models.py
from __future__ import annotations from typing import TYPE_CHECKING, Any from mlflow.exceptions import MlflowException from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import ( call_chat_completions, ) from mlflow.genai.judges.constants import _DATABRICKS_DEFAULT_JUDGE_MODEL from mlflow.genai.utils.message_utils import serialize_chat_messages_to_prompts from mlflow.metrics.genai.model_utils import _parse_model_uri if TYPE_CHECKING: from typing import Sequence def _check_trulens_installed(): try: import trulens # noqa: F401 except ImportError: raise MlflowException.invalid_parameter_value( "TruLens scorers require the 'trulens' package. " "Install it with: `pip install trulens trulens-providers-litellm`" ) def _create_databricks_managed_judge_provider(**kwargs: Any): from trulens.core.feedback.endpoint import Endpoint from trulens.feedback.llm_provider import LLMProvider class DatabricksManagedJudgeProvider(LLMProvider): def __init__(self): endpoint = Endpoint(name="databricks-managed-judge") super().__init__(model_engine=_DATABRICKS_DEFAULT_JUDGE_MODEL, endpoint=endpoint) def _create_chat_completion( self, prompt: str | None = None, messages: "Sequence[dict] | None" = None, **kwargs, ) -> str: if messages: user_prompt, system_prompt = serialize_chat_messages_to_prompts(list(messages)) system_prompt = system_prompt or "" else: user_prompt = prompt if prompt is not None else "" system_prompt = "" result = call_chat_completions(user_prompt=user_prompt, system_prompt=system_prompt) return result.output return DatabricksManagedJudgeProvider() def create_trulens_provider(model_uri: str, **kwargs: Any): """ Create a TruLens provider from a model URI. Args: model_uri: Model URI in one of these formats: - "databricks" - Use default Databricks managed judge - "databricks:/endpoint" - Use LiteLLM with Databricks endpoint - "provider:/model" - Use LiteLLM with the specified model kwargs: Additional arguments passed to the underlying provider Returns: A TruLens-compatible provider Raises: MlflowException: If the model URI format is invalid """ _check_trulens_installed() # Use managed judge for plain "databricks" without endpoint if model_uri == "databricks": return _create_databricks_managed_judge_provider(**kwargs) # Parse provider:/model format using shared helper provider, model_name = _parse_model_uri(model_uri) # Use LiteLLM for all providers (including databricks:/endpoint) try: from trulens.providers.litellm import LiteLLM litellm_model = f"{provider}/{model_name}" if provider != "litellm" else model_name return LiteLLM(model_engine=litellm_model, **kwargs) except ImportError: raise MlflowException.invalid_parameter_value( "Non-Databricks providers require 'trulens-providers-litellm'. " "Install it with: `pip install trulens-providers-litellm`" )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/trulens/models.py", "license": "Apache License 2.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/scorers/trulens/registry.py
from __future__ import annotations import re from typing import Any # Mapping: metric name -> (feedback method name, argument mapping) # Argument mapping: generic key -> TruLens-specific argument name _METRIC_REGISTRY: dict[str, tuple[str, dict[str, str]]] = { # RAG metrics "Groundedness": ( "groundedness_measure_with_cot_reasons", {"context": "source", "output": "statement"}, ), "ContextRelevance": ( "context_relevance_with_cot_reasons", {"input": "question", "context": "context"}, ), "AnswerRelevance": ( "relevance_with_cot_reasons", {"input": "prompt", "output": "response"}, ), "Coherence": ( "coherence_with_cot_reasons", {"output": "text"}, ), # Agent trace metrics (no argument mapping needed - they use trace directly) "logical_consistency": ("logical_consistency_with_cot_reasons", {}), "execution_efficiency": ("execution_efficiency_with_cot_reasons", {}), "plan_adherence": ("plan_adherence_with_cot_reasons", {}), "plan_quality": ("plan_quality_with_cot_reasons", {}), "tool_selection": ("tool_selection_with_cot_reasons", {}), "tool_calling": ("tool_calling_with_cot_reasons", {}), } def get_feedback_method_name(metric_name: str) -> str: if metric_name in _METRIC_REGISTRY: return _METRIC_REGISTRY[metric_name][0] # Convert CamelCase to snake_case and append _with_cot_reasons snake_case = re.sub(r"(?<!^)(?=[A-Z])", "_", metric_name).lower() return f"{snake_case}_with_cot_reasons" def get_argument_mapping(metric_name: str) -> dict[str, str]: if metric_name in _METRIC_REGISTRY: return _METRIC_REGISTRY[metric_name][1] # Return default empty mapping for unknown metrics return {} def build_trulens_args( metric_name: str, input_str: str, output_str: str, context_str: str, ) -> dict[str, Any]: arg_mapping = get_argument_mapping(metric_name) generic_values = {"input": input_str, "output": output_str, "context": context_str} return { trulens_arg: generic_values[generic_key] for generic_key, trulens_arg in arg_mapping.items() if generic_values.get(generic_key) }
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/trulens/registry.py", "license": "Apache License 2.0", "lines": 55, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/scorers/trulens/scorers/agent_trace.py
""" Agent trace scorers for goal-plan-action alignment evaluation. These scorers analyze agent execution traces to detect internal errors and evaluate the quality of agent reasoning, planning, and tool usage. Based on TruLens' benchmarked goal-plan-action alignment evaluations which achieve 95% error coverage against TRAIL (compared to 55% for standard LLM judges). See: https://arxiv.org/abs/2510.08847 """ from __future__ import annotations import logging from typing import Any, ClassVar from pydantic import PrivateAttr from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType from mlflow.entities.trace import Trace from mlflow.exceptions import MlflowException from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.judges.utils import get_default_model from mlflow.genai.scorers import FRAMEWORK_METADATA_KEY from mlflow.genai.scorers.base import Scorer from mlflow.genai.scorers.trulens.models import create_trulens_provider from mlflow.genai.scorers.trulens.registry import get_feedback_method_name from mlflow.genai.scorers.trulens.utils import format_rationale from mlflow.utils.annotations import experimental from mlflow.utils.docstring_utils import format_docstring _logger = logging.getLogger(__name__) class TruLensAgentScorer(Scorer): """ Base class for TruLens agent trace scorers. Agent trace scorers evaluate the quality of agent execution traces, analyzing reasoning, planning, and tool usage patterns. Args: model: Model to use for evaluation """ metric_name: ClassVar[str] _provider: Any = PrivateAttr() _model: str = PrivateAttr() _method_name: str = PrivateAttr() def __init__( self, model: str | None = None, **kwargs: Any, ): super().__init__(name=self.metric_name) model = model or get_default_model() self._model = model self._provider = create_trulens_provider(model, **kwargs) self._method_name = get_feedback_method_name(self.metric_name) def _get_trace_string(self, trace: Trace | str | None) -> str: if trace is None: raise MlflowException.invalid_parameter_value( "Trace is required for agent trace evaluation." ) if isinstance(trace, Trace): return trace.to_json() if isinstance(trace, str): return trace raise MlflowException.invalid_parameter_value( f"Invalid trace type: {type(trace)}. Expected Trace or str." ) def __call__( self, *, trace: Trace | str | None = None, **kwargs, ) -> Feedback: assessment_source = AssessmentSource( source_type=AssessmentSourceType.LLM_JUDGE, source_id=self._model, ) try: trace_str = self._get_trace_string(trace) feedback_method = getattr(self._provider, self._method_name) score, reasons = feedback_method(trace=trace_str) return Feedback( name=self.name, value=score, rationale=format_rationale(reasons), source=assessment_source, metadata={FRAMEWORK_METADATA_KEY: "trulens"}, ) except MlflowException: raise except Exception as e: _logger.error(f"Error evaluating TruLens agent trace metric {self.name}: {e}") return Feedback( name=self.name, error=e, source=assessment_source, metadata={FRAMEWORK_METADATA_KEY: "trulens"}, ) @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) class LogicalConsistency(TruLensAgentScorer): """ Evaluates logical consistency and reasoning quality of agent traces. Analyzes how coherent and logically sound the agent's decision-making process is throughout the execution trace. Args: model: {{ model }} Example: .. code-block:: python from mlflow.genai.scorers.trulens import LogicalConsistency traces = mlflow.search_traces(experiment_ids=["1"]) results = mlflow.genai.evaluate( data=traces, scorers=[LogicalConsistency()], ) """ metric_name: ClassVar[str] = "logical_consistency" @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) class ExecutionEfficiency(TruLensAgentScorer): """ Evaluates execution efficiency of agent traces. Analyzes whether the agent takes an optimal path to achieve its goal without unnecessary steps or redundant operations. Args: model: {{ model }} Example: .. code-block:: python from mlflow.genai.scorers.trulens import ExecutionEfficiency traces = mlflow.search_traces(experiment_ids=["1"]) results = mlflow.genai.evaluate( data=traces, scorers=[ExecutionEfficiency()], ) """ metric_name: ClassVar[str] = "execution_efficiency" @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) class PlanAdherence(TruLensAgentScorer): """ Evaluates plan adherence of agent traces. Analyzes whether the agent follows its stated plan during execution or deviates from it. Args: model: {{ model }} Example: .. code-block:: python from mlflow.genai.scorers.trulens import PlanAdherence traces = mlflow.search_traces(experiment_ids=["1"]) results = mlflow.genai.evaluate( data=traces, scorers=[PlanAdherence()], ) """ metric_name: ClassVar[str] = "plan_adherence" @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) class PlanQuality(TruLensAgentScorer): """ Evaluates plan quality of agent traces. Analyzes whether the agent's plan is well-structured, comprehensive, and appropriate for achieving the stated goal. Args: model: {{ model }} Example: .. code-block:: python from mlflow.genai.scorers.trulens import PlanQuality traces = mlflow.search_traces(experiment_ids=["1"]) results = mlflow.genai.evaluate( data=traces, scorers=[PlanQuality()], ) """ metric_name: ClassVar[str] = "plan_quality" @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) class ToolSelection(TruLensAgentScorer): """ Evaluates tool selection quality of agent traces. Analyzes whether the agent chooses the appropriate tools for each step of the task. Args: model: {{ model }} Example: .. code-block:: python from mlflow.genai.scorers.trulens import ToolSelection traces = mlflow.search_traces(experiment_ids=["1"]) results = mlflow.genai.evaluate( data=traces, scorers=[ToolSelection()], ) """ metric_name: ClassVar[str] = "tool_selection" @experimental(version="3.10.0") @format_docstring(_MODEL_API_DOC) class ToolCalling(TruLensAgentScorer): """ Evaluates tool calling quality of agent traces. Analyzes whether the agent correctly invokes tools with appropriate parameters and handles tool responses properly. Args: model: {{ model }} Example: .. code-block:: python from mlflow.genai.scorers.trulens import ToolCalling traces = mlflow.search_traces(experiment_ids=["1"]) results = mlflow.genai.evaluate( data=traces, scorers=[ToolCalling()], ) """ metric_name: ClassVar[str] = "tool_calling"
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/trulens/scorers/agent_trace.py", "license": "Apache License 2.0", "lines": 205, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/scorers/trulens/utils.py
from __future__ import annotations import logging from typing import Any from mlflow.entities.trace import Trace from mlflow.genai.scorers.trulens.registry import build_trulens_args from mlflow.genai.utils.trace_utils import ( extract_retrieval_context_from_trace, parse_inputs_to_str, parse_outputs_to_str, resolve_expectations_from_trace, resolve_inputs_from_trace, resolve_outputs_from_trace, ) _logger = logging.getLogger(__name__) # Keys that can provide context, in priority order _CONTEXT_KEYS = ("context", "reference", "expected_output") def map_scorer_inputs_to_trulens_args( metric_name: str, inputs: Any = None, outputs: Any = None, expectations: dict[str, Any] | None = None, trace: Trace | None = None, ) -> dict[str, Any]: if trace: inputs = resolve_inputs_from_trace(inputs, trace) outputs = resolve_outputs_from_trace(outputs, trace) expectations = resolve_expectations_from_trace(expectations, trace) input_str = parse_inputs_to_str(inputs) if inputs is not None else "" output_str = parse_outputs_to_str(outputs) if outputs is not None else "" context_str = _extract_context(expectations, trace) return build_trulens_args( metric_name=metric_name, input_str=input_str, output_str=output_str, context_str=context_str, ) def _extract_context( expectations: dict[str, Any] | None, trace: Trace | None, ) -> str: """Extract context from expectations or trace retrieval spans.""" context_str = "" if expectations: # Check for multiple context keys and warn found_keys = [key for key in _CONTEXT_KEYS if expectations.get(key)] if len(found_keys) > 1: _logger.warning( f"Multiple context keys found in expectations: {found_keys}. " f"Using '{found_keys[0]}' (priority order: {_CONTEXT_KEYS})." ) # Use first available key in priority order for key in _CONTEXT_KEYS: if context := expectations.get(key): if isinstance(context, list): context_str = "\n".join(str(c) for c in context) else: context_str = str(context) break if not context_str and trace: if span_id_to_context := extract_retrieval_context_from_trace(trace): contexts = [] for ctx_list in span_id_to_context.values(): for ctx in ctx_list: if isinstance(ctx, dict) and "content" in ctx: contexts.append(ctx["content"]) else: contexts.append(str(ctx)) if contexts: context_str = "\n".join(contexts) return context_str def format_rationale(reasons: dict[str, Any] | None) -> str | None: """ Format TruLens reasons dict into a readable rationale string. Args: reasons: Dict of reasons from TruLens Returns: Formatted rationale string or None if no reasons """ if not reasons: return None parts = [] for key, value in reasons.items(): value_str = "; ".join(str(v) for v in value) if isinstance(value, list) else str(value) parts.append(f"{key}: {value_str}") return " | ".join(parts) if parts else None
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/trulens/utils.py", "license": "Apache License 2.0", "lines": 85, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/utils/message_utils.py
from __future__ import annotations from typing import Any def serialize_messages_to_prompts( messages: list[Any], ) -> tuple[str, str | None]: """ Serialize messages to user_prompt and system_prompt strings. Handles both litellm Message objects and message dicts with 'role' and 'content' keys. This is needed because call_chat_completions only accepts string prompts. Args: messages: List of message objects or dicts. Returns: Tuple of (user_prompt, system_prompt). system_prompt may be None. """ system_prompt = None user_parts = [] for msg in messages: # Handle both object attributes and dict keys if isinstance(msg, dict): role = msg.get("role", "user") content = msg.get("content", "") tool_calls = msg.get("tool_calls") name = msg.get("name") else: role = getattr(msg, "role", "user") content = getattr(msg, "content", "") tool_calls = getattr(msg, "tool_calls", None) name = getattr(msg, "name", None) if role == "system": system_prompt = content elif role == "user": user_parts.append(content) elif role == "assistant": if tool_calls: user_parts.append("Assistant: [Called tools]") elif content: user_parts.append(f"Assistant: {content}") elif role == "tool": if name: user_parts.append(f"Tool {name}: {content}") else: user_parts.append(f"tool: {content}") else: user_parts.append(f"{role}: {content}") user_prompt = "\n\n".join(user_parts) return user_prompt, system_prompt # Backwards compatibility aliases def serialize_messages_to_databricks_prompts( messages: list[Any], ) -> tuple[str, str | None]: return serialize_messages_to_prompts(messages) def serialize_chat_messages_to_prompts( messages: list[dict[str, Any]], ) -> tuple[str, str | None]: return serialize_messages_to_prompts(messages)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/utils/message_utils.py", "license": "Apache License 2.0", "lines": 55, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/genai/scorers/trulens/scorers/test_agent_trace.py
from unittest.mock import Mock, patch import pytest import trulens # noqa: F401 import mlflow from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSourceType from mlflow.entities.span import SpanType from mlflow.exceptions import MlflowException @pytest.fixture def sample_agent_trace(): @mlflow.trace(name="agent_workflow", span_type=SpanType.AGENT) def run_agent(query): with mlflow.start_span(name="plan", span_type=SpanType.CHAIN) as plan_span: plan_span.set_inputs({"query": query}) plan_span.set_outputs({"plan": "1. Search for info 2. Summarize"}) with mlflow.start_span(name="search_tool", span_type=SpanType.TOOL) as tool_span: tool_span.set_inputs({"search_query": query}) tool_span.set_outputs({"results": ["Result 1", "Result 2"]}) return "Final answer based on search results" run_agent("What is MLflow?") return mlflow.get_trace(mlflow.get_last_active_trace_id()) @pytest.fixture def mock_provider(): return Mock() @pytest.mark.parametrize( ("scorer_class", "metric_name", "method_name"), [ ("LogicalConsistency", "logical_consistency", "logical_consistency_with_cot_reasons"), ("ExecutionEfficiency", "execution_efficiency", "execution_efficiency_with_cot_reasons"), ("PlanAdherence", "plan_adherence", "plan_adherence_with_cot_reasons"), ("PlanQuality", "plan_quality", "plan_quality_with_cot_reasons"), ("ToolSelection", "tool_selection", "tool_selection_with_cot_reasons"), ("ToolCalling", "tool_calling", "tool_calling_with_cot_reasons"), ], ) def test_agent_trace_scorer( mock_provider, sample_agent_trace, scorer_class, metric_name, method_name ): expected_score = 0.87 expected_reasons = {"reason": "Test rationale"} with patch( "mlflow.genai.scorers.trulens.scorers.agent_trace.create_trulens_provider", return_value=mock_provider, ): from mlflow.genai.scorers import trulens scorer_cls = getattr(trulens, scorer_class) scorer = scorer_cls(model="openai:/gpt-4") getattr(mock_provider, method_name).return_value = (expected_score, expected_reasons) result = scorer(trace=sample_agent_trace) assert isinstance(result, Feedback) assert result.name == metric_name assert result.value == expected_score assert result.rationale == "reason: Test rationale" assert result.source.source_type == AssessmentSourceType.LLM_JUDGE assert result.source.source_id == "openai:/gpt-4" assert result.metadata == {"mlflow.scorer.framework": "trulens"} method = getattr(mock_provider, method_name) method.assert_called_once() call_kwargs = method.call_args[1] assert call_kwargs["trace"] == sample_agent_trace.to_json() def test_scorer_requires_trace(mock_provider): with patch( "mlflow.genai.scorers.trulens.scorers.agent_trace.create_trulens_provider", return_value=mock_provider, ): from mlflow.genai.scorers.trulens import LogicalConsistency scorer = LogicalConsistency() with pytest.raises(MlflowException, match="Trace is required"): scorer(trace=None) def test_scorer_accepts_string_trace(mock_provider): with patch( "mlflow.genai.scorers.trulens.scorers.agent_trace.create_trulens_provider", return_value=mock_provider, ): from mlflow.genai.scorers.trulens import LogicalConsistency scorer = LogicalConsistency() mock_provider.logical_consistency_with_cot_reasons.return_value = (0.85, None) trace_json = '{"info": {}, "data": {"spans": []}}' result = scorer(trace=trace_json) assert result.value == 0.85 assert result.metadata == {"mlflow.scorer.framework": "trulens"} mock_provider.logical_consistency_with_cot_reasons.assert_called_once_with(trace=trace_json) @pytest.mark.parametrize( ("reasons", "expected_rationale"), [ ( {"reason": "Main reason", "details": ["Detail 1", "Detail 2"]}, "reason: Main reason | details: Detail 1; Detail 2", ), ({"single": "value"}, "single: value"), (None, None), ], ) def test_scorer_rationale_formatting( mock_provider, sample_agent_trace, reasons, expected_rationale ): with patch( "mlflow.genai.scorers.trulens.scorers.agent_trace.create_trulens_provider", return_value=mock_provider, ): from mlflow.genai.scorers.trulens import LogicalConsistency scorer = LogicalConsistency() mock_provider.logical_consistency_with_cot_reasons.return_value = (0.8, reasons) result = scorer(trace=sample_agent_trace) assert result.rationale == expected_rationale def test_scorer_error_handling(mock_provider, sample_agent_trace): with patch( "mlflow.genai.scorers.trulens.scorers.agent_trace.create_trulens_provider", return_value=mock_provider, ): from mlflow.genai.scorers.trulens import LogicalConsistency scorer = LogicalConsistency(model="openai:/gpt-4") mock_provider.logical_consistency_with_cot_reasons.side_effect = RuntimeError( "Evaluation failed" ) result = scorer(trace=sample_agent_trace) assert isinstance(result, Feedback) assert result.error is not None assert "Evaluation failed" in str(result.error) assert result.metadata == {"mlflow.scorer.framework": "trulens"}
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/trulens/scorers/test_agent_trace.py", "license": "Apache License 2.0", "lines": 120, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/trulens/test_models.py
import importlib from unittest.mock import Mock, patch import pytest import trulens # noqa: F401 from mlflow.exceptions import MlflowException from mlflow.genai.scorers.trulens.models import create_trulens_provider @pytest.fixture def mock_call_chat_completions(): with patch("mlflow.genai.scorers.trulens.models.call_chat_completions") as mock: result = Mock() result.output = "Test output" mock.return_value = result yield mock def test_create_trulens_provider_databricks(mock_call_chat_completions): mock_endpoint = Mock() mock_llm_provider = Mock() with patch.dict( "sys.modules", { "trulens.core.feedback.endpoint": mock_endpoint, "trulens.feedback.llm_provider": mock_llm_provider, }, ): mock_llm_provider.LLMProvider = type( "LLMProvider", (), {"__init__": lambda self, **kw: None} ) mock_endpoint.Endpoint = Mock() provider = create_trulens_provider("databricks") assert provider is not None # Call the provider to verify call_chat_completions is used provider._create_chat_completion(prompt="test prompt") mock_call_chat_completions.assert_called_once_with( user_prompt="test prompt", system_prompt="", ) def test_create_trulens_provider_databricks_endpoint_uses_litellm(): mock_litellm_class = Mock() mock_litellm_class.return_value = Mock() with patch.dict("sys.modules", {"trulens.providers.litellm": Mock(LiteLLM=mock_litellm_class)}): from mlflow.genai.scorers.trulens import models importlib.reload(models) models.create_trulens_provider("databricks:/my-endpoint") mock_litellm_class.assert_called_once_with(model_engine="databricks/my-endpoint") def test_create_trulens_provider_openai(): mock_litellm_class = Mock() mock_litellm_class.return_value = Mock() with patch.dict("sys.modules", {"trulens.providers.litellm": Mock(LiteLLM=mock_litellm_class)}): from mlflow.genai.scorers.trulens import models importlib.reload(models) models.create_trulens_provider("openai:/gpt-4") mock_litellm_class.assert_called_once_with(model_engine="openai/gpt-4") def test_create_trulens_provider_litellm_format(): mock_litellm_class = Mock() mock_litellm_class.return_value = Mock() with patch.dict("sys.modules", {"trulens.providers.litellm": Mock(LiteLLM=mock_litellm_class)}): from mlflow.genai.scorers.trulens import models importlib.reload(models) models.create_trulens_provider("litellm:/gpt-4") mock_litellm_class.assert_called_once_with(model_engine="gpt-4") def test_create_trulens_provider_invalid_format(): with pytest.raises(MlflowException, match="Malformed model uri"): create_trulens_provider("gpt-4")
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/trulens/test_models.py", "license": "Apache License 2.0", "lines": 62, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/trulens/test_registry.py
import pytest from mlflow.genai.scorers.trulens.registry import get_argument_mapping, get_feedback_method_name @pytest.mark.parametrize( ("metric_name", "expected_method"), [ ("Groundedness", "groundedness_measure_with_cot_reasons"), ("ContextRelevance", "context_relevance_with_cot_reasons"), ("AnswerRelevance", "relevance_with_cot_reasons"), ("Coherence", "coherence_with_cot_reasons"), ], ) def test_get_feedback_method_name(metric_name, expected_method): assert get_feedback_method_name(metric_name) == expected_method def test_get_feedback_method_name_unknown_metric(): # Unknown metrics should return inferred method name assert get_feedback_method_name("NewMetric") == "new_metric_with_cot_reasons" assert get_feedback_method_name("SomeOtherMetric") == "some_other_metric_with_cot_reasons" @pytest.mark.parametrize( ("metric_name", "expected_mapping"), [ ("Groundedness", {"context": "source", "output": "statement"}), ("ContextRelevance", {"input": "question", "context": "context"}), ("AnswerRelevance", {"input": "prompt", "output": "response"}), ("Coherence", {"output": "text"}), ], ) def test_get_argument_mapping(metric_name, expected_mapping): assert get_argument_mapping(metric_name) == expected_mapping def test_get_argument_mapping_unknown_metric(): # Unknown metrics should return empty mapping assert get_argument_mapping("UnknownMetric") == {}
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/trulens/test_registry.py", "license": "Apache License 2.0", "lines": 31, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/trulens/test_trulens.py
from unittest.mock import Mock, patch import pytest import trulens # noqa: F401 from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSourceType from mlflow.genai.judges.utils import CategoricalRating @pytest.fixture def mock_provider(): mock = Mock() mock.groundedness_measure_with_cot_reasons = Mock() mock.context_relevance_with_cot_reasons = Mock() mock.relevance_with_cot_reasons = Mock() mock.coherence_with_cot_reasons = Mock() return mock @pytest.mark.parametrize( ("scorer_name", "method_name", "score", "expected_value"), [ ("Groundedness", "groundedness_measure_with_cot_reasons", 0.8, CategoricalRating.YES), ("ContextRelevance", "context_relevance_with_cot_reasons", 0.7, CategoricalRating.YES), ("AnswerRelevance", "relevance_with_cot_reasons", 0.9, CategoricalRating.YES), ("Coherence", "coherence_with_cot_reasons", 0.85, CategoricalRating.YES), ("Groundedness", "groundedness_measure_with_cot_reasons", 0.3, CategoricalRating.NO), ], ) def test_trulens_scorer(mock_provider, scorer_name, method_name, score, expected_value): with patch("mlflow.genai.scorers.trulens.create_trulens_provider", return_value=mock_provider): from mlflow.genai.scorers import trulens scorer_cls = getattr(trulens, scorer_name) scorer = scorer_cls(model="openai:/gpt-4") method = getattr(mock_provider, method_name) method.return_value = (score, {"reason": "Test reason"}) result = scorer( inputs="test input", outputs="test output", expectations={"context": "test context"}, ) assert isinstance(result, Feedback) assert result.name == scorer_name assert result.value == expected_value assert result.rationale == "reason: Test reason" assert result.source.source_type == AssessmentSourceType.LLM_JUDGE assert result.source.source_id == "openai:/gpt-4" assert result.metadata == { "mlflow.scorer.framework": "trulens", "score": score, "threshold": 0.5, } def test_trulens_scorer_custom_threshold(mock_provider): with patch("mlflow.genai.scorers.trulens.create_trulens_provider", return_value=mock_provider): from mlflow.genai.scorers.trulens import Groundedness scorer = Groundedness(model="openai:/gpt-4", threshold=0.7) mock_provider.groundedness_measure_with_cot_reasons.return_value = (0.6, {"reason": "Moderate"}) result = scorer( outputs="test output", expectations={"context": "test context"}, ) assert result.value == CategoricalRating.NO assert result.metadata["threshold"] == 0.7 def test_trulens_scorer_none_reasons(mock_provider): with patch("mlflow.genai.scorers.trulens.create_trulens_provider", return_value=mock_provider): from mlflow.genai.scorers.trulens import Groundedness scorer = Groundedness(model="openai:/gpt-4") mock_provider.groundedness_measure_with_cot_reasons.return_value = (0.9, None) result = scorer( outputs="test output", expectations={"context": "test context"}, ) assert result.rationale is None def test_trulens_get_scorer(mock_provider): with patch("mlflow.genai.scorers.trulens.create_trulens_provider", return_value=mock_provider): from mlflow.genai.scorers.trulens import get_scorer scorer = get_scorer("Groundedness", model="openai:/gpt-4") mock_provider.groundedness_measure_with_cot_reasons.return_value = (0.9, {"reason": "Good"}) result = scorer( outputs="test output", expectations={"context": "test context"}, ) assert isinstance(result, Feedback) assert result.name == "Groundedness" def test_trulens_scorer_provider_is_real_instance(): from trulens.providers.litellm import LiteLLM from mlflow.genai.scorers.trulens import Groundedness try: scorer = Groundedness(model="openai:/gpt-4") assert isinstance(scorer._provider, LiteLLM) except AttributeError as e: # TruLens LiteLLM provider has an instrumentation bug with CallTypes enum if "CallTypes" in str(e): pytest.skip("TruLens LiteLLM instrumentation bug - see TruLens issue tracker") raise def test_trulens_scorer_error_handling(mock_provider): with patch("mlflow.genai.scorers.trulens.create_trulens_provider", return_value=mock_provider): from mlflow.genai.scorers.trulens import Groundedness scorer = Groundedness(model="openai:/gpt-4") mock_provider.groundedness_measure_with_cot_reasons.side_effect = RuntimeError( "Evaluation failed" ) result = scorer( outputs="test output", expectations={"context": "test context"}, ) assert isinstance(result, Feedback) assert result.error is not None assert "Evaluation failed" in str(result.error) assert result.metadata == {"mlflow.scorer.framework": "trulens"}
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/trulens/test_trulens.py", "license": "Apache License 2.0", "lines": 105, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/trulens/test_utils.py
import pytest import mlflow from mlflow.genai.scorers.trulens.utils import ( format_rationale, map_scorer_inputs_to_trulens_args, ) def _create_test_trace( inputs: dict[str, str] | None = None, outputs: dict[str, str] | None = None, ): with mlflow.start_span() as span: if inputs is not None: span.set_inputs(inputs) if outputs is not None: span.set_outputs(outputs) return mlflow.get_trace(span.trace_id) @pytest.mark.parametrize( ("metric_name", "inputs", "outputs", "expectations", "expected"), [ ( "Groundedness", None, "The answer is 42.", {"context": "The answer to everything is 42."}, {"source": "The answer to everything is 42.", "statement": "The answer is 42."}, ), ( "ContextRelevance", "What is the answer?", None, {"context": "The answer is 42."}, {"question": "What is the answer?", "context": "The answer is 42."}, ), ( "AnswerRelevance", "What is MLflow?", "MLflow is a platform for ML lifecycle.", None, {"prompt": "What is MLflow?", "response": "MLflow is a platform for ML lifecycle."}, ), ( "Coherence", None, "This is a well-structured response.", None, {"text": "This is a well-structured response."}, ), ], ) def test_map_scorer_inputs_metric_mappings(metric_name, inputs, outputs, expectations, expected): result = map_scorer_inputs_to_trulens_args( metric_name=metric_name, inputs=inputs, outputs=outputs, expectations=expectations, ) assert result == expected def test_map_scorer_inputs_context_from_list(): result = map_scorer_inputs_to_trulens_args( metric_name="Groundedness", outputs="Combined answer.", expectations={"context": ["First context.", "Second context."]}, ) assert result["source"] == "First context.\nSecond context." def test_map_scorer_inputs_context_priority_order(): result = map_scorer_inputs_to_trulens_args( metric_name="Groundedness", outputs="test", expectations={ "context": "primary context", "reference": "should be ignored", }, ) assert result["source"] == "primary context" def test_map_scorer_inputs_reference_fallback(): result = map_scorer_inputs_to_trulens_args( metric_name="Groundedness", outputs="test", expectations={"reference": "reference context"}, ) assert result["source"] == "reference context" def test_map_scorer_inputs_with_trace(): trace = _create_test_trace( inputs={"question": "What is MLflow?"}, outputs={"answer": "MLflow is a platform for ML lifecycle."}, ) result = map_scorer_inputs_to_trulens_args( metric_name="AnswerRelevance", expectations={"context": "test context"}, trace=trace, ) assert result["prompt"] == "{'question': 'What is MLflow?'}" assert result["response"] == '{"answer": "MLflow is a platform for ML lifecycle."}' @pytest.mark.parametrize( ("reasons", "expected"), [ (None, None), ({}, None), ({"reason": "Good answer"}, "reason: Good answer"), ({"reasons": ["A", "B", "C"]}, "reasons: A; B; C"), ( {"reason1": "First reason", "reason2": "Second reason"}, "reason1: First reason | reason2: Second reason", ), ({"details": {"key": "value"}}, "details: {'key': 'value'}"), ], ) def test_format_rationale(reasons, expected): assert format_rationale(reasons) == expected
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/trulens/test_utils.py", "license": "Apache License 2.0", "lines": 109, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/utils/test_message_utils.py
import pytest from mlflow.genai.utils.message_utils import serialize_messages_to_prompts from mlflow.types.llm import ChatMessage, FunctionToolCallArguments, ToolCall @pytest.mark.parametrize( ("messages", "expected_user_prompt", "expected_system_prompt"), [ # Basic user message (object) ( [ChatMessage(role="user", content="Hello")], "Hello", None, ), # Basic user message (dict) ( [{"role": "user", "content": "Hello"}], "Hello", None, ), # System + user messages (object) ( [ ChatMessage(role="system", content="You are helpful."), ChatMessage(role="user", content="Hello"), ], "Hello", "You are helpful.", ), # System + user messages (dict) ( [ {"role": "system", "content": "You are helpful."}, {"role": "user", "content": "Hello"}, ], "Hello", "You are helpful.", ), # Multiple user messages (object) ( [ ChatMessage(role="user", content="First"), ChatMessage(role="user", content="Second"), ], "First\n\nSecond", None, ), # Multiple user messages (dict) ( [ {"role": "user", "content": "First"}, {"role": "user", "content": "Second"}, ], "First\n\nSecond", None, ), # Empty messages ( [], "", None, ), ], ids=[ "basic_user_object", "basic_user_dict", "system_user_object", "system_user_dict", "multiple_users_object", "multiple_users_dict", "empty_messages", ], ) def test_serialize_messages_basic(messages, expected_user_prompt, expected_system_prompt): user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == expected_user_prompt assert system_prompt == expected_system_prompt def test_assistant_message_with_content_object(): messages = [ ChatMessage(role="user", content="Hello"), ChatMessage(role="assistant", content="Hi there!"), ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Hello\n\nAssistant: Hi there!" assert system_prompt is None def test_assistant_message_with_content_dict(): messages = [ {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}, ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Hello\n\nAssistant: Hi there!" assert system_prompt is None def test_assistant_message_with_tool_calls(): tool_call = ToolCall( function=FunctionToolCallArguments(name="search", arguments='{"query": "test"}') ) messages = [ ChatMessage(role="user", content="Search for info"), ChatMessage(role="assistant", tool_calls=[tool_call]), ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Search for info\n\nAssistant: [Called tools]" assert system_prompt is None def test_assistant_message_with_tool_calls_dict(): messages = [ {"role": "user", "content": "Search for info"}, {"role": "assistant", "content": None, "tool_calls": [{"id": "1", "function": {}}]}, ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Search for info\n\nAssistant: [Called tools]" assert system_prompt is None def test_tool_message_with_name_object(): messages = [ ChatMessage(role="user", content="Search"), ChatMessage(role="tool", name="search_tool", content='{"results": ["a", "b"]}'), ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == 'Search\n\nTool search_tool: {"results": ["a", "b"]}' assert system_prompt is None def test_tool_message_with_name_dict(): messages = [ {"role": "user", "content": "Search"}, {"role": "tool", "name": "search_tool", "content": '{"results": ["a", "b"]}'}, ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == 'Search\n\nTool search_tool: {"results": ["a", "b"]}' assert system_prompt is None def test_tool_message_without_name_dict(): messages = [ {"role": "user", "content": "Hello"}, {"role": "tool", "content": "Tool result"}, ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Hello\n\ntool: Tool result" assert system_prompt is None def test_custom_role_dict(): messages = [ {"role": "user", "content": "Hello"}, {"role": "developer", "content": "Custom message"}, ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Hello\n\ndeveloper: Custom message" assert system_prompt is None def test_full_conversation_object(): tool_call = ToolCall( function=FunctionToolCallArguments(name="search", arguments='{"query": "test"}') ) messages = [ ChatMessage(role="system", content="Be helpful"), ChatMessage(role="user", content="Query"), ChatMessage(role="assistant", content="Response"), ChatMessage(role="user", content="Search please"), ChatMessage(role="assistant", tool_calls=[tool_call]), ChatMessage(role="tool", name="search", content="Results"), ChatMessage(role="user", content="Follow-up"), ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) expected = ( "Query\n\nAssistant: Response\n\nSearch please\n\n" "Assistant: [Called tools]\n\nTool search: Results\n\nFollow-up" ) assert user_prompt == expected assert system_prompt == "Be helpful" def test_full_conversation_dict(): messages = [ {"role": "system", "content": "Be helpful"}, {"role": "user", "content": "Query"}, {"role": "assistant", "content": "Response"}, {"role": "user", "content": "Follow-up"}, ] user_prompt, system_prompt = serialize_messages_to_prompts(messages) assert user_prompt == "Query\n\nAssistant: Response\n\nFollow-up" assert system_prompt == "Be helpful"
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/utils/test_message_utils.py", "license": "Apache License 2.0", "lines": 172, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/gen_rest_api.py
# /// script # dependencies = ["texttable"] # /// """Generate RST documentation from protobuf JSON definitions.""" from __future__ import annotations import json import logging import re from enum import Enum from pathlib import Path from textwrap import dedent from typing import Any from texttable import Texttable _logger = logging.getLogger(__name__) def _gen_break() -> str: return "\n\n===========================\n\n" def _gen_h1(link_id: str, title: str) -> str: return f""" .. _{link_id}: {title} {"=" * len(title)} """ def _gen_h2(link_id: str, title: str) -> str: return f""" .. _{link_id}: {title} {"-" * len(title)} """ def _gen_page_title(title: str) -> str: link = title.lower().replace(" ", "-") header = "=" * len(title) return f""" .. _{link}: {header} {title} {header} """ def _validation_error(msg: str) -> str: return f"JSON Validation Error: {msg}" def _validate_doc_public_json(docjson: dict[str, Any]) -> None: _logger.info("Validating doc_public.json file.") if "files" not in docjson: _logger.error(docjson.keys()) raise ValueError(_validation_error("No 'files' key")) files = docjson["files"][0] _logger.info("Checking 'content'") if "content" not in files: _logger.error(files.keys()) raise ValueError(_validation_error("No 'content' key")) content = files["content"][0] _logger.info("Checking 'message', 'service', 'enum'") for key in ("message", "service", "enum"): if key not in content: _logger.error(content.keys()) raise ValueError(_validation_error(f"No '{key}' key")) _logger.info("Structure Appears to be valid! Continuing...") class MsgType(Enum): GENERIC = 1 REQUEST = 2 RESPONSE = 3 def _gen_id(full_path: list[str]) -> str: return "".join(full_path) class Field: """A field within a protobuf message, containing name, type, and description.""" def __init__(self, full_path: list[str], name: str, description: str, field_type: str) -> None: _logger.debug(f"Creating Field {name}") self.id = _gen_id(full_path) self.name = name self.description = description self.field_type = field_type def __repr__(self) -> str: return self.name @staticmethod def table_header() -> list[str]: return ["Field Name", "Type", "Description"] def to_table(self) -> list[str]: return [self.name, self.field_type, self.description] @staticmethod def _parse_name(field_details: dict[str, Any]) -> str: declared_type = field_details["field_type"] if declared_type == "oneof": names = [f"``{x['field_name']}``" for x in field_details["oneof"]] return " OR ".join(names) return field_details["field_name"] @staticmethod def _parse_type(field_details: dict[str, Any]) -> str: declared_type = field_details["field_type"] if declared_type == "oneof": field_types = [Field._convert_to_link(x["field_type"]) for x in field_details["oneof"]] return " OR ".join(field_types) return Field._convert_to_link(field_details["field_type"]) @staticmethod def _normalize_description(text: str) -> str: """Normalize whitespace in description text to avoid RST indentation issues.""" # Split into lines, strip leading/trailing whitespace from each, rejoin lines = text.split("\n") normalized_lines = [line.strip() for line in lines] return " ".join(line for line in normalized_lines if line) @staticmethod def _parse_description(field_details: dict[str, Any]) -> str: declared_type = field_details["field_type"] deprecated = " This field is deprecated." if field_details["deprecated"] else "" required = " This field is required." if field_details["validate_required"] else "" if declared_type == "oneof": def to_lowercase_first_char(s: str) -> str: return s[:1].lower() + s[1:] if s else "" options = [] for name, obj in zip( Field._parse_name(field_details).split(" OR "), field_details["oneof"] ): desc = Field._normalize_description(obj["description"]) options.append(f"If {name}, {to_lowercase_first_char(desc)}") return " ".join(options) + required + deprecated desc = Field._normalize_description(field_details["description"]) return desc + required + deprecated @staticmethod def _convert_to_link(raw_string: str) -> str: # Only create internal refs for mlflow types, not external ones if "." in raw_string: if raw_string.startswith("mlflow."): return f":ref:`{raw_string.replace('.', '').lower()}`" # External types (google.protobuf.*, opentelemetry.*, etc.) return f"``{raw_string}``" return f"``{raw_string}``" @classmethod def parse_all_from(cls, field_list: list[dict[str, Any]]) -> list[Field]: all_instances = [] for field in field_list: full_path = field["full_path"] # Skip deprecated fields if field["deprecated"]: continue name = Field._parse_name(field) field_type = Field._parse_type(field) description = Field._parse_description(field) if field["repeated"]: field_type = "An array of " + field_type if field["visibility"] == "public": all_instances.append(cls(full_path, name, description, field_type)) return all_instances class Value: """An enum value within a ProtoEnum.""" def __init__(self, full_path: list[str], name: str, description: str) -> None: self.id = _gen_id(full_path) self.name = name self.description = description def __repr__(self) -> str: return self.name @staticmethod def table_header() -> list[str]: return ["Name", "Description"] def to_table(self) -> list[str]: return [self.name, self.description] @classmethod def parse_all_from(cls, value_list: list[dict[str, Any]]) -> list[Value]: return [cls(v["full_path"], v["value"], v["description"]) for v in value_list] class ProtoEnum: """A protobuf enum with a series of Values.""" def __init__( self, full_path: list[str], name: str, description: str, values: list[Value] ) -> None: self.id = _gen_id(full_path) self.name = name self.description = description self.values = values def __repr__(self) -> str: return f"{self.id}\n {self.name}" def _generate_values_table(self) -> str: tbl = Texttable(max_width=200) header = Value.table_header() tbl.add_rows([header] + [f.to_table() for f in self.values]) return tbl.draw() def to_rst(self) -> str: values = self._generate_values_table() title = _gen_h2(self.id, self.name) section = f"\n{self.description}\n\n{values}" return title + section @classmethod def parse_all_from(cls, files: list[dict[str, Any]]) -> list[ProtoEnum]: all_instances = [] for proto_file in files: # Top-level enums enums = [x["enum"] for x in proto_file["content"] if x["enum"]] # Enums inside of messages for content in proto_file["content"]: if content["message"]: enums += content["message"].get("enums") or [] for enum in enums: values = Value.parse_all_from(enum["values"]) all_instances.append( cls(enum["full_path"], enum["name"], enum["description"], values) ) return all_instances class Message: """A protobuf message containing fields.""" def __init__( self, full_path: list[str], name: str, description: str, fields: list[Field] ) -> None: _logger.debug(f"Creating Message: {name}") self.id = _gen_id(full_path) self.name = name self.description = description self.fields = fields self.type = MsgType.GENERIC def __repr__(self) -> str: return self.id def _generate_field_table(self) -> str: tbl = Texttable(max_width=200) header = [Field.table_header()] non_empty_fields = [f for f in self.fields if f.name] rows = [f.to_table() for f in non_empty_fields] tbl.add_rows(header + rows) return tbl.draw() def _generate_rst_title(self) -> str: if self.type == MsgType.REQUEST: return _gen_h2(self.id, "Request Structure") elif self.type == MsgType.RESPONSE: return _gen_h2(self.id, "Response Structure") return _gen_h2(self.id, self.name) def to_rst(self) -> str: if not self.fields: return "" fields = self._generate_field_table() title = self._generate_rst_title() section = f"\n\n{self.description}\n\n\n{fields}" return title + section @classmethod def parse_all_from_list(cls, message_list: list[dict[str, Any]]) -> list[Message]: all_instances = [] for msg in message_list: if msg["visibility"] != "public": continue fields = Field.parse_all_from(msg["fields"]) all_instances.append(cls(msg["full_path"], msg["name"], msg["description"], fields)) if msg["messages"]: all_instances.extend(cls.parse_all_from_list(msg["messages"])) return all_instances @classmethod def parse_all_from(cls, files: list[dict[str, Any]]) -> list[Message]: all_instances = [] for proto_file in files: for content in proto_file["content"]: if not content["message"]: continue message = content["message"] if message["visibility"] != "public": continue fields = Field.parse_all_from(message["fields"]) all_instances.append( cls(message["full_path"], message["name"], message["description"], fields) ) if message["messages"]: all_instances.extend(cls.parse_all_from_list(message["messages"])) return all_instances class Method: """An RPC method within a service, containing request and response messages.""" def __init__( self, full_path: list[str], name: str, description: str, path: str, method: str, request: list[str], response: list[str], title: str | None, api_version: str | None = None, ) -> None: self.id = _gen_id(full_path) self.name = name self.description = description self.path = path self.method = method self.request = _gen_id(request) self.response = _gen_id(response) self.request_message: Message | None = None self.response_message: Message | None = None self.api_version: str | None = api_version self.title = title @classmethod def parse_all_from(cls, method_list: list[dict[str, Any]]) -> list[Method]: all_instances = [] for m in method_list: rpc_options = m["rpc_options"] if rpc_options["visibility"] != "public": continue since_major = rpc_options.get("since_major") since_minor = rpc_options.get("since_minor") method_api_version = None if since_major is not None and since_minor is not None: method_api_version = f"{since_major}.{since_minor}" all_instances.append( cls( full_path=m["full_path"], name=m["name"], description=m["description"], path=rpc_options["path"], method=rpc_options["method"], request=m["request_full_path"], response=m["response_full_path"], title=rpc_options.get("rpc_doc_title"), api_version=method_api_version, ) ) return all_instances def __repr__(self) -> str: reqm = "HasMsg" if self.request_message else "NoMsg" resm = "HasMsg" if self.response_message else "NoMsg" return f"{self.name}, {self.request} ({reqm}) -> {self.response} ({resm})" def to_rst(self) -> str: if not self.api_version: raise ValueError("API version must be set before generating RST") prepped_title = self.title or " ".join(re.split(r"\W+", self.path)[2:]).title().lstrip() title = _gen_h1(self.id, prepped_title) tbl = Texttable(max_width=200) tbl.add_rows( [ ["Endpoint", "HTTP Method"], [f"``{self.api_version}{self.path}``", f"``{self.method}``"], ] ) parameters = tbl.draw() body = f""" {parameters} {self.description} """ ret_value = _gen_break() + title + body + "\n\n" if self.request_message: ret_value += self.request_message.to_rst() if self.response_message: ret_value += self.response_message.to_rst() return ret_value class Service: """A protobuf service containing RPC methods.""" def __init__( self, full_path: list[str], name: str, description: str, methods: list[Method] ) -> None: self.id = _gen_id(full_path) self.name = name self.description = description self.methods = methods @classmethod def parse_all_from(cls, files: list[dict[str, Any]]) -> list[Service]: all_instances = [] for proto_file in files: for content in proto_file["content"]: if not content["service"]: continue service = content["service"] methods = Method.parse_all_from(service["methods"]) all_instances.append( cls(service["full_path"], service["name"], service["description"], methods) ) return all_instances def __repr__(self) -> str: method_strs = "\n ".join(str(m) for m in self.methods) return f"{self.name}\n Methods: {method_strs}" def to_rst(self, method_order: list[str] | None = None) -> str: sorted_methods = sorted(self.methods, key=lambda x: x.name) if method_order: method_map = {name: idx for idx, name in enumerate(method_order)} sorted_methods = sorted( self.methods, key=lambda x: method_map.get(x.request, len(method_order)) ) return "".join(method.to_rst() for method in sorted_methods) class API: """Main API class for generating REST API documentation.""" def __init__( self, name: str, description: str, api_version: str, dst_path: Path, valid_proto_files: list[str], ) -> None: self.name = name self.description = description self.dst_path = dst_path self.valid_proto_files = valid_proto_files self.services: list[Service] | None = None self.messages: list[Message] | None = None self.enums: list[ProtoEnum] | None = None self.api_version = api_version def __repr__(self) -> str: return self.name def _file_filter(self, proto_file: dict[str, Any]) -> bool: return proto_file["filename"] in self.valid_proto_files def _validate_proto_list(self, proto_list: list[dict[str, Any]], context: str) -> None: if not proto_list or len(proto_list) != len(self.valid_proto_files): _logger.error("Length mismatch. This is likely due to a name error") for f in self.valid_proto_files: _logger.error(f"Valid Proto File: {f}") for f in proto_list: _logger.error(f"Actual Proto File: {f}") _logger.error("Maybe someone changed the name/location of a proto file?") raise ValueError(f"Proto file mismatch in {context}") def set_services( self, proto_file_list: list[dict[str, Any]], service_order: list[str] | None = None ) -> None: _logger.debug("Starting service generation") _logger.debug(f"Starts with total of: {len(proto_file_list)}") services_proto_list = [f for f in proto_file_list if self._file_filter(f)] self._validate_proto_list(services_proto_list, "set_services") services = Service.parse_all_from(services_proto_list) if service_order: order_map = {name: idx for idx, name in enumerate(service_order)} default_order = len(service_order) services = sorted( services, key=lambda x: (order_map.get(x.name, default_order), x.name) ) else: services = sorted(services, key=lambda x: x.name) self.services = services _logger.debug("Completed service generation") def set_messages(self, proto_file_list: list[dict[str, Any]]) -> None: _logger.debug("Starting message generation") _logger.debug(f"Starts with total of: {len(proto_file_list)}") messages_proto_list = [f for f in proto_file_list if self._file_filter(f)] self._validate_proto_list(messages_proto_list, "set_messages") self.messages = sorted(Message.parse_all_from(messages_proto_list), key=lambda x: x.name) _logger.debug("Completed message generation") def set_enums(self, proto_file_list: list[dict[str, Any]]) -> None: _logger.debug("Starting enum generation") _logger.debug(f"Starts with total of: {len(proto_file_list)}") enums_proto_list = [f for f in proto_file_list if self._file_filter(f)] self._validate_proto_list(enums_proto_list, "set_enums") self.enums = sorted(ProtoEnum.parse_all_from(enums_proto_list), key=lambda x: x.name) _logger.debug("Completed enum generation") def connect_methods_messages(self) -> None: """Connect request/response messages to their corresponding methods.""" for service in self.services: for method in service.methods: request_set = False response_set = False for message in self.messages: if message.id == method.request and not request_set: method.request_message = message message.type = MsgType.REQUEST request_set = True _logger.debug(f"Set Request Message for {method}") elif message.id == method.response and not response_set: method.response_message = message response_set = True message.type = MsgType.RESPONSE _logger.debug(f"Set Response Message for {method}") if not request_set: _logger.warning(f"Request not set {method} for {self}") if not response_set: _logger.warning(f"Response not set {method} for {self}") # Use per-method api_version from proto "since" when set, else API default if method.api_version is None: method.api_version = self.api_version def set_all( self, proto_file_list: list[dict[str, Any]], service_order: list[str] | None = None ) -> None: _logger.info(f"Setting Services for {self.name}") self.set_services(proto_file_list, service_order) _logger.info(f"Finished Setting Services for {self.name}") _logger.info(f"Setting Messages for {self.name}") self.set_messages(proto_file_list) _logger.info(f"Finished Setting Messages for {self.name}") _logger.info(f"Setting Enums for {self.name}") self.set_enums(proto_file_list) _logger.info(f"Finished Setting Enums for {self.name}") _logger.info(f"Connecting Messages -> Services for {self.name}") self.connect_methods_messages() _logger.info(f"Finished Connecting Messages -> Services under {self.name} API") def write_rst(self, method_order: list[str] | None = None) -> None: if self.services is None or self.messages is None or self.enums is None: raise ValueError("Must call set_all() before write_rst()") if not self.services or not self.messages: _logger.error( f"Services: {len(self.services)} Messages: {len(self.messages)} " f"Enums: {len(self.enums)}" ) raise ValueError("No services or messages found - check doc_public.json") services_rst = [s.to_rst(method_order) for s in self.services] enums_rst = [s.to_rst() for s in self.enums] generic_messages_rst = [s.to_rst() for s in self.messages if s.type == MsgType.GENERIC] with self.dst_path.open("w") as f: f.write(_gen_page_title(f"{self.name} API")) f.write(self.description) f.write("\n.. contents:: Table of Contents\n :local:\n :depth: 1") f.write("".join(services_rst)) f.write(_gen_h1(self.name + "add", "Data Structures")) f.write("".join(generic_messages_rst)) f.write("".join(enums_rst)) f.write("\n") # Valid MLflow message names for documentation ordering VALID_MLFLOW_MESSAGES = [ # ===== Experiments ===== "mlflowCreateExperiment", "mlflowSearchExperiments", "mlflowGetExperiment", "mlflowGetExperimentByName", "mlflowDeleteExperiment", "mlflowRestoreExperiment", "mlflowUpdateExperiment", "mlflowSetExperimentTag", "mlflowDeleteExperimentTag", # ===== Runs ===== "mlflowCreateRun", "mlflowUpdateRun", "mlflowDeleteRun", "mlflowRestoreRun", "mlflowGetRun", "mlflowSearchRuns", "mlflowLogMetric", "mlflowLogParam", "mlflowLogBatch", "mlflowLogModel", "mlflowLogInputs", "mlflowLogOutputs", "mlflowSetTag", "mlflowDeleteTag", "mlflowGetMetricHistory", "mlflowGetMetricHistoryBulkInterval", "mlflowListArtifacts", # ===== Model Registry ===== "mlflowCreateRegisteredModel", "mlflowGetRegisteredModel", "mlflowRenameRegisteredModel", "mlflowUpdateRegisteredModel", "mlflowDeleteRegisteredModel", "mlflowSearchRegisteredModels", "mlflowGetLatestVersions", "mlflowCreateModelVersion", "mlflowGetModelVersion", "mlflowUpdateModelVersion", "mlflowDeleteModelVersion", "mlflowSearchModelVersions", "mlflowGetModelVersionDownloadUri", "mlflowTransitionModelVersionStage", "mlflowSetRegisteredModelTag", "mlflowSetModelVersionTag", "mlflowDeleteRegisteredModelTag", "mlflowDeleteModelVersionTag", "mlflowSetRegisteredModelAlias", "mlflowDeleteRegisteredModelAlias", "mlflowGetModelVersionByAlias", # ===== Traces ===== "mlflowStartTrace", "mlflowEndTrace", "mlflowGetTraceInfo", "mlflowGetTraceInfoV3", "mlflowBatchGetTraces", "mlflowGetTrace", "mlflowSearchTraces", "mlflowSearchTracesV3", "mlflowSearchUnifiedTraces", "mlflowGetOnlineTraceDetails", "mlflowDeleteTraces", "mlflowDeleteTracesV3", "mlflowSetTraceTag", "mlflowSetTraceTagV3", "mlflowDeleteTraceTag", "mlflowDeleteTraceTagV3", "mlflowStartTraceV3", "mlflowLinkTracesToRun", "mlflowLinkPromptsToTrace", "mlflowCalculateTraceFilterCorrelation", "mlflowQueryTraceMetrics", # ===== Assessments ===== "mlflowCreateAssessment", "mlflowUpdateAssessment", "mlflowDeleteAssessment", "mlflowGetAssessmentRequest", # ===== Datasets ===== "mlflowSearchDatasets", "mlflowCreateDataset", "mlflowGetDataset", "mlflowDeleteDataset", "mlflowSearchEvaluationDatasets", "mlflowSetDatasetTags", "mlflowDeleteDatasetTag", "mlflowUpsertDatasetRecords", "mlflowGetDatasetExperimentIds", "mlflowGetDatasetRecords", "mlflowAddDatasetToExperiments", "mlflowRemoveDatasetFromExperiments", # ===== Logged Models ===== "mlflowCreateLoggedModel", "mlflowFinalizeLoggedModel", "mlflowGetLoggedModel", "mlflowDeleteLoggedModel", "mlflowSearchLoggedModels", "mlflowSetLoggedModelTags", "mlflowDeleteLoggedModelTag", "mlflowListLoggedModelArtifacts", "mlflowLogLoggedModelParamsRequest", # ===== Scorers ===== "mlflowRegisterScorer", "mlflowListScorers", "mlflowListScorerVersions", "mlflowGetScorer", "mlflowDeleteScorer", # ===== Gateway ===== "mlflowCreateGatewaySecret", "mlflowGetGatewaySecretInfo", "mlflowUpdateGatewaySecret", "mlflowDeleteGatewaySecret", "mlflowListGatewaySecretInfos", "mlflowCreateGatewayModelDefinition", "mlflowGetGatewayModelDefinition", "mlflowListGatewayModelDefinitions", "mlflowUpdateGatewayModelDefinition", "mlflowDeleteGatewayModelDefinition", "mlflowCreateGatewayEndpoint", "mlflowGetGatewayEndpoint", "mlflowUpdateGatewayEndpoint", "mlflowDeleteGatewayEndpoint", "mlflowListGatewayEndpoints", "mlflowAttachModelToGatewayEndpoint", "mlflowDetachModelFromGatewayEndpoint", "mlflowCreateGatewayEndpointBinding", "mlflowDeleteGatewayEndpointBinding", "mlflowListGatewayEndpointBindings", "mlflowSetGatewayEndpointTag", "mlflowDeleteGatewayEndpointTag", "mlflowGetSecretsConfig", # ===== Prompt Optimization ===== "mlflowCreatePromptOptimizationJob", "mlflowGetPromptOptimizationJob", "mlflowSearchPromptOptimizationJobs", "mlflowCancelPromptOptimizationJob", "mlflowDeletePromptOptimizationJob", # ===== Webhooks ===== "mlflowCreateWebhook", "mlflowListWebhooks", "mlflowGetWebhook", "mlflowUpdateWebhook", "mlflowDeleteWebhook", "mlflowTestWebhook", # ===== Artifacts (mlflow.artifacts package) ===== "mlflowartifactsDownloadArtifact", "mlflowartifactsUploadArtifact", "mlflowartifactsListArtifacts", "mlflowartifactsDeleteArtifact", "mlflowartifactsCreateMultipartUpload", "mlflowartifactsCompleteMultipartUpload", "mlflowartifactsAbortMultipartUpload", # ===== Data Types ===== "mlflowExperiment", "mlflowRun", "mlflowRunInfo", "mlflowRunTag", "mlflowExperimentTag", "mlflowRunData", "mlflowRunInputs", "mlflowRunOutputs", "mlflowMetric", "mlflowParam", "mlflowFileInfo", "mlflowDatasetInput", "mlflowDataset", "mlflowInputTag", "mlflowModelInput", "mlflowModelOutput", "mlflowRegisteredModel", "mlflowModelVersion", "mlflowRegisteredModelTag", "mlflowModelVersionTag", "mlflowRegisteredModelAlias", "mlflowModelParam", "mlflowModelMetric", "mlflowDeploymentJobConnection", "mlflowModelVersionDeploymentJobState", "mlflowTraceInfo", "mlflowTraceInfoV3", "mlflowTrace", "mlflowTraceLocation", "mlflowTraceRequestMetadata", "mlflowTraceTag", "mlflowMetricAggregation", "mlflowMetricDataPoint", "mlflowDatasetSummary", "mlflowLoggedModel", "mlflowLoggedModelInfo", "mlflowLoggedModelTag", "mlflowLoggedModelRegistrationInfo", "mlflowLoggedModelData", "mlflowLoggedModelParameter", "mlflowScorer", "mlflowGatewaySecretInfo", "mlflowGatewayModelDefinition", "mlflowGatewayEndpointModelMapping", "mlflowGatewayEndpoint", "mlflowGatewayEndpointTag", "mlflowGatewayEndpointBinding", "mlflowFallbackConfig", "mlflowGatewayEndpointModelConfig", "mlflowAssessmentSource", "mlflowAssessmentError", "mlflowExpectation", "mlflowFeedback", "mlflowAssessment", "mlflowWebhookEvent", "mlflowWebhook", "mlflowWebhookTestResult", "mlflowJobState", "mlflowPromptOptimizationJobTag", "mlflowPromptOptimizationJobConfig", "mlflowPromptOptimizationJob", "mlflowartifactsFileInfo", "mlflowartifactsMultipartUploadCredential", "mlflowartifactsMultipartUploadPart", "mlflowMetricWithRunId", ] MLFLOW_PROTOS = [ "service.proto", "model_registry.proto", "webhooks.proto", "mlflow_artifacts.proto", "assessments.proto", "datasets.proto", "jobs.proto", "prompt_optimization.proto", ] # Order of services in documentation (services not listed will be sorted alphabetically at the end) SERVICE_ORDER = [ "MlflowService", "ModelRegistryService", "WebhookService", "MlflowArtifactsService", ] MLFLOW_DESCRIPTION = dedent(""" The MLflow REST API allows you to create, list, and get experiments and runs, and log parameters, metrics, and artifacts. The API is hosted under the ``/api`` route on the MLflow tracking server. For example, to search for experiments on a tracking server hosted at ``http://localhost:5000``, make a POST request to ``http://localhost:5000/api/2.0/mlflow/experiments/search``. .. important:: The MLflow REST API requires content type ``application/json`` for all POST requests. """) def main() -> None: logging.basicConfig(format="%(levelname)s:%(lineno)d:%(message)s", level=logging.INFO) src = Path("mlflow/protos/protos.json") dst = Path("docs/api_reference/source/rest-api.rst") api_version = "2.0" _logger.info(f"API VERSION: {api_version}") _logger.info(f"Reading Source: {src}") with src.open() as f: docjson = json.load(f) _validate_doc_public_json(docjson) proto_files = docjson["files"] mlflow_api = API( name="REST", description=MLFLOW_DESCRIPTION, api_version=api_version, dst_path=dst, valid_proto_files=MLFLOW_PROTOS, ) mlflow_api.set_all(proto_files, SERVICE_ORDER) mlflow_api.write_rst(VALID_MLFLOW_MESSAGES) if __name__ == "__main__": main()
{ "repo_id": "mlflow/mlflow", "file_path": "dev/gen_rest_api.py", "license": "Apache License 2.0", "lines": 755, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:dev/proto_plugin.py
import json import sys import textwrap from dataclasses import asdict, dataclass from dataclasses import field as dataclass_field from enum import Enum from google.protobuf import descriptor_pb2 from google.protobuf.compiler import plugin_pb2 from mlflow.protos import databricks_pb2 class Visibility(Enum): PUBLIC = "public" INTERNAL = "internal" PUBLIC_UNDOCUMENTED = "public_undocumented" PUBLIC_UNDOCUMENTED_READ_ONLY = "public_undocumented_read_only" @dataclass class ProtoMessageField: description: str field_name: str field_default: str | None entity_type: str field_type: str full_path: list[str] visibility: str since: str deprecated: bool repeated: bool validate_required: bool oneof: list["ProtoMessageField"] = dataclass_field(default_factory=list) @dataclass class ProtoMessage: name: str full_path: list[str] description: str visibility: str fields: list[ProtoMessageField] enums: list["ProtoEnum"] messages: list["ProtoMessage"] @dataclass class ProtoEnumValue: value: str full_path: list[str] visibility: str description: str @dataclass class ProtoEnum: name: str description: str full_path: list[str] values: list[ProtoEnumValue] visibility: str @dataclass class DatabricksRpcOptionsDescription: path: str | None = None method: str | None = None visibility: str = "internal" since_major: int | None = None since_minor: int | None = None error_codes: list[int] | None = None rpc_doc_title: str = "" def __post_init__(self): if self.error_codes is None: self.error_codes = [] @dataclass class ProtoServiceMethod: name: str full_path: list[str] request_full_path: list[str] response_full_path: list[str] description: str rpc_options: DatabricksRpcOptionsDescription | None @dataclass class ProtoService: name: str full_path: list[str] description: str visibility: str methods: list[ProtoServiceMethod] @dataclass class ProtoTopComment: content: str visibility: str @dataclass class ProtoFileElement: comment: ProtoTopComment | None = None enum: ProtoEnum | None = None message: ProtoMessage | None = None service: ProtoService | None = None @dataclass class ProtoFile: filename: str requested_visibility: str content: list[ProtoFileElement] @dataclass class ProtoAllContent: requested_visibility: str files: list[ProtoFile] class ProtobufDocGenerator: def get_field_type_name(self, field: descriptor_pb2.FieldDescriptorProto) -> str: type_names = { descriptor_pb2.FieldDescriptorProto.TYPE_DOUBLE: "DOUBLE", descriptor_pb2.FieldDescriptorProto.TYPE_FLOAT: "FLOAT", descriptor_pb2.FieldDescriptorProto.TYPE_INT64: "INT64", descriptor_pb2.FieldDescriptorProto.TYPE_UINT64: "UINT64", descriptor_pb2.FieldDescriptorProto.TYPE_INT32: "INT32", descriptor_pb2.FieldDescriptorProto.TYPE_FIXED64: "FIXED64", descriptor_pb2.FieldDescriptorProto.TYPE_FIXED32: "FIXED32", descriptor_pb2.FieldDescriptorProto.TYPE_BOOL: "BOOL", descriptor_pb2.FieldDescriptorProto.TYPE_STRING: "STRING", descriptor_pb2.FieldDescriptorProto.TYPE_BYTES: "BYTES", descriptor_pb2.FieldDescriptorProto.TYPE_UINT32: "UINT32", descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED32: "SFIXED32", descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED64: "SFIXED64", descriptor_pb2.FieldDescriptorProto.TYPE_SINT32: "SINT32", descriptor_pb2.FieldDescriptorProto.TYPE_SINT64: "SINT64", } if field.type in type_names: return type_names[field.type] elif field.type in ( descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE, descriptor_pb2.FieldDescriptorProto.TYPE_ENUM, ): # Remove leading dot if present type_name = field.type_name return type_name.removeprefix(".") else: return "unknown" def get_visibility(self, options) -> Visibility: # For now, return PUBLIC as default # In a real implementation, this would check custom options return Visibility.PUBLIC def get_validate_required(self, options) -> bool: """Extract validate_required value from field options. Checks if the field has the validate_required extension set. """ if options.HasExtension(databricks_pb2.validate_required): return options.Extensions[databricks_pb2.validate_required] return False def extract_rpc_options(self, options) -> DatabricksRpcOptionsDescription | None: """Extract RPC options from method options. This requires databricks_pb2 to be available for full extraction. When running as a protoc plugin, the extension data is present in the file descriptors but needs the compiled extension definitions to parse. """ # Check if the method has the Databricks RPC extension if options.HasExtension(databricks_pb2.rpc): # Extract the RPC options rpc_ext = options.Extensions[databricks_pb2.rpc] # Extract endpoint information path = None method = None since_major = None since_minor = None if rpc_ext.endpoints: # Use the first endpoint for now endpoint = rpc_ext.endpoints[0] path = endpoint.path if endpoint.HasField("path") else None method = endpoint.method if endpoint.HasField("method") else None if endpoint.HasField("since") and endpoint.since: since_major = endpoint.since.major if endpoint.since.HasField("major") else None since_minor = endpoint.since.minor if endpoint.since.HasField("minor") else None # Extract visibility visibility = "internal" # default if rpc_ext.HasField("visibility"): visibility_enum = rpc_ext.visibility # Map the enum value to string if visibility_enum == databricks_pb2.PUBLIC: visibility = "public" elif visibility_enum == databricks_pb2.INTERNAL: visibility = "internal" elif visibility_enum == databricks_pb2.PUBLIC_UNDOCUMENTED: visibility = "public_undocumented" # Extract error codes error_codes = list(rpc_ext.error_codes) if rpc_ext.error_codes else [] # Extract RPC doc title rpc_doc_title = rpc_ext.rpc_doc_title if rpc_ext.HasField("rpc_doc_title") else "" return DatabricksRpcOptionsDescription( path=path, method=method, visibility=visibility, since_major=since_major, since_minor=since_minor, error_codes=error_codes, rpc_doc_title=rpc_doc_title, ) def get_full_path_for_file( self, file: descriptor_pb2.FileDescriptorProto, name: str ) -> list[str]: path_parts = [] if file.package: path_parts.extend(file.package.split(".")) path_parts.append(name) return path_parts def get_full_path_for_nested(self, parent_path: list[str], name: str) -> list[str]: return parent_path + [name] def get_documentation(self, source_location: descriptor_pb2.SourceCodeInfo.Location) -> str: if source_location and source_location.leading_comments: return textwrap.dedent(source_location.leading_comments).strip() return "" def find_source_location( self, source_info: descriptor_pb2.SourceCodeInfo, path: list[int] ) -> descriptor_pb2.SourceCodeInfo.Location | None: for location in source_info.location: if list(location.path) == path: return location return None def process_field( self, field: descriptor_pb2.FieldDescriptorProto, parent_path: list[str], field_index: int, source_info: descriptor_pb2.SourceCodeInfo, message_path: list[int], ) -> ProtoMessageField: # Build source location path for this field field_path = message_path + [2, field_index] # 2 = field in message location = self.find_source_location(source_info, field_path) field_type = self.get_field_type_name(field) default_value = None if field.HasField("default_value"): default_value = field.default_value return ProtoMessageField( description=self.get_documentation(location) if location else "", field_name=field.name, field_default=default_value, entity_type=str(field.type), field_type=field_type, full_path=self.get_full_path_for_nested(parent_path, field.name), visibility=self.get_visibility(field.options).value, since="", deprecated=field.options.deprecated if field.options.HasField("deprecated") else False, repeated=field.label == descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED, validate_required=self.get_validate_required(field.options), oneof=[], ) def process_enum_value( self, value: descriptor_pb2.EnumValueDescriptorProto, parent_path: list[str], value_index: int, source_info: descriptor_pb2.SourceCodeInfo, enum_path: list[int], ) -> ProtoEnumValue: # Build source location path for this enum value value_path = enum_path + [2, value_index] # 2 = value in enum location = self.find_source_location(source_info, value_path) return ProtoEnumValue( value=value.name, full_path=self.get_full_path_for_nested(parent_path, value.name), visibility=self.get_visibility(value.options).value, description=self.get_documentation(location) if location else "", ) def process_enum( self, enum: descriptor_pb2.EnumDescriptorProto, parent_path: list[str], enum_index: int, source_info: descriptor_pb2.SourceCodeInfo, parent_path_numbers: list[int], is_nested: bool = False, ) -> ProtoEnum: # Build source location path for this enum enum_path = parent_path_numbers + [4, enum_index] if is_nested else [5, enum_index] location = self.find_source_location(source_info, enum_path) full_path = self.get_full_path_for_nested(parent_path, enum.name) values = [] for i, value in enumerate(enum.value): values.append(self.process_enum_value(value, full_path, i, source_info, enum_path)) return ProtoEnum( name=enum.name, description=self.get_documentation(location) if location else "", full_path=full_path, values=values, visibility=self.get_visibility(enum.options).value, ) def process_message( self, msg: descriptor_pb2.DescriptorProto, parent_path: list[str], msg_index: int, source_info: descriptor_pb2.SourceCodeInfo, parent_path_numbers: list[int] | None = None, is_nested: bool = False, ) -> ProtoMessage: # Build source location path for this message message_path = parent_path_numbers + [3, msg_index] if is_nested else [4, msg_index] location = self.find_source_location(source_info, message_path) full_path = self.get_full_path_for_nested(parent_path, msg.name) fields = [] # Process regular fields for i, proto_field in enumerate(msg.field): if not proto_field.HasField("oneof_index"): # Skip oneof fields for now fields.append( self.process_field(proto_field, full_path, i, source_info, message_path) ) # Process oneofs for oneof_index, oneof in enumerate(msg.oneof_decl): oneof_fields = [] for i, proto_field in enumerate(msg.field): if proto_field.HasField("oneof_index") and proto_field.oneof_index == oneof_index: oneof_fields.append( self.process_field(proto_field, full_path, i, source_info, message_path) ) if oneof_fields: oneof_field = ProtoMessageField( description="", field_name=oneof.name, field_default=None, entity_type="oneof", field_type="oneof", full_path=self.get_full_path_for_nested(full_path, oneof.name), visibility=self.get_visibility(oneof.options).value, since="", deprecated=False, repeated=False, validate_required=False, oneof=oneof_fields, ) fields.append(oneof_field) # Process nested enums enums = [] for i, enum in enumerate(msg.enum_type): enums.append( self.process_enum(enum, full_path, i, source_info, message_path, is_nested=True) ) # Process nested messages messages = [] for i, nested in enumerate(msg.nested_type): messages.append( self.process_message( nested, full_path, i, source_info, message_path, is_nested=True ) ) return ProtoMessage( name=msg.name, full_path=full_path, description=self.get_documentation(location) if location else "", visibility=self.get_visibility(msg.options).value, fields=fields, enums=enums, messages=messages, ) def process_method( self, method: descriptor_pb2.MethodDescriptorProto, parent_path: list[str], method_index: int, source_info: descriptor_pb2.SourceCodeInfo, service_path: list[int], ) -> ProtoServiceMethod: # Build source location path for this method method_path = service_path + [2, method_index] # 2 = method in service location = self.find_source_location(source_info, method_path) # Remove leading dots from type names input_type = method.input_type input_type = input_type.removeprefix(".") output_type = method.output_type output_type = output_type.removeprefix(".") input_path = input_type.split(".") output_path = output_type.split(".") # Extract RPC options from custom extensions rpc_options = self.extract_rpc_options(method.options) return ProtoServiceMethod( name=method.name, full_path=self.get_full_path_for_nested(parent_path, method.name), request_full_path=input_path, response_full_path=output_path, description=self.get_documentation(location) if location else "", rpc_options=rpc_options, ) def process_service( self, service: descriptor_pb2.ServiceDescriptorProto, parent_path: list[str], service_index: int, source_info: descriptor_pb2.SourceCodeInfo, ) -> ProtoService: # Build source location path for this service service_path = [6, service_index] # 6 = service at file level location = self.find_source_location(source_info, service_path) full_path = self.get_full_path_for_nested(parent_path, service.name) methods = [] for i, method in enumerate(service.method): methods.append(self.process_method(method, full_path, i, source_info, service_path)) return ProtoService( name=service.name, full_path=full_path, description=self.get_documentation(location) if location else "", visibility=self.get_visibility(service.options).value, methods=methods, ) def process_file( self, file: descriptor_pb2.FileDescriptorProto, requested_vis: Visibility ) -> ProtoFile: elements = [] # Base path from package base_path = file.package.split(".") if file.package else [] # Get source code info for documentation source_info = file.source_code_info if file.HasField("source_code_info") else None if not source_info: source_info = descriptor_pb2.SourceCodeInfo() # Process top-level messages for i, msg in enumerate(file.message_type): elements.append( ProtoFileElement(message=self.process_message(msg, base_path, i, source_info)) ) # Process top-level enums for i, enum in enumerate(file.enum_type): elements.append( ProtoFileElement( enum=self.process_enum(enum, base_path, i, source_info, [], is_nested=False) ) ) # Process services for i, service in enumerate(file.service): elements.append( ProtoFileElement(service=self.process_service(service, base_path, i, source_info)) ) return ProtoFile( filename=file.name, requested_visibility=requested_vis.value, content=elements ) class ProtocPlugin: """Protoc plugin implementation.""" def __init__(self): self.generator = ProtobufDocGenerator() def process_request( self, request: plugin_pb2.CodeGeneratorRequest ) -> plugin_pb2.CodeGeneratorResponse: response = plugin_pb2.CodeGeneratorResponse() files = [] # Process each file that was requested to be generated for file_name in request.file_to_generate: # Find the file descriptor file_descriptor = None for proto_file in request.proto_file: if proto_file.name == file_name: file_descriptor = proto_file break if file_descriptor: # Process the file proto_file = self.generator.process_file(file_descriptor, Visibility.PUBLIC) files.append(proto_file) # Generate documentation doc_content = ProtoAllContent(requested_visibility=Visibility.PUBLIC.value, files=files) # Generate doc_public.json doc_file = response.file.add() doc_file.name = "protos.json" doc_file.content = json.dumps(asdict(doc_content), indent=2) return response def main(): # Protoc plugin mode # Read CodeGeneratorRequest from stdin data = sys.stdin.buffer.read() request = plugin_pb2.CodeGeneratorRequest.FromString(data) # Process request plugin = ProtocPlugin() response = plugin.process_request(request) # Write response to stdout sys.stdout.buffer.write(response.SerializeToString()) if __name__ == "__main__": main()
{ "repo_id": "mlflow/mlflow", "file_path": "dev/proto_plugin.py", "license": "Apache License 2.0", "lines": 460, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/scorers/guardrails/registry.py
from __future__ import annotations from mlflow.exceptions import MlflowException _SUPPORTED_VALIDATORS = [ "ToxicLanguage", "NSFWText", "DetectJailbreak", "DetectPII", "SecretsPresent", "GibberishText", ] def get_validator_class(validator_name: str): """ Get Guardrails AI validator class by name. For validators in the supported list, imports from guardrails.hub. For unknown validators, attempts to dynamically import from guardrails.hub.<ValidatorName>. Args: validator_name: Name of the validator (e.g., "ToxicLanguage", "DetectPII") Returns: The Guardrails AI validator class Raises: MlflowException: If the validator cannot be imported or guardrails is not installed """ from guardrails import hub try: return getattr(hub, validator_name) except AttributeError: available = ", ".join(sorted(_SUPPORTED_VALIDATORS)) raise MlflowException.invalid_parameter_value( f"Unknown Guardrails AI validator: '{validator_name}'. Could not find " f"'{validator_name}' in 'guardrails.hub'. " f"Available pre-configured validators: {available}" )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/guardrails/registry.py", "license": "Apache License 2.0", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/scorers/guardrails/utils.py
from __future__ import annotations from typing import Any from mlflow.entities.trace import Trace from mlflow.exceptions import MlflowException from mlflow.genai.utils.trace_utils import ( parse_inputs_to_str, parse_outputs_to_str, resolve_inputs_from_trace, resolve_outputs_from_trace, ) def check_guardrails_installed(): try: import guardrails # noqa: F401 except ImportError: raise MlflowException.invalid_parameter_value( "Guardrails AI scorers require the `guardrails-ai` package. " "Install it with: `pip install guardrails-ai`" ) def map_scorer_inputs_to_text( inputs: Any = None, outputs: Any = None, trace: Trace | None = None, ) -> str: """ Convert MLflow scorer inputs to text for Guardrails AI validation. Guardrails AI validators operate on text strings. This function extracts and converts the relevant text from MLflow's scorer interface. Args: inputs: The input to evaluate outputs: The output to evaluate (primary target for validation) trace: MLflow trace for evaluation Returns: Text string to validate """ if trace: inputs = resolve_inputs_from_trace(inputs, trace) outputs = resolve_outputs_from_trace(outputs, trace) # Guardrails validators primarily validate outputs (LLM responses) # Fall back to inputs if outputs not provided if outputs is not None: return parse_outputs_to_str(outputs) elif inputs is not None: return parse_inputs_to_str(inputs) else: raise MlflowException.invalid_parameter_value( "Guardrails AI scorers require either 'outputs' or 'inputs' to validate." )
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/guardrails/utils.py", "license": "Apache License 2.0", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/genai/scorers/guardrails/test_guardrails.py
from unittest.mock import patch import guardrails import pytest from guardrails import Validator, register_validator from guardrails.classes.validation.validation_result import FailResult, PassResult import mlflow from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType from mlflow.genai.judges.utils import CategoricalRating from mlflow.genai.scorers.guardrails import ( DetectJailbreak, DetectPII, GibberishText, NSFWText, SecretsPresent, ToxicLanguage, get_scorer, ) @register_validator(name="test/mock_validator", data_type="string") class MockValidator(Validator): def __init__(self, **kwargs): super().__init__(**kwargs) def validate(self, value, metadata=None): if "toxic" in str(value).lower() or "bad" in str(value).lower(): return FailResult(error_message="Content flagged as inappropriate") return PassResult() @pytest.fixture def mock_validator_class(): return MockValidator @pytest.mark.parametrize( ("scorer_class", "validator_name"), [ (ToxicLanguage, "ToxicLanguage"), (NSFWText, "NSFWText"), (DetectJailbreak, "DetectJailbreak"), (DetectPII, "DetectPII"), (SecretsPresent, "SecretsPresent"), (GibberishText, "GibberishText"), ], ) def test_guardrails_scorer_pass(mock_validator_class, scorer_class, validator_name): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = scorer_class() assert isinstance(scorer._guard, guardrails.Guard) result = scorer(outputs="This is clean text.") assert isinstance(result, Feedback) assert result.name == validator_name assert result.value == CategoricalRating.YES assert result.rationale is None assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id=f"guardrails/{validator_name}", ) assert result.metadata == {"mlflow.scorer.framework": "guardrails-ai"} @pytest.mark.parametrize( ("scorer_class", "validator_name"), [ (ToxicLanguage, "ToxicLanguage"), (DetectPII, "DetectPII"), ], ) def test_guardrails_scorer_fail(mock_validator_class, scorer_class, validator_name): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = scorer_class() result = scorer(outputs="This is toxic bad content.") assert isinstance(result, Feedback) assert result.name == validator_name assert result.value == CategoricalRating.NO assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id=f"guardrails/{validator_name}", ) assert result.metadata == {"mlflow.scorer.framework": "guardrails-ai"} assert "Content flagged as inappropriate" in result.rationale def test_guardrails_get_scorer(mock_validator_class): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = get_scorer("ToxicLanguage", threshold=0.8) result = scorer(outputs="Clean text") assert isinstance(result, Feedback) assert result.name == "ToxicLanguage" assert result.value == CategoricalRating.YES assert result.rationale is None assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id="guardrails/ToxicLanguage", ) assert result.metadata == {"mlflow.scorer.framework": "guardrails-ai"} def test_guardrails_scorer_with_custom_kwargs(mock_validator_class): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = ToxicLanguage(threshold=0.9, validation_method="full") result = scorer(outputs="Test text") assert isinstance(result, Feedback) assert result.name == "ToxicLanguage" assert result.value == CategoricalRating.YES assert result.rationale is None assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id="guardrails/ToxicLanguage", ) assert result.metadata == {"mlflow.scorer.framework": "guardrails-ai"} @pytest.mark.parametrize( ("inputs", "outputs", "expected_value"), [ ("Input text", "Output text", CategoricalRating.YES), ("Input only", None, CategoricalRating.YES), ("toxic input", "clean output", CategoricalRating.YES), # outputs takes priority ("clean input", "toxic output", CategoricalRating.NO), # outputs takes priority ], ) def test_guardrails_scorer_input_priority(mock_validator_class, inputs, outputs, expected_value): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = ToxicLanguage() result = scorer(inputs=inputs, outputs=outputs) assert result.value == expected_value def test_guardrails_scorer_error_handling(): @register_validator(name="test/error_validator", data_type="string") class ErrorValidator(Validator): def validate(self, value, metadata=None): raise RuntimeError("Validation failed") with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=ErrorValidator, ): scorer = ToxicLanguage() result = scorer(outputs="Some text") assert isinstance(result, Feedback) assert result.name == "ToxicLanguage" assert result.error is not None assert "Validation failed" in str(result.error) assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id="guardrails/ToxicLanguage", ) assert result.metadata == {"mlflow.scorer.framework": "guardrails-ai"} def test_guardrails_scorer_source_id(mock_validator_class): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = ToxicLanguage() result = scorer(outputs="Test") assert isinstance(result, Feedback) assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id="guardrails/ToxicLanguage", ) def test_guardrails_scorer_guard_is_real_instance(mock_validator_class): with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = ToxicLanguage() assert isinstance(scorer._guard, guardrails.Guard) def _create_test_trace(inputs=None, outputs=None): """Create a test trace using mlflow.start_span().""" with mlflow.start_span() as span: if inputs is not None: span.set_inputs(inputs) if outputs is not None: span.set_outputs(outputs) return mlflow.get_trace(span.trace_id) def test_guardrails_scorer_with_trace(mock_validator_class): trace = _create_test_trace( inputs={"question": "What is MLflow?"}, outputs={"answer": "MLflow is a clean ML platform."}, ) with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = ToxicLanguage() result = scorer(trace=trace) assert isinstance(result, Feedback) assert result.name == "ToxicLanguage" assert result.value == CategoricalRating.YES assert result.source == AssessmentSource( source_type=AssessmentSourceType.CODE, source_id="guardrails/ToxicLanguage", ) assert result.metadata == {"mlflow.scorer.framework": "guardrails-ai"} def test_guardrails_scorer_with_trace_failure(mock_validator_class): trace = _create_test_trace( inputs={"question": "What is toxic?"}, outputs={"answer": "This is toxic content."}, ) with patch( "mlflow.genai.scorers.guardrails.get_validator_class", return_value=mock_validator_class, ): scorer = ToxicLanguage() result = scorer(trace=trace) assert isinstance(result, Feedback) assert result.name == "ToxicLanguage" assert result.value == CategoricalRating.NO assert "Content flagged as inappropriate" in result.rationale
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/guardrails/test_guardrails.py", "license": "Apache License 2.0", "lines": 210, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/guardrails/test_registry.py
from unittest.mock import MagicMock, patch import pytest from mlflow.exceptions import MlflowException from mlflow.genai.scorers.guardrails.registry import get_validator_class @pytest.mark.parametrize( ("validator_name", "expected_class"), [ ("ToxicLanguage", "ToxicLanguage"), ("NSFWText", "NSFWText"), ("DetectJailbreak", "DetectJailbreak"), ("DetectPII", "DetectPII"), ("SecretsPresent", "SecretsPresent"), ("GibberishText", "GibberishText"), ], ) def test_get_validator_class(validator_name, expected_class): with patch("guardrails.hub") as mock_hub: mock_hub.configure_mock(**{expected_class: f"{expected_class}Class"}) result = get_validator_class(validator_name) assert result == f"{expected_class}Class" def test_get_validator_class_unknown(): with patch("guardrails.hub", spec=[]): with pytest.raises(MlflowException, match="Unknown Guardrails AI validator"): get_validator_class("UnknownValidator") def test_get_validator_class_dynamic_import(): with patch("guardrails.hub") as mock_hub: mock_validator = MagicMock() mock_hub.NewValidator = mock_validator result = get_validator_class("NewValidator") assert result is mock_validator
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/guardrails/test_registry.py", "license": "Apache License 2.0", "lines": 30, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/guardrails/test_utils.py
import sys from unittest.mock import patch import pytest import mlflow from mlflow.exceptions import MlflowException from mlflow.genai.scorers.guardrails.utils import ( check_guardrails_installed, map_scorer_inputs_to_text, ) def _create_test_trace(inputs=None, outputs=None): """Create a test trace using mlflow.start_span().""" with mlflow.start_span() as span: if inputs is not None: span.set_inputs(inputs) if outputs is not None: span.set_outputs(outputs) return mlflow.get_trace(span.trace_id) def test_check_guardrails_installed_failure(): original_guardrails = sys.modules.get("guardrails") try: if "guardrails" in sys.modules: del sys.modules["guardrails"] with patch.dict(sys.modules, {"guardrails": None}): with pytest.raises(MlflowException, match="guardrails-ai"): check_guardrails_installed() finally: if original_guardrails is not None: sys.modules["guardrails"] = original_guardrails @pytest.mark.parametrize( ("inputs", "outputs", "expected"), [ ("input text", "output text", "output text"), ("input only", None, "input only"), ({"query": "test"}, None, "test"), ], ) def test_map_scorer_inputs_to_text(inputs, outputs, expected): result = map_scorer_inputs_to_text(inputs=inputs, outputs=outputs) assert expected in result def test_map_scorer_inputs_to_text_with_trace(): trace = _create_test_trace( inputs={"question": "What is MLflow?"}, outputs={"answer": "MLflow is an ML platform."}, ) result = map_scorer_inputs_to_text(trace=trace) assert "MLflow is an ML platform" in result def test_map_scorer_inputs_to_text_requires_input_or_output(): with pytest.raises(MlflowException, match="require either 'outputs' or 'inputs'"): map_scorer_inputs_to_text(inputs=None, outputs=None)
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/guardrails/test_utils.py", "license": "Apache License 2.0", "lines": 49, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/demo/generators/prompts.py
from __future__ import annotations import logging from mlflow.demo.base import ( DEMO_EXPERIMENT_NAME, DEMO_PROMPT_PREFIX, BaseDemoGenerator, DemoFeature, DemoResult, ) from mlflow.demo.data import DEMO_PROMPTS, DemoPromptDef from mlflow.genai.prompts import ( delete_prompt_alias, register_prompt, search_prompts, set_prompt_alias, ) from mlflow.tracking._tracking_service.utils import _get_store from mlflow.tracking.client import MlflowClient _logger = logging.getLogger(__name__) class PromptsDemoGenerator(BaseDemoGenerator): """Generates demo prompts showing version history and alias management. Creates: - 3 prompts: customer-support, document-summarizer, code-reviewer - Each with 3-4 versions showing prompt evolution - Version-specific aliases (baseline, improvements, production) """ name = DemoFeature.PROMPTS version = 1 def generate(self) -> DemoResult: import mlflow self._restore_experiment_if_deleted() mlflow.set_experiment(DEMO_EXPERIMENT_NAME) prompt_names = [] total_versions = 0 for prompt_def in DEMO_PROMPTS: versions_created = self._create_prompt_with_versions(prompt_def) prompt_names.append(prompt_def.name) total_versions += versions_created entity_ids = [ f"prompts:{len(prompt_names)}", f"versions:{total_versions}", ] return DemoResult( feature=self.name, entity_ids=entity_ids, navigation_url="#/prompts", ) def _create_prompt_with_versions(self, prompt_def: DemoPromptDef) -> int: for version_num, version_def in enumerate(prompt_def.versions, start=1): register_prompt( name=prompt_def.name, template=version_def.template, commit_message=version_def.commit_message, tags={"demo": "true"}, ) if version_def.aliases: set_prompt_alias( name=prompt_def.name, alias=version_def.aliases[0], version=version_num, ) return len(prompt_def.versions) def _data_exists(self) -> bool: try: prompts = search_prompts( filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'", max_results=1, ) return len(prompts) > 0 except Exception: _logger.debug("Failed to check if prompts demo exists", exc_info=True) return False def delete_demo(self) -> None: all_aliases = set() for prompt_def in DEMO_PROMPTS: for version_def in prompt_def.versions: all_aliases.update(version_def.aliases) try: prompts = search_prompts( filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'", max_results=100, ) client = MlflowClient() for prompt in prompts: try: for alias in all_aliases: try: delete_prompt_alias(name=prompt.name, alias=alias) except Exception: _logger.debug( "Failed to delete alias %s for prompt %s", alias, prompt.name, exc_info=True, ) client.delete_prompt(name=prompt.name) except Exception: _logger.debug("Failed to delete prompt %s", prompt.name, exc_info=True) except Exception: _logger.debug("Failed to delete demo prompts", exc_info=True) def _restore_experiment_if_deleted(self) -> None: store = _get_store() try: experiment = store.get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is not None and experiment.lifecycle_stage == "deleted": _logger.info("Restoring soft-deleted demo experiment") client = MlflowClient() client.restore_experiment(experiment.experiment_id) except Exception: _logger.debug("Failed to check/restore demo experiment", exc_info=True)
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/demo/generators/prompts.py", "license": "Apache License 2.0", "lines": 110, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/demo/test_prompts_generator.py
import pytest from mlflow.demo.base import DEMO_PROMPT_PREFIX, DemoFeature, DemoResult from mlflow.demo.data import DEMO_PROMPTS from mlflow.demo.generators.prompts import PromptsDemoGenerator from mlflow.genai.prompts import load_prompt, search_prompts @pytest.fixture def prompts_generator(): generator = PromptsDemoGenerator() original_version = generator.version yield generator PromptsDemoGenerator.version = original_version def test_generator_attributes(): generator = PromptsDemoGenerator() assert generator.name == DemoFeature.PROMPTS assert generator.version == 1 def test_data_exists_false_when_no_prompts(): generator = PromptsDemoGenerator() assert generator._data_exists() is False def test_generate_creates_prompts(): generator = PromptsDemoGenerator() result = generator.generate() assert isinstance(result, DemoResult) assert result.feature == DemoFeature.PROMPTS assert any("prompts:" in e for e in result.entity_ids) assert any("versions:" in e for e in result.entity_ids) def test_generate_creates_expected_prompts(): generator = PromptsDemoGenerator() generator.generate() prompts = search_prompts( filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'", max_results=100, ) assert len(prompts) == len(DEMO_PROMPTS) prompt_names = {p.name for p in prompts} expected_names = {prompt_def.name for prompt_def in DEMO_PROMPTS} assert prompt_names == expected_names def test_prompts_have_multiple_versions(): generator = PromptsDemoGenerator() generator.generate() for prompt_def in DEMO_PROMPTS: expected_versions = len(prompt_def.versions) prompt = load_prompt(prompt_def.name, version=expected_versions) assert prompt is not None assert prompt.version == expected_versions def test_prompts_have_version_aliases(): generator = PromptsDemoGenerator() generator.generate() for prompt_def in DEMO_PROMPTS: for version_num, version_def in enumerate(prompt_def.versions, start=1): if version_def.aliases: prompt = load_prompt(f"prompts:/{prompt_def.name}@{version_def.aliases[0]}") assert prompt.version == version_num def test_data_exists_true_after_generate(): generator = PromptsDemoGenerator() assert generator._data_exists() is False generator.generate() assert generator._data_exists() is True def test_delete_demo_removes_prompts(): generator = PromptsDemoGenerator() generator.generate() assert generator._data_exists() is True generator.delete_demo() assert generator._data_exists() is False def test_prompts_have_demo_tag(): generator = PromptsDemoGenerator() generator.generate() for prompt_def in DEMO_PROMPTS: prompt = load_prompt(prompt_def.name, version=1) assert prompt.tags.get("demo") == "true" def test_is_generated_checks_version(prompts_generator): prompts_generator.generate() prompts_generator.store_version() assert prompts_generator.is_generated() is True PromptsDemoGenerator.version = 99 fresh_generator = PromptsDemoGenerator() assert fresh_generator.is_generated() is False def test_prompt_templates_are_valid(): generator = PromptsDemoGenerator() generator.generate() for prompt_def in DEMO_PROMPTS: latest_version = len(prompt_def.versions) prompt = load_prompt(prompt_def.name, version=latest_version) assert prompt.template is not None if isinstance(prompt.template, str): assert len(prompt.template) > 0 else: assert len(prompt.template) > 0 for msg in prompt.template: assert "role" in msg assert "content" in msg def test_demo_prompt_definitions(): assert len(DEMO_PROMPTS) == 3 for prompt_def in DEMO_PROMPTS: assert prompt_def.name.startswith(DEMO_PROMPT_PREFIX) assert len(prompt_def.versions) >= 3 for version_def in prompt_def.versions: assert version_def.template is not None assert version_def.commit_message is not None
{ "repo_id": "mlflow/mlflow", "file_path": "tests/demo/test_prompts_generator.py", "license": "Apache License 2.0", "lines": 98, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/demo/generators/evaluation.py
from __future__ import annotations import contextlib import hashlib import io import logging import os from collections.abc import Callable from typing import TYPE_CHECKING, Literal import mlflow if TYPE_CHECKING: from mlflow.genai.datasets import EvaluationDataset from mlflow.demo.base import ( DEMO_EXPERIMENT_NAME, BaseDemoGenerator, DemoFeature, DemoResult, ) from mlflow.demo.data import EXPECTED_ANSWERS from mlflow.demo.generators.traces import DEMO_TRACE_TYPE_TAG, DEMO_VERSION_TAG, TracesDemoGenerator from mlflow.entities.assessment import AssessmentSource, Expectation, Feedback from mlflow.entities.trace import Trace from mlflow.entities.view_type import ViewType from mlflow.genai.datasets import create_dataset, delete_dataset, search_datasets from mlflow.genai.scorers import scorer _logger = logging.getLogger(__name__) @contextlib.contextmanager def _suppress_evaluation_output(): """Suppress tqdm progress bars and evaluation completion messages.""" original_tqdm_disable = os.environ.get("TQDM_DISABLE") os.environ["TQDM_DISABLE"] = "1" try: # Suppress both stdout (evaluation messages) and stderr (tqdm progress bars) with ( contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(io.StringIO()), ): yield finally: if original_tqdm_disable is None: os.environ.pop("TQDM_DISABLE", None) else: os.environ["TQDM_DISABLE"] = original_tqdm_disable DEMO_DATASET_TRACE_LEVEL_NAME = "demo-trace-level-dataset" DEMO_DATASET_BASELINE_SESSION_NAME = "demo-baseline-session-dataset" DEMO_DATASET_IMPROVED_SESSION_NAME = "demo-improved-session-dataset" def _get_relevance_rationale(is_relevant: bool) -> str: if is_relevant: return "The response directly addresses the question with relevant information." return "The response is not sufficiently relevant to the question asked." def _get_correctness_rationale(is_correct: bool) -> str: if is_correct: return "The response accurately captures the key information from the expected answer." return ( "The response contains relevant information but differs " "significantly from the expected answer." ) def _get_groundedness_rationale(is_grounded: bool) -> str: if is_grounded: return "The response is well-grounded in the provided context with clear references." return "The response includes claims not supported by the provided context." def _get_safety_rationale(is_safe: bool) -> str: if is_safe: return "The response contains no harmful, offensive, or inappropriate content." return "The response may contain potentially harmful or inappropriate content." def _create_quality_aware_scorer( name: str, baseline_pass_rate: float, improved_pass_rate: float, rationale_fn: Callable[[bool], str], ): """Create a deterministic scorer that simulates quality-aware evaluation. The scorer detects response quality based on content characteristics: - Longer, more detailed responses get evaluated with higher pass rates - Shorter, less detailed responses get evaluated with lower pass rates This simulates the real-world scenario where improved model outputs naturally score better when evaluated by the same scorers. """ quality_threshold = 400 @scorer(name=name) def quality_aware_scorer(inputs, outputs, trace) -> Feedback: content = str(inputs) + str(outputs) output_str = str(outputs) if len(output_str) > quality_threshold: effective_pass_rate = improved_pass_rate else: effective_pass_rate = baseline_pass_rate # Use content hash for deterministic but varied results hash_input = f"{content}:{name}" hash_val = int(hashlib.md5(hash_input.encode(), usedforsecurity=False).hexdigest()[:8], 16) normalized = hash_val / 0xFFFFFFFF is_passing = normalized < effective_pass_rate # Use the trace timestamp so the quality overview chart shows a trend # across days instead of a single dot at the current time. trace_timestamp_ms = trace.info.timestamp_ms if trace else None return Feedback( value="yes" if is_passing else "no", rationale=rationale_fn(is_passing), source=AssessmentSource( source_type="LLM_JUDGE", source_id=f"judges/{name}", ), create_time_ms=trace_timestamp_ms, last_update_time_ms=trace_timestamp_ms, ) return quality_aware_scorer SCORER_PASS_RATES = { "relevance": {"baseline": 0.65, "improved": 0.92}, "correctness": {"baseline": 0.58, "improved": 0.88}, "groundedness": {"baseline": 0.52, "improved": 0.85}, "safety": {"baseline": 0.95, "improved": 1.0}, } class EvaluationDemoGenerator(BaseDemoGenerator): """Generates demo evaluation data. Creates: - Ground truth expectations on all demo traces - Three datasets and evaluation runs, each in a single mode: - trace-level-evaluation: non-session traces (v1 + v2 combined) - baseline-session-evaluation: v1 session traces - improved-session-evaluation: v2 session traces Assessment timestamps are spread to match trace timestamps so the quality overview chart shows a trend across days. """ name = DemoFeature.EVALUATION version = 1 def generate(self) -> DemoResult: traces_generator = TracesDemoGenerator() if not traces_generator.is_generated(): traces_generator.generate() traces_generator.store_version() experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) experiment_id = experiment.experiment_id # Fetch traces split by session vs non-session v1_non_session = self._fetch_demo_traces(experiment_id, "v1", session=False) v2_non_session = self._fetch_demo_traces(experiment_id, "v2", session=False) v1_session = self._fetch_demo_traces(experiment_id, "v1", session=True) v2_session = self._fetch_demo_traces(experiment_id, "v2", session=True) all_traces = v1_non_session + v2_non_session + v1_session + v2_session self._add_expectations_to_traces(all_traces) # Re-fetch to include expectations v1_non_session = self._fetch_demo_traces(experiment_id, "v1", session=False) v2_non_session = self._fetch_demo_traces(experiment_id, "v2", session=False) v1_session = self._fetch_demo_traces(experiment_id, "v1", session=True) v2_session = self._fetch_demo_traces(experiment_id, "v2", session=True) trace_level_traces = v1_non_session + v2_non_session # Create datasets self._create_evaluation_dataset( trace_level_traces, experiment_id, DEMO_DATASET_TRACE_LEVEL_NAME ) self._create_evaluation_dataset( v1_session, experiment_id, DEMO_DATASET_BASELINE_SESSION_NAME ) self._create_evaluation_dataset( v2_session, experiment_id, DEMO_DATASET_IMPROVED_SESSION_NAME ) # Create evaluation runs trace_level_run_id = self._create_evaluation_run( traces=trace_level_traces, experiment_id=experiment_id, run_name="trace-level-evaluation", ) baseline_session_run_id = self._create_evaluation_run( traces=v1_session, experiment_id=experiment_id, run_name="baseline-session-evaluation", ) improved_session_run_id = self._create_evaluation_run( traces=v2_session, experiment_id=experiment_id, run_name="improved-session-evaluation", ) return DemoResult( feature=self.name, entity_ids=[trace_level_run_id, baseline_session_run_id, improved_session_run_id], navigation_url=f"#/experiments/{experiment_id}/evaluation-runs", ) def _data_exists(self) -> bool: experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is None or experiment.lifecycle_stage != "active": return False try: client = mlflow.MlflowClient() runs = client.search_runs( experiment_ids=[experiment.experiment_id], filter_string="params.demo = 'true'", max_results=1, ) return len(runs) > 0 except Exception: _logger.debug("Failed to check if evaluation demo exists", exc_info=True) return False def delete_demo(self) -> None: experiment = mlflow.get_experiment_by_name(DEMO_EXPERIMENT_NAME) if experiment is None: return try: client = mlflow.MlflowClient() runs = client.search_runs( experiment_ids=[experiment.experiment_id], filter_string="params.demo = 'true'", run_view_type=ViewType.ALL, max_results=100, ) for run in runs: try: if run.info.lifecycle_stage == "deleted": client.restore_run(run.info.run_id) client.delete_run(run.info.run_id) except Exception: _logger.debug("Failed to delete run %s", run.info.run_id, exc_info=True) except Exception: _logger.debug("Failed to delete evaluation demo runs", exc_info=True) for name in [ DEMO_DATASET_TRACE_LEVEL_NAME, DEMO_DATASET_BASELINE_SESSION_NAME, DEMO_DATASET_IMPROVED_SESSION_NAME, ]: self._delete_demo_dataset(experiment.experiment_id, name) def _fetch_demo_traces( self, experiment_id: str, version: Literal["v1", "v2"], session: bool | None = None, ) -> list[Trace]: filter_parts = [f"metadata.`{DEMO_VERSION_TAG}` = '{version}'"] operator = "=" if session else "!=" filter_parts.append(f"metadata.`{DEMO_TRACE_TYPE_TAG}` {operator} 'session'") return mlflow.search_traces( locations=[experiment_id], filter_string=" AND ".join(filter_parts), max_results=100, return_type="list", ) def _add_expectations_to_traces(self, traces: list[Trace]) -> int: expectation_count = 0 for trace in traces: trace_id = trace.info.trace_id trace_timestamp_ms = trace.info.timestamp_ms root_span = next((span for span in trace.data.spans if span.parent_id is None), None) if root_span is None: continue inputs = root_span.inputs or {} query = inputs.get("query") or inputs.get("message") if expected_answer := self._find_expected_answer(query): try: expectation = Expectation( name="expected_response", value=expected_answer, source=AssessmentSource( source_type="HUMAN", source_id="demo_annotator", ), metadata={"demo": "true"}, trace_id=trace_id, create_time_ms=trace_timestamp_ms, last_update_time_ms=trace_timestamp_ms, ) mlflow.log_assessment(trace_id=trace_id, assessment=expectation) expectation_count += 1 except Exception: _logger.debug("Failed to log expectation for trace %s", trace_id, exc_info=True) return expectation_count def _find_expected_answer(self, query: str) -> str | None: query_lower = query.lower().strip() if query_lower in EXPECTED_ANSWERS: return EXPECTED_ANSWERS[query_lower] for q, answer in EXPECTED_ANSWERS.items(): if q in query_lower or query_lower in q: return answer return None def _create_evaluation_dataset( self, traces: list[Trace], experiment_id: str, dataset_name: str ) -> "EvaluationDataset": from mlflow.genai.datasets import get_dataset dataset = create_dataset( name=dataset_name, experiment_id=experiment_id, tags={"demo": "true", "description": f"Demo evaluation dataset: {dataset_name}"}, ) dataset.merge_records(traces) return get_dataset(dataset_id=dataset.dataset_id) def _delete_demo_dataset(self, experiment_id: str, dataset_name: str) -> None: datasets = search_datasets( experiment_ids=[experiment_id], filter_string=f"name = '{dataset_name}'", max_results=10, ) for ds in datasets: try: delete_dataset(dataset_id=ds.dataset_id) except Exception: _logger.debug("Failed to delete dataset %s", ds.dataset_id, exc_info=True) def _create_evaluation_run( self, traces: list[Trace], experiment_id: str, run_name: str, ) -> str: demo_scorers = [ _create_quality_aware_scorer( name="relevance", baseline_pass_rate=SCORER_PASS_RATES["relevance"]["baseline"], improved_pass_rate=SCORER_PASS_RATES["relevance"]["improved"], rationale_fn=_get_relevance_rationale, ), _create_quality_aware_scorer( name="correctness", baseline_pass_rate=SCORER_PASS_RATES["correctness"]["baseline"], improved_pass_rate=SCORER_PASS_RATES["correctness"]["improved"], rationale_fn=_get_correctness_rationale, ), _create_quality_aware_scorer( name="groundedness", baseline_pass_rate=SCORER_PASS_RATES["groundedness"]["baseline"], improved_pass_rate=SCORER_PASS_RATES["groundedness"]["improved"], rationale_fn=_get_groundedness_rationale, ), _create_quality_aware_scorer( name="safety", baseline_pass_rate=SCORER_PASS_RATES["safety"]["baseline"], improved_pass_rate=SCORER_PASS_RATES["safety"]["improved"], rationale_fn=_get_safety_rationale, ), ] mlflow.set_experiment(experiment_id=experiment_id) with _suppress_evaluation_output(): result = mlflow.genai.evaluate( data=traces, scorers=demo_scorers, ) client = mlflow.MlflowClient() client.set_tag(result.run_id, "mlflow.runName", run_name) client.log_param(result.run_id, "demo", "true") return result.run_id
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/demo/generators/evaluation.py", "license": "Apache License 2.0", "lines": 333, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/demo/test_evaluation_generator.py
import pytest import mlflow from mlflow.demo.base import DEMO_EXPERIMENT_NAME, DemoFeature, DemoResult from mlflow.demo.data import ALL_DEMO_TRACES from mlflow.demo.generators.evaluation import EvaluationDemoGenerator from mlflow.demo.generators.traces import TracesDemoGenerator @pytest.fixture def evaluation_generator(): generator = EvaluationDemoGenerator() original_version = generator.version yield generator EvaluationDemoGenerator.version = original_version @pytest.fixture def traces_generator(): return TracesDemoGenerator() def test_generator_attributes(evaluation_generator): assert evaluation_generator.name == DemoFeature.EVALUATION assert evaluation_generator.version == 1 def test_data_exists_false_when_no_experiment(evaluation_generator): assert evaluation_generator._data_exists() is False def test_data_exists_false_when_no_eval_runs(evaluation_generator, traces_generator): traces_generator.generate() assert evaluation_generator._data_exists() is False def test_generate_creates_eval_runs(evaluation_generator): result = evaluation_generator.generate() assert isinstance(result, DemoResult) assert result.feature == DemoFeature.EVALUATION assert len(result.entity_ids) == 3 # Three run IDs returned def test_generate_creates_three_runs(evaluation_generator): evaluation_generator.generate() client = mlflow.MlflowClient() experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME) runs = client.search_runs( experiment_ids=[experiment.experiment_id], filter_string="params.demo = 'true'", ) assert len(runs) == 3 def test_data_exists_true_after_generate(evaluation_generator): evaluation_generator.generate() assert evaluation_generator._data_exists() is True def test_delete_demo_removes_runs(evaluation_generator): evaluation_generator.generate() assert evaluation_generator._data_exists() is True evaluation_generator.delete_demo() assert evaluation_generator._data_exists() is False def test_runs_have_demo_param(evaluation_generator): evaluation_generator.generate() client = mlflow.MlflowClient() experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME) runs = client.search_runs( experiment_ids=[experiment.experiment_id], filter_string="params.demo = 'true'", ) for run in runs: assert run.data.params.get("demo") == "true" def test_runs_have_different_names(evaluation_generator): evaluation_generator.generate() client = mlflow.MlflowClient() experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME) runs = client.search_runs( experiment_ids=[experiment.experiment_id], filter_string="params.demo = 'true'", ) run_names = {run.data.tags.get("mlflow.runName") for run in runs} assert "trace-level-evaluation" in run_names assert "baseline-session-evaluation" in run_names assert "improved-session-evaluation" in run_names def test_demo_traces_have_responses(): assert len(ALL_DEMO_TRACES) > 0 for trace in ALL_DEMO_TRACES: assert isinstance(trace.query, str) assert isinstance(trace.v1_response, str) assert isinstance(trace.v2_response, str) assert isinstance(trace.expected_response, str) assert len(trace.v1_response) > 20 assert len(trace.v2_response) > 20 assert len(trace.expected_response) > 20 def test_is_generated_checks_version(evaluation_generator): evaluation_generator.generate() evaluation_generator.store_version() assert evaluation_generator.is_generated() is True EvaluationDemoGenerator.version = 99 fresh_generator = EvaluationDemoGenerator() assert fresh_generator.is_generated() is False
{ "repo_id": "mlflow/mlflow", "file_path": "tests/demo/test_evaluation_generator.py", "license": "Apache License 2.0", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/demo/data.py
from __future__ import annotations from dataclasses import dataclass, field from typing import Any from mlflow.demo.base import DEMO_PROMPT_PREFIX from mlflow.entities.model_registry import PromptVersion # ============================================================================= # Prompt Data Definitions # ============================================================================= _CUSTOMER_SUPPORT_NAME = f"{DEMO_PROMPT_PREFIX}.prompts.customer-support" _DOCUMENT_SUMMARIZER_NAME = f"{DEMO_PROMPT_PREFIX}.prompts.document-summarizer" _CODE_REVIEWER_NAME = f"{DEMO_PROMPT_PREFIX}.prompts.code-reviewer" @dataclass class DemoPromptDef: name: str versions: list[PromptVersion] CUSTOMER_SUPPORT_PROMPT = DemoPromptDef( name=_CUSTOMER_SUPPORT_NAME, versions=[ PromptVersion( name=_CUSTOMER_SUPPORT_NAME, version=1, template="You are a customer support agent. Help the user with: {{query}}", commit_message="Initial customer support prompt", aliases=["baseline"], ), PromptVersion( name=_CUSTOMER_SUPPORT_NAME, version=2, template=( "You are a friendly and professional customer support agent. " "Respond in a helpful, empathetic tone.\n\n" "User query: {{query}}" ), commit_message="Add tone and style guidance", aliases=["tone-guidance"], ), PromptVersion( name=_CUSTOMER_SUPPORT_NAME, version=3, template=( "You are a friendly and professional customer support agent for {{company_name}}. " "Respond in a helpful, empathetic tone.\n\n" "Context: {{context}}\n\n" "User query: {{query}}" ), commit_message="Add company context and conversation history", aliases=["with-context"], ), PromptVersion( name=_CUSTOMER_SUPPORT_NAME, version=4, template=[ { "role": "system", "content": ( "You are a friendly and professional customer support agent " "for {{company_name}}. Follow these guidelines:\n" "- Be empathetic and patient\n" "- Provide clear, actionable solutions\n" "- Escalate complex issues appropriately\n" "- Always verify customer satisfaction before closing" ), }, {"role": "user", "content": "Context: {{context}}\n\nQuery: {{query}}"}, ], commit_message="Convert to chat format with detailed guidelines", aliases=["production"], ), ], ) DOCUMENT_SUMMARIZER_PROMPT = DemoPromptDef( name=_DOCUMENT_SUMMARIZER_NAME, versions=[ PromptVersion( name=_DOCUMENT_SUMMARIZER_NAME, version=1, template="Summarize the following document:\n\n{{document}}", commit_message="Initial summarization prompt", aliases=["baseline"], ), PromptVersion( name=_DOCUMENT_SUMMARIZER_NAME, version=2, template=( "Summarize the following document in {{max_words}} words or less:\n\n{{document}}" ), commit_message="Add length constraint parameter", aliases=["length-constraint"], ), PromptVersion( name=_DOCUMENT_SUMMARIZER_NAME, version=3, template=( "Summarize the following document for a {{audience}} audience. " "Keep the summary under {{max_words}} words.\n\n" "Document:\n{{document}}" ), commit_message="Add audience targeting", aliases=["audience-targeting"], ), PromptVersion( name=_DOCUMENT_SUMMARIZER_NAME, version=4, template=[ { "role": "system", "content": ( "You are a document summarization expert. Create concise, accurate " "summaries that capture the essential information while maintaining " "the original meaning." ), }, { "role": "user", "content": ( "Summarize this document for a {{audience}} audience.\n" "Maximum length: {{max_words}} words.\n\n" "Include:\n" "1. Main topic/thesis\n" "2. Key points (3-5 bullets)\n" "3. Conclusion or main takeaway\n\n" "Document:\n{{document}}" ), }, ], commit_message="Add structured output format with key points", aliases=["production"], ), ], ) CODE_REVIEWER_PROMPT = DemoPromptDef( name=_CODE_REVIEWER_NAME, versions=[ PromptVersion( name=_CODE_REVIEWER_NAME, version=1, template=( "Review the following code and provide feedback:\n\n```{{language}}\n{{code}}\n```" ), commit_message="Initial code review prompt", aliases=["baseline"], ), PromptVersion( name=_CODE_REVIEWER_NAME, version=2, template=( "Review the following {{language}} code for:\n" "- Bugs and errors\n" "- Performance issues\n" "- Code style\n\n" "```{{language}}\n{{code}}\n```" ), commit_message="Add specific review categories", aliases=["review-categories"], ), PromptVersion( name=_CODE_REVIEWER_NAME, version=3, template=( "Review the following {{language}} code. For each issue found, specify:\n" "- Severity: Critical, Major, Minor, or Suggestion\n" "- Category: Bug, Performance, Security, Style, or Maintainability\n" "- Line number (if applicable)\n" "- Recommended fix\n\n" "```{{language}}\n{{code}}\n```" ), commit_message="Add severity levels and structured feedback format", aliases=["severity-levels"], ), PromptVersion( name=_CODE_REVIEWER_NAME, version=4, template=[ { "role": "system", "content": ( "You are an expert code reviewer. Analyze code for bugs, security " "vulnerabilities, performance issues, and maintainability concerns. " "Provide actionable feedback with clear explanations and suggested fixes." ), }, { "role": "user", "content": ( "Review this {{language}} code:\n\n" "```{{language}}\n{{code}}\n```\n\n" "Provide feedback in this format:\n" "## Summary\n" "Brief overview of code quality.\n\n" "## Issues Found\n" "For each issue:\n" "- **[Severity]** Category: Description\n" " - Line: X\n" " - Fix: Recommendation\n\n" "## Positive Aspects\n" "What the code does well." ), }, ], commit_message="Production-ready with structured markdown output", aliases=["production"], ), ], ) DEMO_PROMPTS: list[DemoPromptDef] = [ CUSTOMER_SUPPORT_PROMPT, DOCUMENT_SUMMARIZER_PROMPT, CODE_REVIEWER_PROMPT, ] # ============================================================================= # Trace Data Definitions # ============================================================================= @dataclass class LinkedPromptRef: """Reference to a prompt version for linking to traces.""" prompt_name: str version: int @dataclass class ToolCall: """Tool call with input/output for agent traces.""" name: str input: dict[str, Any] output: dict[str, Any] @dataclass class PromptTemplateValues: """Template values for prompt-based traces. Contains the prompt name, template, and variable values used to render the prompt. This allows traces to show the resolved prompt with interpolated values. """ prompt_name: str template: str variables: dict[str, str] def render(self) -> str: """Render the template with the variable values.""" result = self.template for key, value in self.variables.items(): result = result.replace(f"{{{{{key}}}}}", value) return result @dataclass class DemoTrace: """Demo trace with query, two response versions, and expected ground truth. - v1_response: Initial/baseline agent output (less accurate, more verbose) - v2_response: Improved agent output (better quality, closer to expected) - expected_response: Ground truth for evaluation - prompt_template: Optional prompt template info for prompt-based traces """ query: str v1_response: str v2_response: str expected_response: str trace_type: str tools: list[ToolCall] = field(default_factory=list) session_id: str | None = None session_user: str | None = None turn_index: int | None = None prompt_template: PromptTemplateValues | None = None # ============================================================================= # RAG Traces (2 traces) # ============================================================================= RAG_TRACES: list[DemoTrace] = [ DemoTrace( query="What is MLflow Tracing and how does it help with LLM observability?", v1_response=( "MLflow Tracing is a feature that helps you understand what's happening " "in your LLM applications. It captures information about your app's execution " "and shows it in the UI somewhere." ), v2_response=( "MLflow Tracing provides comprehensive observability for LLM applications by " "capturing the execution flow as hierarchical spans. Each span records inputs, " "outputs, latency, and metadata, making it easy to debug and optimize your AI systems." ), expected_response=( "MLflow Tracing provides observability for LLM applications, capturing " "prompts, model calls, and tool invocations as hierarchical spans with " "inputs, outputs, and latency information." ), trace_type="rag", ), DemoTrace( query="How do I use mlflow.evaluate() to assess my LLM's output quality?", v1_response=( "MLflow has an evaluate() function. You pass it some data and scorers " "and it gives you back metrics. The results are logged automatically I think." ), v2_response=( "Use mlflow.evaluate() by passing your model/data and a list of scorers like " "relevance() or faithfulness(). It returns per-row scores and aggregate metrics, " "all automatically logged to your MLflow experiment for easy comparison." ), expected_response=( "Use mlflow.evaluate() with your model and scorers (e.g., relevance, faithfulness). " "Results include per-row scores and aggregate metrics, logged to MLflow." ), trace_type="rag", ), ] # ============================================================================= # Agent Traces (2 traces) # ============================================================================= AGENT_TRACES: list[DemoTrace] = [ DemoTrace( query="What's the weather in San Francisco and should I bring an umbrella today?", v1_response=( "The weather in San Francisco is currently 62 degrees with partly cloudy skies. " "There's some chance of rain today, but I'm not sure exactly how much." ), v2_response=( "It's currently 62F and partly cloudy in San Francisco with only a 15% chance " "of rain. You probably don't need an umbrella today, but a light jacket might " "be nice for the evening fog!" ), expected_response=( "San Francisco is 62F and partly cloudy with 15% rain chance. " "No umbrella needed, but consider a light jacket for evening fog." ), trace_type="agent", tools=[ ToolCall( name="get_weather", input={"city": "San Francisco", "units": "fahrenheit"}, output={ "temperature": 62, "condition": "partly cloudy", "rain_chance": 15, "humidity": 68, }, ), ], ), DemoTrace( query="Calculate the compound interest on $10,000 at 5% annual rate for 10 years", v1_response=( "Based on my calculation, $10,000 invested at 5% annual interest " "compounded yearly for 10 years would grow to around $16,289 or so." ), v2_response=( "With annual compounding, $10,000 at 5% interest for 10 years grows to " "$16,288.95. The formula is: Principal x (1 + rate)^years = 10000 x 1.05^10. " "You'll earn $6,288.95 in interest over the period." ), expected_response=( "$10,000 at 5% annual compound interest for 10 years = $16,288.95. " "Total interest earned: $6,288.95." ), trace_type="agent", tools=[ ToolCall( name="calculator", input={"expression": "10000 * (1.05 ** 10)", "precision": 2}, output={"result": 16288.95, "formula": "10000 * 1.05^10"}, ), ], ), ] # ============================================================================= # Prompt-Based Traces (6 traces - 2 per prompt type) # ============================================================================= PROMPT_TRACES: list[DemoTrace] = [ # Customer Support Prompts (2 traces) DemoTrace( query=( "I can't log into my account. It says my password is wrong but I'm sure it's correct." ), v1_response=( "Try resetting your password using the forgot password link. " "If that doesn't work, contact support." ), v2_response=( "I'm sorry you're having trouble logging in! This is frustrating, I know. " "Let's fix this: First, try the 'Forgot Password' link on the login page. " "If you still can't get in after resetting, I can manually verify your account " "and send you a secure reset link. Which would you prefer?" ), expected_response=( "I understand login issues are frustrating. Try 'Forgot Password' first, " "or I can manually reset your account access." ), trace_type="prompt", prompt_template=PromptTemplateValues( prompt_name="customer-support", template=( "You are a customer support agent for {{company_name}}. " "Help the user with their issue.\n\n" "Context: {{context}}\n\n" "User query: {{query}}" ), variables={ "company_name": "TechCorp", "context": "User has been a customer for 2 years with no previous issues", "query": "I can't log into my account. Password seems wrong.", }, ), ), DemoTrace( query="I was charged twice for my subscription this month. Can you help?", v1_response=( "I see the duplicate charge. I'll process a refund which should " "appear in 3-5 business days." ), v2_response=( "I sincerely apologize for the duplicate charge - that shouldn't have happened! " "I can see both charges on your account. I've already initiated a refund for " "$29.99, which will appear in your account within 2-3 business days. I've also " "added a $10 credit to your account for the inconvenience. Is there anything " "else I can help you with?" ), expected_response=( "I apologize for the duplicate charge. Refund of $29.99 initiated, " "arriving in 2-3 business days, plus $10 credit for the inconvenience." ), trace_type="prompt", prompt_template=PromptTemplateValues( prompt_name="customer-support", template=( "You are a customer support agent for {{company_name}}. " "Help the user with their issue.\n\n" "Context: {{context}}\n\n" "User query: {{query}}" ), variables={ "company_name": "TechCorp", "context": "Premium subscriber, $29.99/month plan, charged on 1st and 3rd", "query": "I was charged twice for my subscription this month.", }, ), ), # Document Summarizer Prompts (2 traces) DemoTrace( query="Summarize the Q3 2024 earnings report for investors.", v1_response="Q3 2024: Revenue up, profits increased, outlook is positive.", v2_response=( "**Q3 2024 Earnings Summary for Investors**\n\n" "- Revenue: $2.3B (+15% YoY)\n" "- Net Income: $450M (+23% YoY)\n" "- EPS: $1.82 (beat estimates by $0.12)\n\n" "Key Drivers: Cloud services growth (32%), cost optimization savings ($45M)\n" "Outlook: Full-year guidance raised to $9.1B revenue" ), expected_response=( "Q3 2024: Revenue $2.3B (+15%), Net Income $450M (+23%), EPS $1.82. " "Cloud growth and cost savings drove results. FY guidance raised to $9.1B." ), trace_type="prompt", prompt_template=PromptTemplateValues( prompt_name="document-summarizer", template=( "Summarize the following document for a {{audience}} audience. " "Keep the summary under {{max_words}} words.\n\n" "Document:\n{{document}}" ), variables={ "audience": "investor", "max_words": "150", "document": ( "Q3 2024 Financial Results: Total revenue reached $2.3 billion, " "representing a 15% increase year-over-year. Net income was $450 million, " "up 23% from Q3 2023. Earnings per share of $1.82 exceeded analyst " "estimates of $1.70. Cloud services segment grew 32%..." ), }, ), ), DemoTrace( query="Create a summary of the machine learning research paper for a technical blog.", v1_response="The paper proposes a new transformer architecture that is more efficient.", v2_response=( "**Novel Sparse Transformer Architecture**\n\n" "This paper introduces SparseFormer, achieving 12% better accuracy on standard " "benchmarks while reducing memory usage by 40%. Key innovations:\n\n" "1. **Sparse Attention**: O(n log n) complexity vs O(n^2) for vanilla transformers\n" "2. **Dynamic Positional Encoding**: Adapts to sequence length automatically\n" "3. **Gradient Checkpointing**: Enables training on longer sequences\n\n" "Validated on translation (WMT) and summarization (CNN/DailyMail) tasks." ), expected_response=( "SparseFormer: New transformer with 12% accuracy gain and 40% memory reduction. " "Uses O(n log n) sparse attention and dynamic positional encoding." ), trace_type="prompt", prompt_template=PromptTemplateValues( prompt_name="document-summarizer", template=( "Summarize the following document for a {{audience}} audience. " "Keep the summary under {{max_words}} words.\n\n" "Document:\n{{document}}" ), variables={ "audience": "technical", "max_words": "200", "document": ( "Abstract: We present SparseFormer, a novel transformer architecture " "that achieves state-of-the-art results while significantly reducing " "computational requirements. Our sparse attention mechanism reduces " "complexity from O(n^2) to O(n log n)..." ), }, ), ), # Code Reviewer Prompts (2 traces) DemoTrace( query="Review this Python function that calculates factorial.", v1_response="The code works but could be improved for performance.", v2_response=( "## Code Review: factorial function\n\n" "### Issues Found\n" "1. **[Critical] Bug - Line 3**: No handling for negative numbers\n" " - Fix: Add `if n < 0: raise ValueError('n must be non-negative')`\n\n" "2. **[Major] Performance - Line 5**: Recursive implementation has O(n) stack depth\n" " - Fix: Use iterative approach or `math.factorial()` for large n\n\n" "3. **[Minor] Style**: Missing type hints and docstring\n\n" "### Positive Aspects\n" "- Clean, readable implementation\n" "- Correct base case handling" ), expected_response=( "Critical: No negative number handling. Major: Recursive approach risks stack " "overflow for large n. Minor: Add type hints and docstring." ), trace_type="prompt", prompt_template=PromptTemplateValues( prompt_name="code-reviewer", template=( "Review the following {{language}} code. Identify bugs, performance issues, " "and style problems.\n\n```{{language}}\n{{code}}\n```" ), variables={ "language": "python", "code": ( "def factorial(n):\n" " if n == 0:\n" " return 1\n" " return n * factorial(n - 1)" ), }, ), ), DemoTrace( query="Review this JavaScript async function for API calls.", v1_response="There are some issues with error handling in the async code.", v2_response=( "## Code Review: fetchUserData async function\n\n" "### Issues Found\n" "1. **[Critical] Security - Line 4**: API key exposed in client-side code\n" " - Fix: Move to environment variable or backend proxy\n\n" "2. **[Critical] Bug - Line 7**: Missing try/catch around await\n" " - Fix: Wrap in try/catch or add .catch() handler\n\n" "3. **[Major] Performance - Lines 8-10**: Sequential awaits should be parallel\n" " - Fix: `const [user, posts] = await Promise.all([...]);`\n\n" "4. **[Minor] Style**: Inconsistent error message format\n\n" "### Positive Aspects\n" "- Good use of async/await syntax\n" "- Clear function naming" ), expected_response=( "Critical: API key exposure, missing error handling. Major: Use Promise.all() " "for parallel requests. Minor: Inconsistent error formatting." ), trace_type="prompt", prompt_template=PromptTemplateValues( prompt_name="code-reviewer", template=( "Review the following {{language}} code. Identify bugs, performance issues, " "and style problems.\n\n```{{language}}\n{{code}}\n```" ), variables={ "language": "javascript", "code": ( "async function fetchUserData(userId) {\n" " const apiKey = 'sk-1234567890';\n" " const user = await fetch(`/api/users/${userId}`);\n" " const posts = await fetch(`/api/users/${userId}/posts`);\n" " return { user: user.json(), posts: posts.json() };\n" "}" ), }, ), ), ] # ============================================================================= # Session Traces (3 sessions with varying turns: 2, 3, 2 = 7 traces total) # ============================================================================= SESSION_TRACES: list[DemoTrace] = [ # Session 1: MLflow Setup (2 turns) DemoTrace( query="I'm new to MLflow. How do I get started with experiment tracking?", v1_response=( "To get started with MLflow, first install it with pip. Then you can " "start using the tracking API to log your experiments." ), v2_response=( "Welcome to MLflow! Here's your quickstart:\n\n" "1. Install: `pip install mlflow`\n" "2. Start the UI: `mlflow server --port 5000`\n" "3. In your code:\n" "```python\n" "import mlflow\n" "with mlflow.start_run():\n" " mlflow.log_param('learning_rate', 0.01)\n" " mlflow.log_metric('accuracy', 0.95)\n" "```\n" "Open http://localhost:5000 to see your experiments!" ), expected_response=( "Install with `pip install mlflow`, run `mlflow server`, then use " "mlflow.start_run() with log_param() and log_metric() to track experiments." ), trace_type="session", session_id="session-mlflow-setup", session_user="alice@example.com", turn_index=1, ), DemoTrace( query="Great! How do I log a trained model so I can deploy it later?", v1_response=( "You can use mlflow.sklearn.log_model() or similar functions to log models. " "They get saved with the run." ), v2_response=( "After training, log your model with the appropriate flavor:\n\n" "```python\n" "# For scikit-learn\n" "mlflow.sklearn.log_model(model, 'model')\n\n" "# For PyTorch\n" "mlflow.pytorch.log_model(model, 'model')\n" "```\n\n" "The model is saved with your run and can be loaded later with:\n" "`mlflow.sklearn.load_model('runs:/<run_id>/model')`\n\n" "Ready to register it for deployment?" ), expected_response=( "Use mlflow.<flavor>.log_model() (e.g., sklearn, pytorch) to save models. " "Load later with mlflow.<flavor>.load_model('runs:/<run_id>/model')." ), trace_type="session", session_id="session-mlflow-setup", session_user="alice@example.com", turn_index=2, ), # Session 2: Debugging a RAG Application (3 turns) DemoTrace( query="My RAG application is giving inconsistent answers. How can I debug it?", v1_response=( "You should look at your prompts and retrieved documents to see what's happening. " "MLflow has some tools for this." ), v2_response=( "Let's debug this systematically with MLflow Tracing!\n\n" "1. **Enable tracing** to capture every step:\n" "```python\n" "mlflow.tracing.enable()\n" "```\n\n" "2. **Check the Traces tab** in MLflow UI to see:\n" " - What documents were retrieved\n" " - The actual prompts sent to the LLM\n" " - Token usage and latency per step\n\n" "What kind of inconsistency are you seeing - retrieval or generation issues?" ), expected_response=( "Enable mlflow.tracing.enable() to capture execution flow. " "Check the Traces tab to inspect retrieved docs, prompts, and latency." ), trace_type="session", session_id="session-rag-debug", session_user="bob@example.com", turn_index=1, ), DemoTrace( query="I think it's a retrieval issue - sometimes it finds irrelevant documents.", v1_response=( "You should look at the retrieval scores and maybe adjust your embedding model " "or chunk size." ), v2_response=( "Retrieval issues are common! Here's how to diagnose:\n\n" "1. **In MLflow Traces**, click on the RETRIEVER span to see:\n" " - Retrieved document IDs and relevance scores\n" " - The query embedding used\n\n" "2. **Common fixes**:\n" " - Lower your similarity threshold (try 0.7 instead of 0.8)\n" " - Experiment with chunk sizes (512 tokens often works well)\n" " - Try a different embedding model\n\n" "3. **Track experiments**: Log retrieval params to compare:\n" "```python\n" "mlflow.log_params({'chunk_size': 512, 'threshold': 0.7})\n" "```" ), expected_response=( "Check RETRIEVER span in traces for scores. Try adjusting similarity threshold, " "chunk size, or embedding model. Log params to compare experiments." ), trace_type="session", session_id="session-rag-debug", session_user="bob@example.com", turn_index=2, ), DemoTrace( query="How do I compare different retrieval configurations systematically?", v1_response=( "You can run multiple experiments with different settings and compare them " "in the MLflow UI." ), v2_response=( "Use MLflow's evaluation framework for systematic comparison:\n\n" "```python\n" "import mlflow\n" "from mlflow.metrics.genai import relevance, faithfulness\n\n" "# Create evaluation dataset\n" "eval_data = pd.DataFrame({\n" " 'inputs': questions,\n" " 'ground_truth': expected_answers\n" "})\n\n" "# Run evaluation\n" "results = mlflow.evaluate(\n" " model=your_rag_pipeline,\n" " data=eval_data,\n" " scorers=[relevance(), faithfulness()]\n" ")\n" "```\n\n" "Compare runs in the MLflow UI's comparison view to see which config wins!" ), expected_response=( "Use mlflow.evaluate() with relevance() and faithfulness() scorers. " "Create eval dataset with inputs and ground_truth, then compare runs in UI." ), trace_type="session", session_id="session-rag-debug", session_user="bob@example.com", turn_index=3, ), # Session 3: Model Deployment (2 turns) DemoTrace( query="I have a model logged in MLflow. How do I deploy it as an API?", v1_response=( "You can use MLflow's model serving. Register the model and then serve it " "with the mlflow models serve command." ), v2_response=( "Great! Here's the deployment path:\n\n" "1. **Register the model** (if not already):\n" "```python\n" "mlflow.register_model(\n" " 'runs:/<run_id>/model',\n" " 'my-classifier'\n" ")\n" "```\n\n" "2. **Serve it locally**:\n" "```bash\n" "mlflow models serve -m models:/my-classifier/1 -p 5001\n" "```\n\n" "3. **Test the endpoint**:\n" "```bash\n" "curl -X POST http://localhost:5001/invocations \\\n" " -H 'Content-Type: application/json' \\\n" " -d '{\"inputs\": [[1, 2, 3, 4]]}'\n" "```\n\n" "Want to deploy to Kubernetes or a cloud platform instead?" ), expected_response=( "Register with mlflow.register_model(), then serve with " "`mlflow models serve -m models:/name/version`. Test with curl POST to /invocations." ), trace_type="session", session_id="session-deployment", session_user="carol@example.com", turn_index=1, ), DemoTrace( query="Yes, I want to deploy to Kubernetes. What's the process?", v1_response=( "You can build a Docker image with MLflow and deploy it to Kubernetes using " "standard kubectl commands." ), v2_response=( "Here's the K8s deployment workflow:\n\n" "1. **Build a Docker image**:\n" "```bash\n" "mlflow models build-docker \\\n" " -m models:/my-classifier/1 \\\n" " -n my-classifier:v1\n" "```\n\n" "2. **Push to your registry**:\n" "```bash\n" "docker push your-registry/my-classifier:v1\n" "```\n\n" "3. **Deploy to K8s** (example deployment.yaml):\n" "```yaml\n" "apiVersion: apps/v1\n" "kind: Deployment\n" "spec:\n" " containers:\n" " - name: model\n" " image: your-registry/my-classifier:v1\n" " ports:\n" " - containerPort: 8080\n" "```\n\n" "The container exposes a `/invocations` endpoint compatible with MLflow's format." ), expected_response=( "Build image with `mlflow models build-docker`, push to registry, " "deploy with K8s manifests. Container exposes /invocations endpoint." ), trace_type="session", session_id="session-deployment", session_user="carol@example.com", turn_index=2, ), ] # ============================================================================= # Combined Trace Data # ============================================================================= ALL_DEMO_TRACES: list[DemoTrace] = RAG_TRACES + AGENT_TRACES + PROMPT_TRACES + SESSION_TRACES # Mapping of queries (lowercased) to expected responses for evaluation EXPECTED_ANSWERS: dict[str, str] = { trace.query.lower(): trace.expected_response for trace in ALL_DEMO_TRACES }
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/demo/data.py", "license": "Apache License 2.0", "lines": 816, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex