| sample_id | text | metadata | category |
|---|---|---|---|
mlflow/mlflow:mlflow/telemetry/utils.py | import logging
import os
from typing import Any
import requests
from packaging.version import Version
from mlflow.environment_variables import (
_MLFLOW_TELEMETRY_LOGGING,
_MLFLOW_TESTING_TELEMETRY,
MLFLOW_DISABLE_TELEMETRY,
)
from mlflow.telemetry.constant import (
CONFIG_STAGING_URL,
CONFIG_URL,
FALLBACK_UI_CONFIG,
UI_CONFIG_STAGING_URL,
UI_CONFIG_URL,
)
from mlflow.version import VERSION
# Module-level logger, named after this module per the standard logging convention.
_logger = logging.getLogger(__name__)
def _is_ci_env_or_testing() -> bool:
"""
Check if the current environment is a CI environment.
If so, we should not track telemetry.
"""
env_vars = {
"PYTEST_CURRENT_TEST", # https://docs.pytest.org/en/stable/example/simple.html#pytest-current-test-environment-variable
"GITHUB_ACTIONS", # https://docs.github.com/en/actions/reference/variables-reference?utm_source=chatgpt.com#default-environment-variables
"CI", # set by many CI providers
"CIRCLECI", # https://circleci.com/docs/variables/#built-in-environment-variables
"GITLAB_CI", # https://docs.gitlab.com/ci/variables/predefined_variables/#predefined-variables
"JENKINS_URL", # https://www.jenkins.io/doc/book/pipeline/jenkinsfile/#using-environment-variables
"TRAVIS", # https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
"TF_BUILD", # https://learn.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#system-variables
"BITBUCKET_BUILD_NUMBER", # https://support.atlassian.com/bitbucket-cloud/docs/variables-and-secrets/
"CODEBUILD_BUILD_ARN", # https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-env-vars.html
"BUILDKITE", # https://buildkite.com/docs/pipelines/configure/environment-variables
"TEAMCITY_VERSION", # https://www.jetbrains.com/help/teamcity/predefined-build-parameters.html#Predefined+Server+Build+Parameters
"CLOUD_RUN_EXECUTION", # https://cloud.google.com/run/docs/reference/container-contract#env-vars
# runbots
"RUNBOT_HOST_URL",
"RUNBOT_BUILD_NAME",
"RUNBOT_WORKER_ID",
}
# For most of the cases, the env var existing means we are in CI
for var in env_vars:
if var in os.environ:
return True
return False
# NB: implement the function here to avoid unnecessary imports inside databricks_utils
def _is_in_databricks() -> bool:
# check if in databricks runtime
if "DATABRICKS_RUNTIME_VERSION" in os.environ:
return True
if os.path.exists("/databricks/DBR_VERSION"):
return True
# check if in databricks model serving environment
if os.environ.get("IS_IN_DB_MODEL_SERVING_ENV", "false").lower() == "true":
return True
return False
# Computed once at import time so repeated telemetry checks stay cheap.
_IS_MLFLOW_DEV_VERSION = Version(VERSION).is_devrelease  # dev builds opt out of telemetry
_IS_IN_CI_ENV_OR_TESTING = _is_ci_env_or_testing()
_IS_IN_DATABRICKS = _is_in_databricks()
# When set, is_telemetry_disabled() short-circuits to False (telemetry kept on for tests).
_IS_MLFLOW_TESTING_TELEMETRY = _MLFLOW_TESTING_TELEMETRY.get()
def is_telemetry_disabled() -> bool:
    """
    Return True when telemetry collection should be skipped.

    Any failure while evaluating the checks is treated as "disabled" so that
    telemetry can never break user code.
    """
    try:
        # Testing mode forces telemetry on regardless of other signals.
        if _IS_MLFLOW_TESTING_TELEMETRY:
            return False
        opted_out = os.environ.get("DO_NOT_TRACK", "false").lower() == "true"
        return (
            MLFLOW_DISABLE_TELEMETRY.get()
            or opted_out
            or _IS_IN_CI_ENV_OR_TESTING
            or _IS_IN_DATABRICKS
            or _IS_MLFLOW_DEV_VERSION
        )
    except Exception as exc:
        _log_error(f"Failed to check telemetry disabled status: {exc}")
        return True
def _get_config_url(version: str, is_ui: bool = False) -> str | None:
    """
    Get the config URL for the given MLflow version.

    Dev releases (and telemetry-testing mode) resolve against the staging
    bucket; final releases and release candidates resolve against production.
    Any other version form (e.g. local post-releases) gets no URL.
    """
    parsed = Version(version)
    if parsed.is_devrelease or _IS_MLFLOW_TESTING_TELEMETRY:
        staging_base = UI_CONFIG_STAGING_URL if is_ui else CONFIG_STAGING_URL
        return f"{staging_base}/{version}.json"
    is_final_release = parsed.base_version == version
    is_release_candidate = parsed.is_prerelease and parsed.pre[0] == "rc"
    if is_final_release or is_release_candidate:
        prod_base = UI_CONFIG_URL if is_ui else CONFIG_URL
        return f"{prod_base}/{version}.json"
    return None
def _log_error(message: str) -> None:
    """Log a telemetry error (with traceback) only when telemetry logging is enabled."""
    if _MLFLOW_TELEMETRY_LOGGING.get():
        _logger.error(message, exc_info=True)
def fetch_ui_telemetry_config() -> dict[str, Any]:
    """
    Fetch the UI telemetry config from the remote endpoint.

    Falls back to ``FALLBACK_UI_CONFIG`` whenever telemetry is disabled, no
    config URL exists for this version, the request fails, or the server does
    not return HTTP 200.
    """
    if is_telemetry_disabled():
        return FALLBACK_UI_CONFIG
    url = _get_config_url(VERSION, is_ui=True)
    if not url:
        return FALLBACK_UI_CONFIG
    try:
        # Short timeout: the UI must never hang on telemetry config.
        resp = requests.get(url, timeout=1)
        return resp.json() if resp.status_code == 200 else FALLBACK_UI_CONFIG
    except Exception as exc:
        _log_error(f"Failed to fetch UI telemetry config: {exc}")
        return FALLBACK_UI_CONFIG
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/telemetry/utils.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/telemetry/helper_functions.py | import json
from typing import Any
def validate_telemetry_record(
    mock_telemetry_client,
    mock_requests,
    event_name: str,
    params=None,
    *,
    status="success",
    search_index=True,
    check_params=True,
) -> dict[str, Any]:
    """
    Flush the client, assert the captured request matches expectations, and
    return the record's ``data`` payload. Clears ``mock_requests`` on success.
    """
    mock_telemetry_client.flush()
    # Either scan for the named event or just take the first captured request.
    if search_index:
        names = [req["data"]["event_name"] for req in mock_requests]
        position = names.index(event_name)
    else:
        position = 0
    data = mock_requests[position]["data"]
    assert data["event_name"] == event_name
    if check_params:
        if params:
            # Compare parsed dicts rather than JSON strings to stay order-independent.
            sent_params = json.loads(data["params"]) if data["params"] else None
            assert sent_params == params
        else:
            assert data["params"] is None
    assert data["status"] == status
    assert data["duration_ms"] is not None
    mock_requests.clear()
    return data
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/telemetry/helper_functions.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/telemetry/test_client.py | import logging
import threading
import time
import warnings
from unittest import mock
import pytest
import mlflow
from mlflow.environment_variables import _MLFLOW_TELEMETRY_SESSION_ID
from mlflow.telemetry.client import (
BATCH_SIZE,
BATCH_TIME_INTERVAL_SECONDS,
MAX_QUEUE_SIZE,
MAX_WORKERS,
TelemetryClient,
_is_localhost_uri,
get_telemetry_client,
)
from mlflow.telemetry.events import CreateLoggedModelEvent, CreateRunEvent
from mlflow.telemetry.schemas import Record, SourceSDK, Status
from mlflow.utils.os import is_windows
from mlflow.version import IS_TRACING_SDK_ONLY, VERSION
from tests.telemetry.helper_functions import validate_telemetry_record
if not IS_TRACING_SDK_ONLY:
from mlflow.tracking._tracking_service.utils import _use_tracking_uri
def test_telemetry_client_initialization(mock_telemetry_client: TelemetryClient, mock_requests):
    """A fresh client mirrors the module-level queue/batching constants."""
    assert mock_telemetry_client.info is not None
    assert mock_telemetry_client._queue.maxsize == MAX_QUEUE_SIZE
    assert mock_telemetry_client._max_workers == MAX_WORKERS
    assert mock_telemetry_client._batch_size == BATCH_SIZE
    assert mock_telemetry_client._batch_time_interval == BATCH_TIME_INTERVAL_SECONDS
def test_telemetry_client_session_id(
    mock_telemetry_client: TelemetryClient, mock_requests, monkeypatch
):
    """Session id comes from the env var when set, otherwise it is generated."""
    monkeypatch.setenv(_MLFLOW_TELEMETRY_SESSION_ID.name, "test_session_id")
    with TelemetryClient() as telemetry_client:
        assert telemetry_client.info["session_id"] == "test_session_id"
    monkeypatch.delenv(_MLFLOW_TELEMETRY_SESSION_ID.name, raising=False)
    with TelemetryClient() as telemetry_client:
        assert telemetry_client.info["session_id"] != "test_session_id"
def test_add_record_and_send(mock_telemetry_client: TelemetryClient, mock_requests):
    """A single added record is sent with its data payload and a partition key."""
    # Create a test record
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    # Add record and wait for processing
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    received_record = next(
        req for req in mock_requests if req["data"]["event_name"] == "test_event"
    )
    assert "data" in received_record
    assert "partition-key" in received_record
    data = received_record["data"]
    assert data["event_name"] == "test_event"
    assert data["status"] == "success"
def test_add_records_and_send(mock_telemetry_client: TelemetryClient, mock_requests):
    """Records accumulate in pending until the 500-record batch size is reached."""
    # Pre-populate pending_records with 200 records
    initial_records = [
        Record(
            event_name=f"initial_{i}",
            timestamp_ns=time.time_ns(),
            status=Status.SUCCESS,
        )
        for i in range(200)
    ]
    mock_telemetry_client.add_records(initial_records)
    # We haven't hit the batch size limit yet, so expect no records to be sent
    assert len(mock_telemetry_client._pending_records) == 200
    assert len(mock_requests) == 0
    # Add 1000 more records
    # Expected behavior:
    # - First 300 records fill to 500 -> send batch (200 + 300) to queue
    # - Next 500 records -> send batch to queue
    # - Last 200 records remain in pending (200 < 500)
    additional_records = [
        Record(
            event_name=f"additional_{i}",
            timestamp_ns=time.time_ns(),
            status=Status.SUCCESS,
        )
        for i in range(1000)
    ]
    mock_telemetry_client.add_records(additional_records)
    # Verify batching logic:
    # - 2 batches should be in the queue
    # - 200 records should remain in pending
    assert mock_telemetry_client._queue.qsize() == 2
    assert len(mock_telemetry_client._pending_records) == 200
    # Flush to process queue and send the remaining partial batch
    mock_telemetry_client.flush()
    # Verify all 1200 records were sent
    assert len(mock_requests) == 1200
    event_names = {req["data"]["event_name"] for req in mock_requests}
    assert all(f"initial_{i}" in event_names for i in range(200))
    assert all(f"additional_{i}" in event_names for i in range(1000))
def test_record_with_session_and_installation_id(
    mock_telemetry_client: TelemetryClient, mock_requests
):
    """Per-record session/installation ids override the client-level defaults."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
        session_id="session_id_override",
        installation_id="installation_id_override",
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    assert mock_requests[0]["data"]["session_id"] == "session_id_override"
    assert mock_requests[0]["data"]["installation_id"] == "installation_id_override"
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    assert mock_requests[1]["data"]["session_id"] == mock_telemetry_client.info["session_id"]
    assert (
        mock_requests[1]["data"]["installation_id"] == mock_telemetry_client.info["installation_id"]
    )
def test_batch_processing(mock_telemetry_client: TelemetryClient, mock_requests):
    """More records than the batch size are still all delivered after a flush."""
    mock_telemetry_client._batch_size = 3  # Set small batch size for testing
    # Add multiple records
    for i in range(5):
        record = Record(
            event_name=f"test_event_{i}",
            timestamp_ns=time.time_ns(),
            status=Status.SUCCESS,
        )
        mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    events = {req["data"]["event_name"] for req in mock_requests}
    assert all(event_name in events for event_name in [f"test_event_{i}" for i in range(5)])
def test_flush_functionality(mock_telemetry_client: TelemetryClient, mock_requests):
    """flush() pushes pending records out to the mock endpoint."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    events = {req["data"]["event_name"] for req in mock_requests}
    assert record.event_name in events
def test_record_sent(mock_telemetry_client: TelemetryClient, mock_requests):
    """All sent records carry the same session and installation ids."""
    record_1 = Record(
        event_name="test_event_1",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record_1)
    mock_telemetry_client.flush()
    assert len(mock_requests) == 1
    data = mock_requests[0]["data"]
    assert data["event_name"] == record_1.event_name
    assert data["status"] == "success"
    session_id = data.get("session_id")
    installation_id = data.get("installation_id")
    assert session_id is not None
    assert installation_id is not None
    record_2 = Record(
        event_name="test_event_2",
        timestamp_ns=time.time_ns(),
        status=Status.FAILURE,
    )
    record_3 = Record(
        event_name="test_event_3",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record_2)
    mock_telemetry_client.add_record(record_3)
    mock_telemetry_client.flush()
    assert len(mock_requests) == 3
    # all record should have the same session id and installation id
    assert {req["data"].get("session_id") for req in mock_requests} == {session_id}
    assert {req["data"].get("installation_id") for req in mock_requests} == {installation_id}
def test_client_shutdown(mock_telemetry_client: TelemetryClient, mock_requests):
    """Terminating flush returns quickly and drops unsent records."""
    for _ in range(100):
        record = Record(
            event_name="test_event",
            timestamp_ns=time.time_ns(),
            status=Status.SUCCESS,
        )
        mock_telemetry_client.add_record(record)
    start_time = time.time()
    mock_telemetry_client.flush(terminate=True)
    end_time = time.time()
    assert end_time - start_time < 0.1
    events = {req["data"]["event_name"] for req in mock_requests}
    assert "test_event" not in events
    assert not mock_telemetry_client.is_active
@pytest.mark.parametrize(
    "url",
    [
        "http://127.0.0.1:9999/nonexistent",
        "http://127.0.0.1:9999/unauthorized",
        "http://127.0.0.1:9999/forbidden",
        "http://127.0.0.1:9999/bad_request",
    ],
)
def test_telemetry_collection_stopped_on_error(mock_requests, mock_telemetry_client, url):
    """Ingestion-endpoint errors permanently stop collection; later adds are no-ops."""
    mock_telemetry_client.config.ingestion_url = url
    # Add a record - should not crash
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush(terminate=True)
    assert mock_telemetry_client._is_stopped is True
    assert mock_telemetry_client.is_active is False
    requests_count = len(mock_requests)
    assert requests_count <= 1
    # add record after stopping should be no-op
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush(terminate=True)
    assert len(mock_requests) == requests_count
@pytest.mark.parametrize("error_code", [429, 500])
@pytest.mark.parametrize("terminate", [True, False])
def test_telemetry_retry_on_error(error_code, terminate):
    """HTTP error responses are retried unless the client is terminating."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )

    class MockPostTracker:
        # Fails the first two posts with `error_code`, then succeeds.
        def __init__(self):
            self.count = 0
            self.responses = []

        def mock_post(self, url, json=None, **kwargs):
            self.count += 1
            if self.count < 3:
                return mock.Mock(status_code=error_code)
            else:
                self.responses.extend(json["records"])
                return mock.Mock(status_code=200)

    tracker = MockPostTracker()
    with (
        mock.patch("requests.post", side_effect=tracker.mock_post),
        TelemetryClient() as telemetry_client,
    ):
        telemetry_client.add_record(record)
        start_time = time.time()
        telemetry_client.flush(terminate=terminate)
        duration = time.time() - start_time
        if terminate:
            assert duration < 1.5
        else:
            assert duration < 2.5
        if terminate:
            assert tracker.responses == []
        else:
            assert record.event_name in [resp["data"]["event_name"] for resp in tracker.responses]
@pytest.mark.parametrize("error_type", [ConnectionError, TimeoutError])
@pytest.mark.parametrize("terminate", [True, False])
def test_telemetry_retry_on_request_error(error_type, terminate):
    """Transport-level exceptions are retried unless the client is terminating."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )

    class MockPostTracker:
        # Raises `error_type` on the first two posts, then succeeds.
        def __init__(self):
            self.count = 0
            self.responses = []

        def mock_post(self, url, json=None, **kwargs):
            self.count += 1
            if self.count < 3:
                raise error_type()
            else:
                self.responses.extend(json["records"])
                return mock.Mock(status_code=200)

    tracker = MockPostTracker()
    with (
        mock.patch("requests.post", side_effect=tracker.mock_post),
        TelemetryClient() as telemetry_client,
    ):
        telemetry_client.add_record(record)
        start_time = time.time()
        telemetry_client.flush(terminate=terminate)
        duration = time.time() - start_time
        if terminate:
            assert duration < 1.5
        else:
            assert duration < 2.5
        # no retry when terminating
        if terminate:
            assert tracker.responses == []
        else:
            assert record.event_name in [resp["data"]["event_name"] for resp in tracker.responses]
def test_stop_event(mock_telemetry_client: TelemetryClient, mock_requests):
    """A stopped client silently drops records added afterwards."""
    mock_telemetry_client._is_stopped = True
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    # we need to terminate since the threads are stopped
    mock_telemetry_client.flush(terminate=True)
    # No records should be sent since the client is stopped
    events = {req["data"]["event_name"] for req in mock_requests}
    assert record.event_name not in events
def test_concurrent_record_addition(mock_telemetry_client: TelemetryClient, mock_requests):
    """Records added concurrently from several threads are all delivered."""

    def add_records(thread_id):
        for i in range(5):
            record = Record(
                event_name=f"test_event_{thread_id}_{i}",
                timestamp_ns=time.time_ns(),
                status=Status.SUCCESS,
            )
            mock_telemetry_client.add_record(record)
            time.sleep(0.1)

    # Start multiple threads
    threads = []
    for i in range(3):
        thread = threading.Thread(target=add_records, args=(i,))
        threads.append(thread)
        thread.start()
    # Wait for all threads to complete
    for thread in threads:
        thread.join()
    mock_telemetry_client.flush()
    # Should have received records from all threads
    events = {req["data"]["event_name"] for req in mock_requests}
    assert all(
        event_name in events
        for event_name in [
            f"test_event_{thread_id}_{i}" for thread_id in range(3) for i in range(5)
        ]
    )
def test_telemetry_info_inclusion(mock_telemetry_client: TelemetryClient, mock_requests):
    """Every sent record embeds all of the client's telemetry info fields."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    # Verify telemetry info is included
    data = next(req["data"] for req in mock_requests if req["data"]["event_name"] == "test_event")
    # Check that telemetry info fields are present
    assert mock_telemetry_client.info.items() <= data.items()
    # Check that record fields are present
    assert data["event_name"] == "test_event"
    assert data["status"] == "success"
def test_partition_key(mock_telemetry_client: TelemetryClient, mock_requests):
    """Each outgoing request gets a fresh (random) partition key."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    # Verify partition key is random
    assert mock_requests[0]["partition-key"] != mock_requests[1]["partition-key"]
def test_max_workers_setup(monkeypatch):
    """Activation spawns one named daemon consumer thread per configured worker."""
    monkeypatch.setattr("mlflow.telemetry.client.MAX_WORKERS", 8)
    with TelemetryClient() as telemetry_client:
        assert telemetry_client._max_workers == 8
        telemetry_client.activate()
        # Test that correct number of threads are created
        assert len(telemetry_client._consumer_threads) == 8
        # Verify thread names
        for i, thread in enumerate(telemetry_client._consumer_threads):
            assert thread.name == f"MLflowTelemetryConsumer-{i}"
            assert thread.daemon is True
def test_log_suppression_in_consumer_thread(mock_requests, capsys, mock_telemetry_client):
    """Logs emitted inside the consumer thread must not reach stderr."""
    # Clear any existing captured output
    capsys.readouterr()
    # Log from main thread - this should be captured
    logger = logging.getLogger("mlflow.telemetry.client")
    logger.info("TEST LOG FROM MAIN THREAD")
    original_process = mock_telemetry_client._process_records

    def process_with_log(records):
        logger.info("TEST LOG FROM CONSUMER THREAD")
        original_process(records)

    mock_telemetry_client._process_records = process_with_log
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    events = {req["data"]["event_name"] for req in mock_requests}
    assert record.event_name in events
    captured = capsys.readouterr()
    assert "TEST LOG FROM MAIN THREAD" in captured.err
    # Verify that the consumer thread log was suppressed
    assert "TEST LOG FROM CONSUMER THREAD" not in captured.err
def test_consumer_thread_no_stderr_output(mock_requests, capsys, mock_telemetry_client):
    """The consumer thread produces no stderr output while processing records."""
    # Clear any existing captured output
    capsys.readouterr()
    # Log from main thread - this should be captured
    logger = logging.getLogger("mlflow.telemetry.client")
    logger.info("MAIN THREAD LOG BEFORE CLIENT")
    # Clear output after client initialization to focus on consumer thread output
    capsys.readouterr()
    # Add multiple records to ensure consumer thread processes them
    for i in range(5):
        record = Record(
            event_name=f"test_event_{i}",
            timestamp_ns=time.time_ns(),
            status=Status.SUCCESS,
        )
        mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    # Wait for all records to be processed
    events = {req["data"]["event_name"] for req in mock_requests}
    assert all(event_name in events for event_name in [f"test_event_{i}" for i in range(5)])
    # Capture output after consumer thread has processed all records
    captured = capsys.readouterr()
    # Verify consumer thread produced no stderr output
    assert captured.err == ""
    # Log from main thread after processing - this should be captured
    logger.info("MAIN THREAD LOG AFTER PROCESSING")
    captured_after = capsys.readouterr()
    assert "MAIN THREAD LOG AFTER PROCESSING" in captured_after.err
def test_batch_time_interval(mock_requests, monkeypatch):
    """Pending records are flushed automatically once the batch interval elapses."""
    monkeypatch.setattr("mlflow.telemetry.client.BATCH_TIME_INTERVAL_SECONDS", 1)
    telemetry_client = TelemetryClient()
    assert telemetry_client._batch_time_interval == 1
    # Add first record
    record1 = Record(
        event_name="test_event_1",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    telemetry_client.add_record(record1)
    assert len(telemetry_client._pending_records) == 1
    events = {req["data"]["event_name"] for req in mock_requests}
    assert "test_event_1" not in events
    # Add second record before time interval
    record2 = Record(
        event_name="test_event_2",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    telemetry_client.add_record(record2)
    assert len(telemetry_client._pending_records) == 2
    # Wait for time interval to pass
    time.sleep(1.5)
    assert len(telemetry_client._pending_records) == 0
    # records are sent due to time interval
    events = {req["data"]["event_name"] for req in mock_requests}
    assert "test_event_1" in events
    assert "test_event_2" in events
    record3 = Record(
        event_name="test_event_3",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    telemetry_client.add_record(record3)
    telemetry_client.flush()
    # Verify all records were sent
    event_names = {req["data"]["event_name"] for req in mock_requests}
    assert all(env in event_names for env in ["test_event_1", "test_event_2", "test_event_3"])
def test_set_telemetry_client_non_blocking():
    """Client construction is fast; the config fetch thread finishes in the background."""
    start_time = time.time()
    with TelemetryClient() as telemetry_client:
        assert time.time() - start_time < 1
        assert telemetry_client is not None
        time.sleep(1.1)
        assert not any(
            thread.name.startswith("GetTelemetryConfig") for thread in threading.enumerate()
        )
@pytest.mark.parametrize(
    "mock_requests_return_value",
    [
        # HTTP error
        mock.Mock(status_code=403),
        # telemetry explicitly disabled
        mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": True,
                }
            ),
        ),
        # version mismatch
        mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": "1.0.0",
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                }
            ),
        ),
        # zero rollout
        mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 0,
                }
            ),
        ),
        # rollout below the mocked random draw (80)
        mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 70,
                }
            ),
        ),
    ],
)
@pytest.mark.no_mock_requests_get
def test_client_get_config_none(mock_requests_return_value):
    """Config resolves to None on error, disable flag, version mismatch, or rollout miss."""
    with (
        mock.patch("mlflow.telemetry.client.requests.get") as mock_requests,
        mock.patch("random.randint", return_value=80),
    ):
        mock_requests.return_value = mock_requests_return_value
        client = TelemetryClient()
        client._get_config()
        assert client.config is None
@pytest.mark.no_mock_requests_get
def test_client_get_config_not_none():
    """Config is kept when rollout admits the client and the SDK is not disabled."""
    # rollout 70 with a mocked draw of 50 -> admitted
    with (
        mock.patch("mlflow.telemetry.client.requests.get") as mock_requests,
        mock.patch("random.randint", return_value=50),
    ):
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 70,
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client._get_config()
            assert telemetry_client.config.ingestion_url == "http://localhost:9999"
            assert telemetry_client.config.disable_events == set()
    # full rollout -> always admitted
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests:
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client._get_config()
            assert telemetry_client.config.ingestion_url == "http://localhost:9999"
            assert telemetry_client.config.disable_events == set()
    # disable_sdks only nulls out the config for the matching source SDK
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests:
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_events": [],
                    "disable_sdks": ["mlflow-tracing"],
                }
            ),
        )
        with (
            mock.patch(
                "mlflow.telemetry.client.get_source_sdk", return_value=SourceSDK.MLFLOW_TRACING
            ),
            TelemetryClient() as telemetry_client,
        ):
            telemetry_client._get_config()
            assert telemetry_client.config is None
        with (
            mock.patch(
                "mlflow.telemetry.client.get_source_sdk", return_value=SourceSDK.MLFLOW_SKINNY
            ),
            TelemetryClient() as telemetry_client,
        ):
            telemetry_client._get_config()
            assert telemetry_client.config.ingestion_url == "http://localhost:9999"
            assert telemetry_client.config.disable_events == set()
        with (
            mock.patch("mlflow.telemetry.client.get_source_sdk", return_value=SourceSDK.MLFLOW),
            TelemetryClient() as telemetry_client,
        ):
            telemetry_client._get_config()
            assert telemetry_client.config.ingestion_url == "http://localhost:9999"
            assert telemetry_client.config.disable_events == set()
@pytest.mark.no_mock_requests_get
@pytest.mark.skipif(is_windows(), reason="This test only passes on non-Windows")
def test_get_config_disable_non_windows():
    """disable_os listing the current (non-Windows) platform nulls out the config."""
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests_get:
        mock_requests_get.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_os": ["linux", "darwin"],
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client._get_config()
            assert telemetry_client.config is None
    # disabling only win32 leaves non-Windows platforms enabled
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests:
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_os": ["win32"],
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client._get_config()
            assert telemetry_client.config.ingestion_url == "http://localhost:9999"
            assert telemetry_client.config.disable_events == set()
@pytest.mark.no_mock_requests_get
@pytest.mark.skipif(not is_windows(), reason="This test only passes on Windows")
def test_get_config_windows():
    """disable_os listing win32 nulls out the config on Windows; other OS lists do not."""
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests:
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_os": ["win32"],
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client._get_config()
            assert telemetry_client.config is None
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests:
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_os": ["linux", "darwin"],
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client._get_config()
            assert telemetry_client.config.ingestion_url == "http://localhost:9999"
            assert telemetry_client.config.disable_events == set()
@pytest.mark.no_mock_requests_get
def test_client_set_to_none_if_config_none():
    """A fetched config that disables telemetry stops the client after activation."""
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests:
        mock_requests.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": True,
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            assert telemetry_client is not None
            telemetry_client.activate()
            telemetry_client._config_thread.join(timeout=3)
            assert not telemetry_client._config_thread.is_alive()
            assert telemetry_client.config is None
            assert telemetry_client._is_config_fetched is True
            assert telemetry_client._is_stopped
@pytest.mark.no_mock_requests_get
def test_records_not_dropped_when_fetching_config(mock_requests):
    """Records added while the config fetch is in flight are still delivered."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
        duration_ms=0,
    )
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests_get:
        mock_requests_get.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                }
            ),
        )
        with TelemetryClient() as telemetry_client:
            telemetry_client.activate()
            # wait for config to be fetched
            telemetry_client._config_thread.join(timeout=3)
            telemetry_client.add_record(record)
            telemetry_client.flush()
            validate_telemetry_record(
                telemetry_client, mock_requests, record.event_name, check_params=False
            )
@pytest.mark.no_mock_requests_get
@pytest.mark.parametrize("error_code", [400, 401, 403, 404, 412, 500, 502, 503, 504])
def test_config_fetch_no_retry(mock_requests, error_code):
    """A failed config fetch is not retried and clears the global client."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )

    def mock_requests_get(*args, **kwargs):
        # slow response simulates network latency before the error
        time.sleep(1)
        return mock.Mock(status_code=error_code)

    with (
        mock.patch("mlflow.telemetry.client.requests.get", side_effect=mock_requests_get),
        TelemetryClient() as telemetry_client,
    ):
        telemetry_client.add_record(record)
        telemetry_client.flush()
        events = [req["data"]["event_name"] for req in mock_requests]
        assert record.event_name not in events
        assert get_telemetry_client() is None
def test_warning_suppression_in_shutdown(recwarn, mock_telemetry_client: TelemetryClient):
    """Warnings raised during the at-exit flush must be suppressed."""

    def flush_mock(*args, **kwargs):
        warnings.warn("test warning")

    with mock.patch.object(mock_telemetry_client, "flush", flush_mock):
        mock_telemetry_client._at_exit_callback()
    assert len(recwarn) == 0
@pytest.mark.parametrize("tracking_uri_scheme", ["databricks", "databricks-uc", "uc"])
@pytest.mark.parametrize("terminate", [True, False])
def test_databricks_tracking_uri_scheme(mock_requests, tracking_uri_scheme, terminate):
    """Databricks-style tracking URIs disable telemetry entirely."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    with (
        _use_tracking_uri(f"{tracking_uri_scheme}://profile_name"),
        TelemetryClient() as telemetry_client,
    ):
        telemetry_client.add_record(record)
        telemetry_client.flush(terminate=terminate)
        assert len(mock_requests) == 0
        assert get_telemetry_client() is None
@pytest.mark.no_mock_requests_get
def test_disable_events(mock_requests):
    """Events listed in the config's disable_events are not recorded; others are."""
    with mock.patch("mlflow.telemetry.client.requests.get") as mock_requests_get:
        mock_requests_get.return_value = mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_events": [CreateLoggedModelEvent.name],
                    "disable_sdks": [],
                }
            ),
        )
        with (
            TelemetryClient() as telemetry_client,
            mock.patch(
                "mlflow.telemetry.track.get_telemetry_client", return_value=telemetry_client
            ),
        ):
            telemetry_client.activate()
            telemetry_client._config_thread.join(timeout=1)
            # disabled event -> nothing captured
            mlflow.initialize_logged_model(name="model", tags={"key": "value"})
            telemetry_client.flush()
            assert len(mock_requests) == 0
            # non-disabled event -> captured
            with mlflow.start_run():
                pass
            validate_telemetry_record(
                telemetry_client, mock_requests, CreateRunEvent.name, check_params=False
            )
@pytest.mark.no_mock_requests_get
def test_fetch_config_after_first_record():
    """The remote config is fetched lazily — triggered by the first record, exactly once."""
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
        duration_ms=0,
    )
    mock_response = mock.Mock(
        status_code=200,
        json=mock.Mock(
            return_value={
                "mlflow_version": VERSION,
                "disable_telemetry": False,
                "ingestion_url": "http://localhost:9999",
                "rollout_percentage": 70,
            }
        ),
    )
    with mock.patch(
        "mlflow.telemetry.client.requests.get", return_value=mock_response
    ) as mock_requests_get:
        with TelemetryClient() as telemetry_client:
            # no fetch happens at construction time
            assert telemetry_client._is_config_fetched is False
            telemetry_client.add_record(record)
            telemetry_client._config_thread.join(timeout=1)
            assert telemetry_client._is_config_fetched is True
            # the config endpoint was hit exactly once
            mock_requests_get.assert_called_once()
@pytest.mark.parametrize(
    "uri",
    [
        "http://localhost",
        "http://localhost:5000",
        "http://127.0.0.1",
        "http://127.0.0.1:5000/api/2.0/mlflow",
        "http://[::1]",
    ],
)
def test_is_localhost_uri_returns_true_for_localhost(uri):
    """Loopback hosts — by name, IPv4, or IPv6 — are detected as localhost."""
    assert _is_localhost_uri(uri)
@pytest.mark.parametrize(
    "uri",
    [
        "http://example.com",
        "http://example.com:5000",
        "https://mlflow.example.com",
        "http://192.168.1.1",
        "http://192.168.1.1:5000",
        "http://10.0.0.1:5000",
        "https://my-tracking-server.com/api/2.0/mlflow",
    ],
)
def test_is_localhost_uri_returns_false_for_remote(uri):
    """Remote hosts — including private-network IPs — are not treated as localhost."""
    assert _is_localhost_uri(uri) is False
def test_is_localhost_uri_returns_none_for_empty_hostname():
    """A URI with no hostname (e.g. a file URI) yields None rather than a boolean."""
    outcome = _is_localhost_uri("file:///tmp/mlruns")
    assert outcome is None
def test_is_localhost_uri_returns_none_on_parse_error():
    """When URI parsing raises, the helper falls back to returning None."""
    # urlparse rarely raises for real inputs, so force a failure via mocking
    # to exercise the error-handling branch.
    patched_urlparse = mock.patch(
        "urllib.parse.urlparse", side_effect=ValueError("Invalid URI")
    )
    with patched_urlparse:
        result = _is_localhost_uri("http://localhost")
    assert result is None
def test_is_workspace_enabled_included_in_telemetry_info(
    mock_telemetry_client: TelemetryClient, mock_requests, monkeypatch
):
    """Payloads carry `ws_enabled` = True when MLFLOW_WORKSPACE is set."""
    monkeypatch.setenv("MLFLOW_WORKSPACE", "my-workspace")
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    # locate the request generated for our record and inspect its payload
    data = next(req["data"] for req in mock_requests if req["data"]["event_name"] == "test_event")
    assert data["ws_enabled"] is True
def test_is_workspace_disabled_included_in_telemetry_info(
    mock_telemetry_client: TelemetryClient, mock_requests, monkeypatch
):
    """Payloads carry `ws_enabled` = False when MLFLOW_WORKSPACE is unset."""
    monkeypatch.delenv("MLFLOW_WORKSPACE", raising=False)
    record = Record(
        event_name="test_event",
        timestamp_ns=time.time_ns(),
        status=Status.SUCCESS,
    )
    mock_telemetry_client.add_record(record)
    mock_telemetry_client.flush()
    # locate the request generated for our record and inspect its payload
    data = next(req["data"] for req in mock_requests if req["data"]["event_name"] == "test_event")
    assert data["ws_enabled"] is False
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/telemetry/test_client.py",
"license": "Apache License 2.0",
"lines": 885,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/telemetry/test_events.py | from unittest.mock import Mock
import pytest
from mlflow.entities.evaluation_dataset import DatasetGranularity, EvaluationDataset
from mlflow.prompt.constants import IS_PROMPT_TAG_KEY
from mlflow.telemetry.events import (
AiCommandRunEvent,
AlignJudgeEvent,
CreateDatasetEvent,
CreateExperimentEvent,
CreateLoggedModelEvent,
CreateModelVersionEvent,
CreatePromptEvent,
CreateRegisteredModelEvent,
CreateRunEvent,
DatasetToDataFrameEvent,
EvaluateEvent,
GatewayCreateEndpointEvent,
GatewayCreateSecretEvent,
GatewayListEndpointsEvent,
GatewayListSecretsEvent,
GatewayUpdateEndpointEvent,
LogAssessmentEvent,
MakeJudgeEvent,
MergeRecordsEvent,
OptimizePromptsJobEvent,
PromptOptimizationEvent,
SimulateConversationEvent,
StartTraceEvent,
)
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        (
            {"flavor": "mlflow.pyfunc"},
            {"flavor": "pyfunc"},
        ),
        (
            {"flavor": "sklearn"},
            {"flavor": "sklearn"},
        ),
        (
            {
                "flavor": None,
            },
            None,
        ),
        ({}, None),
    ],
)
def test_logged_model_parse_params(arguments, expected_params):
    """The `mlflow.` prefix is stripped from flavors; a missing flavor yields no params."""
    assert CreateLoggedModelEvent.name == "create_logged_model"
    assert CreateLoggedModelEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        ({"tags": None}, {"is_prompt": False}),
        ({"tags": {}}, {"is_prompt": False}),
        ({"tags": {IS_PROMPT_TAG_KEY: "true"}}, {"is_prompt": True}),
        ({"tags": {IS_PROMPT_TAG_KEY: "false"}}, {"is_prompt": False}),
        ({}, {"is_prompt": False}),
    ],
)
def test_registered_model_parse_params(arguments, expected_params):
    """`is_prompt` is derived from the prompt tag; absent or "false" tags map to False."""
    assert CreateRegisteredModelEvent.name == "create_registered_model"
    assert CreateRegisteredModelEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        ({"tags": None}, {"is_prompt": False}),
        ({"tags": {}}, {"is_prompt": False}),
        ({"tags": {IS_PROMPT_TAG_KEY: "true"}}, {"is_prompt": True}),
        ({"tags": {IS_PROMPT_TAG_KEY: "false"}}, {"is_prompt": False}),
        ({}, {"is_prompt": False}),
    ],
)
def test_create_model_version_parse_params(arguments, expected_params):
    """Model-version parsing mirrors registered-model parsing of the prompt tag."""
    assert CreateModelVersionEvent.name == "create_model_version"
    assert CreateModelVersionEvent.parse(arguments) == expected_params
def test_event_name():
    """Every telemetry event class advertises its expected snake_case name."""
    expected_names = {
        AiCommandRunEvent: "ai_command_run",
        CreatePromptEvent: "create_prompt",
        CreateLoggedModelEvent: "create_logged_model",
        CreateRegisteredModelEvent: "create_registered_model",
        CreateModelVersionEvent: "create_model_version",
        CreateRunEvent: "create_run",
        CreateExperimentEvent: "create_experiment",
        LogAssessmentEvent: "log_assessment",
        StartTraceEvent: "start_trace",
        EvaluateEvent: "evaluate",
        CreateDatasetEvent: "create_dataset",
        MergeRecordsEvent: "merge_records",
        MakeJudgeEvent: "make_judge",
        AlignJudgeEvent: "align_judge",
        PromptOptimizationEvent: "prompt_optimization",
        SimulateConversationEvent: "simulate_conversation",
    }
    # table-driven form of the original assert chain, checked in the same order
    for event_cls, expected in expected_names.items():
        assert event_cls.name == expected
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        # Records without 'inputs' field -> unknown dataset type
        (
            {"records": [{"test": "data"}]},
            {"record_count": 1, "input_type": "list[dict]", "dataset_type": "unknown"},
        ),
        (
            {"records": [{"a": 1}, {"b": 2}]},
            {"record_count": 2, "input_type": "list[dict]", "dataset_type": "unknown"},
        ),
        # Trace records
        (
            {"records": [{"inputs": {"question": "What is MLflow?", "context": "docs"}}]},
            {"record_count": 1, "input_type": "list[dict]", "dataset_type": "trace"},
        ),
        (
            {"records": [{"inputs": {"q": "a"}}, {"inputs": {"q": "b"}}]},
            {"record_count": 2, "input_type": "list[dict]", "dataset_type": "trace"},
        ),
        # Session records
        (
            {"records": [{"inputs": {"persona": "user", "goal": "test", "context": "info"}}]},
            {"record_count": 1, "input_type": "list[dict]", "dataset_type": "session"},
        ),
        # Edge cases
        ({"records": []}, None),
        ({"records": None}, None),
        ({}, None),
        (None, None),
        ({"records": object()}, None),
    ],
)
def test_merge_records_parse_params(arguments, expected_params):
    """Record count, input type, and inferred dataset type derive from the records arg;
    empty/missing/unsupported records yield no params."""
    assert MergeRecordsEvent.parse(arguments) == expected_params
def _make_mock_dataset(granularity: DatasetGranularity) -> Mock:
    """Build a Mock EvaluationDataset whose stored granularity is `granularity`."""
    dataset = Mock(spec=EvaluationDataset)
    dataset._get_existing_granularity.return_value = granularity
    return dataset
@pytest.mark.parametrize(
    ("granularity", "expected_dataset_type"),
    [
        (DatasetGranularity.TRACE, "trace"),
        (DatasetGranularity.SESSION, "session"),
        (DatasetGranularity.UNKNOWN, "unknown"),
    ],
)
def test_dataset_to_df_parse(granularity, expected_dataset_type):
    """The dataset's granularity maps to the reported `dataset_type`."""
    mock_dataset = _make_mock_dataset(granularity)
    # `self` mirrors how the instrumented method receives the dataset instance
    arguments = {"self": mock_dataset}
    result = DatasetToDataFrameEvent.parse(arguments)
    assert result == {"dataset_type": expected_dataset_type, "callsite": "direct_call"}
@pytest.mark.parametrize(
    ("result", "expected_params"),
    [
        ([{"a": 1}, {"b": 2}, {"c": 3}], {"record_count": 3}),
        ([{"row": 1}], {"record_count": 1}),
        ([], {"record_count": 0}),
        (None, {"record_count": 0}),
    ],
)
def test_dataset_to_df_parse_result(result, expected_params):
    """`record_count` reflects the number of returned rows; None counts as zero."""
    assert DatasetToDataFrameEvent.parse_result(result) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        ({"model": "openai:/gpt-4"}, {"model_provider": "openai"}),
        ({"model": "databricks:/dbrx"}, {"model_provider": "databricks"}),
        ({"model": "custom"}, {"model_provider": None}),
        ({"model": None}, {"model_provider": None}),
        ({}, {"model_provider": None}),
    ],
)
def test_make_judge_parse_params(arguments, expected_params):
    """Only the provider prefix of a `provider:/model` URI is reported, never the model."""
    assert MakeJudgeEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        (
            {"traces": [{}, {}], "optimizer": None},
            {"trace_count": 2, "optimizer_type": "default"},
        ),
        (
            {"traces": [{}], "optimizer": type("MockOptimizer", (), {})()},
            {"trace_count": 1, "optimizer_type": "MockOptimizer"},
        ),
        (
            {"traces": [], "optimizer": None},
            {"trace_count": 0, "optimizer_type": "default"},
        ),
        ({"traces": None, "optimizer": None}, {"optimizer_type": "default"}),
        ({}, {"optimizer_type": "default"}),
    ],
)
def test_align_judge_parse_params(arguments, expected_params):
    """Trace count is reported when traces are given; the optimizer's class name (or
    "default" when absent) is always reported."""
    assert AlignJudgeEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        # Normal case with optimizer and prompt URIs
        (
            {
                "optimizer": type("MockOptimizer", (), {})(),
                "prompt_uris": ["prompts:/test/1"],
                "scorers": None,
                "aggregation": None,
            },
            {
                "optimizer_type": "MockOptimizer",
                "prompt_count": 1,
                "scorer_count": None,
                "custom_aggregation": False,
            },
        ),
        # Multiple prompt URIs with custom scorers
        (
            {
                "optimizer": type("CustomAdapter", (), {})(),
                "prompt_uris": ["prompts:/test/1", "prompts:/test/2"],
                "scorers": [Mock()],
                "aggregation": None,
            },
            {
                "optimizer_type": "CustomAdapter",
                "prompt_count": 2,
                "scorer_count": 1,
                "custom_aggregation": False,
            },
        ),
        # Custom objective with multiple scorers
        (
            {
                "optimizer": type("TestAdapter", (), {})(),
                "prompt_uris": ["prompts:/test/1"],
                "scorers": [Mock(), Mock(), Mock()],
                "aggregation": lambda scores: sum(scores.values()),
            },
            {
                "optimizer_type": "TestAdapter",
                "prompt_count": 1,
                "scorer_count": 3,
                "custom_aggregation": True,
            },
        ),
        # No optimizer provided - optimizer_type should be None
        (
            {
                "optimizer": None,
                "prompt_uris": ["prompts:/test/1"],
                "scorers": None,
                "aggregation": None,
            },
            {
                "optimizer_type": None,
                "prompt_count": 1,
                "scorer_count": None,
                "custom_aggregation": False,
            },
        ),
    ],
)
def test_prompt_optimization_parse_params(arguments, expected_params):
    """Optimizer class name, prompt/scorer counts, and the custom-aggregation flag are
    all extracted from the call arguments."""
    assert PromptOptimizationEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("result", "expected_params"),
    [
        (
            [["t1", "t2", "t3"], ["t1"]],
            {"simulated_conversation_info": [{"turn_count": 3}, {"turn_count": 1}]},
        ),
        ([[]], {"simulated_conversation_info": [{"turn_count": 0}]}),
        ([], {"simulated_conversation_info": []}),
    ],
)
def test_simulate_conversation_parse_result(result, expected_params):
    """Each simulated conversation is summarized by its turn count only."""
    assert SimulateConversationEvent.parse_result(result) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        (
            {
                "fallback_config": {"strategy": "FAILOVER"},
                "routing_strategy": "REQUEST_BASED_TRAFFIC_SPLIT",
                "model_configs": [{"model_definition_id": "md-1"}, {"model_definition_id": "md-2"}],
            },
            {
                "has_fallback_config": True,
                "routing_strategy": "REQUEST_BASED_TRAFFIC_SPLIT",
                "num_model_configs": 2,
            },
        ),
        (
            {
                "fallback_config": None,
                "routing_strategy": None,
                "model_configs": [{"model_definition_id": "md-1"}],
            },
            {
                "has_fallback_config": False,
                "routing_strategy": None,
                "num_model_configs": 1,
            },
        ),
        (
            {"fallback_config": None, "routing_strategy": None, "model_configs": []},
            {"has_fallback_config": False, "routing_strategy": None, "num_model_configs": 0},
        ),
        (
            {},
            {"has_fallback_config": False, "routing_strategy": None, "num_model_configs": 0},
        ),
    ],
)
def test_gateway_create_endpoint_parse_params(arguments, expected_params):
    """Create-endpoint telemetry reports fallback presence, routing strategy, and the
    model-config count (0 when configs are absent)."""
    assert GatewayCreateEndpointEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        (
            {
                "fallback_config": {"strategy": "FAILOVER"},
                "routing_strategy": "ROUND_ROBIN",
                "model_configs": [{"model_definition_id": "md-1"}],
            },
            {
                "has_fallback_config": True,
                "routing_strategy": "ROUND_ROBIN",
                "num_model_configs": 1,
            },
        ),
        (
            {"fallback_config": None, "routing_strategy": None, "model_configs": None},
            {"has_fallback_config": False, "routing_strategy": None, "num_model_configs": None},
        ),
        (
            {},
            {"has_fallback_config": False, "routing_strategy": None, "num_model_configs": None},
        ),
    ],
)
def test_gateway_update_endpoint_parse_params(arguments, expected_params):
    """Update-endpoint parsing reports absent model_configs as None — an update may omit
    them — unlike create, which reports 0."""
    assert GatewayUpdateEndpointEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        ({"provider": "openai"}, {"filter_by_provider": True}),
        ({"provider": "anthropic"}, {"filter_by_provider": True}),
        ({"provider": None}, {"filter_by_provider": False}),
        ({}, {"filter_by_provider": False}),
    ],
)
def test_gateway_list_endpoints_parse_params(arguments, expected_params):
    """Listing endpoints only records whether a provider filter was used, not which one."""
    assert GatewayListEndpointsEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        ({"provider": "openai"}, {"provider": "openai"}),
        ({"provider": "anthropic"}, {"provider": "anthropic"}),
        ({"provider": None}, {"provider": None}),
        ({}, {"provider": None}),
    ],
)
def test_gateway_create_secret_parse_params(arguments, expected_params):
    """Secret creation records the provider name itself (None when absent)."""
    assert GatewayCreateSecretEvent.parse(arguments) == expected_params
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        ({"provider": "openai"}, {"filter_by_provider": True}),
        ({"provider": "anthropic"}, {"filter_by_provider": True}),
        ({"provider": None}, {"filter_by_provider": False}),
        ({}, {"filter_by_provider": False}),
    ],
)
def test_gateway_list_secrets_parse_params(arguments, expected_params):
    """Listing secrets only records whether a provider filter was used, not which one."""
    assert GatewayListSecretsEvent.parse(arguments) == expected_params
def test_simulate_conversation_parse_params():
    """With no arguments, parse() reports the conversation-simulator callsite."""
    parsed = SimulateConversationEvent.parse({})
    assert parsed == {"callsite": "conversation_simulator"}
def test_optimize_prompts_job_event_name():
    """The optimize-prompts job event exposes its expected snake_case name."""
    assert OptimizePromptsJobEvent.name == "optimize_prompts_job"
@pytest.mark.parametrize(
    ("arguments", "expected_params"),
    [
        (
            {"optimizer_type": "gepa", "scorer_names": ["Correctness", "Safety"]},
            {"optimizer_type": "gepa", "scorer_count": 2},
        ),
        (
            {"optimizer_type": "metaprompt", "scorer_names": ["Correctness"]},
            {"optimizer_type": "metaprompt", "scorer_count": 1},
        ),
        (
            {"optimizer_type": "gepa", "scorer_names": []},
            {"optimizer_type": "gepa", "scorer_count": 0},
        ),
        ({}, None),
    ],
)
def test_optimize_prompts_job_parse_params(arguments, expected_params):
    """Optimizer type and scorer count are reported; empty arguments yield no params."""
    assert OptimizePromptsJobEvent.parse(arguments) == expected_params
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/telemetry/test_events.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/telemetry/test_track.py | import time
from unittest import mock
import pytest
from mlflow.environment_variables import MLFLOW_DISABLE_TELEMETRY
from mlflow.telemetry.client import (
TelemetryClient,
_fetch_server_info,
get_telemetry_client,
set_telemetry_client,
)
from mlflow.telemetry.events import CreateLoggedModelEvent, Event
from mlflow.telemetry.schemas import Status
from mlflow.telemetry.track import _is_telemetry_disabled_for_event, record_usage_event
from mlflow.telemetry.utils import is_telemetry_disabled
from mlflow.tracking._tracking_service.utils import _use_tracking_uri
from mlflow.version import VERSION
class TestEvent(Event):
    """Minimal concrete Event used to drive the tracking decorator in these tests."""
    name = "test_event"
def test_record_usage_event(mock_requests, mock_telemetry_client: TelemetryClient):
    """Success and failure of a decorated function both produce complete records."""
    @record_usage_event(TestEvent)
    def succeed_func():
        # sleep to make sure duration_ms > 0
        time.sleep(0.01)
        return True
    @record_usage_event(TestEvent)
    def fail_func():
        time.sleep(0.01)
        raise ValueError("test")
    with mock.patch(
        "mlflow.telemetry.track.get_telemetry_client", return_value=mock_telemetry_client
    ):
        succeed_func()
        with pytest.raises(ValueError, match="test"):
            fail_func()
    mock_telemetry_client.flush()
    records = [
        record["data"] for record in mock_requests if record["data"]["event_name"] == TestEvent.name
    ]
    assert len(records) == 2
    # first record: successful call
    succeed_record = records[0]
    assert succeed_record["schema_version"] == 2
    assert succeed_record["event_name"] == TestEvent.name
    assert succeed_record["status"] == Status.SUCCESS.value
    assert succeed_record["params"] is None
    assert succeed_record["duration_ms"] > 0
    # second record: the raising call is still recorded, with FAILURE status
    fail_record = records[1]
    assert fail_record["schema_version"] == 2
    assert fail_record["event_name"] == TestEvent.name
    assert fail_record["status"] == Status.FAILURE.value
    assert fail_record["params"] is None
    assert fail_record["duration_ms"] > 0
    # shared client metadata is embedded in every record
    telemetry_info = mock_telemetry_client.info
    assert telemetry_info.items() <= succeed_record.items()
    assert telemetry_info.items() <= fail_record.items()
def test_backend_store_info(tmp_path, mock_telemetry_client: TelemetryClient, monkeypatch):
    """The tracking URI scheme and workspace flag are refreshed on each backend update."""
    sqlite_uri = f"sqlite:///{tmp_path.joinpath('test.db')}"
    with _use_tracking_uri(sqlite_uri):
        mock_telemetry_client._update_backend_store()
        assert mock_telemetry_client.info["tracking_uri_scheme"] == "sqlite"
    with _use_tracking_uri(tmp_path):
        mock_telemetry_client._update_backend_store()
        assert mock_telemetry_client.info["tracking_uri_scheme"] == "file"
    # Verify ws_enabled reflects MLFLOW_WORKSPACE env var
    monkeypatch.delenv("MLFLOW_WORKSPACE", raising=False)
    mock_telemetry_client._update_backend_store()
    assert mock_telemetry_client.info["ws_enabled"] is False
    monkeypatch.setenv("MLFLOW_WORKSPACE", "my-workspace")
    mock_telemetry_client._update_backend_store()
    assert mock_telemetry_client.info["ws_enabled"] is True
@pytest.mark.parametrize(
    ("scheme", "store_type", "expected_scheme"),
    [
        ("http", "FileStore", "http-file"),
        ("http", "SqlStore", "http-sql"),
        ("https", "FileStore", "https-file"),
        ("https", "SqlStore", "https-sql"),
        ("http", None, "http"),
        ("https", None, "https"),
    ],
)
def test_backend_store_info_http_scheme_enrichment(
    mock_telemetry_client: TelemetryClient,
    scheme: str,
    store_type: str | None,
    expected_scheme: str,
):
    """HTTP(S) tracking URIs are enriched with the server's store type when available;
    the bare scheme is kept when the server info is unknown."""
    server_info = {"store_type": store_type} if store_type else None
    with (
        mock.patch(
            "mlflow.telemetry.client._get_tracking_uri_info",
            return_value=(scheme, True),
        ) as mock_uri_info,
        mock.patch(
            "mlflow.telemetry.client._fetch_server_info",
            return_value=server_info,
        ) as mock_fetch,
    ):
        mock_telemetry_client._update_backend_store()
    assert mock_telemetry_client.info["tracking_uri_scheme"] == expected_scheme
    mock_uri_info.assert_called_once()
    mock_fetch.assert_called_once()
def test_backend_store_info_http_scheme_enrichment_cached(
    mock_telemetry_client: TelemetryClient,
):
    """Repeated backend updates for the same URI reuse the cached server info."""
    mock_response = mock.Mock(
        status_code=200, json=mock.Mock(return_value={"store_type": "SqlStore"})
    )
    with (
        mock.patch(
            "mlflow.telemetry.client._get_tracking_uri_info",
            return_value=("http", True),
        ) as mock_uri_info,
        mock.patch(
            "mlflow.telemetry.client.http_request",
            return_value=mock_response,
        ) as mock_req,
    ):
        # two updates, but only one HTTP round-trip expected
        mock_telemetry_client._update_backend_store()
        mock_telemetry_client._update_backend_store()
    assert mock_telemetry_client.info["tracking_uri_scheme"] == "http-sql"
    assert mock_uri_info.call_count == 2
    # http_request is called only once due to lru_cache
    mock_req.assert_called_once()
def test_backend_store_info_http_scheme_enrichment_per_uri(
    mock_telemetry_client: TelemetryClient,
):
    """Server-info enrichment is resolved per tracking URI, not shared across servers."""
    uri_to_server_info = {
        "http://server-a:5000": {"store_type": "FileStore"},
        "http://server-b:5000": {"store_type": "SqlStore"},
    }
    with (
        mock.patch(
            "mlflow.telemetry.client._get_tracking_uri_info",
            return_value=("http", True),
        ) as mock_uri_info,
        mock.patch(
            "mlflow.telemetry.client._fetch_server_info",
            side_effect=uri_to_server_info.get,
        ) as mock_fetch,
    ):
        with _use_tracking_uri("http://server-a:5000"):
            mock_telemetry_client._update_backend_store()
            assert mock_telemetry_client.info["tracking_uri_scheme"] == "http-file"
        with _use_tracking_uri("http://server-b:5000"):
            mock_telemetry_client._update_backend_store()
            assert mock_telemetry_client.info["tracking_uri_scheme"] == "http-sql"
    # each distinct URI triggered its own lookup + fetch
    assert mock_uri_info.call_count == 2
    assert mock_fetch.call_count == 2
@pytest.mark.parametrize(
    ("status_code", "json_body", "expected"),
    [
        (200, {"store_type": "FileStore"}, {"store_type": "FileStore"}),
        (200, {"store_type": "SqlStore"}, {"store_type": "SqlStore"}),
        (200, {}, {}),
        (404, None, None),
    ],
)
def test_fetch_server_info(
    status_code: int,
    json_body: dict[str, str | None] | None,
    expected: dict[str, str | None] | None,
):
    """A 200 response yields its JSON body; non-200 responses yield None."""
    mock_response = mock.Mock(status_code=status_code)
    if json_body is not None:
        mock_response.json.return_value = json_body
    with mock.patch(
        "mlflow.telemetry.client.http_request",
        return_value=mock_response,
    ) as mock_req:
        result = _fetch_server_info("http://localhost:5000")
    assert result == expected
    mock_req.assert_called_once()
def test_fetch_server_info_connection_error():
    """A connection failure while probing the server yields None instead of raising."""
    with mock.patch(
        "mlflow.telemetry.client.http_request",
        side_effect=ConnectionError,
    ) as mock_http:
        outcome = _fetch_server_info("http://localhost:5000")
    assert outcome is None
    mock_http.assert_called_once()
@pytest.mark.parametrize(
    ("env_var", "value", "expected_result"),
    [
        (MLFLOW_DISABLE_TELEMETRY.name, "true", None),
        (MLFLOW_DISABLE_TELEMETRY.name, "false", TelemetryClient),
        ("DO_NOT_TRACK", "true", None),
        ("DO_NOT_TRACK", "false", TelemetryClient),
    ],
)
def test_record_usage_event_respect_env_var(
    monkeypatch, env_var, value, expected_result, bypass_env_check
):
    """Opt-out env vars (MLflow-specific and generic DO_NOT_TRACK) prevent client creation."""
    monkeypatch.setenv(env_var, value)
    # mimic the behavior of `import mlflow`
    set_telemetry_client()
    telemetry_client = get_telemetry_client()
    if expected_result is None:
        assert is_telemetry_disabled() is True
        assert telemetry_client is None
    else:
        assert isinstance(telemetry_client, expected_result)
        telemetry_client._clean_up()
def test_record_usage_event_update_env_var_after_import(
    monkeypatch, mock_requests, mock_telemetry_client
):
    """Setting the disable env var after import stops subsequent events from recording."""
    assert isinstance(mock_telemetry_client, TelemetryClient)
    @record_usage_event(TestEvent)
    def test_func():
        pass
    with mock.patch(
        "mlflow.telemetry.track.get_telemetry_client", return_value=mock_telemetry_client
    ):
        # before the env var is set, the event is recorded
        test_func()
        mock_telemetry_client.flush()
        events = {record["data"]["event_name"] for record in mock_requests}
        assert TestEvent.name in events
        mock_requests.clear()
        monkeypatch.setenv("MLFLOW_DISABLE_TELEMETRY", "true")
        test_func()
        # no new record should be added
        assert len(mock_requests) == 0
@pytest.mark.no_mock_requests_get
def test_is_telemetry_disabled_for_event():
    """Per-event disabling only applies after the (slow) config fetch completes; a
    failed fetch disables telemetry globally by clearing the client."""
    def mock_requests_get(*args, **kwargs):
        # delay the config fetch so we can observe the pre-fetch behavior
        time.sleep(1)
        return mock.Mock(
            status_code=200,
            json=mock.Mock(
                return_value={
                    "mlflow_version": VERSION,
                    "disable_telemetry": False,
                    "ingestion_url": "http://localhost:9999",
                    "rollout_percentage": 100,
                    "disable_events": ["test_event"],
                }
            ),
        )
    with mock.patch("mlflow.telemetry.client.requests.get", side_effect=mock_requests_get):
        client = TelemetryClient()
        assert client is not None
        client.activate()
        assert client.config is None
        with mock.patch("mlflow.telemetry.track.get_telemetry_client", return_value=client):
            # do not skip when config is not fetched yet
            assert _is_telemetry_disabled_for_event(TestEvent) is False
            assert _is_telemetry_disabled_for_event(TestEvent) is False
            time.sleep(2)
            assert client._is_config_fetched is True
            assert client.config is not None
            # event not in disable_events, do not skip
            assert _is_telemetry_disabled_for_event(CreateLoggedModelEvent) is False
            # event in disable_events, skip
            assert _is_telemetry_disabled_for_event(TestEvent) is True
            # clean up
            client._clean_up()
    # test telemetry disabled after config is fetched
    def mock_requests_get(*args, **kwargs):
        # a 403 means telemetry is not permitted for this client
        time.sleep(1)
        return mock.Mock(status_code=403)
    with mock.patch("mlflow.telemetry.client.requests.get", side_effect=mock_requests_get):
        client = TelemetryClient()
        assert client is not None
        client.activate()
        assert client.config is None
        with (
            mock.patch("mlflow.telemetry.track.get_telemetry_client", return_value=client),
            mock.patch(
                "mlflow.telemetry.client._set_telemetry_client"
            ) as mock_set_telemetry_client,
        ):
            # do not skip when config is not fetched yet
            assert _is_telemetry_disabled_for_event(CreateLoggedModelEvent) is False
            assert _is_telemetry_disabled_for_event(TestEvent) is False
            time.sleep(2)
            assert client._is_config_fetched is True
            assert client.config is None
            # global telemetry client is set to None when telemetry is disabled
            mock_set_telemetry_client.assert_called_once_with(None)
            # clean up
            client._clean_up()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/telemetry/test_track.py",
"license": "Apache License 2.0",
"lines": 280,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/telemetry/test_tracked_events.py | import json
import time
from unittest import mock
from unittest.mock import AsyncMock, MagicMock, patch
import pandas as pd
import pytest
import sklearn.neighbors as knn
from click.testing import CliRunner
import mlflow
from mlflow import MlflowClient
from mlflow.entities import (
EvaluationDataset,
Expectation,
Feedback,
GatewayEndpointModelConfig,
Metric,
Param,
RunTag,
)
from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType
from mlflow.entities.gateway_endpoint import GatewayModelLinkageType
from mlflow.entities.trace import Trace
from mlflow.entities.webhook import WebhookAction, WebhookEntity, WebhookEvent
from mlflow.gateway.cli import start
from mlflow.gateway.schemas import chat
from mlflow.genai.datasets import create_dataset
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.base import AlignmentOptimizer
from mlflow.genai.scorers import scorer
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.builtin_scorers import (
Completeness,
Guidelines,
RelevanceToQuery,
Safety,
UserFrustration,
)
from mlflow.genai.simulators import ConversationSimulator
from mlflow.pyfunc.model import (
ResponsesAgent,
ResponsesAgentRequest,
ResponsesAgentResponse,
)
from mlflow.server.gateway_api import chat_completions, invocations
from mlflow.store.tracking.gateway.entities import GatewayEndpointConfig
from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore
from mlflow.telemetry.client import TelemetryClient
from mlflow.telemetry.events import (
AiCommandRunEvent,
AlignJudgeEvent,
AutologgingEvent,
CreateDatasetEvent,
CreateExperimentEvent,
CreateLoggedModelEvent,
CreateModelVersionEvent,
CreatePromptEvent,
CreateRegisteredModelEvent,
CreateRunEvent,
CreateWebhookEvent,
EvaluateEvent,
GatewayCreateEndpointEvent,
GatewayCreateSecretEvent,
GatewayDeleteEndpointEvent,
GatewayDeleteSecretEvent,
GatewayGetEndpointEvent,
GatewayInvocationEvent,
GatewayListEndpointsEvent,
GatewayListSecretsEvent,
GatewayStartEvent,
GatewayUpdateEndpointEvent,
GatewayUpdateSecretEvent,
GenAIEvaluateEvent,
GetLoggedModelEvent,
GitModelVersioningEvent,
InvokeCustomJudgeModelEvent,
LoadPromptEvent,
LogAssessmentEvent,
LogBatchEvent,
LogDatasetEvent,
LogMetricEvent,
LogParamEvent,
MakeJudgeEvent,
McpRunEvent,
MergeRecordsEvent,
PromptOptimizationEvent,
ScorerCallEvent,
SimulateConversationEvent,
StartTraceEvent,
TracingContextPropagation,
)
from mlflow.tracing.distributed import (
get_tracing_context_headers_for_http_request,
set_tracing_context_from_http_request_headers,
)
from mlflow.tracking.fluent import _create_dataset_input, _initialize_logged_model
from mlflow.utils.os import is_windows
from tests.telemetry.helper_functions import validate_telemetry_record
class TestModel(mlflow.pyfunc.PythonModel):
    """Trivial pyfunc model used to exercise model-logging telemetry events."""
    def predict(self, model_input: list[str]) -> str:
        return "test"
@pytest.fixture
def mlflow_client():
    """Provide a fresh MlflowClient for tests that exercise the client API."""
    return MlflowClient()
@pytest.fixture(autouse=True)
def mock_get_telemetry_client(mock_telemetry_client: TelemetryClient):
    """Route every telemetry-client lookup in this module to the mock client."""
    with mock.patch(
        "mlflow.telemetry.track.get_telemetry_client",
        return_value=mock_telemetry_client,
    ):
        yield
def test_create_logged_model(mock_requests, mock_telemetry_client: TelemetryClient):
    """Each model-creation entry point reports the appropriate flavor in telemetry."""
    event_name = CreateLoggedModelEvent.name
    mlflow.create_external_model(name="model")
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, event_name, {"flavor": "external"}
    )
    mlflow.initialize_logged_model(name="model", tags={"key": "value"})
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, event_name, {"flavor": "initialize"}
    )
    _initialize_logged_model(name="model", flavor="keras")
    validate_telemetry_record(mock_telemetry_client, mock_requests, event_name, {"flavor": "keras"})
    # custom python models report a pyfunc.* flavor
    mlflow.pyfunc.log_model(
        name="model",
        python_model=TestModel(),
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"flavor": "pyfunc.CustomPythonModel"},
    )
    mlflow.sklearn.log_model(
        knn.KNeighborsClassifier(),
        name="model",
    )
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, event_name, {"flavor": "sklearn"}
    )
    class SimpleResponsesAgent(ResponsesAgent):
        # minimal echo agent: replies with the first input message's content
        def predict(self, request: ResponsesAgentRequest) -> ResponsesAgentResponse:
            mock_response = {
                "output": [
                    {
                        "type": "message",
                        "id": "1234",
                        "status": "completed",
                        "role": "assistant",
                        "content": [
                            {
                                "type": "output_text",
                                "text": request.input[0].content,
                            }
                        ],
                    }
                ],
            }
            return ResponsesAgentResponse(**mock_response)
    mlflow.pyfunc.log_model(
        name="model",
        python_model=SimpleResponsesAgent(),
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"flavor": "pyfunc.ResponsesAgent"},
    )
def test_create_experiment(mock_requests, mlflow_client, mock_telemetry_client: TelemetryClient):
    """Experiment creation via fluent and client APIs both emit the event with the new id."""
    event_name = CreateExperimentEvent.name
    exp_id = mlflow.create_experiment(name="test_experiment")
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, event_name, {"experiment_id": exp_id}
    )
    exp_id = mlflow_client.create_experiment(name="test_experiment1")
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, event_name, {"experiment_id": exp_id}
    )
def test_create_run(mock_requests, mlflow_client, mock_telemetry_client: TelemetryClient):
    """Run creation emits the event; its params carry the resolved experiment id."""
    event_name = CreateRunEvent.name
    exp_id = mlflow.create_experiment(name="test_experiment")
    with mlflow.start_run(experiment_id=exp_id):
        record = validate_telemetry_record(
            mock_telemetry_client, mock_requests, event_name, check_params=False
        )
        # explicit experiment_id argument is echoed in the params
        assert json.loads(record["params"])["experiment_id"] == exp_id
    mlflow_client.create_run(experiment_id=exp_id)
    validate_telemetry_record(mock_telemetry_client, mock_requests, event_name, check_params=False)
    exp_id = mlflow.create_experiment(name="test_experiment2")
    mlflow.set_experiment(experiment_id=exp_id)
    with mlflow.start_run():
        record = validate_telemetry_record(
            mock_telemetry_client, mock_requests, event_name, check_params=False
        )
        params = json.loads(record["params"])
        # the experiment selected via set_experiment is reported separately
        assert params["mlflow_experiment_id"] == exp_id
def test_create_run_with_imports(mock_requests, mock_telemetry_client: TelemetryClient):
    """Relevant third-party modules imported in-process are reported in run params."""
    event_name = CreateRunEvent.name
    import pyspark.ml  # noqa: F401
    with mlflow.start_run():
        data = validate_telemetry_record(
            mock_telemetry_client, mock_requests, event_name, check_params=False
        )
        assert "pyspark.ml" in json.loads(data["params"])["imports"]
def test_create_registered_model(
    mock_requests, mlflow_client, mock_telemetry_client: TelemetryClient
):
    """Registering models directly or via log_model emits the event with is_prompt=False."""
    event_name = CreateRegisteredModelEvent.name
    mlflow_client.create_registered_model(name="test_model1")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"is_prompt": False},
    )
    mlflow.pyfunc.log_model(
        name="model",
        python_model=TestModel(),
        registered_model_name="test_model",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"is_prompt": False},
    )
def test_create_model_version(mock_requests, mlflow_client, mock_telemetry_client: TelemetryClient):
    """Creating model versions emits CreateModelVersionEvent.

    Covers the client API, log_model with registration, and prompt
    registration — the last one is flagged with ``is_prompt`` True.
    """
    event_name = CreateModelVersionEvent.name
    mlflow_client.create_registered_model(name="test_model")
    mlflow_client.create_model_version(
        name="test_model", source="test_source", run_id="test_run_id"
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"is_prompt": False},
    )
    mlflow.pyfunc.log_model(
        name="model",
        python_model=TestModel(),
        registered_model_name="test_model",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"is_prompt": False},
    )
    # Prompts are stored as model versions; the event marks them as prompts.
    mlflow.genai.register_prompt(
        name="ai_assistant_prompt",
        template="Respond to the user's message as a {{style}} AI. {{greeting}}",
        commit_message="Initial version of AI assistant",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        event_name,
        {"is_prompt": True},
    )
def test_start_trace(mock_requests, mlflow_client, mock_telemetry_client: TelemetryClient):
    """Every tracing entry point (start_span, @mlflow.trace, client start_trace)
    emits a StartTraceEvent; imports made beforehand appear in event params."""
    event_name = StartTraceEvent.name
    with mlflow.start_span(name="test_span"):
        pass
    validate_telemetry_record(mock_telemetry_client, mock_requests, event_name, check_params=False)
    @mlflow.trace
    def test_func():
        pass
    test_func()
    validate_telemetry_record(mock_telemetry_client, mock_requests, event_name, check_params=False)
    trace_id = mlflow_client.start_trace(name="test_trace").trace_id
    mlflow_client.end_trace(trace_id=trace_id)
    validate_telemetry_record(mock_telemetry_client, mock_requests, event_name, check_params=False)
    # Importing a library before tracing makes it show up in the "imports" param.
    import openai  # noqa: F401
    test_func()
    data = validate_telemetry_record(
        mock_telemetry_client, mock_requests, event_name, check_params=False
    )
    assert "openai" in json.loads(data["params"])["imports"]
def test_create_prompt(mock_requests, mlflow_client, mock_telemetry_client: TelemetryClient):
    """Prompt creation emits CreatePromptEvent; the OSS registry path goes
    through create_registered_model and is flagged with is_prompt=True."""
    mlflow_client.create_prompt(name="test_prompt")
    validate_telemetry_record(mock_telemetry_client, mock_requests, CreatePromptEvent.name)
    # OSS prompt registry uses create_registered_model with a special tag
    mlflow.genai.register_prompt(
        name="greeting_prompt",
        template="Respond to the user's message as a {{style}} AI. {{greeting}}",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        CreateRegisteredModelEvent.name,
        {"is_prompt": True},
    )
def test_log_assessment(mock_requests, mock_telemetry_client: TelemetryClient):
    """Logging assessments emits LogAssessmentEvent with type and source.

    Feedback objects (explicit or via log_feedback) record type "feedback"
    and source "CODE"; expectations record type "expectation" and "HUMAN".
    """
    with mlflow.start_span(name="test_span") as span:
        feedback = Feedback(
            name="faithfulness",
            value=0.9,
            rationale="The model is faithful to the input.",
            metadata={"model": "gpt-4o-mini"},
        )
        mlflow.log_assessment(trace_id=span.trace_id, assessment=feedback)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogAssessmentEvent.name,
            {"type": "feedback", "source_type": "CODE"},
        )
        # Shorthand API produces the same event shape.
        mlflow.log_feedback(trace_id=span.trace_id, value=0.9, name="faithfulness")
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogAssessmentEvent.name,
            {"type": "feedback", "source_type": "CODE"},
        )
    with mlflow.start_span(name="test_span2") as span:
        expectation = Expectation(
            name="expected_answer",
            value="MLflow",
        )
        mlflow.log_assessment(trace_id=span.trace_id, assessment=expectation)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogAssessmentEvent.name,
            {"type": "expectation", "source_type": "HUMAN"},
        )
        mlflow.log_expectation(trace_id=span.trace_id, value="MLflow", name="expected_answer")
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogAssessmentEvent.name,
            {"type": "expectation", "source_type": "HUMAN"},
        )
def test_evaluate(mock_requests, mock_telemetry_client: TelemetryClient):
    """Evaluating a model via mlflow.models.evaluate emits an EvaluateEvent."""
    eval_frame = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
    mlflow.models.evaluate(
        data=eval_frame,
        model=lambda x: x["x"] * 2,
        extra_metrics=[mlflow.metrics.latency()],
    )
    validate_telemetry_record(mock_telemetry_client, mock_requests, EvaluateEvent.name)
def test_create_webhook(mock_requests, mock_telemetry_client: TelemetryClient):
    """Creating a webhook emits CreateWebhookEvent listing subscribed events."""
    webhook_client = MlflowClient()
    webhook_client.create_webhook(
        name="test_webhook",
        url="https://example.com/webhook",
        events=[WebhookEvent(WebhookEntity.MODEL_VERSION, WebhookAction.CREATED)],
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        CreateWebhookEvent.name,
        {"events": ["model_version.created"]},
    )
def test_genai_evaluate(mock_requests, mock_telemetry_client: TelemetryClient):
    """mlflow.genai.evaluate emits GenAIEvaluateEvent describing scorers and data.

    Exercises every scorer kind (decorator, instructions, guidelines, builtin)
    at both response and session scope, with and without a ``predict_fn``.
    Judge model invocation is mocked so no LLM calls are made.
    """
    @mlflow.genai.scorer
    def decorator_scorer():
        return 1.0
    instructions_judge = make_judge(
        name="quality_judge",
        instructions="Evaluate if {{ outputs }} is high quality",
        model="openai:/gpt-4",
    )
    # Referencing {{ conversation }} makes the judge session-scoped.
    session_level_instruction_judge = make_judge(
        name="conversation_quality",
        instructions="Evaluate if the {{ conversation }} is engaging and coherent",
        model="openai:/gpt-4",
    )
    guidelines_scorer = Guidelines(
        name="politeness",
        guidelines=["Be polite", "Be respectful"],
    )
    builtin_scorer = RelevanceToQuery(name="relevance_check")
    session_level_builtin_scorer = UserFrustration(name="frustration_check")
    data = [
        {
            "inputs": {"model_input": ["What is MLflow?"]},
            "outputs": "MLflow is an open source platform.",
        }
    ]
    model = TestModel()
    with (
        mock.patch("mlflow.genai.judges.utils.invocation_utils.invoke_judge_model"),
        mock.patch("mlflow.genai.judges.builtin.invoke_judge_model"),
        mock.patch("mlflow.genai.judges.instructions_judge.invoke_judge_model"),
    ):
        # Test with all scorer kinds and scopes, without predict_fn
        mlflow.genai.evaluate(
            data=data,
            scorers=[
                decorator_scorer,
                instructions_judge,
                session_level_instruction_judge,
                guidelines_scorer,
                builtin_scorer,
                session_level_builtin_scorer,
            ],
        )
        expected_params = {
            "predict_fn_provided": False,
            "scorer_info": [
                {
                    "class": "UserDefinedScorer",
                    "kind": "decorator",
                    "scope": "response",
                },
                {
                    "class": "UserDefinedScorer",
                    "kind": "instructions",
                    "scope": "response",
                },
                {
                    "class": "UserDefinedScorer",
                    "kind": "instructions",
                    "scope": "session",
                },
                {"class": "Guidelines", "kind": "guidelines", "scope": "response"},
                {"class": "RelevanceToQuery", "kind": "builtin", "scope": "response"},
                {"class": "UserFrustration", "kind": "builtin", "scope": "session"},
            ],
            "eval_data_type": "list[dict]",
            "eval_data_size": 1,
            "eval_data_provided_fields": ["inputs", "outputs"],
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            GenAIEvaluateEvent.name,
            expected_params,
        )
        # Test with predict_fn
        mlflow.genai.evaluate(
            data=data,
            scorers=[builtin_scorer, guidelines_scorer],
            predict_fn=model.predict,
        )
        expected_params = {
            "predict_fn_provided": True,
            "scorer_info": [
                {"class": "RelevanceToQuery", "kind": "builtin", "scope": "response"},
                {"class": "Guidelines", "kind": "guidelines", "scope": "response"},
            ],
            "eval_data_type": "list[dict]",
            "eval_data_size": 1,
            "eval_data_provided_fields": ["inputs", "outputs"],
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            GenAIEvaluateEvent.name,
            expected_params,
        )
def test_genai_evaluate_telemetry_data_fields(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """GenAIEvaluateEvent reports the eval data's type, size, and field names.

    Covers the four accepted data shapes: list[dict], pandas DataFrame,
    list[Trace], and EvaluationDataset.
    """
    @mlflow.genai.scorer
    def sample_scorer():
        return 1.0
    with mock.patch("mlflow.genai.judges.utils.invocation_utils.invoke_judge_model"):
        # Test with list of dicts
        data_list = [
            {
                "inputs": {"question": "Q1"},
                "outputs": "A1",
                "expectations": {"answer": "Expected1"},
            },
            {
                "inputs": {"question": "Q2"},
                "outputs": "A2",
                "expectations": {"answer": "Expected2"},
            },
        ]
        mlflow.genai.evaluate(data=data_list, scorers=[sample_scorer])
        expected_params = {
            "predict_fn_provided": False,
            "scorer_info": [
                {
                    "class": "UserDefinedScorer",
                    "kind": "decorator",
                    "scope": "response",
                },
            ],
            "eval_data_type": "list[dict]",
            "eval_data_size": 2,
            "eval_data_provided_fields": ["expectations", "inputs", "outputs"],
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            GenAIEvaluateEvent.name,
            expected_params,
        )
        # Test with pandas DataFrame
        df_data = pd.DataFrame(
            [
                {"inputs": {"question": "Q1"}, "outputs": "A1"},
                {"inputs": {"question": "Q2"}, "outputs": "A2"},
                {"inputs": {"question": "Q3"}, "outputs": "A3"},
            ]
        )
        mlflow.genai.evaluate(data=df_data, scorers=[sample_scorer])
        expected_params = {
            "predict_fn_provided": False,
            "scorer_info": [
                {
                    "class": "UserDefinedScorer",
                    "kind": "decorator",
                    "scope": "response",
                },
            ],
            "eval_data_type": "pd.DataFrame",
            "eval_data_size": 3,
            "eval_data_provided_fields": ["inputs", "outputs"],
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            GenAIEvaluateEvent.name,
            expected_params,
        )
        # Test with list of Traces
        trace_ids = []
        for i in range(2):
            with mlflow.start_span(name=f"test_span_{i}") as span:
                span.set_inputs({"question": f"Q{i}"})
                span.set_outputs({"answer": f"A{i}"})
            trace_ids.append(span.trace_id)
        traces = [mlflow.get_trace(trace_id) for trace_id in trace_ids]
        mlflow.genai.evaluate(data=traces, scorers=[sample_scorer])
        expected_params = {
            "predict_fn_provided": False,
            "scorer_info": [
                {
                    "class": "UserDefinedScorer",
                    "kind": "decorator",
                    "scope": "response",
                },
            ],
            "eval_data_type": "list[Trace]",
            "eval_data_size": 2,
            "eval_data_provided_fields": ["inputs", "outputs", "trace"],
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            GenAIEvaluateEvent.name,
            expected_params,
        )
        # Test with EvaluationDataset
        from mlflow.genai.datasets import create_dataset
        dataset = create_dataset("test_dataset")
        dataset_data = [
            {
                "inputs": {"question": "Q1"},
                "outputs": "A1",
                "expectations": {"answer": "Expected1"},
            },
            {
                "inputs": {"question": "Q2"},
                "outputs": "A2",
                "expectations": {"answer": "Expected2"},
            },
        ]
        dataset.merge_records(dataset_data)
        mlflow.genai.evaluate(data=dataset, scorers=[sample_scorer])
        expected_params = {
            "predict_fn_provided": False,
            "scorer_info": [
                {
                    "class": "UserDefinedScorer",
                    "kind": "decorator",
                    "scope": "response",
                },
            ],
            "eval_data_type": "EvaluationDataset",
            "eval_data_size": 2,
            "eval_data_provided_fields": ["expectations", "inputs", "outputs"],
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            GenAIEvaluateEvent.name,
            expected_params,
        )
def test_simulate_conversation(mock_requests, mock_telemetry_client: TelemetryClient):
    """Direct ConversationSimulator.simulate emits SimulateConversationEvent.

    The simulator internals (user-message generation, goal checking, trace
    retrieval) are mocked; the event should report callsite
    "conversation_simulator" and per-conversation turn counts.
    """
    simulator = ConversationSimulator(
        test_cases=[
            {"goal": "Learn about MLflow"},
            {"goal": "Debug an issue"},
        ],
        max_turns=2,
    )
    def mock_predict_fn(input, **kwargs):
        return {"role": "assistant", "content": "Mock response"}
    mock_trace = mock.Mock()
    with (
        mock.patch(
            "mlflow.genai.simulators.simulator.invoke_model_without_tracing",
            return_value="Mock user message",
        ),
        # Goal never achieved -> every conversation runs the full max_turns.
        mock.patch(
            "mlflow.genai.simulators.simulator.ConversationSimulator._check_goal_achieved",
            return_value=False,
        ),
        mock.patch(
            "mlflow.genai.simulators.simulator.mlflow.get_trace",
            return_value=mock_trace,
        ),
    ):
        result = simulator.simulate(predict_fn=mock_predict_fn)
    assert len(result) == 2
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        SimulateConversationEvent.name,
        {
            "callsite": "conversation_simulator",
            "simulated_conversation_info": [
                {"turn_count": len(result[0])},
                {"turn_count": len(result[1])},
            ],
        },
    )
def test_simulate_conversation_from_genai_evaluate(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """Simulation driven by mlflow.genai.evaluate reports callsite "genai_evaluate".

    Exactly one SimulateConversationEvent should be recorded, with a single
    one-turn conversation (goal achieved immediately via the mocked check).
    """
    simulator = ConversationSimulator(
        test_cases=[
            {"goal": "Learn about MLflow"},
        ],
        max_turns=1,
    )
    def mock_predict_fn(input, **kwargs):
        return {"role": "assistant", "content": "Mock response"}
    @scorer
    def simple_scorer(outputs) -> bool:
        return len(outputs) > 0
    with (
        mock.patch(
            "mlflow.genai.simulators.simulator.invoke_model_without_tracing",
            return_value="Mock user message",
        ),
        mock.patch(
            "mlflow.genai.simulators.simulator.ConversationSimulator._check_goal_achieved",
            return_value=True,
        ),
    ):
        mlflow.genai.evaluate(data=simulator, predict_fn=mock_predict_fn, scorers=[simple_scorer])
    mock_telemetry_client.flush()
    simulate_events = [
        record
        for record in mock_requests
        if record["data"]["event_name"] == SimulateConversationEvent.name
    ]
    assert len(simulate_events) == 1
    event_params = json.loads(simulate_events[0]["data"]["params"])
    assert event_params == {
        "callsite": "genai_evaluate",
        "simulated_conversation_info": [{"turn_count": 1}],
    }
def test_prompt_optimization(mock_requests, mock_telemetry_client: TelemetryClient):
    """optimize_prompts emits PromptOptimizationEvent with optimizer metadata.

    A no-op optimizer adapter is used so the run completes without any real
    optimization work or LLM calls.
    """
    from mlflow.genai.optimize import optimize_prompts
    from mlflow.genai.optimize.optimizers import BasePromptOptimizer
    from mlflow.genai.optimize.types import PromptOptimizerOutput
    class MockAdapter(BasePromptOptimizer):
        def __init__(self):
            self.model_name = "openai:/gpt-4o-mini"
        def optimize(self, eval_fn, train_data, target_prompts, enable_tracking):
            # Return the prompts unchanged — no optimization performed.
            return PromptOptimizerOutput(optimized_prompts=target_prompts)
    sample_prompt = mlflow.genai.register_prompt(
        name="test_prompt_for_adaptation",
        template="Translate {{input_text}} to {{language}}",
    )
    sample_data = [
        {"inputs": {"input_text": "Hello", "language": "Spanish"}, "outputs": "Hola"},
        {"inputs": {"input_text": "World", "language": "French"}, "outputs": "Monde"},
    ]
    @mlflow.genai.scorers.scorer
    def exact_match_scorer(outputs, expectations):
        return 1.0 if outputs == expectations["expected_response"] else 0.0
    def predict_fn(input_text, language):
        mlflow.genai.load_prompt(f"prompts:/{sample_prompt.name}/{sample_prompt.version}")
        return "translated"
    optimize_prompts(
        predict_fn=predict_fn,
        train_data=sample_data,
        prompt_uris=[f"prompts:/{sample_prompt.name}/{sample_prompt.version}"],
        optimizer=MockAdapter(),
        scorers=[exact_match_scorer],
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        PromptOptimizationEvent.name,
        {
            "optimizer_type": "MockAdapter",
            "prompt_count": 1,
            "scorer_count": 1,
            "custom_aggregation": False,
        },
    )
def test_create_dataset(mock_requests, mock_telemetry_client: TelemetryClient):
    """Creating an evaluation dataset emits a CreateDatasetEvent.

    The tracking store is replaced with a MagicMock so no backend is touched.
    """
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store") as mock_store:
        fake_store = mock.MagicMock()
        fake_store.create_dataset.return_value = mock.MagicMock(
            dataset_id="test-dataset-id", name="test_dataset", tags={"test": "value"}
        )
        mock_store.return_value = fake_store
        create_dataset(name="test_dataset", tags={"test": "value"})
        validate_telemetry_record(mock_telemetry_client, mock_requests, CreateDatasetEvent.name)
def test_merge_records(mock_requests, mock_telemetry_client: TelemetryClient):
    """EvaluationDataset.merge_records emits MergeRecordsEvent with record
    count, input type, and dataset type; the tracking store is mocked."""
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store") as mock_store:
        mock_store_instance = mock.MagicMock()
        mock_store.return_value = mock_store_instance
        mock_store_instance.get_dataset.return_value = mock.MagicMock(dataset_id="test-id")
        mock_store_instance.upsert_dataset_records.return_value = {
            "inserted": 2,
            "updated": 0,
        }
        evaluation_dataset = EvaluationDataset(
            dataset_id="test-id",
            name="test",
            digest="digest",
            created_time=123,
            last_update_time=456,
        )
        records = [
            {"inputs": {"q": "Q1"}, "expectations": {"a": "A1"}},
            {"inputs": {"q": "Q2"}, "expectations": {"a": "A2"}},
        ]
        evaluation_dataset.merge_records(records)
        expected_params = {
            "record_count": 2,
            "input_type": "list[dict]",
            "dataset_type": "trace",
        }
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            MergeRecordsEvent.name,
            expected_params,
        )
def test_log_dataset(mock_requests, mock_telemetry_client: TelemetryClient):
    """log_input, log_inputs, and the client log_inputs API each emit a
    LogDatasetEvent."""
    with mlflow.start_run() as run:
        dataset = mlflow.data.from_pandas(pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}))
        mlflow.log_input(dataset)
        validate_telemetry_record(mock_telemetry_client, mock_requests, LogDatasetEvent.name)
        mlflow.log_inputs(datasets=[dataset], contexts=["training"], tags_list=[None])
        validate_telemetry_record(mock_telemetry_client, mock_requests, LogDatasetEvent.name)
        client = MlflowClient()
        client.log_inputs(run_id=run.info.run_id, datasets=[_create_dataset_input(dataset)])
        validate_telemetry_record(mock_telemetry_client, mock_requests, LogDatasetEvent.name)
def test_log_metric(mock_requests, mock_telemetry_client: TelemetryClient):
    """Metric logging emits LogMetricEvent carrying the ``synchronous`` flag,
    for both the fluent and client APIs."""
    with mlflow.start_run():
        mlflow.log_metric("test_metric", 1.0)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogMetricEvent.name,
            {"synchronous": True},
        )
        mlflow.log_metric("test_metric", 1.0, synchronous=False)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogMetricEvent.name,
            {"synchronous": False},
        )
        client = MlflowClient()
        client.log_metric(
            run_id=mlflow.active_run().info.run_id,
            key="test_metric",
            value=1.0,
            timestamp=int(time.time()),
            step=0,
        )
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogMetricEvent.name,
            {"synchronous": True},
        )
        client.log_metric(
            run_id=mlflow.active_run().info.run_id,
            key="test_metric",
            value=1.0,
            timestamp=int(time.time()),
            step=0,
            synchronous=False,
        )
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogMetricEvent.name,
            {"synchronous": False},
        )
def test_log_param(mock_requests, mock_telemetry_client: TelemetryClient):
    """Param logging emits LogParamEvent carrying the ``synchronous`` flag,
    for both the fluent and client APIs."""
    with mlflow.start_run():
        mlflow.log_param("test_param", "test_value")
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogParamEvent.name,
            {"synchronous": True},
        )
        mlflow.log_param("test_param", "test_value", synchronous=False)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogParamEvent.name,
            {"synchronous": False},
        )
        # Client API has no synchronous kwarg here; it reports True.
        client = mlflow.MlflowClient()
        client.log_param(
            run_id=mlflow.active_run().info.run_id,
            key="test_param",
            value="test_value",
        )
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogParamEvent.name,
            {"synchronous": True},
        )
def test_log_batch(mock_requests, mock_telemetry_client: TelemetryClient):
    """Batch logging emits LogBatchEvent whose params flag which of metrics,
    params, and tags were present, plus the ``synchronous`` flag."""
    with mlflow.start_run():
        mlflow.log_params(params={"test_param": "test_value"})
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": False, "params": True, "tags": False, "synchronous": True},
        )
        mlflow.log_params(params={"test_param": "test_value"}, synchronous=False)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": False, "params": True, "tags": False, "synchronous": False},
        )
        mlflow.log_metrics(metrics={"test_metric": 1.0})
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": True, "params": False, "tags": False, "synchronous": True},
        )
        mlflow.log_metrics(metrics={"test_metric": 1.0}, synchronous=False)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": True, "params": False, "tags": False, "synchronous": False},
        )
        mlflow.set_tags(tags={"test_tag": "test_value"})
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": False, "params": False, "tags": True, "synchronous": True},
        )
        mlflow.set_tags(tags={"test_tag": "test_value"}, synchronous=False)
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": False, "params": False, "tags": True, "synchronous": False},
        )
        # Client log_batch with all three entity types set at once.
        client = mlflow.MlflowClient()
        client.log_batch(
            run_id=mlflow.active_run().info.run_id,
            metrics=[Metric(key="test_metric", value=1.0, timestamp=int(time.time()), step=0)],
            params=[Param(key="test_param", value="test_value")],
            tags=[RunTag(key="test_tag", value="test_value")],
        )
        validate_telemetry_record(
            mock_telemetry_client,
            mock_requests,
            LogBatchEvent.name,
            {"metrics": True, "params": True, "tags": True, "synchronous": True},
        )
def test_get_logged_model(mock_requests, mock_telemetry_client: TelemetryClient, tmp_path):
    """Loading a logged model emits GetLoggedModelEvent.

    Covers flavor-specific load, pyfunc load, models-from-code, and loading
    via a model-registry URI. The log_model events are flushed first so
    each validation sees the load event.
    """
    model_info = mlflow.sklearn.log_model(
        knn.KNeighborsClassifier(),
        name="model",
    )
    mock_telemetry_client.flush()
    mlflow.sklearn.load_model(model_info.model_uri)
    data = validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GetLoggedModelEvent.name,
        check_params=False,
    )
    assert "sklearn" in json.loads(data["params"])["imports"]
    mlflow.pyfunc.load_model(model_info.model_uri)
    data = validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GetLoggedModelEvent.name,
        check_params=False,
    )
    # Models-from-code: the model is defined in a standalone script.
    model_def = """
import mlflow
from mlflow.models import set_model
class TestModel(mlflow.pyfunc.PythonModel):
    def predict(self, context, model_input: list[str], params=None) -> list[str]:
        return model_input
set_model(TestModel())
"""
    model_path = tmp_path / "model.py"
    model_path.write_text(model_def)
    model_info = mlflow.pyfunc.log_model(
        name="model",
        python_model=model_path,
    )
    mock_telemetry_client.flush()
    mlflow.pyfunc.load_model(model_info.model_uri)
    data = validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GetLoggedModelEvent.name,
        check_params=False,
    )
    # test load model after registry
    mlflow.register_model(model_info.model_uri, name="test")
    mock_telemetry_client.flush()
    mlflow.pyfunc.load_model("models:/test/1")
    data = validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GetLoggedModelEvent.name,
        check_params=False,
    )
def test_mcp_run(mock_requests, mock_telemetry_client: TelemetryClient):
    """Invoking the MCP `run` CLI command emits a McpRunEvent."""
    from mlflow.mcp.cli import run
    cli_runner = CliRunner(catch_exceptions=False)
    with mock.patch("mlflow.mcp.cli.run_server") as mock_run_server:
        cli_runner.invoke(run)
        mock_run_server.assert_called_once()
    mock_telemetry_client.flush()
    validate_telemetry_record(mock_telemetry_client, mock_requests, McpRunEvent.name)
@pytest.mark.skipif(is_windows(), reason="Windows does not support gateway start")
def test_gateway_start(tmp_path, mock_requests, mock_telemetry_client: TelemetryClient):
    """Starting the gateway via the CLI emits a GatewayStartEvent.

    The actual app launch is mocked; a minimal endpoint config is written
    to a temp file to satisfy the CLI's --config-path argument.
    """
    config = tmp_path.joinpath("config.yml")
    config.write_text(
        """
endpoints:
  - name: test-endpoint
    endpoint_type: llm/v1/completions
    model:
      provider: openai
      name: gpt-3.5-turbo
      config:
        openai_api_key: test-key
"""
    )
    runner = CliRunner(catch_exceptions=False)
    with mock.patch("mlflow.gateway.cli.run_app"):
        runner.invoke(start, ["--config-path", str(config)])
    mock_telemetry_client.flush()
    validate_telemetry_record(mock_telemetry_client, mock_requests, GatewayStartEvent.name)
def test_ai_command_run(mock_requests, mock_telemetry_client: TelemetryClient):
    """Running an AI command through the CLI emits AiCommandRunEvent with the
    command key and a "cli" context."""
    from mlflow.ai_commands import commands
    runner = CliRunner(catch_exceptions=False)
    # Test CLI context
    with mock.patch("mlflow.ai_commands.get_command", return_value="---\ntest\n---\nTest command"):
        result = runner.invoke(commands, ["run", "test_command"])
        assert result.exit_code == 0
    mock_telemetry_client.flush()
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        AiCommandRunEvent.name,
        {"command_key": "test_command", "context": "cli"},
    )
def test_git_model_versioning(mock_requests, mock_telemetry_client):
    """Entering the git model versioning context emits a GitModelVersioningEvent."""
    from mlflow.genai import enable_git_model_versioning
    versioning_context = enable_git_model_versioning()
    with versioning_context:
        pass
    mock_telemetry_client.flush()
    validate_telemetry_record(mock_telemetry_client, mock_requests, GitModelVersioningEvent.name)
@pytest.mark.parametrize(
    ("model_uri", "expected_provider", "litellm_available", "use_native_provider"),
    [
        ("databricks:/llama-3.1-70b", "databricks", True, False),
        ("openai:/gpt-4o-mini", "openai", True, False),
        ("endpoints:/my-endpoint", "endpoints", True, False),
        ("anthropic:/claude-3-opus", "anthropic", True, False),
    ],
)
def test_invoke_custom_judge_model(
    mock_requests,
    mock_telemetry_client: TelemetryClient,
    model_uri,
    expected_provider,
    litellm_available,
    use_native_provider,
):
    """invoke_judge_model emits InvokeCustomJudgeModelEvent with the provider.

    Both the native-provider and litellm code paths are mocked so no real
    model is invoked; only the recorded provider param is asserted.
    """
    from mlflow.genai.judges.utils import invoke_judge_model
    from mlflow.utils.rest_utils import MlflowHostCreds
    mock_response = json.dumps({"result": 0.8, "rationale": "Test rationale"})
    # Mock Databricks credentials for databricks:// URIs
    mock_creds = MlflowHostCreds(host="https://test.databricks.com", token="test-token")
    with (
        mock.patch(
            "mlflow.genai.judges.utils._is_litellm_available",
            return_value=litellm_available,
        ),
        mock.patch(
            "mlflow.utils.databricks_utils.get_databricks_host_creds",
            return_value=mock_creds,
        ),
    ):
        if use_native_provider:
            with (
                mock.patch.object(
                    __import__(
                        "mlflow.metrics.genai.model_utils",
                        fromlist=["score_model_on_payload"],
                    ),
                    "score_model_on_payload",
                    return_value=mock_response,
                ),
                mock.patch.object(
                    __import__(
                        "mlflow.metrics.genai.model_utils",
                        fromlist=["get_endpoint_type"],
                    ),
                    "get_endpoint_type",
                    return_value="llm/v1/chat",
                ),
            ):
                invoke_judge_model(
                    model_uri=model_uri,
                    prompt="Test prompt",
                    assessment_name="test_assessment",
                )
        else:
            from mlflow.genai.judges.adapters.litellm_adapter import InvokeLiteLLMOutput
            with mock.patch(
                "mlflow.genai.judges.adapters.litellm_adapter._invoke_litellm_and_handle_tools",
                return_value=InvokeLiteLLMOutput(
                    response=mock_response,
                    request_id="req-123",
                    num_prompt_tokens=5,
                    num_completion_tokens=3,
                    cost=10,
                ),
            ):
                invoke_judge_model(
                    model_uri=model_uri,
                    prompt="Test prompt",
                    assessment_name="test_assessment",
                )
    expected_params = {"model_provider": expected_provider}
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        InvokeCustomJudgeModelEvent.name,
        expected_params,
    )
def test_make_judge(mock_requests, mock_telemetry_client: TelemetryClient):
    """make_judge records the judge model's provider; None when no model given."""
    make_judge(
        name="test_judge",
        instructions="Evaluate the {{ inputs }} and {{ outputs }}",
        model="openai:/gpt-4",
        feedback_value_type=str,
    )
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, MakeJudgeEvent.name, {"model_provider": "openai"}
    )
    # Omitting the model reports a null provider.
    make_judge(
        name="test_judge",
        instructions="Evaluate the {{ inputs }} and {{ outputs }}",
        feedback_value_type=str,
    )
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, MakeJudgeEvent.name, {"model_provider": None}
    )
def test_align_judge(mock_requests, mock_telemetry_client: TelemetryClient):
    """Judge.align emits AlignJudgeEvent with trace count and optimizer type."""
    class MockOptimizer(AlignmentOptimizer):
        # No-op alignment: return the judge untouched.
        def align(self, judge, traces):
            return judge
    target_judge = make_judge(
        name="test_judge",
        instructions="Evaluate the {{ inputs }} and {{ outputs }}",
        model="openai:/gpt-4",
        feedback_value_type=str,
    )
    dummy_traces = [mock.MagicMock(spec=Trace) for _ in range(2)]
    target_judge.align(dummy_traces, optimizer=MockOptimizer())
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        AlignJudgeEvent.name,
        {"trace_count": 2, "optimizer_type": "MockOptimizer"},
    )
def test_autologging(mock_requests, mock_telemetry_client: TelemetryClient):
    """Enabling autologging emits AutologgingEvent per flavor (and "all").

    Params are compared as serialized JSON strings; autolog is disabled in
    the finally block so later tests are unaffected.
    """
    try:
        mlflow.openai.autolog()
        mlflow.autolog()
        mock_telemetry_client.flush()
        data = [record["data"] for record in mock_requests]
        params = [event["params"] for event in data if event["event_name"] == AutologgingEvent.name]
        assert (
            json.dumps(
                {
                    "flavor": mlflow.openai.FLAVOR_NAME,
                    "log_traces": True,
                    "disable": False,
                }
            )
            in params
        )
        assert json.dumps({"flavor": "all", "log_traces": True, "disable": False}) in params
    finally:
        mlflow.autolog(disable=True)
def test_load_prompt(mock_requests, mock_telemetry_client: TelemetryClient):
    """load_prompt emits LoadPromptEvent with a ``uses_alias`` flag.

    Version-based lookups report False; alias lookups (including the
    special ``@latest``) report True.
    """
    # Register a prompt first
    prompt = mlflow.genai.register_prompt(
        name="test_prompt",
        template="Hello {{name}}",
    )
    mock_telemetry_client.flush()
    # Set an alias for testing
    mlflow.genai.set_prompt_alias(name="test_prompt", version=prompt.version, alias="production")
    # Test load_prompt with version (no alias)
    mlflow.genai.load_prompt(name_or_uri="test_prompt", version=prompt.version)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        LoadPromptEvent.name,
        {"uses_alias": False},
    )
    # Test load_prompt with URI and version (no alias)
    mlflow.genai.load_prompt(name_or_uri=f"prompts:/test_prompt/{prompt.version}")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        LoadPromptEvent.name,
        {"uses_alias": False},
    )
    # Test load_prompt with alias
    mlflow.genai.load_prompt(name_or_uri="prompts:/test_prompt@production")
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, LoadPromptEvent.name, {"uses_alias": True}
    )
    # Test load_prompt with @latest (special alias)
    mlflow.genai.load_prompt(name_or_uri="prompts:/test_prompt@latest")
    validate_telemetry_record(
        mock_telemetry_client, mock_requests, LoadPromptEvent.name, {"uses_alias": True}
    )
def test_scorer_call_direct(mock_requests, mock_telemetry_client: TelemetryClient):
    """Calling a scorer directly emits ScorerCallEvent with class/kind info.

    Covers decorator-based, builtin, guidelines, and class-based scorers;
    all report callsite "direct_scorer_call". mock_requests is cleared
    between cases so each validation inspects a fresh record list.
    """
    @scorer
    def custom_scorer(outputs) -> bool:
        return len(outputs) > 0
    result = custom_scorer(outputs="test output")
    assert result is True
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserDefinedScorer",
            "scorer_kind": "decorator",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": False,
        },
    )
    safety_scorer = Safety()
    mock_feedback = Feedback(
        name="test_feedback",
        value="yes",
        rationale="Test rationale",
    )
    with mock.patch(
        "mlflow.genai.judges.builtin.invoke_judge_model",
        return_value=mock_feedback,
    ):
        safety_scorer(outputs="test output")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "Safety",
            "scorer_kind": "builtin",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": False,
        },
    )
    mock_requests.clear()
    guidelines_scorer = Guidelines(guidelines="The response must be in English")
    with mock.patch(
        "mlflow.genai.judges.builtin.invoke_judge_model",
        return_value=mock_feedback,
    ):
        guidelines_scorer(
            inputs={"question": "What is MLflow?"}, outputs="MLflow is an ML platform"
        )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "Guidelines",
            "scorer_kind": "guidelines",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": False,
        },
    )
    mock_requests.clear()
    class CustomClassScorer(Scorer):
        name: str = "custom_class"
        def __call__(self, *, outputs) -> bool:
            return len(outputs) > 0
    custom_class_scorer = CustomClassScorer()
    result = custom_class_scorer(outputs="test output")
    assert result is True
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserDefinedScorer",
            "scorer_kind": "class",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": False,
        },
    )
def test_scorer_call_from_genai_evaluate(mock_requests, mock_telemetry_client: TelemetryClient):
    """Scorer calls made inside mlflow.genai.evaluate report callsite
    "genai_evaluate".

    Two traces sharing one session should yield two response-level scorer
    events (one per trace) plus one session-level event (one per session).
    """
    @scorer
    def simple_length_checker(outputs) -> bool:
        return len(outputs) > 0
    session_judge = make_judge(
        name="conversation_quality",
        instructions="Evaluate if the {{ conversation }} is engaging and coherent",
        model="openai:/gpt-4",
    )
    # Create traces with session metadata for session-level scorer testing
    @mlflow.trace(span_type=mlflow.entities.SpanType.CHAT_MODEL)
    def model(question, session_id):
        mlflow.update_current_trace(metadata={"mlflow.trace.session": session_id})
        return f"Answer to: {question}"
    model("What is MLflow?", session_id="test_session")
    trace_1 = mlflow.get_trace(mlflow.get_last_active_trace_id())
    model("How does MLflow work?", session_id="test_session")
    trace_2 = mlflow.get_trace(mlflow.get_last_active_trace_id())
    test_data = pd.DataFrame(
        [
            {
                "trace": trace_1,
            },
            {
                "trace": trace_2,
            },
        ]
    )
    mock_feedback = Feedback(
        name="test_feedback",
        value="yes",
        rationale="Test",
    )
    with mock.patch(
        "mlflow.genai.judges.instructions_judge.invoke_judge_model",
        return_value=mock_feedback,
    ):
        mlflow.genai.evaluate(data=test_data, scorers=[simple_length_checker, session_judge])
    mock_telemetry_client.flush()
    scorer_call_events = [
        record for record in mock_requests if record["data"]["event_name"] == ScorerCallEvent.name
    ]
    # Should have 3 events: 2 response-level calls (one per trace)
    # + 1 session-level call (one per session)
    assert len(scorer_call_events) == 3
    event_params = [json.loads(event["data"]["params"]) for event in scorer_call_events]
    # Validate response-level scorer was called twice (once per trace)
    response_level_events = [
        params
        for params in event_params
        if params["scorer_class"] == "UserDefinedScorer"
        and params["scorer_kind"] == "decorator"
        and params["is_session_level_scorer"] is False
        and params["callsite"] == "genai_evaluate"
        and params["has_feedback_error"] is False
    ]
    assert len(response_level_events) == 2
    # Validate session-level scorer was called once (once per session)
    session_level_events = [
        params
        for params in event_params
        if params["scorer_class"] == "UserDefinedScorer"
        and params["scorer_kind"] == "instructions"
        and params["is_session_level_scorer"] is True
        and params["callsite"] == "genai_evaluate"
        and params["has_feedback_error"] is False
    ]
    assert len(session_level_events) == 1
    mock_requests.clear()
@pytest.mark.parametrize(
    ("job_name", "expected_callsite"),
    [
        ("run_online_trace_scorer", "online_scoring"),
        ("run_online_session_scorer", "online_scoring"),
        # Counterexample: non-online-scoring job should be treated as direct call
        ("invoke_scorer", "direct_scorer_call"),
    ],
)
def test_scorer_call_online_scoring_callsite(
    mock_requests, mock_telemetry_client: TelemetryClient, monkeypatch, job_name, expected_callsite
):
    """ScorerCallEvent's callsite is derived from the server job-name env var:
    online scoring jobs report "online_scoring", others "direct_scorer_call"."""
    # Import here to avoid circular imports
    from mlflow.server.jobs.utils import MLFLOW_SERVER_JOB_NAME_ENV_VAR
    monkeypatch.setenv(MLFLOW_SERVER_JOB_NAME_ENV_VAR, job_name)
    @scorer
    def custom_scorer(outputs: str) -> bool:
        return True
    custom_scorer(outputs="test output")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserDefinedScorer",
            "scorer_kind": "decorator",
            "is_session_level_scorer": False,
            "callsite": expected_callsite,
            "has_feedback_error": False,
        },
    )
def test_scorer_call_tracks_feedback_errors(mock_requests, mock_telemetry_client: TelemetryClient):
    """``has_feedback_error`` is True iff any returned Feedback carries an error.

    Covers three scorer return shapes: a single Feedback with an error, a list of
    Feedback objects with mixed errors, and a primitive return (no error possible).
    """
    error_judge = make_judge(
        name="quality_judge",
        instructions="Evaluate if {{ outputs }} is high quality",
        model="openai:/gpt-4",
    )
    error_feedback = Feedback(
        name="quality_judge",
        error="Model invocation failed",
        source=AssessmentSource(
            source_type=AssessmentSourceType.LLM_JUDGE, source_id="openai:/gpt-4"
        ),
    )
    with mock.patch(
        "mlflow.genai.judges.instructions_judge.invoke_judge_model",
        return_value=error_feedback,
    ):
        result = error_judge(outputs="test output")
    assert result.error is not None
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserDefinedScorer",
            "scorer_kind": "instructions",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": True,
        },
    )
    # Reset captured requests between the three sub-scenarios.
    mock_requests.clear()

    # Test Scorer returns list of Feedback with mixed errors
    @scorer
    def multi_feedback_scorer(outputs) -> list[Feedback]:
        return [
            Feedback(name="feedback1", value=1.0),
            Feedback(name="feedback2", error=ValueError("Error in feedback 2")),
            Feedback(name="feedback3", value=0.5),
        ]

    multi_feedback_scorer(outputs="test")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserDefinedScorer",
            "scorer_kind": "decorator",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": True,
        },
    )
    mock_requests.clear()

    # Test Scorer returns primitive type (no Feedback error possible)
    @scorer
    def primitive_scorer(outputs) -> bool:
        return True

    primitive_scorer(outputs="test")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserDefinedScorer",
            "scorer_kind": "decorator",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": False,
        },
    )
def test_scorer_call_wrapped_builtin_scorer_direct(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """Direct invocation of a builtin scorer emits exactly one top-level scorer_call event."""
    completeness_scorer = Completeness()
    mock_feedback = Feedback(
        name="completeness",
        value="yes",
        rationale="Test rationale",
    )
    # Stub out the underlying judge-model call so no LLM is contacted.
    with mock.patch(
        "mlflow.genai.judges.instructions_judge.invoke_judge_model",
        return_value=mock_feedback,
    ):
        completeness_scorer(inputs={"question": "What is MLflow?"}, outputs="MLflow is a platform")
    mock_telemetry_client.flush()
    # Verify exactly 1 scorer_call event was created
    # (only top-level Completeness, not nested InstructionsJudge)
    scorer_call_events = [
        record for record in mock_requests if record["data"]["event_name"] == ScorerCallEvent.name
    ]
    assert len(scorer_call_events) == 1, (
        f"Expected 1 scorer call event for Completeness scorer (nested calls should be skipped), "
        f"got {len(scorer_call_events)}"
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "Completeness",
            "scorer_kind": "builtin",
            "is_session_level_scorer": False,
            "callsite": "direct_scorer_call",
            "has_feedback_error": False,
        },
    )
def test_scorer_call_wrapped_builtin_scorer_from_genai_evaluate(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """A session-level builtin scorer run through genai.evaluate emits one scorer_call event."""
    user_frustration_scorer = UserFrustration()

    # Traced model that stamps the session id onto each trace's metadata so
    # both traces below belong to the same session.
    @mlflow.trace(span_type=mlflow.entities.SpanType.CHAT_MODEL)
    def model(question, session_id):
        mlflow.update_current_trace(metadata={"mlflow.trace.session": session_id})
        return f"Answer to: {question}"

    model("What is MLflow?", session_id="test_session")
    trace_1 = mlflow.get_trace(mlflow.get_last_active_trace_id())
    model("How does MLflow work?", session_id="test_session")
    trace_2 = mlflow.get_trace(mlflow.get_last_active_trace_id())
    test_data = pd.DataFrame(
        [
            {"trace": trace_1},
            {"trace": trace_2},
        ]
    )
    mock_feedback = Feedback(
        name="user_frustration",
        value="no",
        rationale="Test rationale",
    )
    with mock.patch(
        "mlflow.genai.judges.instructions_judge.invoke_judge_model",
        return_value=mock_feedback,
    ):
        mlflow.genai.evaluate(data=test_data, scorers=[user_frustration_scorer])
    mock_telemetry_client.flush()
    # Verify exactly 1 scorer_call event was created for the session-level scorer
    # (one call at the session level and no nested InstructionsJudge event)
    scorer_call_events = [
        record for record in mock_requests if record["data"]["event_name"] == ScorerCallEvent.name
    ]
    assert len(scorer_call_events) == 1, (
        f"Expected 1 scorer call event for UserFrustration scorer "
        f"(nested calls should be skipped), got {len(scorer_call_events)}"
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        ScorerCallEvent.name,
        {
            "scorer_class": "UserFrustration",
            "scorer_kind": "builtin",
            "is_session_level_scorer": True,
            "callsite": "genai_evaluate",
            "has_feedback_error": False,
        },
    )
def test_gateway_crud_telemetry(mock_requests, mock_telemetry_client: TelemetryClient, tmp_path):
    """Each gateway-endpoint CRUD operation on the store emits its telemetry event."""
    db_path = tmp_path / "mlflow.db"
    store = SqlAlchemyStore(f"sqlite:///{db_path}", tmp_path.as_posix())
    # A secret and model definition are prerequisites for creating an endpoint.
    secret = store.create_gateway_secret(
        secret_name="test-secret",
        secret_value={"api_key": "test-api-key"},
        provider="openai",
        created_by="test-user",
    )
    model_def = store.create_gateway_model_definition(
        name="test-model",
        provider="openai",
        model_name="gpt-4",
        secret_id=secret.secret_id,
        created_by="test-user",
    )
    model_config = GatewayEndpointModelConfig(
        model_definition_id=model_def.model_definition_id,
        linkage_type=GatewayModelLinkageType.PRIMARY,
        weight=100,
    )
    endpoint = store.create_gateway_endpoint(
        name="test-endpoint",
        model_configs=[model_config],
        created_by="test-user",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayCreateEndpointEvent.name,
        {
            "has_fallback_config": False,
            "routing_strategy": None,
            "num_model_configs": 1,
        },
    )
    store.get_gateway_endpoint(endpoint_id=endpoint.endpoint_id)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayGetEndpointEvent.name,
    )
    # List events record whether a provider filter was applied.
    store.list_gateway_endpoints()
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayListEndpointsEvent.name,
        {"filter_by_provider": False},
    )
    store.list_gateway_endpoints(provider="openai")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayListEndpointsEvent.name,
        {"filter_by_provider": True},
    )
    store.update_gateway_endpoint(
        endpoint_id=endpoint.endpoint_id,
        name="updated-endpoint",
    )
    # num_model_configs is None on update because no model configs were passed.
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayUpdateEndpointEvent.name,
        {
            "has_fallback_config": False,
            "routing_strategy": None,
            "num_model_configs": None,
        },
    )
    store.delete_gateway_endpoint(endpoint_id=endpoint.endpoint_id)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayDeleteEndpointEvent.name,
    )
def test_gateway_secret_crud_telemetry(
    mock_requests, mock_telemetry_client: TelemetryClient, tmp_path
):
    """Each gateway-secret CRUD operation on the store emits its telemetry event."""
    db_path = tmp_path / "mlflow.db"
    store = SqlAlchemyStore(f"sqlite:///{db_path}", tmp_path.as_posix())
    secret = store.create_gateway_secret(
        secret_name="test-secret",
        secret_value={"api_key": "test-api-key"},
        provider="openai",
        created_by="test-user",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayCreateSecretEvent.name,
        {"provider": "openai"},
    )
    # A secret created without a provider reports provider=None in telemetry.
    secret2 = store.create_gateway_secret(
        secret_name="test-secret-2",
        secret_value={"api_key": "test-api-key-2"},
        created_by="test-user",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayCreateSecretEvent.name,
        {"provider": None},
    )
    store.list_secret_infos()
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayListSecretsEvent.name,
        {"filter_by_provider": False},
    )
    store.list_secret_infos(provider="openai")
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayListSecretsEvent.name,
        {"filter_by_provider": True},
    )
    store.update_gateway_secret(
        secret_id=secret.secret_id,
        secret_value={"api_key": "updated-api-key"},
        updated_by="test-user",
    )
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayUpdateSecretEvent.name,
    )
    store.delete_gateway_secret(secret_id=secret.secret_id)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayDeleteSecretEvent.name,
    )
    # Clean up the second secret as well (its delete event is not asserted).
    store.delete_gateway_secret(secret_id=secret2.secret_id)
@pytest.mark.asyncio
async def test_gateway_invocation_telemetry(
    mock_requests, mock_telemetry_client: TelemetryClient, tmp_path
):
    """Gateway invocation endpoints emit GatewayInvocationEvent with the right
    invocation_type and is_streaming params, for all three call paths:
    /invocations (chat), /chat/completions, and streaming /chat/completions.
    """
    db_path = tmp_path / "mlflow.db"
    store = SqlAlchemyStore(f"sqlite:///{db_path}", tmp_path.as_posix())
    secret = store.create_gateway_secret(
        secret_name="test-secret",
        secret_value={"api_key": "test-api-key"},
        provider="openai",
        created_by="test-user",
    )
    # Discard telemetry produced by the setup calls so only invocation
    # events remain for the assertions below.
    mock_telemetry_client.flush()
    mock_requests.clear()
    model_def = store.create_gateway_model_definition(
        name="test-model",
        provider="openai",
        model_name="gpt-4",
        secret_id=secret.secret_id,
        created_by="test-user",
    )
    endpoint = store.create_gateway_endpoint(
        name="test-endpoint",
        model_configs=[
            GatewayEndpointModelConfig(
                model_definition_id=model_def.model_definition_id,
                linkage_type=GatewayModelLinkageType.PRIMARY,
                weight=100,
            )
        ],
        created_by="test-user",
    )
    mock_telemetry_client.flush()
    mock_requests.clear()
    # Canned provider response shared by the non-streaming scenarios.
    mock_response = chat.ResponsePayload(
        id="test-id",
        object="chat.completion",
        created=1234567890,
        model="gpt-4",
        choices=[
            chat.Choice(
                index=0,
                message=chat.ResponseMessage(role="assistant", content="Hello!"),
                finish_reason="stop",
            )
        ],
        usage=chat.ChatUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15),
    )

    # Test invocations endpoint (chat)
    mock_request = MagicMock()
    mock_request.json = AsyncMock(
        return_value={
            "messages": [{"role": "user", "content": "Hi"}],
            "temperature": 0.7,
            "stream": False,
        }
    )
    with (
        patch("mlflow.server.gateway_api._get_store", return_value=store),
        patch(
            "mlflow.server.gateway_api._create_provider_from_endpoint_name"
        ) as mock_create_provider,
    ):
        mock_provider = MagicMock()
        mock_provider.chat = AsyncMock(return_value=mock_response)
        mock_endpoint_config = GatewayEndpointConfig(
            endpoint_id=endpoint.endpoint_id, endpoint_name=endpoint.name, models=[]
        )
        mock_create_provider.return_value = (mock_provider, mock_endpoint_config)
        await invocations(endpoint.name, mock_request)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayInvocationEvent.name,
        {"is_streaming": False, "invocation_type": "mlflow_invocations"},
    )

    # Test chat_completions endpoint
    mock_request = MagicMock()
    mock_request.json = AsyncMock(
        return_value={
            "model": endpoint.name,
            "messages": [{"role": "user", "content": "Hi"}],
            "temperature": 0.7,
            "stream": False,
        }
    )
    with (
        patch("mlflow.server.gateway_api._get_store", return_value=store),
        patch(
            "mlflow.server.gateway_api._create_provider_from_endpoint_name"
        ) as mock_create_provider,
    ):
        mock_provider = MagicMock()
        mock_provider.chat = AsyncMock(return_value=mock_response)
        mock_endpoint_config = GatewayEndpointConfig(
            endpoint_id=endpoint.endpoint_id, endpoint_name=endpoint.name, models=[]
        )
        mock_create_provider.return_value = (mock_provider, mock_endpoint_config)
        await chat_completions(mock_request)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayInvocationEvent.name,
        {"is_streaming": False, "invocation_type": "mlflow_chat_completions"},
    )

    # Test streaming invocation
    mock_request = MagicMock()
    mock_request.json = AsyncMock(
        return_value={
            "model": endpoint.name,
            "messages": [{"role": "user", "content": "Hi"}],
            "stream": True,
        }
    )

    # Single-chunk async generator standing in for the provider's stream.
    async def mock_stream():
        yield chat.StreamResponsePayload(
            id="test-id",
            object="chat.completion.chunk",
            created=1234567890,
            model="gpt-4",
            choices=[
                chat.StreamChoice(
                    index=0,
                    delta=chat.StreamDelta(role="assistant", content="Hello"),
                    finish_reason=None,
                )
            ],
        )

    with (
        patch("mlflow.server.gateway_api._get_store", return_value=store),
        patch(
            "mlflow.server.gateway_api._create_provider_from_endpoint_name"
        ) as mock_create_provider,
    ):
        mock_provider = MagicMock()
        mock_provider.chat_stream = MagicMock(return_value=mock_stream())
        mock_endpoint_config = GatewayEndpointConfig(
            endpoint_id=endpoint.endpoint_id, endpoint_name=endpoint.name, models=[]
        )
        mock_create_provider.return_value = (mock_provider, mock_endpoint_config)
        await chat_completions(mock_request)
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        GatewayInvocationEvent.name,
        {"is_streaming": True, "invocation_type": "mlflow_chat_completions"},
    )
def test_tracing_context_propagation_get_and_set_success(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """Both directions of HTTP trace-context propagation emit a telemetry record."""
    # Client side: serialize the active span's context into HTTP headers.
    with mock.patch(
        "mlflow.telemetry.track.get_telemetry_client", return_value=mock_telemetry_client
    ):
        with mlflow.start_span("client span"):
            headers = get_tracing_context_headers_for_http_request()
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        TracingContextPropagation.name,
    )
    # Server side: restore the context from those headers and open a child span.
    with mock.patch(
        "mlflow.telemetry.track.get_telemetry_client", return_value=mock_telemetry_client
    ):
        with set_tracing_context_from_http_request_headers(headers):
            with mlflow.start_span("server span"):
                pass
    validate_telemetry_record(
        mock_telemetry_client,
        mock_requests,
        TracingContextPropagation.name,
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/telemetry/test_tracked_events.py",
"license": "Apache License 2.0",
"lines": 1801,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/telemetry/test_utils.py | from unittest.mock import Mock, patch
from mlflow.telemetry.constant import (
CONFIG_STAGING_URL,
CONFIG_URL,
FALLBACK_UI_CONFIG,
UI_CONFIG_STAGING_URL,
UI_CONFIG_URL,
)
from mlflow.telemetry.utils import (
_get_config_url,
fetch_ui_telemetry_config,
is_telemetry_disabled,
)
def test_is_telemetry_disabled(monkeypatch, bypass_env_check):
    """Opt-out env vars (MLFLOW_DISABLE_TELEMETRY, DO_NOT_TRACK) disable telemetry."""
    assert is_telemetry_disabled() is False
    with monkeypatch.context() as m:
        m.setenv("MLFLOW_DISABLE_TELEMETRY", "true")
        assert is_telemetry_disabled() is True
    # The flag is re-evaluated per call: once the env var is unset it re-enables.
    assert is_telemetry_disabled() is False
    with monkeypatch.context() as m:
        m.setenv("DO_NOT_TRACK", "true")
        assert is_telemetry_disabled() is True
def test_get_config_url(bypass_env_check):
    """Release/rc versions resolve to prod URLs, dev versions to staging,
    and local builds (PEP 440 local segment, e.g. ``+abc``) to None —
    for both the SDK and the UI config endpoints."""
    cases = [
        ("1.0.0", f"{CONFIG_URL}/1.0.0.json", f"{UI_CONFIG_URL}/1.0.0.json"),
        ("1.0.0.rc0", f"{CONFIG_URL}/1.0.0.rc0.json", f"{UI_CONFIG_URL}/1.0.0.rc0.json"),
        (
            "1.0.0.dev0",
            f"{CONFIG_STAGING_URL}/1.0.0.dev0.json",
            f"{UI_CONFIG_STAGING_URL}/1.0.0.dev0.json",
        ),
        ("1.0.0+abc", None, None),
    ]
    for version, expected_sdk_url, expected_ui_url in cases:
        assert _get_config_url(version) == expected_sdk_url
        assert _get_config_url(version, is_ui=True) == expected_ui_url
def test_fetch_ui_telemetry_config_fetch_success(bypass_env_check):
    """A successful (200) fetch returns the UI-specific fields from the remote config."""
    mock_config = {
        "mlflow_version": "3.7.1.dev0",
        "disable_telemetry": False,
        "rollout_percentage": 100,
        "ingestion_url": "https://api.mlflow-telemetry.io/staging/log",
        "disable_sdks": [],
        "disable_os": [],
        "disable_events": [],
        "disable_ui_telemetry": False,
        "disable_ui_events": ["test_event"],
        "ui_rollout_percentage": 100,
    }
    with patch("requests.get") as mock_get:
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = mock_config
        mock_get.return_value = mock_response
        result = fetch_ui_telemetry_config()
    assert result["disable_ui_telemetry"] is False
    assert result["disable_ui_events"] == ["test_event"]
    assert result["ui_rollout_percentage"] == 100
def test_fetch_ui_telemetry_config_fetch_error_fallback(bypass_env_check):
    """Any fetch failure — non-200 status or a raised exception — falls back to
    the conservative FALLBACK_UI_CONFIG (telemetry disabled, 0% rollout)."""
    # Failure mode 1: server responds with a non-200 status code.
    with patch("requests.get") as mocked_get:
        not_found = Mock()
        not_found.status_code = 404
        mocked_get.return_value = not_found
        config = fetch_ui_telemetry_config()
    assert config == FALLBACK_UI_CONFIG
    assert config["disable_ui_telemetry"] is True

    # Failure mode 2: the request itself raises.
    with patch("requests.get") as mocked_get:
        mocked_get.side_effect = Exception("Network error")
        config = fetch_ui_telemetry_config()
    assert config == FALLBACK_UI_CONFIG
    assert config["disable_ui_telemetry"] is True
    assert config["disable_ui_events"] == []
    assert config["ui_rollout_percentage"] == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/telemetry/test_utils.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/semantic_kernel/autolog.py | import logging
import threading
from opentelemetry import trace as otel_trace
from opentelemetry.context import Context
from opentelemetry.sdk.trace import ReadableSpan as OTelReadableSpan
from opentelemetry.sdk.trace import Span as OTelSpan
from opentelemetry.sdk.trace import TracerProvider as SDKTracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from opentelemetry.trace import (
NoOpTracerProvider,
ProxyTracerProvider,
get_tracer_provider,
set_tracer_provider,
)
from mlflow.entities.span import create_mlflow_span
from mlflow.semantic_kernel.tracing_utils import set_model, set_span_type, set_token_usage
from mlflow.tracing.constant import SpanAttributeKey
from mlflow.tracing.provider import _get_tracer, mlflow_runtime_context
from mlflow.tracing.trace_manager import InMemoryTraceManager
from mlflow.tracing.utils import (
_bypass_attribute_guard,
get_mlflow_span_for_otel_span,
get_otel_attribute,
set_span_cost_attribute,
should_compute_cost_client_side,
)
_logger = logging.getLogger(__name__)
def _enable_experimental_genai_tracing():
    """Turn on Semantic Kernel's experimental GenAI diagnostics (inputs/outputs capture)."""
    # NB: These settings are required to enable the telemetry for genai attributes
    # such as chat completion inputs/outputs, which are currently marked as experimental.
    # We directly update the singleton setting object instead of using env vars,
    # because the object might be already initialized by the time we call this function.
    # https://learn.microsoft.com/en-us/semantic-kernel/concepts/enterprise-readiness/observability/telemetry-with-console
    from semantic_kernel.utils.telemetry.model_diagnostics.decorators import (
        MODEL_DIAGNOSTICS_SETTINGS,
    )

    MODEL_DIAGNOSTICS_SETTINGS.enable_otel_diagnostics = True
    MODEL_DIAGNOSTICS_SETTINGS.enable_otel_diagnostics_sensitive = True

    try:
        # This only exists in Semantic Kernel 1.35.1 or later.
        from semantic_kernel.utils.telemetry.agent_diagnostics.decorators import (
            MODEL_DIAGNOSTICS_SETTINGS as AGENT_DIAGNOSTICS_SETTINGS,
        )

        AGENT_DIAGNOSTICS_SETTINGS.enable_otel_diagnostics = True
        AGENT_DIAGNOSTICS_SETTINGS.enable_otel_diagnostics_sensitive = True
    except ImportError:
        # Older SK versions expose no agent diagnostics settings; model-level is enough.
        pass

    _logger.info("Semantic Kernel Otel diagnostics is turned on for enabling tracing.")
def setup_semantic_kernel_tracing():
    """Register the MLflow-bridging span processor with the global OTel tracer provider."""
    _enable_experimental_genai_tracing()

    # NB: This logic has a known issue that it does not work when Semantic Kernel program is
    # executed before calling this setup is called. This is because Semantic Kernel caches the
    # tracer instance in each module (ref:https://github.com/microsoft/semantic-kernel/blob/6ecf2b9c2c893dc6da97abeb5962dfc49bed062d/python/semantic_kernel/functions/kernel_function.py#L46),
    # which prevent us from updating the span processor setup for the tracer.
    # Therefore, `mlflow.semantic_kernel.autolog()` should always be called before running the
    # Semantic Kernel program.
    provider = get_tracer_provider()
    sk_processor = SemanticKernelSpanProcessor()
    if isinstance(provider, (NoOpTracerProvider, ProxyTracerProvider)):
        # No real SDK provider installed yet: create one and make it global.
        new_provider = SDKTracerProvider()
        new_provider.add_span_processor(sk_processor)
        set_tracer_provider(new_provider)
    else:
        # Reuse the existing SDK provider, but never register our processor twice.
        # NOTE(review): this reaches into private attributes of the SDK provider
        # (_active_span_processor._span_processors) — may break across OTel versions.
        if not any(
            isinstance(p, SemanticKernelSpanProcessor)
            for p in provider._active_span_processor._span_processors
        ):
            provider.add_span_processor(sk_processor)
class SemanticKernelSpanProcessor(SimpleSpanProcessor):
    """Span processor that mirrors Semantic Kernel's OTel spans into MLflow traces.

    On span start it registers a matching MLflow span and attaches the span to
    MLflow's runtime context; on span end it detaches the context, enriches the
    MLflow span (type, model, token usage, cost) and exports it via MLflow's
    own span processor.
    """

    def __init__(self):
        # NB: Dummy NoOp exporter, because OTel span processor requires an exporter
        self.span_exporter = SpanExporter()
        # Store context tokens for each span so we can detach them in on_end
        self._context_tokens: dict[int, object] = {}
        # Thread-local flags backing the re-entrancy guards in on_start/on_end.
        self._processing_local = threading.local()

    def on_start(self, span: OTelSpan, parent_context: Context | None = None):
        # Recursion guard: with MLFLOW_USE_DEFAULT_TRACER_PROVIDER=false (shared provider),
        # tracer.span_processor.on_start() routes back through the same composite processor,
        # re-entering this method and causing infinite recursion.
        if getattr(self._processing_local, "in_on_start", False):
            return
        self._processing_local.in_on_start = True
        try:
            # Trigger MLflow's span processor
            tracer = _get_tracer(__name__)
            tracer.span_processor.on_start(span, parent_context)
            trace_id = get_otel_attribute(span, SpanAttributeKey.REQUEST_ID)
            mlflow_span = create_mlflow_span(span, trace_id)
            # Register new span in the in-memory trace manager
            InMemoryTraceManager.get_instance().register_span(mlflow_span)
            # Also set this span in MLflow's runtime context so that other autolog integrations
            # (like OpenAI) can correctly parent their spans to Semantic Kernel spans.
            # NB: We use otel_trace.set_span_in_context() directly instead of
            # mlflow.tracing.provider.set_span_in_context() because the latter can produce
            # two separate traces when MLFLOW_USE_DEFAULT_TRACER_PROVIDER is set to False.
            # Using the OpenTelemetry API directly ensures consistent behavior for autologging.
            context = otel_trace.set_span_in_context(span)
            token = mlflow_runtime_context.attach(context)
            # Keyed by OTel span id so on_end can find and detach the right token.
            self._context_tokens[span.context.span_id] = token
        finally:
            self._processing_local.in_on_start = False

    def on_end(self, span: OTelReadableSpan) -> None:
        # Recursion guard: with MLFLOW_USE_DEFAULT_TRACER_PROVIDER=false (shared provider),
        # tracer.span_processor.on_end() routes back through the same composite processor,
        # re-entering this method and causing infinite recursion.
        if getattr(self._processing_local, "in_on_end", False):
            return
        self._processing_local.in_on_end = True
        try:
            # Detach the span from MLflow's runtime context
            token = self._context_tokens.pop(span.context.span_id, None)
            if token is not None:
                mlflow_runtime_context.detach(token)
            mlflow_span = get_mlflow_span_for_otel_span(span)
            if mlflow_span is None:
                _logger.debug("Span not found in the map. Skipping end.")
                return
            # Enrich the span with SK-derived attributes before export; the guard
            # bypass allows writing attributes on a span that has already ended.
            with _bypass_attribute_guard(mlflow_span._span):
                set_span_type(mlflow_span)
                set_model(mlflow_span)
                set_token_usage(mlflow_span)
                if should_compute_cost_client_side():
                    set_span_cost_attribute(mlflow_span)
            # Export the span using MLflow's span processor
            tracer = _get_tracer(__name__)
            tracer.span_processor.on_end(span)
        finally:
            self._processing_local.in_on_end = False
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/semantic_kernel/autolog.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/semantic_kernel/tracing_utils.py | import logging
from typing import Any
from opentelemetry import context as otel_context_api
from opentelemetry import trace as otel_trace
from opentelemetry.trace import get_current_span
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.kernel_content import KernelContent
from semantic_kernel.contents.streaming_content_mixin import StreamingContentMixin
from semantic_kernel.functions import FunctionResult
from semantic_kernel.utils.telemetry.model_diagnostics import (
gen_ai_attributes as model_gen_ai_attributes,
)
from semantic_kernel.utils.telemetry.model_diagnostics.decorators import (
CHAT_COMPLETION_OPERATION,
TEXT_COMPLETION_OPERATION,
)
from semantic_kernel.utils.telemetry.model_diagnostics.function_tracer import (
OPERATION_NAME as FUNCTION_OPERATION_NAME,
)
import mlflow
from mlflow.entities import SpanType
from mlflow.entities.span import LiveSpan
from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey
from mlflow.tracing.utils import (
construct_full_inputs,
get_mlflow_span_for_otel_span,
)
# Maps Semantic Kernel telemetry operation names to MLflow span types.
_OPERATION_TO_SPAN_TYPE = {
    CHAT_COMPLETION_OPERATION: SpanType.CHAT_MODEL,
    TEXT_COMPLETION_OPERATION: SpanType.LLM,
    FUNCTION_OPERATION_NAME: SpanType.TOOL,
    # https://github.com/microsoft/semantic-kernel/blob/d5ee6aa1c176a4b860aba72edaa961570874661b/python/semantic_kernel/utils/telemetry/agent_diagnostics/decorators.py#L22
    "invoke_agent": SpanType.AGENT,
}

# NB: Streaming operation names were removed in Semantic Kernel 1.38.0
try:
    from semantic_kernel.utils.telemetry.agent_diagnostics.decorators import (
        CHAT_STREAMING_COMPLETION_OPERATION,
        TEXT_STREAMING_COMPLETION_OPERATION,
    )

    _OPERATION_TO_SPAN_TYPE[CHAT_STREAMING_COMPLETION_OPERATION] = SpanType.CHAT_MODEL
    _OPERATION_TO_SPAN_TYPE[TEXT_STREAMING_COMPLETION_OPERATION] = SpanType.LLM
except ImportError:
    # SK >= 1.38.0 no longer defines the streaming operation names; skip them.
    pass

_logger = logging.getLogger(__name__)
def semantic_kernel_diagnostics_wrapper(original, *args, **kwargs) -> None:
    """
    Wrapper for Semantic Kernel's model diagnostics decorators.

    This wrapper is used to record the inputs and outputs to the span, because
    Semantic Kernel's Otel span do not record the inputs and outputs.
    """
    full_kwargs = construct_full_inputs(original, *args, **kwargs)
    # The wrapped SK helper receives the target span either explicitly via a
    # `current_span` kwarg or implicitly through the active OTel context.
    current_span = full_kwargs.get("current_span") or get_current_span()
    mlflow_span = get_mlflow_span_for_otel_span(current_span)
    if not mlflow_span:
        _logger.debug("Span is not found or recording. Skipping error handling.")
        return original(*args, **kwargs)

    # Which of the three SK helpers we are wrapping is determined by the
    # keyword it was invoked with (prompt / completions / error).
    if prompt := full_kwargs.get("prompt"):
        # Wrapping _set_completion_input
        # https://github.com/microsoft/semantic-kernel/blob/d5ee6aa1c176a4b860aba72edaa961570874661b/python/semantic_kernel/utils/telemetry/model_diagnostics/decorators.py#L369
        mlflow_span.set_inputs(_parse_content(prompt))
    if completions := full_kwargs.get("completions"):
        # Wrapping _set_completion_response
        # https://github.com/microsoft/semantic-kernel/blob/d5ee6aa1c176a4b860aba72edaa961570874661b/
        mlflow_span.set_outputs({"messages": [_parse_content(c) for c in completions]})
    if error := full_kwargs.get("error"):
        # Wrapping _set_completion_error
        # https://github.com/microsoft/semantic-kernel/blob/d5ee6aa1c176a4b860aba72edaa961570874661b/python/semantic_kernel/utils/telemetry/model_diagnostics/decorators.py#L452
        mlflow_span.record_exception(error)

    # Always delegate to the original SK implementation after mirroring the data.
    return original(*args, **kwargs)
async def patched_kernel_entry_point(original, self, *args, **kwargs):
    """Wrap a Kernel entry point in an MLflow AGENT span and link SK's internal
    OTel spans to it via the global OTel context."""
    with mlflow.start_span(
        name=f"{self.__class__.__name__}.{original.__name__}",
        span_type=SpanType.AGENT,
    ) as mlflow_span:
        inputs = construct_full_inputs(original, self, *args, **kwargs)
        mlflow_span.set_inputs(_parse_content(inputs))

        # Attach the MLflow span to the global OTel context so that Semantic Kernel's
        # internal OTel spans (e.g., execute_tool, chat.completions) will inherit the
        # same trace_id and be properly linked as child spans.
        global_ctx = otel_trace.set_span_in_context(mlflow_span._span)
        token = otel_context_api.attach(global_ctx)
        try:
            result = await original(self, *args, **kwargs)
        finally:
            # Always restore the previous context, even when the call raises.
            otel_context_api.detach(token)

        mlflow_span.set_outputs(_parse_content(result))
        return result
def _parse_content(value: Any) -> Any:
    """
    Normalize Semantic Kernel content objects into plain, readable structures.

    SK message contents are Pydantic models with many noisy fields; their
    to_dict() form (role, content) is far easier to read. Containers are
    walked recursively so nested contents get the same treatment.
    """
    if isinstance(value, dict):
        # A kwargs dict carrying a chat_history is reduced to just that history.
        if history := value.get("chat_history"):
            return _parse_content(history)
        return value
    if isinstance(value, ChatHistory):
        # Record chat history as a list of messages for better readability.
        return {"messages": [_parse_content(message) for message in value.messages]}
    if isinstance(value, (KernelContent, StreamingContentMixin)):
        return value.to_dict()
    if isinstance(value, FunctionResult):
        # Only the payload of a FunctionResult is interesting.
        return _parse_content(value.value)
    if isinstance(value, list):
        return [_parse_content(item) for item in value]
    return value
def set_span_type(mlflow_span: LiveSpan) -> None:
    """Set the MLflow span type from the SK operation attribute.

    Maps the Semantic Kernel operation name to an MLflow ``SpanType`` via
    ``_OPERATION_TO_SPAN_TYPE``, defaulting to ``SpanType.UNKNOWN`` when the
    attribute is missing or unrecognized. Mutates the span in place.

    Fix: the return annotation was ``-> str`` but the function never returns a
    value; corrected to ``-> None`` (matching the sibling helpers below).
    """
    span_type = SpanType.UNKNOWN
    if operation := mlflow_span.get_attribute(model_gen_ai_attributes.OPERATION):
        span_type = _OPERATION_TO_SPAN_TYPE.get(operation, SpanType.UNKNOWN)
    mlflow_span.set_span_type(span_type)
def set_token_usage(mlflow_span: LiveSpan) -> None:
    """Aggregate SK token-count attributes into MLflow's chat-usage attribute.

    Writes nothing when neither input nor output token counts are present;
    otherwise records input/output counts (when available) plus their total.
    """
    prompt_tokens = mlflow_span.get_attribute(model_gen_ai_attributes.INPUT_TOKENS)
    completion_tokens = mlflow_span.get_attribute(model_gen_ai_attributes.OUTPUT_TOKENS)
    usage = {
        key: count
        for key, count in (
            (TokenUsageKey.INPUT_TOKENS, prompt_tokens),
            (TokenUsageKey.OUTPUT_TOKENS, completion_tokens),
        )
        if count is not None
    }
    if usage:
        # Missing counts are treated as zero when computing the total.
        usage[TokenUsageKey.TOTAL_TOKENS] = (prompt_tokens or 0) + (completion_tokens or 0)
        mlflow_span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage)
def set_model(mlflow_span: LiveSpan) -> None:
    """Copy the SK model-name attribute onto the MLflow span, when present."""
    model_name = mlflow_span.get_attribute(model_gen_ai_attributes.MODEL)
    if model_name:
        mlflow_span.set_attribute(SpanAttributeKey.MODEL, model_name)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/semantic_kernel/tracing_utils.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/semantic_kernel/resources.py | import openai
from semantic_kernel import Kernel
from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.connectors.ai.open_ai import (
OpenAIChatCompletion,
OpenAITextCompletion,
OpenAITextEmbedding,
)
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import KernelArguments
from tests.tracing.helper import reset_autolog_state # noqa: F401
async def _create_and_invoke_kernel_simple(mock_openai):
    """Build a one-service kernel against the mock OpenAI server and run a prompt."""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    chat_service = OpenAIChatCompletion(
        service_id="chat-gpt",
        ai_model_id="gpt-4o-mini",
        async_client=client,
    )
    kernel = Kernel()
    kernel.add_service(chat_service)
    return await kernel.invoke_prompt("Is sushi the best food ever?")
async def _create_and_invoke_kernel_complex(mock_openai):
    """Exercise a kernel with a templated chat function, custom settings, and chat history."""
    from semantic_kernel.prompt_template import PromptTemplateConfig

    openai_client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat-gpt",
            ai_model_id="gpt-4o-mini",
            async_client=openai_client,
        )
    )
    # Tune execution settings for the registered service.
    settings = kernel.get_prompt_execution_settings_from_service_id("chat-gpt")
    settings.max_tokens = 100
    settings.temperature = 0.7
    settings.top_p = 0.8
    # Template renders the full chat history followed by the latest user input.
    prompt_template_config = PromptTemplateConfig(
        template="{{$chat_history}}{{$user_input}}", allow_dangerously_set_content=True
    )
    chat_function = kernel.add_function(
        plugin_name="ChatBot",
        function_name="Chat",
        prompt_template_config=prompt_template_config,
        template_format="semantic-kernel",
        prompt_execution_settings=settings,
    )
    chat_history = ChatHistory(
        system_message=(
            "You are a chat bot named Mosscap, dedicated to figuring out what people need."
        )
    )
    chat_history.add_user_message("Hi there, who are you?")
    chat_history.add_assistant_message(
        "I am Mosscap, a chat bot. I'm trying to figure out what people need."
    )
    user_input = "I want to find a hotel in Seattle with free wifi and a pool."
    return await kernel.invoke(
        chat_function,
        KernelArguments(
            user_input=user_input,
            chat_history=chat_history,
        ),
        allow_dangerously_set_content=True,
    )
async def _create_and_invoke_chat_agent(mock_openai):
    """Create a ChatCompletionAgent backed by the mock endpoint and fetch one response."""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    sushi_agent = ChatCompletionAgent(
        service=OpenAIChatCompletion(
            service_id="chat-gpt",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        ),
        name="sushi_agent",
        instructions="You are a master at all things sushi. But, you are not very smart.",
    )
    return await sushi_agent.get_response(messages="How do I make sushi?")
async def _create_and_invoke_text_completion(mock_openai):
    """Test text completion methods - parser extracts {"prompt": "..."}"""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    completion_service = OpenAITextCompletion(
        service_id="text-davinci",
        ai_model_id="text-davinci-003",
        async_client=client,
    )
    kernel = Kernel()
    kernel.add_service(completion_service)
    # Fetch the service and its settings back through the kernel, as a caller would.
    service = kernel.get_service("text-davinci")
    exec_settings = kernel.get_prompt_execution_settings_from_service_id("text-davinci")
    return await service.get_text_content("Complete this: The sky is", exec_settings)
async def _create_and_invoke_embeddings(mock_openai):
    """Test embedding methods - parser extracts {"texts": [...]}"""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    embedder = OpenAITextEmbedding(
        service_id="embedding",
        ai_model_id="text-embedding-ada-002",
        async_client=client,
    )
    return await embedder.generate_embeddings(
        ["Hello world", "Semantic kernel", "MLflow tracing"]
    )
async def _create_and_invoke_chat_completion_direct(mock_openai):
    """Test direct chat completion - parser extracts {"messages": [...]}"""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        )
    )
    # Seed a short multi-turn conversation before calling the service directly.
    history = ChatHistory()
    history.add_user_message("What is semantic kernel?")
    history.add_assistant_message("Semantic Kernel is an AI orchestration framework.")
    history.add_user_message("Tell me more about it.")
    service = kernel.get_service("chat")
    exec_settings = kernel.get_prompt_execution_settings_from_service_id("chat")
    return await service.get_chat_message_content(history, exec_settings)
async def _create_and_invoke_kernel_function_object(mock_openai):
    """Invoke kernel.invoke with a function object plus explicit KernelArguments."""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        )
    )
    add_fn = kernel.add_function(
        plugin_name="MathPlugin",
        function_name="Add",
        prompt="Add {{$num1}} and {{$num2}}",
        template_format="semantic-kernel",
    )
    return await kernel.invoke(add_fn, KernelArguments(num1=5, num2=3))
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/semantic_kernel/resources.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/semantic_kernel/test_semantic_kernel_autolog.py | import asyncio
from unittest import mock
import openai
import pytest
import pytest_asyncio
from semantic_kernel import Kernel
from semantic_kernel.agents import AgentResponseItem
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.contents import ChatMessageContent
from semantic_kernel.exceptions import FunctionExecutionException, KernelInvokeException
from semantic_kernel.functions.function_result import FunctionResult
from semantic_kernel.utils.telemetry.agent_diagnostics import (
gen_ai_attributes as agent_gen_ai_attributes,
)
from semantic_kernel.utils.telemetry.model_diagnostics import (
gen_ai_attributes as model_gen_ai_attributes,
)
import mlflow.semantic_kernel
from mlflow.entities import SpanType
from mlflow.entities.span_status import SpanStatusCode
from mlflow.environment_variables import MLFLOW_USE_DEFAULT_TRACER_PROVIDER
from mlflow.semantic_kernel.autolog import SemanticKernelSpanProcessor
from mlflow.tracing.constant import (
SpanAttributeKey,
TokenUsageKey,
)
from mlflow.version import IS_TRACING_SDK_ONLY
from tests.semantic_kernel.resources import (
_create_and_invoke_chat_agent,
_create_and_invoke_chat_completion_direct,
_create_and_invoke_embeddings,
_create_and_invoke_kernel_complex,
_create_and_invoke_kernel_function_object,
_create_and_invoke_kernel_simple,
_create_and_invoke_text_completion,
)
from tests.tracing.helper import get_traces
# Module-wide lock: serializes the async tests so traces from one test
# cannot interleave with another's.
lock = asyncio.Lock()


@pytest_asyncio.fixture(autouse=True)
async def lock_fixture():
    """Hold the module-wide lock for the full duration of every test."""
    async with lock:
        yield
@pytest.fixture(params=[True, False])
def with_openai_autolog(request):
    """Parametrized fixture: run each test with OpenAI autologging on, then off."""
    enabled = request.param
    if enabled:
        mlflow.openai.autolog()
    else:
        mlflow.openai.autolog(disable=True)
    return enabled
@pytest.mark.asyncio
async def test_sk_invoke_simple(mock_openai, with_openai_autolog, mock_litellm_cost):
    """End-to-end check of the span tree, token usage, and cost for a simple invoke_prompt."""
    mlflow.semantic_kernel.autolog()
    result = await _create_and_invoke_kernel_simple(mock_openai)
    # The mock OpenAI endpoint echos the user message back
    prompt = "Is sushi the best food ever?"
    expected_content = '[{"role": "user", "content": "Is sushi the best food ever?"}]'
    # Validate the result is not mutated by tracing logic
    assert isinstance(result, FunctionResult)
    assert isinstance(result.value[0], ChatMessageContent)
    assert result.value[0].items[0].text == expected_content
    # Trace
    traces = get_traces()
    assert len(traces) == 1
    trace = traces[0]
    assert trace.info.request_id
    assert trace.info.experiment_id == "0"
    assert trace.info.timestamp_ms > 0
    assert trace.info.status == "OK"
    assert "Is sushi the best food ever?" in trace.info.request_preview
    assert "Is sushi the best food ever?" in trace.info.response_preview
    spans = trace.data.spans
    # OpenAI autologging contributes one extra AsyncCompletions span when enabled.
    assert len(spans) == (5 if with_openai_autolog else 4)
    # Kernel.invoke_prompt
    assert spans[0].name == "Kernel.invoke_prompt"
    assert spans[0].span_type == SpanType.AGENT
    assert spans[0].inputs == {"prompt": prompt}
    assert spans[0].outputs == [{"role": "assistant", "content": expected_content}]
    # Kernel.invoke
    assert spans[1].name == "Kernel.invoke"
    assert spans[1].span_type == SpanType.AGENT
    assert spans[1].inputs["function"] is not None
    assert spans[1].outputs == [{"role": "assistant", "content": expected_content}]
    # Execute LLM as a tool
    assert spans[2].name.startswith("execute_tool")
    assert spans[2].span_type == SpanType.TOOL
    # Actual LLM call
    assert spans[3].name in ("chat.completions gpt-4o-mini", "chat gpt-4o-mini")
    assert "gen_ai.operation.name" in spans[3].attributes
    assert spans[3].inputs == {"messages": [{"role": "user", "content": prompt}]}
    assert spans[3].outputs == {"messages": [{"role": "assistant", "content": expected_content}]}
    chat_usage = spans[3].get_attribute(SpanAttributeKey.CHAT_USAGE)
    assert chat_usage[TokenUsageKey.INPUT_TOKENS] == 9
    assert chat_usage[TokenUsageKey.OUTPUT_TOKENS] == 12
    assert chat_usage[TokenUsageKey.TOTAL_TOKENS] == 21
    assert spans[3].get_attribute(SpanAttributeKey.SPAN_TYPE) == SpanType.CHAT_MODEL
    assert spans[3].model_name == "gpt-4o-mini"
    if not IS_TRACING_SDK_ONLY:
        # Verify cost is calculated (9 input tokens * 1.0 + 12 output tokens * 2.0)
        assert spans[3].llm_cost == {
            "input_cost": 9.0,
            "output_cost": 24.0,
            "total_cost": 33.0,
        }
    # OpenAI autologging: the extra span nests under the SK chat span and
    # reports the same usage/cost.
    if with_openai_autolog:
        assert spans[4].name == "AsyncCompletions"
        assert spans[4].span_type == SpanType.CHAT_MODEL
        assert spans[4].parent_id == spans[3].span_id
        assert spans[4].inputs == {
            "messages": [{"role": "user", "content": prompt}],
            "model": "gpt-4o-mini",
            "stream": False,
        }
        assert spans[4].get_attribute(SpanAttributeKey.CHAT_USAGE) == {
            "input_tokens": 9,
            "output_tokens": 12,
            "total_tokens": 21,
        }
        assert spans[4].model_name == "gpt-4o-mini"
        if not IS_TRACING_SDK_ONLY:
            assert spans[4].llm_cost == {
                "input_cost": 9.0,
                "output_cost": 24.0,
                "total_cost": 33.0,
            }
    # Trace level token usage should not double-count
    assert trace.info.token_usage == {
        "input_tokens": 9,
        "output_tokens": 12,
        "total_tokens": 21,
    }
@pytest.mark.asyncio
async def test_sk_invoke_simple_with_sk_initialization_of_tracer(mock_openai):
    """Autolog attaches its span processor to a user-configured tracer provider."""
    from opentelemetry.sdk.resources import Resource
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
    from opentelemetry.semconv.resource import ResourceAttributes
    from opentelemetry.trace import get_tracer_provider, set_tracer_provider

    # Simulate a user who configured their own provider before enabling autolog.
    provider = TracerProvider(
        resource=Resource.create(
            {ResourceAttributes.SERVICE_NAME: "telemetry-console-quickstart"}
        )
    )
    provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
    set_tracer_provider(provider)

    mlflow.semantic_kernel.autolog()

    active_provider = get_tracer_provider()
    assert isinstance(active_provider, TracerProvider)
    processors = active_provider._active_span_processor._span_processors
    # The user's processor plus the one autolog registered.
    assert len(processors) == 2
    assert any(isinstance(p, SemanticKernelSpanProcessor) for p in processors)

    await _create_and_invoke_kernel_simple(mock_openai)

    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.request_id
    assert len(traces[0].data.spans) == 4
@pytest.mark.asyncio
async def test_sk_invoke_complex(mock_openai, mock_litellm_cost):
    """Verify the span tree and gen_ai attributes for a templated ChatBot invoke."""
    mlflow.semantic_kernel.autolog()
    result = await _create_and_invoke_kernel_complex(mock_openai)
    # Validate the result is not mutated by tracing logic
    assert isinstance(result, FunctionResult)
    assert isinstance(result.value[0], ChatMessageContent)
    assert result.value[0].items[0].text.startswith('[{"role": "system",')
    # Trace
    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert len(spans) == 3  # Kernel.invoke, execute_tool, chat.completions
    kernel_span, tool_span, chat_span = spans
    # Root span: Kernel.invoke captures function metadata, template, and arguments.
    assert kernel_span.name == "Kernel.invoke"
    assert kernel_span.span_type == SpanType.AGENT
    function_metadata = kernel_span.inputs["function"]["metadata"]
    assert function_metadata["name"] == "Chat"
    assert function_metadata["plugin_name"] == "ChatBot"
    prompt = kernel_span.inputs["function"]["prompt_template"]["prompt_template_config"]
    assert prompt["template"] == "{{$chat_history}}{{$user_input}}"
    arguments = kernel_span.inputs["arguments"]
    assert arguments["user_input"] == "I want to find a hotel in Seattle with free wifi and a pool."
    assert len(arguments["chat_history"]) == 2
    # Middle span: the kernel function executed as a tool, parented to the root.
    assert tool_span.name == "execute_tool ChatBot-Chat"
    assert tool_span.span_type == SpanType.TOOL
    assert tool_span.parent_id == kernel_span.span_id
    # Leaf span: the chat-completion call with SK's gen_ai.* attributes.
    assert chat_span.name in ("chat.completions gpt-4o-mini", "chat gpt-4o-mini")
    assert chat_span.parent_id == tool_span.span_id
    assert chat_span.span_type == SpanType.CHAT_MODEL
    assert chat_span.get_attribute(model_gen_ai_attributes.OPERATION).startswith("chat")
    assert chat_span.get_attribute(model_gen_ai_attributes.SYSTEM) == "openai"
    assert chat_span.get_attribute(model_gen_ai_attributes.MODEL) == "gpt-4o-mini"
    assert chat_span.get_attribute(model_gen_ai_attributes.RESPONSE_ID) == "chatcmpl-123"
    assert chat_span.get_attribute(model_gen_ai_attributes.FINISH_REASON) == "FinishReason.STOP"
    assert chat_span.get_attribute(model_gen_ai_attributes.INPUT_TOKENS) == 9
    assert chat_span.get_attribute(model_gen_ai_attributes.OUTPUT_TOKENS) == 12
    assert chat_span.model_name == "gpt-4o-mini"
    assert any(
        "I want to find a hotel in Seattle with free wifi and a pool." in m.get("content", "")
        for m in chat_span.inputs.get("messages", [])
    )
    assert isinstance(chat_span.outputs["messages"], list)
    chat_usage = chat_span.get_attribute(SpanAttributeKey.CHAT_USAGE)
    assert chat_usage[TokenUsageKey.INPUT_TOKENS] == 9
    assert chat_usage[TokenUsageKey.OUTPUT_TOKENS] == 12
    assert chat_usage[TokenUsageKey.TOTAL_TOKENS] == 21
    # Cost = 9 input tokens * 1.0 + 12 output tokens * 2.0 (see mock_litellm_cost).
    if not IS_TRACING_SDK_ONLY:
        assert chat_span.llm_cost == {
            "input_cost": 9.0,
            "output_cost": 24.0,
            "total_cost": 33.0,
        }
@pytest.mark.asyncio
async def test_sk_invoke_agent(mock_openai):
    """Agent invocation yields agent -> function-invocation-loop -> chat span hierarchy."""
    mlflow.semantic_kernel.autolog()

    result = await _create_and_invoke_chat_agent(mock_openai)
    assert isinstance(result, AgentResponseItem)

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert len(spans) == 3
    agent_span, loop_span, chat_span = spans

    # Root: invoke_agent span carries the agent metadata.
    assert agent_span.name == "invoke_agent sushi_agent"
    assert agent_span.span_type == SpanType.AGENT
    assert agent_span.get_attribute(model_gen_ai_attributes.OPERATION) == "invoke_agent"
    assert agent_span.get_attribute(agent_gen_ai_attributes.AGENT_NAME) == "sushi_agent"

    # Middle: SK's auto function-invocation loop.
    assert loop_span.name == "AutoFunctionInvocationLoop"
    assert loop_span.span_type == SpanType.UNKNOWN
    assert "sk.available_functions" in loop_span.attributes

    # Leaf: the underlying chat-completion call.
    assert chat_span.name.startswith("chat")
    assert chat_span.span_type == SpanType.CHAT_MODEL
    assert chat_span.get_attribute(model_gen_ai_attributes.MODEL) == "gpt-4o-mini"
    assert chat_span.model_name == "gpt-4o-mini"
    assert isinstance(chat_span.inputs["messages"], list)
    assert isinstance(chat_span.outputs["messages"], list)
    finish_reason = chat_span.get_attribute(model_gen_ai_attributes.FINISH_REASON)
    assert finish_reason == "FinishReason.STOP"
@pytest.mark.asyncio
async def test_sk_autolog_trace_on_exception(mock_openai):
    """A failing LLM call still produces a complete trace marked as ERROR."""
    mlflow.semantic_kernel.autolog()

    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat-gpt",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        )
    )

    error_message = "thiswillfail"
    patched_create = mock.patch.object(
        client.chat.completions, "create", side_effect=RuntimeError(error_message)
    )
    with patched_create:
        with pytest.raises(
            KernelInvokeException, match="Error occurred while invoking function"
        ) as exc_info:
            await kernel.invoke_prompt("Hello?")

    # SK wraps the underlying failure in a FunctionExecutionException.
    assert isinstance(exc_info.value.__cause__, FunctionExecutionException)

    traces = get_traces()
    assert traces, "No traces recorded"
    assert len(traces) == 1
    trace = traces[0]
    assert len(trace.data.spans) == 4
    assert trace.info.status == "ERROR"

    # The deepest (LLM) span records the exception event.
    llm_span = trace.data.spans[3]
    assert llm_span.status.status_code == SpanStatusCode.ERROR
    assert llm_span.events[0].name == "exception"
    assert error_message in llm_span.events[0].attributes["exception.message"]
@pytest.mark.asyncio
async def test_tracing_autolog_with_active_span(mock_openai, with_openai_autolog):
    """SK spans nest under a manually started MLflow span."""
    mlflow.semantic_kernel.autolog()

    with mlflow.start_span("parent"):
        response = await _create_and_invoke_kernel_simple(mock_openai)
    assert isinstance(response, FunctionResult)

    traces = get_traces()
    assert len(traces) == 1
    trace = traces[0]
    assert trace.info.request_id is not None
    assert trace.info.status == "OK"
    assert trace.info.tags["mlflow.traceName"] == "parent"

    spans = trace.data.spans
    assert len(spans) == (6 if with_openai_autolog else 5)

    parent = spans[0]
    assert parent.name == "parent"
    assert parent.parent_id is None
    assert parent.span_type == SpanType.UNKNOWN

    # Every span is parented to the one immediately before it in the chain.
    for outer, inner in zip(spans, spans[1:]):
        assert inner.parent_id == outer.span_id

    assert spans[1].name == "Kernel.invoke_prompt"
    assert spans[2].name == "Kernel.invoke"
    assert spans[3].name.startswith("execute_tool")
    assert spans[4].name in ("chat.completions gpt-4o-mini", "chat gpt-4o-mini")
    if with_openai_autolog:
        assert spans[5].name == "AsyncCompletions"
@pytest.mark.asyncio
async def test_tracing_attribution_with_threaded_calls(mock_openai):
    """Concurrent invocations each get their own correctly attributed trace."""
    mlflow.semantic_kernel.autolog()

    num_calls = 3
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat-gpt",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        )
    )

    # Fire all prompts concurrently against the same kernel.
    await asyncio.gather(
        *(kernel.invoke_prompt(f"What is this number: {i}") for i in range(num_calls))
    )

    traces = get_traces()
    assert len(traces) == num_calls

    seen_messages = set()
    expected_types = [SpanType.AGENT, SpanType.AGENT, SpanType.TOOL, SpanType.CHAT_MODEL]
    for trace in traces:
        spans = trace.data.spans
        assert len(spans) == 4
        assert [s.span_type for s in spans] == expected_types
        chat_span = spans[3]
        assert chat_span.model_name == "gpt-4o-mini"
        message = chat_span.inputs["messages"][0]["content"]
        assert message.startswith("What is this number: ")
        seen_messages.add(message)
        assert chat_span.outputs["messages"][0]["content"]

    # Each trace captured a distinct prompt, i.e. no cross-talk between tasks.
    assert len(seen_messages) == num_calls
@pytest.mark.parametrize(
    ("create_and_invoke_func", "span_name_pattern", "expected_span_input_keys"),
    [
        (
            _create_and_invoke_kernel_simple,
            "chat",
            ["messages"],
        ),
        (
            _create_and_invoke_text_completion,
            "text",
            # Text completion input should be stored as a raw string
            None,
        ),
        (
            _create_and_invoke_chat_completion_direct,
            "chat",
            ["messages"],
        ),
    ],
)
@pytest.mark.asyncio
async def test_sk_input_parsing(
    mock_openai, create_and_invoke_func, span_name_pattern, expected_span_input_keys
):
    """Each invocation style stores span inputs in the shape its parser produces."""
    mlflow.semantic_kernel.autolog()
    await create_and_invoke_func(mock_openai)

    traces = get_traces()
    assert len(traces) == 1

    # First span whose name matches the expected pattern.
    target_span = next(
        (s for s in traces[0].data.spans if span_name_pattern in s.name), None
    )
    assert target_span is not None, f"No span found with pattern '{span_name_pattern}'"

    if expected_span_input_keys:
        for key in expected_span_input_keys:
            assert key in target_span.inputs, (
                f"Expected '{key}' in span inputs for {target_span.name}, got: {target_span.inputs}"
            )
    else:
        # Raw-prompt (text completion) inputs are stored as a plain string.
        assert isinstance(target_span.inputs, str)
@pytest.mark.asyncio
async def test_sk_invoke_with_kernel_arguments(mock_openai):
    """Kernel arguments are rendered into the prompt sent to the model."""
    mlflow.semantic_kernel.autolog()
    await _create_and_invoke_kernel_function_object(mock_openai)

    traces = get_traces()
    assert len(traces) == 1

    # The chat span's first message is the fully rendered prompt template.
    chat_span = next(s for s in traces[0].data.spans if "chat" in s.name)
    assert chat_span.inputs["messages"][0]["content"] == "Add 5 and 3"
@pytest.mark.asyncio
async def test_sk_embeddings(mock_openai):
    """Embedding calls succeed but produce no trace."""
    mlflow.semantic_kernel.autolog()

    embeddings = await _create_and_invoke_embeddings(mock_openai)
    assert embeddings is not None
    assert len(embeddings) == 3

    # NOTE: Semantic Kernel currently does not instrument embeddings with
    # OpenTelemetry spans, so autologging records nothing here.
    assert len(get_traces()) == 0
@pytest.mark.asyncio
async def test_kernel_invoke_function_object(mock_openai):
    """invoke() with a function object yields kernel -> tool -> chat spans."""
    mlflow.semantic_kernel.autolog()
    await _create_and_invoke_kernel_function_object(mock_openai)

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert len(spans) == 3
    kernel_span, tool_span, chat_span = spans

    # Root: the kernel invocation with the function object as input.
    assert kernel_span.name == "Kernel.invoke"
    assert kernel_span.span_type == SpanType.AGENT
    assert kernel_span.inputs["function"] is not None
    assert kernel_span.outputs is not None

    # Middle: the function executed as a tool.
    assert tool_span.name == "execute_tool MathPlugin-Add"
    assert tool_span.span_type == SpanType.TOOL

    # Leaf: the chat-completion call.
    assert chat_span.name in ("chat.completions gpt-4o-mini", "chat gpt-4o-mini")
    assert chat_span.span_type == SpanType.CHAT_MODEL
    assert chat_span.model_name == "gpt-4o-mini"
@pytest.mark.asyncio
async def test_sk_shared_provider_no_recursion(monkeypatch, mock_openai):
    """Autolog works with the shared tracer provider without RecursionError."""
    monkeypatch.setenv(MLFLOW_USE_DEFAULT_TRACER_PROVIDER.name, "false")
    mlflow.semantic_kernel.autolog()

    result = await _create_and_invoke_kernel_simple(mock_openai)
    assert isinstance(result, FunctionResult)

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert len(spans) >= 3
    assert spans[0].name == "Kernel.invoke_prompt"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/semantic_kernel/test_semantic_kernel_autolog.py",
"license": "Apache License 2.0",
"lines": 422,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_forbidden_set_active_model_usage.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.forbidden_set_active_model_usage import ForbiddenSetActiveModelUsage
def test_forbidden_set_active_model_usage(index_path: Path) -> None:
    """Public `set_active_model` usage/imports are flagged; private `_set_active_model` is not."""
    code = """
import mlflow
# Bad
mlflow.set_active_model("model_name")
# Good
mlflow._set_active_model("model_name")
# Bad - with aliasing
from mlflow import set_active_model
set_active_model("model_name")
# Good - with aliasing
from mlflow import _set_active_model
_set_active_model("model_name")
"""
    cfg = Config(select={ForbiddenSetActiveModelUsage.name})
    results = lint_file(Path("test.py"), code, cfg, index_path)

    assert all(isinstance(v.rule, ForbiddenSetActiveModelUsage) for v in results)
    assert [v.range for v in results] == [
        Range(Position(4, 0)),  # mlflow.set_active_model call
        Range(Position(10, 0)),  # from mlflow import set_active_model
        Range(Position(11, 0)),  # direct set_active_model call
    ]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_forbidden_set_active_model_usage.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_invalid_experimental_decorator.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.invalid_experimental_decorator import InvalidExperimentalDecorator
def test_invalid_experimental_decorator(index_path: Path) -> None:
    """Only `@experimental(version="X.Y.Z")` with a release semver string passes."""
    code = """
from mlflow.utils.annotations import experimental
# Bad - no arguments
@experimental
def bad_function1():
    pass
# Bad - no version argument
@experimental()
def bad_function2():
    pass
# Bad - invalid version format
@experimental(version="invalid")
def bad_function3():
    pass
# Bad - pre-release version
@experimental(version="1.0.0rc1")
def bad_function4():
    pass
# Bad - non-string version
@experimental(version=123)
def bad_function5():
    pass
# Good - valid semantic version
@experimental(version="1.2.3")
def good_function1():
    pass
# Good - valid semantic version with multiple parts
@experimental(version="2.0.0")
def good_function2():
    pass
"""
    cfg = Config(select={InvalidExperimentalDecorator.name})
    results = lint_file(Path("test.py"), code, cfg, index_path)

    assert all(isinstance(v.rule, InvalidExperimentalDecorator) for v in results)
    assert [v.range for v in results] == [
        Range(Position(4, 1)),  # @experimental without args
        Range(Position(9, 1)),  # @experimental() without version
        Range(Position(14, 1)),  # invalid version format
        Range(Position(19, 1)),  # pre-release version
        Range(Position(24, 1)),  # non-string version
    ]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_invalid_experimental_decorator.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_lazy_module.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.lazy_module import LazyModule
def test_lazy_module(index_path: Path) -> None:
    """LazyLoader modules in mlflow/__init__.py need a matching TYPE_CHECKING import."""
    # The rule only applies to files that look like mlflow/__init__.py.
    code = """
from mlflow.utils.lazy_load import LazyLoader
from typing import TYPE_CHECKING
# Bad - LazyLoader module not imported in TYPE_CHECKING block
anthropic = LazyLoader("mlflow.anthropic", globals(), "mlflow.anthropic")
# Good - LazyLoader with corresponding TYPE_CHECKING import
sklearn = LazyLoader("mlflow.sklearn", globals(), "mlflow.sklearn")
if TYPE_CHECKING:
    from mlflow import sklearn  # Good - this one is imported
"""
    cfg = Config(select={LazyModule.name})
    results = lint_file(Path("mlflow", "__init__.py"), code, cfg, index_path)

    assert len(results) == 1
    violation = results[0]
    assert isinstance(violation.rule, LazyModule)
    assert violation.range == Range(Position(5, 12))  # anthropic LazyLoader
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_lazy_module.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_mlflow_class_name.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.mlflow_class_name import MlflowClassName
def test_mlflow_class_name(index_path: Path) -> None:
    """Class names containing 'MLflow'/'MLFlow' are flagged; 'Mlflow' is the accepted casing."""
    code = """
# Bad - using MLflow
class MLflowClient:
    pass
# Bad - using MLFlow
class MLFlowLogger:
    pass
# Bad - nested occurrence of MLflow
class CustomMLflowHandler:
    pass
# Bad - nested occurrence of MLFlow
class BaseMLFlowTracker:
    pass
# Good - using Mlflow
class MlflowModel:
    pass
# Good - no MLflow patterns
class DataHandler:
    pass
"""
    cfg = Config(select={MlflowClassName.name})
    results = lint_file(Path("test.py"), code, cfg, index_path)

    assert all(isinstance(v.rule, MlflowClassName) for v in results)
    assert [v.range for v in results] == [
        Range(Position(2, 0)),  # MLflowClient
        Range(Position(6, 0)),  # MLFlowLogger
        Range(Position(10, 0)),  # CustomMLflowHandler
        Range(Position(14, 0)),  # BaseMLFlowTracker
    ]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_mlflow_class_name.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_unparameterized_generic_type.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.unparameterized_generic_type import UnparameterizedGenericType
def test_unparameterized_generic_type(index_path: Path) -> None:
    """Bare `list`/`dict` return annotations are flagged; parameterized ones are not."""
    code = """
from typing import Callable, Sequence
# Bad - unparameterized built-in types
def bad_list() -> list:
    pass
def bad_dict() -> dict:
    pass
# Good - parameterized built-in types
def good_list() -> list[str]:
    pass
def good_dict() -> dict[str, int]:
    pass
"""
    cfg = Config(select={UnparameterizedGenericType.name})
    results = lint_file(Path("test.py"), code, cfg, index_path)

    assert all(isinstance(v.rule, UnparameterizedGenericType) for v in results)
    assert [v.range for v in results] == [
        Range(Position(4, 18)),  # bad_list return type
        Range(Position(7, 18)),  # bad_dict return type
    ]
def test_unparameterized_generic_type_async(index_path: Path) -> None:
    """Async defs are linted too: both a bare param and a bare return annotation are flagged."""
    code = """
async def bad_async_dict(x: dict) -> dict:
    pass
async def good_async_dict(x: dict[str, int]) -> dict[str, int]:
    pass
"""
    cfg = Config(select={UnparameterizedGenericType.name})
    results = lint_file(Path("test.py"), code, cfg, index_path)

    # One violation for the parameter annotation, one for the return type.
    assert len(results) == 2
    assert all(isinstance(v.rule, UnparameterizedGenericType) for v in results)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_unparameterized_generic_type.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/datasets/databricks_evaluation_dataset_source.py | from typing import Any
from mlflow.data.dataset_source import DatasetSource
class DatabricksEvaluationDatasetSource(DatasetSource):
    """
    Dataset source for a Databricks-managed Evaluation Dataset.

    Instances identify datasets owned by the Databricks agents SDK; the
    source describes the dataset but cannot materialize it itself.
    """

    def __init__(self, table_name: str, dataset_id: str):
        """
        Args:
            table_name: The three-level UC table name of the dataset
            dataset_id: The unique identifier of the dataset
        """
        self._table_name = table_name
        self._dataset_id = dataset_id

    @property
    def table_name(self) -> str:
        """The UC table name of the dataset."""
        return self._table_name

    @property
    def dataset_id(self) -> str:
        """The unique identifier of the dataset."""
        return self._dataset_id

    @staticmethod
    def _get_source_type() -> str:
        return "databricks_evaluation_dataset"

    def load(self, **kwargs) -> Any:
        """
        Unsupported: datasets of this kind must be loaded through the
        databricks.agents.datasets API rather than via the source object.
        """
        raise NotImplementedError(
            "Loading a Databricks Evaluation Dataset from source is not supported"
        )

    @staticmethod
    def _can_resolve(raw_source: dict[str, Any]) -> bool:
        """Dict-based resolution is never supported for this source type."""
        return False

    @classmethod
    def _resolve(cls, raw_source: dict[str, Any]):
        """Counterpart of `_can_resolve`; always raises since resolution is unsupported."""
        raise NotImplementedError("Resolution from a source dictionary is not supported")

    def to_dict(self) -> dict[str, Any]:
        """Serialize the source to its dictionary representation."""
        return {
            "dataset_id": self._dataset_id,
            "table_name": self._table_name,
        }

    @classmethod
    def from_dict(cls, source_dict: dict[str, Any]) -> "DatabricksEvaluationDatasetSource":
        """Reconstruct a source from the dictionary produced by `to_dict`."""
        return cls(source_dict["table_name"], source_dict["dataset_id"])
class DatabricksUCTableDatasetSource(DatabricksEvaluationDatasetSource):
    # Behaves exactly like the parent source; only the registered source-type
    # identifier differs (a UC table rather than a managed evaluation dataset).
    @staticmethod
    def _get_source_type() -> str:
        return "databricks-uc-table"
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/datasets/databricks_evaluation_dataset_source.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/genai/datasets/test_databricks_evaluation_dataset_source.py | import json
import pytest
from mlflow.genai.datasets.databricks_evaluation_dataset_source import (
DatabricksEvaluationDatasetSource,
)
def test_databricks_evaluation_dataset_source_init():
    """Constructor stores the table name and dataset id verbatim."""
    src = DatabricksEvaluationDatasetSource(
        table_name="catalog.schema.table", dataset_id="12345"
    )
    assert (src.table_name, src.dataset_id) == ("catalog.schema.table", "12345")
def test_databricks_evaluation_dataset_source_get_source_type():
    """The registered source-type identifier is stable."""
    source_type = DatabricksEvaluationDatasetSource._get_source_type()
    assert source_type == "databricks_evaluation_dataset"
def test_databricks_evaluation_dataset_source_to_dict():
    """to_dict exposes both constructor fields."""
    src = DatabricksEvaluationDatasetSource(
        table_name="catalog.schema.table", dataset_id="12345"
    )
    expected = {"table_name": "catalog.schema.table", "dataset_id": "12345"}
    assert src.to_dict() == expected
def test_databricks_evaluation_dataset_source_from_dict():
    """from_dict reconstructs a source with the original field values."""
    src = DatabricksEvaluationDatasetSource.from_dict(
        {"table_name": "catalog.schema.table", "dataset_id": "12345"}
    )
    assert src.table_name == "catalog.schema.table"
    assert src.dataset_id == "12345"
def test_databricks_evaluation_dataset_source_to_json():
    """to_json serializes exactly the to_dict payload."""
    src = DatabricksEvaluationDatasetSource(
        table_name="catalog.schema.table", dataset_id="12345"
    )
    assert json.loads(src.to_json()) == {
        "table_name": "catalog.schema.table",
        "dataset_id": "12345",
    }
def test_databricks_evaluation_dataset_source_from_json():
    """from_json parses the payload and rebuilds the source."""
    payload = json.dumps({"table_name": "catalog.schema.table", "dataset_id": "12345"})
    src = DatabricksEvaluationDatasetSource.from_json(payload)
    assert src.table_name == "catalog.schema.table"
    assert src.dataset_id == "12345"
def test_databricks_evaluation_dataset_source_load_not_implemented():
source = DatabricksEvaluationDatasetSource(
table_name="catalog.schema.table", dataset_id="12345"
)
with pytest.raises(
NotImplementedError,
match="Loading a Databricks Evaluation Dataset from source is not supported",
):
source.load()
def test_databricks_evaluation_dataset_source_can_resolve():
# _can_resolve should return False for all inputs
assert DatabricksEvaluationDatasetSource._can_resolve({}) is False
assert DatabricksEvaluationDatasetSource._can_resolve({"table_name": "test"}) is False
def test_databricks_evaluation_dataset_source_resolve_not_implemented():
with pytest.raises(
NotImplementedError, match="Resolution from a source dictionary is not supported"
):
DatabricksEvaluationDatasetSource._resolve({})
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/datasets/test_databricks_evaluation_dataset_source.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/datasets/test_evaluation_dataset.py | import json
from typing import Any
from unittest.mock import Mock
import pandas as pd
import pytest
from mlflow.data.dataset_source_registry import (
get_dataset_source_from_json,
register_dataset_source,
)
from mlflow.data.spark_dataset_source import SparkDatasetSource
from mlflow.entities.evaluation_dataset import DatasetGranularity
from mlflow.entities.evaluation_dataset import EvaluationDataset as MLflowEvaluationDataset
from mlflow.genai.datasets.databricks_evaluation_dataset_source import (
DatabricksEvaluationDatasetSource,
DatabricksUCTableDatasetSource,
)
from mlflow.genai.datasets.evaluation_dataset import EvaluationDataset
def create_test_source_json(table_name: str = "main.default.testtable") -> str:
"""Create a JSON string source value consistent with Databricks managed evaluation datasets.
This format matches the behavior of Databricks managed evaluation datasets as of July 2025.
"""
return json.dumps({"table_name": table_name})
def create_mock_managed_dataset(source_value: Any) -> Mock:
"""Create a mock Databricks Agent Evaluation ManagedDataset for testing"""
mock_dataset = Mock()
mock_dataset.dataset_id = getattr(source_value, "dataset_id", "test-dataset-id")
mock_dataset.name = getattr(source_value, "_table_name", "catalog.schema.table")
mock_dataset.digest = "test-digest"
mock_dataset.schema = "test-schema"
mock_dataset.profile = "test-profile"
mock_dataset.source = source_value
mock_dataset.source_type = "databricks-uc-table"
mock_dataset.create_time = "2024-01-01T00:00:00"
mock_dataset.created_by = "test-user"
mock_dataset.last_update_time = "2024-01-02T00:00:00"
mock_dataset.last_updated_by = "test-user-2"
# Mock methods
mock_dataset.to_df.return_value = pd.DataFrame({"col1": [1, 2, 3], "col2": ["a", "b", "c"]})
mock_dataset.set_profile.return_value = mock_dataset
mock_dataset.merge_records.return_value = mock_dataset
return mock_dataset
@pytest.fixture
def mock_managed_dataset() -> Mock:
"""Create a mock Databricks Agent Evaluation ManagedDataset for testing."""
return create_mock_managed_dataset(create_test_source_json())
def create_dataset_with_source(source_value: Any) -> EvaluationDataset:
"""Factory function to create EvaluationDataset with specific source value."""
mock_dataset = create_mock_managed_dataset(source_value)
return EvaluationDataset(mock_dataset)
def test_evaluation_dataset_properties(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
assert dataset.dataset_id == "test-dataset-id"
assert dataset.name == "catalog.schema.table"
assert dataset.digest == "test-digest"
assert dataset.schema == "test-schema"
assert dataset.profile == "test-profile"
assert dataset.source_type == "databricks-uc-table"
assert dataset.create_time == "2024-01-01T00:00:00"
assert dataset.created_by == "test-user"
assert dataset.last_update_time == "2024-01-02T00:00:00"
assert dataset.last_updated_by == "test-user-2"
assert isinstance(dataset.source, DatabricksEvaluationDatasetSource)
assert dataset.source.table_name == "catalog.schema.table"
assert dataset.source.dataset_id == "test-dataset-id"
def test_evaluation_dataset_source_with_string_source():
dataset = create_dataset_with_source("string-value")
assert isinstance(dataset.source, DatabricksEvaluationDatasetSource)
assert dataset.source.table_name == "catalog.schema.table"
assert dataset.source.dataset_id == "test-dataset-id"
def test_evaluation_dataset_source_with_none():
dataset = create_dataset_with_source(None)
assert isinstance(dataset.source, DatabricksEvaluationDatasetSource)
assert dataset.source.table_name == "catalog.schema.table"
assert dataset.source.dataset_id == "test-dataset-id"
def test_evaluation_dataset_source_always_returns_databricks_evaluation_dataset_source():
existing_source = DatabricksEvaluationDatasetSource(
table_name="existing.table", dataset_id="existing-id"
)
dataset = create_dataset_with_source(existing_source)
assert isinstance(dataset.source, DatabricksEvaluationDatasetSource)
assert dataset.source.table_name == "existing.table"
assert dataset.source.dataset_id == "existing-id"
spark_source = SparkDatasetSource(table_name="spark.table")
dataset = create_dataset_with_source(spark_source)
assert isinstance(dataset.source, DatabricksEvaluationDatasetSource)
assert dataset.source.table_name == "spark.table"
assert dataset.source.dataset_id == "test-dataset-id"
def test_evaluation_dataset_to_df(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
df = dataset.to_df()
assert isinstance(df, pd.DataFrame)
assert len(df) == 3
mock_managed_dataset.to_df.assert_called_once()
def test_evaluation_dataset_to_mlflow_entity(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
entity = dataset._to_mlflow_entity()
assert entity.name == "catalog.schema.table"
assert entity.digest == "test-digest"
assert entity.source_type == "databricks-uc-table"
source_dict = json.loads(entity.source)
assert source_dict["table_name"] == "catalog.schema.table"
assert source_dict["dataset_id"] == "test-dataset-id"
assert entity.schema == "test-schema"
assert entity.profile == "test-profile"
def test_evaluation_dataset_to_mlflow_entity_with_existing_source():
existing_source = DatabricksEvaluationDatasetSource(
table_name="existing.table", dataset_id="existing-id"
)
dataset = create_dataset_with_source(existing_source)
entity = dataset._to_mlflow_entity()
assert entity.name == "existing.table"
assert entity.digest == "test-digest"
assert entity.source_type == "databricks-uc-table"
source_dict = json.loads(entity.source)
assert source_dict["table_name"] == "existing.table"
assert source_dict["dataset_id"] == "existing-id"
assert entity.schema == "test-schema"
assert entity.profile == "test-profile"
def test_evaluation_dataset_set_profile(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
new_dataset = dataset.set_profile("new-profile")
assert isinstance(new_dataset, EvaluationDataset)
mock_managed_dataset.set_profile.assert_called_once_with("new-profile")
def test_evaluation_dataset_merge_records(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
new_records = [{"col1": 4, "col2": "d"}]
new_dataset = dataset.merge_records(new_records)
assert isinstance(new_dataset, EvaluationDataset)
mock_managed_dataset.merge_records.assert_called_once_with(new_records)
def test_evaluation_dataset_delete_records_not_supported_for_databricks(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
record_ids = ["record-1", "record-2"]
with pytest.raises(NotImplementedError, match="not supported for Databricks managed datasets"):
dataset.delete_records(record_ids)
def test_evaluation_dataset_digest_computation(mock_managed_dataset):
# Test when managed dataset has no digest
mock_managed_dataset.digest = None
dataset = EvaluationDataset(mock_managed_dataset)
digest = dataset.digest
assert digest is not None
def test_evaluation_dataset_to_evaluation_dataset(mock_managed_dataset):
dataset = EvaluationDataset(mock_managed_dataset)
legacy_dataset = dataset.to_evaluation_dataset(
path="/path/to/data", feature_names=["col1", "col2"]
)
assert legacy_dataset._features_data.equals(dataset.to_df())
assert legacy_dataset._path == "/path/to/data"
assert legacy_dataset._feature_names == ["col1", "col2"]
assert legacy_dataset.name == "catalog.schema.table"
assert legacy_dataset.digest == "test-digest"
def test_databricks_uc_table_dataset_source():
register_dataset_source(DatabricksUCTableDatasetSource)
source_json = json.dumps({"table_name": "catalog.schema.table", "dataset_id": "test-id"})
source = get_dataset_source_from_json(source_json, "databricks-uc-table")
assert isinstance(source, DatabricksUCTableDatasetSource)
assert source._get_source_type() == "databricks-uc-table"
assert source.table_name == "catalog.schema.table"
assert source.dataset_id == "test-id"
def _create_mlflow_evaluation_dataset() -> MLflowEvaluationDataset:
return MLflowEvaluationDataset(
dataset_id="test-id",
name="test-dataset",
digest="test-digest",
created_time=0,
last_update_time=0,
)
@pytest.mark.parametrize(
("input_keys", "expected_granularity"),
[
# empty keys -> UNKNOWN
(set(), DatasetGranularity.UNKNOWN),
# no 'goal' field -> TRACE
({"request"}, DatasetGranularity.TRACE),
({"messages"}, DatasetGranularity.TRACE),
({"query", "context"}, DatasetGranularity.TRACE),
# 'goal' and only session fields -> SESSION
({"goal"}, DatasetGranularity.SESSION),
({"goal", "persona"}, DatasetGranularity.SESSION),
({"goal", "context"}, DatasetGranularity.SESSION),
({"goal", "persona", "context"}, DatasetGranularity.SESSION),
# 'goal' mixed with non-session fields -> UNKNOWN
({"goal", "request"}, DatasetGranularity.UNKNOWN),
({"goal", "messages"}, DatasetGranularity.UNKNOWN),
({"goal", "persona", "extra_field"}, DatasetGranularity.UNKNOWN),
],
)
def test_classify_input_fields(
input_keys,
expected_granularity,
):
dataset = _create_mlflow_evaluation_dataset()
result = dataset._classify_input_fields(input_keys)
assert result == expected_granularity
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/datasets/test_evaluation_dataset.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_example_syntax_error.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.example_syntax_error import ExampleSyntaxError
def test_example_syntax_error(index_path: Path) -> None:
code = '''
def bad():
"""
.. code-block:: python
def f():
"""
def good():
"""
.. code-block:: python
def f():
return "This is a good example"
"""
'''
config = Config(select={ExampleSyntaxError.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, ExampleSyntaxError) for v in violations)
assert violations[0].range == Range(Position(5, 8))
@pytest.mark.parametrize("suffix", [".md", ".mdx"])
def test_example_syntax_error_markdown(index_path: Path, suffix: str) -> None:
code = """
```python
def g():
```
"""
config = Config(select={ExampleSyntaxError.name})
violations = lint_file(Path("test").with_suffix(suffix), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, ExampleSyntaxError) for v in violations)
assert violations[0].range == Range(Position(2, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_example_syntax_error.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_os_environ_set_in_test.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.os_environ_set_in_test import OsEnvironSetInTest
def test_os_environ_set_in_test(index_path: Path) -> None:
code = """
import os
# Bad
def test_func():
os.environ["MY_VAR"] = "value"
# Good
def non_test_func():
os.environ["MY_VAR"] = "value"
"""
config = Config(select={OsEnvironSetInTest.name})
violations = lint_file(Path("test_file.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, OsEnvironSetInTest) for v in violations)
assert violations[0].range == Range(Position(5, 4))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_os_environ_set_in_test.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_pytest_mark_repeat.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.pytest_mark_repeat import PytestMarkRepeat
def test_pytest_mark_repeat(index_path: Path) -> None:
code = """
import pytest
@pytest.mark.repeat(10)
def test_flaky_function():
...
"""
config = Config(select={PytestMarkRepeat.name})
violations = lint_file(Path("test_pytest_mark_repeat.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, PytestMarkRepeat) for v in violations)
assert violations[0].range == Range(Position(3, 1))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_pytest_mark_repeat.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_unknown_mlflow_arguments.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.unknown_mlflow_arguments import UnknownMlflowArguments
def test_unknown_mlflow_arguments(index_path: Path) -> None:
code = '''
def bad():
"""
.. code-block:: python
import mlflow
mlflow.log_param(foo="bar")
"""
def good():
"""
.. code-block:: python
import mlflow
mlflow.log_param(key="k", value="v")
"""
'''
config = Config(
select={UnknownMlflowArguments.name},
example_rules=[UnknownMlflowArguments.name],
)
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, UnknownMlflowArguments) for v in violations)
assert violations[0].range == Range(Position(7, 8))
@pytest.mark.parametrize("suffix", [".md", ".mdx"])
def test_unknown_mlflow_arguments_markdown(index_path: Path, suffix: str) -> None:
code = """
# Bad
```python
import mlflow
mlflow.log_param(foo="bar")
```
# Good
```python
import mlflow
mlflow.log_param(key="k", value="v")
```
"""
config = Config(
select={UnknownMlflowArguments.name},
example_rules=[UnknownMlflowArguments.name],
)
violations = lint_file(Path("test").with_suffix(suffix), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, UnknownMlflowArguments) for v in violations)
assert violations[0].range == Range(Position(6, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_unknown_mlflow_arguments.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_unknown_mlflow_function.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.unknown_mlflow_function import UnknownMlflowFunction
def test_unknown_mlflow_function(index_path: Path) -> None:
code = '''
def bad():
"""
.. code-block:: python
import mlflow
mlflow.foo()
"""
def good():
"""
.. code-block:: python
import mlflow
mlflow.log_param("k", "v")
"""
'''
config = Config(select={UnknownMlflowFunction.name}, example_rules=[UnknownMlflowFunction.name])
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, UnknownMlflowFunction) for v in violations)
assert violations[0].range == Range(Position(7, 8))
@pytest.mark.parametrize("suffix", [".md", ".mdx"])
def test_unknown_mlflow_function_markdown(index_path: Path, suffix: str) -> None:
code = """
# Bad
```python
import mlflow
mlflow.foo()
```
# Good
```python
import mlflow
mlflow.log_param("k", "v")
```
"""
config = Config(
select={UnknownMlflowFunction.name},
example_rules=[UnknownMlflowFunction.name],
)
violations = lint_file(Path("test").with_suffix(suffix), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, UnknownMlflowFunction) for v in violations)
assert violations[0].range == Range(Position(6, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_unknown_mlflow_function.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_extraneous_docstring_param.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.extraneous_docstring_param import ExtraneousDocstringParam
def test_extraneous_docstring_param(index_path: Path) -> None:
code = '''
def bad_function(param1: str) -> None:
"""
Example function docstring.
Args:
param1: First parameter
param2: This parameter doesn't exist in function signature
param3: Another non-existent parameter
"""
def good_function(param1: str, param2: int) -> None:
"""
Good function with matching parameters.
Args:
param1: First parameter
param2: Second parameter
"""
'''
config = Config(select={ExtraneousDocstringParam.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, ExtraneousDocstringParam) for v in violations)
assert violations[0].range == Range(Position(1, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_extraneous_docstring_param.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_forbidden_trace_ui_in_notebook.py | from pathlib import Path
from clint.config import Config
from clint.linter import lint_file
from clint.rules.forbidden_trace_ui_in_notebook import ForbiddenTraceUIInNotebook
def test_forbidden_trace_ui_in_notebook(index_path: Path) -> None:
notebook_content = """
{
"cells": [
{
"cell_type": "markdown",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This is a normal cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<iframe src='http://localhost:5000/static-files/lib/notebook-trace-renderer/index.html'></iframe>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# This cell contains trace UI output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This is a normal cell"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
"""
code = notebook_content
config = Config(select={ForbiddenTraceUIInNotebook.name})
violations = lint_file(Path("test.ipynb"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, ForbiddenTraceUIInNotebook) for v in violations)
assert violations[0].cell == 2
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_forbidden_trace_ui_in_notebook.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_incorrect_type_annotation.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.incorrect_type_annotation import IncorrectTypeAnnotation
def test_incorrect_type_annotation(index_path: Path) -> None:
code = """
def bad_function_callable(param: callable) -> callable:
...
def bad_function_any(param: any) -> any:
...
def good_function(param: Callable[[str], str]) -> Any:
...
"""
config = Config(select={IncorrectTypeAnnotation.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 4
assert all(isinstance(v.rule, IncorrectTypeAnnotation) for v in violations)
assert violations[0].range == Range(Position(1, 33)) # callable
assert violations[1].range == Range(Position(1, 46)) # callable
assert violations[2].range == Range(Position(4, 28)) # any
assert violations[3].range == Range(Position(4, 36)) # any
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_incorrect_type_annotation.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_invalid_abstract_method.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.invalid_abstract_method import InvalidAbstractMethod
def test_invalid_abstract_method(index_path: Path) -> None:
code = """
import abc
class AbstractExample(abc.ABC):
@abc.abstractmethod
def bad_abstract_method_has_implementation(self) -> None:
return "This should not be here"
@abc.abstractmethod
def bad_abstract_method_multiple_statements(self) -> None:
pass
...
@abc.abstractmethod
def good_abstract_method_pass(self) -> None:
pass
@abc.abstractmethod
def good_abstract_method_ellipsis(self) -> None:
...
@abc.abstractmethod
def good_abstract_method_docstring(self) -> None:
'''This is a valid docstring'''
"""
config = Config(select={InvalidAbstractMethod.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 2
assert all(isinstance(v.rule, InvalidAbstractMethod) for v in violations)
assert violations[0].range == Range(Position(5, 4))
assert violations[1].range == Range(Position(9, 4))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_invalid_abstract_method.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_missing_docstring_param.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.missing_docstring_param import MissingDocstringParam
def test_missing_docstring_param(index_path: Path) -> None:
code = '''
def bad_function(param1: str, param2: int, param3: bool) -> None:
"""
Example function with missing parameters in docstring.
Args:
param1: First parameter described
"""
def good_function(param1: str, param2: int) -> None:
"""
Good function with all parameters documented.
Args:
param1: First parameter
param2: Second parameter
"""
'''
config = Config(select={MissingDocstringParam.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, MissingDocstringParam) for v in violations)
assert violations[0].range == Range(Position(1, 0))
def test_missing_docstring_param_init(index_path: Path) -> None:
code = '''
class MyClass:
def __init__(self, param1: str, param2: int) -> None:
"""
Initialize MyClass.
Args:
param1: First parameter
"""
pass
class GoodClass:
def __init__(self, param1: str, param2: int) -> None:
"""
Initialize GoodClass.
Args:
param1: First parameter
param2: Second parameter
"""
pass
'''
config = Config(select={MissingDocstringParam.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, MissingDocstringParam) for v in violations)
assert violations[0].range == Range(Position(2, 4))
def test_missing_docstring_param_name_mangled(index_path: Path) -> None:
code = '''
class MyClass:
def __private_helper(self, param1: str, param2: int) -> None:
"""
Private name-mangled method (starts with __ but doesn't end with __).
Should be skipped by clint.
Args:
param1: First parameter
"""
pass
def __init__(self, param1: str) -> None:
"""
Initialize MyClass.
Args:
param1: First parameter
"""
pass
'''
config = Config(select={MissingDocstringParam.name})
violations = lint_file(Path("test.py"), code, config, index_path)
# Only __init__ should be checked, __private_helper should be skipped
assert len(violations) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_missing_docstring_param.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_empty_notebook_cell.py | import json
from pathlib import Path
from clint.config import Config
from clint.linter import lint_file
from clint.rules.empty_notebook_cell import EmptyNotebookCell
def test_empty_notebook_cell(index_path: Path) -> None:
notebook_content = {
"cells": [
{
"cell_type": "code",
"source": [], # Empty cell
"metadata": {},
"execution_count": None,
"outputs": [],
},
{
"cell_type": "code",
"source": ["x = 5"],
"metadata": {},
"execution_count": None,
"outputs": [],
},
{
"cell_type": "code",
"source": [], # Another empty cell
"metadata": {},
"execution_count": None,
"outputs": [],
},
],
"metadata": {
"kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}
},
"nbformat": 4,
"nbformat_minor": 4,
}
code = json.dumps(notebook_content)
config = Config(select={EmptyNotebookCell.name})
violations = lint_file(Path("test_notebook.ipynb"), code, config, index_path)
assert len(violations) == 2
assert all(isinstance(v.rule, EmptyNotebookCell) for v in violations)
assert violations[0].cell == 1
assert violations[1].cell == 3
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_empty_notebook_cell.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_log_model_artifact_path.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.log_model_artifact_path import LogModelArtifactPath
def test_log_model_artifact_path(index_path: Path) -> None:
code = """
import mlflow
# Bad - using deprecated artifact_path positionally
mlflow.sklearn.log_model(model, "model")
# Bad - using deprecated artifact_path as keyword
mlflow.tensorflow.log_model(model, artifact_path="tf_model")
# Good - using the new 'name' parameter
mlflow.sklearn.log_model(model, name="my_model")
# Good - spark flavor is exempted from this rule
mlflow.spark.log_model(spark_model, "spark_model")
# Bad - another flavor with artifact_path
mlflow.pytorch.log_model(model, artifact_path="pytorch_model")
"""
config = Config(select={LogModelArtifactPath.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 3
assert all(isinstance(v.rule, LogModelArtifactPath) for v in violations)
assert violations[0].range == Range(Position(4, 0))
assert violations[1].range == Range(Position(7, 0))
assert violations[2].range == Range(Position(16, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_log_model_artifact_path.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_markdown_link.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.markdown_link import MarkdownLink
def test_markdown_link(index_path: Path) -> None:
code = '''
# Bad
def function_with_markdown_link():
"""
This function has a [markdown link](https://example.com).
"""
async def async_function_with_markdown_link():
"""
This async function has a [markdown link](https://example.com).
"""
class MyClass:
"""
Class with [another markdown link](https://test.com).
"""
# Good
def function_with_rest_link():
"""
This function has a `reST link <https://example.com>`_.
"""
'''
config = Config(select={MarkdownLink.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 3
assert all(isinstance(v.rule, MarkdownLink) for v in violations)
assert violations[0].range == Range(Position(3, 4))
assert violations[1].range == Range(Position(8, 4))
assert violations[2].range == Range(Position(13, 4))
def test_markdown_link_disable_on_end_line(index_path: Path) -> None:
code = '''
def func():
"""
Docstring with [markdown link](url).
""" # clint: disable=markdown-link
pass
async def async_func():
"""
Async docstring with [markdown link](url).
""" # clint: disable=markdown-link
pass
class MyClass:
"""
Class docstring with [markdown link](url).
""" # clint: disable=markdown-link
pass
# This should still be detected (no disable comment)
def func_without_disable():
"""
Docstring with [markdown link](url).
"""
pass
'''
config = Config(select={MarkdownLink.name})
violations = lint_file(Path("test.py"), code, config, index_path)
# Only the last function without disable comment should have a violation
assert len(violations) == 1
assert isinstance(violations[0].rule, MarkdownLink)
def test_markdown_link_disable_multiple_rules(index_path: Path) -> None:
code = '''
def func():
"""
Docstring with [markdown link](url).
""" # clint: disable=markdown-link,other-rule
pass
def func2():
"""
Docstring with [markdown link](url).
""" # clint: disable=other-rule, markdown-link
pass
# This should still be detected (markdown-link not in disable list)
def func_without_markdown_disable():
"""
Docstring with [markdown link](url).
""" # clint: disable=some-other-rule
pass
'''
config = Config(select={MarkdownLink.name})
violations = lint_file(Path("test.py"), code, config, index_path)
# Only the last function should have a violation (markdown-link not disabled)
assert len(violations) == 1
assert isinstance(violations[0].rule, MarkdownLink)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_markdown_link.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_os_environ_delete_in_test.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.os_environ_delete_in_test import OsEnvironDeleteInTest
def test_os_environ_delete_in_test(index_path: Path) -> None:
    """`del os.environ[...]` inside a test file is reported once, at the `del`."""
    code = """
import os

def test_something():
    # Bad
    del os.environ["MY_VAR"]
    # Good
    # monkeypatch.delenv("MY_VAR")
"""
    config = Config(select={OsEnvironDeleteInTest.name})
    violations = lint_file(Path("test_env.py"), code, config, index_path)
    assert len(violations) == 1
    assert all(isinstance(v.rule, OsEnvironDeleteInTest) for v in violations)
    assert violations[0].range == Range(Position(5, 4))


def test_os_environ_pop_in_test(index_path: Path) -> None:
    """`os.environ.pop(...)` inside a test file is reported."""
    code = """
import os

def test_something():
    # Bad
    os.environ.pop("MY_VAR")
    # Good
    # monkeypatch.delenv("MY_VAR")
"""
    config = Config(select={OsEnvironDeleteInTest.name})
    violations = lint_file(Path("test_env.py"), code, config, index_path)
    assert len(violations) == 1
    assert all(isinstance(v.rule, OsEnvironDeleteInTest) for v in violations)
    assert violations[0].range == Range(Position(5, 4))


def test_os_environ_pop_with_default_in_test(index_path: Path) -> None:
    """`os.environ.pop(...)` is still flagged when a default value is supplied."""
    code = """
import os

def test_something():
    # Bad - with default value
    os.environ.pop("MY_VAR", None)
    # Good
    # monkeypatch.delenv("MY_VAR", raising=False)
"""
    config = Config(select={OsEnvironDeleteInTest.name})
    violations = lint_file(Path("test_env.py"), code, config, index_path)
    assert len(violations) == 1
    assert all(isinstance(v.rule, OsEnvironDeleteInTest) for v in violations)
    assert violations[0].range == Range(Position(5, 4))


def test_os_environ_multiple_violations(index_path: Path) -> None:
    """Every offending statement yields its own violation with its own range."""
    code = """
import os

def test_something():
    # Bad - del
    del os.environ["VAR1"]

    # Bad - pop
    os.environ.pop("VAR2")

    # Bad - pop with default
    os.environ.pop("VAR3", None)
"""
    config = Config(select={OsEnvironDeleteInTest.name})
    violations = lint_file(Path("test_env.py"), code, config, index_path)
    assert len(violations) == 3
    assert all(isinstance(v.rule, OsEnvironDeleteInTest) for v in violations)
    assert violations[0].range == Range(Position(5, 4))
    assert violations[1].range == Range(Position(8, 4))
    assert violations[2].range == Range(Position(11, 4))


def test_os_environ_pop_not_in_test(index_path: Path) -> None:
    """The rule is scoped to test files; a non-test module is left alone."""
    code = """
import os

def some_function():
    # This is OK - not in a test file
    os.environ.pop("MY_VAR")
"""
    config = Config(select={OsEnvironDeleteInTest.name})
    violations = lint_file(Path("utils.py"), code, config, index_path)
    assert len(violations) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_os_environ_delete_in_test.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_test_name_typo.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.test_name_typo import TestNameTypo
def test_test_name_typo(index_path: Path) -> None:
    """`testXxx` names (missing underscore) are flagged; other names pass."""
    code = """import pytest

# Bad - starts with 'test' but missing underscore
def testSomething():
    assert True

# Bad - another one without underscore
def testAnother():
    assert True

# Good - properly named test
def test_valid_function():
    assert True

# Good - not a test function
def helper_function():
    return 42

# Good - starts with something else
def tset_something():
    pass
"""
    config = Config(select={TestNameTypo.name})
    violations = lint_file(Path("test_something.py"), code, config, index_path)
    assert len(violations) == 2
    assert all(isinstance(v.rule, TestNameTypo) for v in violations)
    assert violations[0].range == Range(Position(3, 0))
    assert violations[1].range == Range(Position(7, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_test_name_typo.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/check_function_signatures.py | from __future__ import annotations
import argparse
import ast
import os
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
def is_github_actions() -> bool:
    """Return True when executing on a GitHub Actions runner."""
    return os.environ.get("GITHUB_ACTIONS", "") == "true"
@dataclass
class Error:
    """A signature-compatibility finding anchored to a file location."""

    file_path: Path
    line: int
    column: int
    lines: list[str]

    def format(self, github: bool = False) -> str:
        """Render as a GitHub `::warning` annotation or a plain path:line:col line."""
        text = " ".join(self.lines)
        if not github:
            return f"{self.file_path}:{self.line}:{self.column}: {text}"
        return f"::warning file={self.file_path},line={self.line},col={self.column}::{text}"
@dataclass
class Parameter:
    """One formal parameter of a signature, with its source location."""

    name: str
    position: int | None  # None for keyword-only
    is_required: bool  # True when the parameter carries no default value
    is_positional_only: bool  # declared before `/`
    is_keyword_only: bool  # declared after `*`
    lineno: int  # source line of the parameter (from the ast node)
    col_offset: int  # source column of the parameter (from the ast node)
@dataclass
class Signature:
    """A parsed function signature split into positional and keyword-only parts."""

    positional: list[Parameter]  # Includes positional-only and regular positional
    keyword_only: list[Parameter]
    has_var_positional: bool  # *args
    has_var_keyword: bool  # **kwargs
@dataclass
class ParameterError:
    """A single incompatibility finding for one parameter."""

    message: str  # human-readable description of the breaking change
    param_name: str
    lineno: int  # location of the offending parameter
    col_offset: int
def parse_signature(args: ast.arguments) -> Signature:
    """Convert ``ast.arguments`` to a :class:`Signature` for easier processing.

    Fix: positional-only parameters were previously marked ``is_required=True``
    unconditionally, but in the AST ``args.defaults`` applies to the *tail* of
    the combined ``posonlyargs + args`` list, so ``def f(a=1, /)`` declares an
    optional positional-only parameter. Requiredness is now computed over the
    combined list for both groups.
    """
    parameters_positional: list[Parameter] = []
    parameters_keyword_only: list[Parameter] = []

    # `args.defaults` covers the last len(defaults) entries of
    # posonlyargs + args, so the first optional index is shared by both groups.
    first_optional_idx = len(args.posonlyargs) + len(args.args) - len(args.defaults)

    # Process positional-only parameters
    for i, arg in enumerate(args.posonlyargs):
        parameters_positional.append(
            Parameter(
                name=arg.arg,
                position=i,
                # Positional-only params may carry defaults (e.g. `def f(a=1, /)`).
                is_required=i < first_optional_idx,
                is_positional_only=True,
                is_keyword_only=False,
                lineno=arg.lineno,
                col_offset=arg.col_offset,
            )
        )

    # Process regular positional parameters
    offset = len(args.posonlyargs)
    for i, arg in enumerate(args.args):
        pos = offset + i
        parameters_positional.append(
            Parameter(
                name=arg.arg,
                position=pos,
                is_required=pos < first_optional_idx,
                is_positional_only=False,
                is_keyword_only=False,
                lineno=arg.lineno,
                col_offset=arg.col_offset,
            )
        )

    # Process keyword-only parameters; kw_defaults holds None for entries
    # without a default, which makes them required.
    for arg, default in zip(args.kwonlyargs, args.kw_defaults):
        parameters_keyword_only.append(
            Parameter(
                name=arg.arg,
                position=None,
                is_required=default is None,
                is_positional_only=False,
                is_keyword_only=True,
                lineno=arg.lineno,
                col_offset=arg.col_offset,
            )
        )

    return Signature(
        positional=parameters_positional,
        keyword_only=parameters_keyword_only,
        has_var_positional=args.vararg is not None,
        has_var_keyword=args.kwarg is not None,
    )
def check_signature_compatibility(
    old_fn: ast.FunctionDef | ast.AsyncFunctionDef,
    new_fn: ast.FunctionDef | ast.AsyncFunctionDef,
) -> list[ParameterError]:
    """
    Return the incompatibilities *new_fn* introduces relative to *old_fn*;
    an empty list means the signatures are backward-compatible.

    Compatibility rules
    -------------------
    • Positional / positional-only parameters
      - Cannot be reordered, renamed, or removed.
      - Adding **required** ones is breaking.
      - Adding **optional** ones is allowed only at the end.
      - Making an optional parameter required is breaking.
    • Keyword-only parameters (order does not matter)
      - Cannot be renamed or removed.
      - Making an optional parameter required is breaking.
      - Adding a required parameter is breaking; adding an optional parameter is fine.
    """
    old_sig = parse_signature(old_fn.args)
    new_sig = parse_signature(new_fn.args)

    errors: list[ParameterError] = []

    # ------------------------------------------------------------------ #
    # 1. Positional / pos-only parameters
    # ------------------------------------------------------------------ #
    # (a) existing parameters must line up
    for idx, old_param in enumerate(old_sig.positional):
        if idx >= len(new_sig.positional):
            # No counterpart at this index in the new signature: removed.
            errors.append(
                ParameterError(
                    message=f"Positional param '{old_param.name}' was removed.",
                    param_name=old_param.name,
                    lineno=old_param.lineno,
                    col_offset=old_param.col_offset,
                )
            )
            continue
        new_param = new_sig.positional[idx]
        if old_param.name != new_param.name:
            errors.append(
                ParameterError(
                    message=(
                        f"Positional param order/name changed: "
                        f"'{old_param.name}' -> '{new_param.name}'."
                    ),
                    param_name=new_param.name,
                    lineno=new_param.lineno,
                    col_offset=new_param.col_offset,
                )
            )
            # Stop checking further positional params after first order/name mismatch
            break
        if (not old_param.is_required) and new_param.is_required:
            errors.append(
                ParameterError(
                    message=f"Optional positional param '{old_param.name}' became required.",
                    param_name=new_param.name,
                    lineno=new_param.lineno,
                    col_offset=new_param.col_offset,
                )
            )

    # (b) any extra new positional params must be optional and appended
    if len(new_sig.positional) > len(old_sig.positional):
        for idx in range(len(old_sig.positional), len(new_sig.positional)):
            new_param = new_sig.positional[idx]
            if new_param.is_required:
                errors.append(
                    ParameterError(
                        message=f"New required positional param '{new_param.name}' added.",
                        param_name=new_param.name,
                        lineno=new_param.lineno,
                        col_offset=new_param.col_offset,
                    )
                )

    # ------------------------------------------------------------------ #
    # 2. Keyword-only parameters (order-agnostic)
    # ------------------------------------------------------------------ #
    old_kw_names = {p.name for p in old_sig.keyword_only}
    new_kw_names = {p.name for p in new_sig.keyword_only}

    # Build mappings for easier lookup
    old_kw_by_name = {p.name: p for p in old_sig.keyword_only}
    new_kw_by_name = {p.name: p for p in new_sig.keyword_only}

    # removed or renamed
    for name in old_kw_names - new_kw_names:
        old_param = old_kw_by_name[name]
        errors.append(
            ParameterError(
                message=f"Keyword-only param '{name}' was removed.",
                param_name=name,
                lineno=old_param.lineno,
                col_offset=old_param.col_offset,
            )
        )

    # optional -> required upgrades
    for name in old_kw_names & new_kw_names:
        if not old_kw_by_name[name].is_required and new_kw_by_name[name].is_required:
            new_param = new_kw_by_name[name]
            errors.append(
                ParameterError(
                    message=f"Keyword-only param '{name}' became required.",
                    param_name=name,
                    lineno=new_param.lineno,
                    col_offset=new_param.col_offset,
                )
            )

    # new required keyword-only params
    errors.extend(
        ParameterError(
            message=f"New required keyword-only param '{param.name}' added.",
            param_name=param.name,
            lineno=param.lineno,
            col_offset=param.col_offset,
        )
        for param in new_sig.keyword_only
        if param.is_required and param.name not in old_kw_names
    )

    return errors
def _is_private(n: str) -> bool:
return n.startswith("_") and not n.startswith("__") and not n.endswith("__")
class FunctionSignatureExtractor(ast.NodeVisitor):
    """Collect public (sync and async) function nodes keyed by dotted class path.

    Fix: ``visit_FunctionDef`` and ``visit_AsyncFunctionDef`` were verbatim
    duplicates; both now share one implementation.
    """

    def __init__(self) -> None:
        # Qualified name (e.g. "Outer.method") -> function node.
        self.functions: dict[str, ast.FunctionDef | ast.AsyncFunctionDef] = {}
        # Stack of enclosing class nodes while visiting.
        self.stack: list[ast.ClassDef] = []

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        self.stack.append(node)
        self.generic_visit(node)
        self.stack.pop()

    def _record(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None:
        # Skip private functions and functions inside a private class.
        if _is_private(node.name) or (self.stack and _is_private(self.stack[-1].name)):
            return
        names = [*(c.name for c in self.stack), node.name]
        self.functions[".".join(names)] = node

    # ast.NodeVisitor dispatches on method name; both kinds share one body.
    visit_FunctionDef = _record
    visit_AsyncFunctionDef = _record
def get_changed_python_files(base_branch: str = "master") -> list[Path]:
    """Return the paths reported by `git diff --name-only base_branch...HEAD`."""
    # In GitHub Actions PR context, we need to fetch the base branch first
    # so it exists locally before diffing against it.
    if is_github_actions():
        subprocess.check_call(
            ["git", "fetch", "origin", f"{base_branch}:{base_branch}"],
        )
    diff_output = subprocess.check_output(
        ["git", "diff", "--name-only", f"{base_branch}...HEAD"], text=True
    )
    return [Path(name) for line in diff_output.splitlines() if (name := line.strip())]
def parse_functions(content: str) -> dict[str, ast.FunctionDef | ast.AsyncFunctionDef]:
    """Parse *content* and return its public functions keyed by dotted name."""
    visitor = FunctionSignatureExtractor()
    visitor.visit(ast.parse(content))
    return visitor.functions
def get_file_content_at_revision(file_path: Path, revision: str) -> str | None:
    """Return the file's text at *revision* via `git show`, or None on failure."""
    show_cmd = ["git", "show", f"{revision}:{file_path}"]
    try:
        return subprocess.check_output(show_cmd, text=True)
    except subprocess.CalledProcessError as e:
        # e.g. the file does not exist at that revision.
        print(f"Warning: Failed to get file content at revision: {e}", file=sys.stderr)
        return None
def compare_signatures(base_branch: str = "master") -> list[Error]:
    """Diff public function signatures of changed mlflow files against *base_branch*."""
    errors: list[Error] = []
    for file_path in get_changed_python_files(base_branch):
        # Ignore non-Python files
        if not file_path.suffix == ".py":
            continue

        # Ignore files not in the mlflow directory
        if file_path.parts[0] != "mlflow":
            continue

        # Ignore private modules
        if any(part.startswith("_") and part != "__init__.py" for part in file_path.parts):
            continue

        base_content = get_file_content_at_revision(file_path, base_branch)
        if base_content is None:
            # File not found in the base branch, likely added in the current branch
            continue

        if not file_path.exists():
            # File not found, likely deleted in the current branch
            continue

        current_content = file_path.read_text()
        base_functions = parse_functions(base_content)
        current_functions = parse_functions(current_content)

        # Only functions present in both revisions can break compatibility.
        for func_name in set(base_functions.keys()) & set(current_functions.keys()):
            base_func = base_functions[func_name]
            current_func = current_functions[func_name]
            if param_errors := check_signature_compatibility(base_func, current_func):
                # Create individual errors for each problematic parameter
                errors.extend(
                    Error(
                        file_path=file_path,
                        line=param_error.lineno,
                        column=param_error.col_offset + 1,
                        lines=[
                            "[Non-blocking | Ignore if not public API]",
                            param_error.message,
                            f"This change will break existing `{func_name}` calls.",
                            "If this is not intended, please fix it.",
                        ],
                    )
                    for param_error in param_errors
                )
    return errors
@dataclass
class Args:
    """Parsed command-line arguments."""

    base_branch: str  # branch to diff HEAD against (see parse_args for the default)
def parse_args() -> Args:
    """Parse CLI flags; --base-branch defaults to $GITHUB_BASE_REF or 'master'."""
    parser = argparse.ArgumentParser(
        description="Check for breaking changes in Python function signatures"
    )
    parser.add_argument("--base-branch", default=os.environ.get("GITHUB_BASE_REF", "master"))
    return Args(base_branch=parser.parse_args().base_branch)
def main() -> None:
    """Entry point: print one line per signature-compatibility finding."""
    args = parse_args()
    use_github_format = is_github_actions()
    for error in compare_signatures(args.base_branch):
        print(error.format(github=use_github_format))


if __name__ == "__main__":
    main()
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/check_function_signatures.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/dev/test_check_function_signatures.py | import ast
from dev.check_function_signatures import check_signature_compatibility
def _check(old: str, new: str):
    """Parse two single-function sources and diff their signatures."""
    old_fn = ast.parse(old).body[0]
    new_fn = ast.parse(new).body[0]
    return check_signature_compatibility(old_fn, new_fn)


def test_no_changes():
    assert _check("def func(a, b=1): pass", "def func(a, b=1): pass") == []


def test_positional_param_removed():
    errors = _check("def func(a, b, c): pass", "def func(a, b): pass")
    assert len(errors) == 1
    assert errors[0].message == "Positional param 'c' was removed."
    assert errors[0].param_name == "c"


def test_positional_param_renamed():
    errors = _check("def func(a, b): pass", "def func(x, b): pass")
    assert len(errors) == 1
    assert "Positional param order/name changed: 'a' -> 'x'." in errors[0].message
    assert errors[0].param_name == "x"


def test_only_first_positional_rename_flagged():
    errors = _check("def func(a, b, c, d): pass", "def func(x, y, z, w): pass")
    assert len(errors) == 1
    assert "Positional param order/name changed: 'a' -> 'x'." in errors[0].message


def test_optional_positional_became_required():
    errors = _check("def func(a, b=1): pass", "def func(a, b): pass")
    assert len(errors) == 1
    assert errors[0].message == "Optional positional param 'b' became required."
    assert errors[0].param_name == "b"


def test_multiple_optional_became_required():
    errors = _check("def func(a, b=1, c=2): pass", "def func(a, b, c): pass")
    assert len(errors) == 2
    assert errors[0].message == "Optional positional param 'b' became required."
    assert errors[1].message == "Optional positional param 'c' became required."


def test_new_required_positional_param():
    errors = _check("def func(a): pass", "def func(a, b): pass")
    assert len(errors) == 1
    assert errors[0].message == "New required positional param 'b' added."
    assert errors[0].param_name == "b"


def test_new_optional_positional_param_allowed():
    assert _check("def func(a): pass", "def func(a, b=1): pass") == []


def test_keyword_only_param_removed():
    errors = _check("def func(*, a, b): pass", "def func(*, b): pass")
    assert len(errors) == 1
    assert errors[0].message == "Keyword-only param 'a' was removed."
    assert errors[0].param_name == "a"


def test_multiple_keyword_only_removed():
    errors = _check("def func(*, a, b, c): pass", "def func(*, b): pass")
    assert len(errors) == 2
    error_messages = {e.message for e in errors}
    assert "Keyword-only param 'a' was removed." in error_messages
    assert "Keyword-only param 'c' was removed." in error_messages


def test_optional_keyword_only_became_required():
    errors = _check("def func(*, a=1): pass", "def func(*, a): pass")
    assert len(errors) == 1
    assert errors[0].message == "Keyword-only param 'a' became required."
    assert errors[0].param_name == "a"


def test_new_required_keyword_only_param():
    errors = _check("def func(*, a): pass", "def func(*, a, b): pass")
    assert len(errors) == 1
    assert errors[0].message == "New required keyword-only param 'b' added."
    assert errors[0].param_name == "b"


def test_new_optional_keyword_only_allowed():
    assert _check("def func(*, a): pass", "def func(*, a, b=1): pass") == []


def test_complex_mixed_violations():
    errors = _check("def func(a, b=1, *, c, d=2): pass", "def func(x, b, *, c=3, e): pass")
    assert len(errors) == 3
    error_messages = [e.message for e in errors]
    assert any("Positional param order/name changed: 'a' -> 'x'." in msg for msg in error_messages)
    assert any("Keyword-only param 'd' was removed." in msg for msg in error_messages)
    assert any("New required keyword-only param 'e' added." in msg for msg in error_messages)


def test_parameter_error_has_location_info():
    errors = _check("def func(a): pass", "def func(b): pass")
    assert len(errors) == 1
    assert errors[0].lineno == 1
    assert errors[0].col_offset > 0


def test_async_function_compatibility():
    errors = _check("async def func(a, b=1): pass", "async def func(a, b): pass")
    assert len(errors) == 1
    assert errors[0].message == "Optional positional param 'b' became required."


def test_positional_only_compatibility():
    errors = _check("def func(a, /): pass", "def func(b, /): pass")
    assert len(errors) == 1
    assert "Positional param order/name changed: 'a' -> 'b'." in errors[0].message


def test_rename_stops_further_positional_checks():
    errors = _check("def func(a, b=1, c=2): pass", "def func(x, b, c): pass")
    assert len(errors) == 1
    assert "Positional param order/name changed: 'a' -> 'x'." in errors[0].message
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/dev/test_check_function_signatures.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_do_not_disable.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.do_not_disable import DoNotDisable
def test_do_not_disable(index_path: Path) -> None:
    """`noqa` comments for disallowed rule codes are flagged; others pass."""
    code = """
# Bad B006
# noqa: B006

# Bad F821
# noqa: F821

# Good
# noqa: B004
"""
    config = Config(select={DoNotDisable.name})
    violations = lint_file(Path("test.py"), code, config, index_path)
    assert len(violations) == 2
    assert all(isinstance(v.rule, DoNotDisable) for v in violations)
    assert violations[0].range == Range(Position(2, 0))
    assert violations[1].range == Range(Position(5, 0))


def test_do_not_disable_comma_separated(index_path: Path) -> None:
    """Comma-separated `noqa` lists (with or without spaces) are parsed."""
    code = """
# Bad: B006 and F821 both should be caught
# noqa: B006, F821

# Bad: B006 and F821 both should be caught (no space after comma)
# noqa: B006,F821

# Good: B004 is allowed
# noqa: B004, B005
"""
    config = Config(select={DoNotDisable.name})
    violations = lint_file(Path("test.py"), code, config, index_path)
    assert len(violations) == 2
    assert all(isinstance(v.rule, DoNotDisable) for v in violations)
    # Both violations should have both rules B006 and F821
    assert isinstance(violations[0].rule, DoNotDisable)
    assert isinstance(violations[1].rule, DoNotDisable)
    assert violations[0].rule.rules == {"B006", "F821"}
    assert violations[1].rule.rules == {"B006", "F821"}
    assert violations[0].range == Range(Position(2, 0))
    assert violations[1].range == Range(Position(5, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_do_not_disable.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_docstring_param_order.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.docstring_param_order import DocstringParamOrder
def test_docstring_param_order(index_path: Path) -> None:
    """Args documented out of signature order are flagged; in-order pass."""
    code = """
# Bad
def f(x: int, y: str) -> None:
    '''
    Args:
        y: Second param.
        x: First param.
    '''

# Good
def f(a: int, b: str) -> None:
    '''
    Args:
        a: First param.
        b: Second param.
    '''
"""
    config = Config(select={DocstringParamOrder.name})
    violations = lint_file(Path("test.py"), code, config, index_path)
    assert len(violations) == 1
    assert all(isinstance(v.rule, DocstringParamOrder) for v in violations)
    assert violations[0].range == Range(Position(2, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_docstring_param_order.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_forbidden_top_level_import.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.forbidden_top_level_import import ForbiddenTopLevelImport
def test_forbidden_top_level_import(index_path: Path) -> None:
    """Top-level imports of a configured forbidden module are flagged."""
    code = """
# Bad
import foo
from foo import bar

# Good
import baz
"""
    config = Config(
        select={ForbiddenTopLevelImport.name},
        forbidden_top_level_imports={"*": ["foo"]},
    )
    violations = lint_file(Path("test.py"), code, config, index_path)
    assert len(violations) == 2
    assert all(isinstance(v.rule, ForbiddenTopLevelImport) for v in violations)
    assert violations[0].range == Range(Position(2, 0))
    assert violations[1].range == Range(Position(3, 0))


def test_nested_if_in_type_checking_block(index_path: Path) -> None:
    """Imports under `if TYPE_CHECKING:` are exempt, even with nested blocks."""
    code = """
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    if True:
        pass
    import databricks # Should NOT be flagged
    from databricks import foo # Should NOT be flagged
"""
    config = Config(
        select={ForbiddenTopLevelImport.name},
        forbidden_top_level_imports={"*": ["databricks"]},
    )
    violations = lint_file(Path("test.py"), code, config, index_path)
    # Should have no violations since imports are inside TYPE_CHECKING
    assert len(violations) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_forbidden_top_level_import.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_no_rst.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.no_rst import NoRst
def test_no_rst(index_path: Path) -> None:
    """reST field-list docstrings are flagged; Google-style docstrings pass."""
    code = """
def bad(y: int) -> str:
    '''
    :param y: The parameter
    :returns: The result
    '''

def good(x: int) -> str:
    '''
    Args:
        x: The parameter.

    Returns:
        The result.
    '''
"""
    config = Config(select={NoRst.name})
    violations = lint_file(Path("test.py"), code, config, index_path)
    assert len(violations) == 1
    assert all(isinstance(v.rule, NoRst) for v in violations)
    assert violations[0].range == Range(Position(2, 4))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_no_rst.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_typing_extensions.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.typing_extensions import TypingExtensions
def test_typing_extensions(index_path: Path) -> None:
    """typing_extensions imports outside the allowlist are flagged."""
    code = """
# Bad
from typing_extensions import ParamSpec

# Good
from typing_extensions import Self
"""
    config = Config(
        select={TypingExtensions.name}, typing_extensions_allowlist=["typing_extensions.Self"]
    )
    violations = lint_file(Path("test.py"), code, config, index_path)
    assert len(violations) == 1
    assert all(isinstance(v.rule, TypingExtensions) for v in violations)
    assert violations[0].range == Range(Position(2, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_typing_extensions.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_implicit_optional.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules import ImplicitOptional
def test_implicit_optional(index_path: Path) -> None:
    """Annotations with a None default but no Optional wrapper are flagged."""
    code = """
from typing import Optional

# Bad
bad: int = None
class Bad:
    x: str = None

# Good
good: Optional[int] = None
class Good:
    x: Optional[str] = None
"""
    config = Config(select={ImplicitOptional.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 2
    assert all(isinstance(r.rule, ImplicitOptional) for r in results)
    assert results[0].range == Range(Position(4, 5))
    assert results[1].range == Range(Position(6, 7))


def test_implicit_optional_stringified(index_path: Path) -> None:
    """Stringified annotations: flagged unless they contain Optional or `| None`."""
    code = """
from typing import Optional

# Bad - stringified without Optional or None union
bad1: "int" = None
bad2: "str" = None
class Bad:
    x: "int" = None

# Good - stringified with Optional
good1: "Optional[int]" = None
good2: "Optional[str]" = None
class Good1:
    x: "Optional[str]" = None

# Good - stringified with | None
good3: "int | None" = None
good4: "str | None" = None
good5: "int|None" = None
class Good2:
    x: "SomeClass | None" = None
"""
    config = Config(select={ImplicitOptional.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 3
    assert all(isinstance(r.rule, ImplicitOptional) for r in results)
    assert results[0].range == Range(Position(4, 6))  # bad1
    assert results[1].range == Range(Position(5, 6))  # bad2
    assert results[2].range == Range(Position(7, 7))  # Bad.x
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_implicit_optional.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_multi_assign.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules import MultiAssign
def test_multi_assign(index_path: Path) -> None:
    """Tuple assignment from multiple non-constant values is flagged."""
    code = """
# Bad - non-constant values
x, y = func1(), func2()

# Good - unpacking from function
a, b = func()

# Good - all constants (allowed)
c, d = 1, 1
e, f, g = 0, 0, 0
h, i = "test", "test"
"""
    config = Config(select={MultiAssign.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 1
    assert all(isinstance(r.rule, MultiAssign) for r in results)
    assert results[0].range == Range(Position(2, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_multi_assign.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_unnamed_thread.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules import UnnamedThread
def test_unnamed_thread(index_path: Path) -> None:
    """`threading.Thread` constructed without a `name` argument is flagged."""
    code = """
import threading

# Bad
threading.Thread(target=lambda: None)

# Good
# threading.Thread(target=lambda: None, name="worker")
"""
    config = Config(select={UnnamedThread.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 1
    assert isinstance(results[0].rule, UnnamedThread)
    assert results[0].range == Range(Position(4, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_unnamed_thread.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_use_sys_executable.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules import UseSysExecutable
def test_use_sys_executable(index_path: Path) -> None:
    """Invoking the `mlflow` CLI directly is flagged; `sys.executable -m` passes."""
    code = """
import subprocess
import sys

# Bad
subprocess.run(["mlflow", "ui"])
subprocess.check_call(["mlflow", "ui"])

# Good
subprocess.run([sys.executable, "-m", "mlflow", "ui"])
subprocess.check_call([sys.executable, "-m", "mlflow", "ui"])
"""
    config = Config(select={UseSysExecutable.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 2
    assert all(isinstance(r.rule, UseSysExecutable) for r in results)
    assert results[0].range == Range(Position(5, 0))
    assert results[1].range == Range(Position(6, 0))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_use_sys_executable.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_missing_notebook_h1_header.py | import json
from pathlib import Path
from clint.config import Config
from clint.linter import lint_file
from clint.rules import MissingNotebookH1Header
def test_missing_notebook_h1_header(index_path: Path) -> None:
    """A notebook whose markdown cells lack an H1 (`# ...`) header is flagged."""
    notebook = {
        "cells": [
            {
                "cell_type": "markdown",
                "source": ["## Some other header"],
            },
            {
                "cell_type": "code",
                "source": ["print('hello')"],
            },
        ]
    }
    code = json.dumps(notebook)
    config = Config(select={MissingNotebookH1Header.name})
    results = lint_file(Path("test.ipynb"), code, config, index_path)
    assert len(results) == 1
    assert isinstance(results[0].rule, MissingNotebookH1Header)
def test_missing_notebook_h1_header_positive(index_path: Path) -> None:
    """A notebook containing an H1 markdown header produces no violations."""
    notebook = {
        "cells": [
            {
                "cell_type": "markdown",
                "source": ["# This is a title"],
            },
            {
                "cell_type": "code",
                "source": ["print('hello')"],
            },
        ]
    }
    code = json.dumps(notebook)
    config = Config(select={MissingNotebookH1Header.name})
    results = lint_file(Path("test_positive.ipynb"), code, config, index_path)
    assert len(results) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_missing_notebook_h1_header.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/data/polars_dataset.py | import json
import logging
from functools import cached_property
from inspect import isclass
from typing import Any, Final, TypedDict
import polars as pl
from polars.datatypes.classes import DataType as PolarsDataType
from polars.datatypes.classes import DataTypeClass as PolarsDataTypeClass
from mlflow.data.dataset import Dataset
from mlflow.data.dataset_source import DatasetSource
from mlflow.data.evaluation_dataset import EvaluationDataset
from mlflow.data.pyfunc_dataset_mixin import PyFuncConvertibleDatasetMixin, PyFuncInputsOutputs
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.types.schema import Array, ColSpec, DataType, Object, Property, Schema
_logger = logging.getLogger(__name__)
def hash_polars_df(df: pl.DataFrame) -> str:
    """Return a digest string for ``df`` derived from polars' per-row hashes.

    Probably not the best way to hash; see also:
    https://github.com/pola-rs/polars/issues/9743
    https://stackoverflow.com/q/76678160
    """
    row_hash_total = df.hash_rows().sum()
    return str(row_hash_total)
# An inferred column type: an exact MLflow DataType, a container type
# (Array/Object), or a plain-string fallback for unmappable dtypes
# (see _handle_unknown_dtype).
ColSpecType = DataType | Array | Object | str

# Polars dtypes with an exact MLflow DataType equivalent.
TYPE_MAP: Final[dict[PolarsDataTypeClass, DataType]] = {
    pl.Binary: DataType.binary,
    pl.Boolean: DataType.boolean,
    pl.Datetime: DataType.datetime,
    pl.Float32: DataType.float,
    pl.Float64: DataType.double,
    pl.Int8: DataType.integer,
    pl.Int16: DataType.integer,
    pl.Int32: DataType.integer,
    pl.Int64: DataType.long,
    pl.String: DataType.string,
    pl.Utf8: DataType.string,
}

# Polars dtypes mapped to a "close enough" MLflow DataType; infer_dtype emits
# a warning whenever one of these mappings is used.
CLOSE_MAP: Final[dict[PolarsDataTypeClass, DataType]] = {
    pl.Categorical: DataType.string,
    pl.Enum: DataType.string,
    pl.Date: DataType.datetime,
    pl.UInt8: DataType.integer,
    pl.UInt16: DataType.integer,
    pl.UInt32: DataType.long,
}

# Remaining types:
# pl.Decimal
# pl.UInt64
# pl.Duration
# pl.Time
# pl.Null
# pl.Object
# pl.Unknown
def infer_schema(df: pl.DataFrame) -> Schema:
    """Build an MLflow Schema by inferring a ColSpec for each column of ``df``."""
    colspecs = [infer_colspec(df[column]) for column in df.columns]
    return Schema(colspecs)
def infer_colspec(col: pl.Series, *, allow_unknown: bool = True) -> ColSpec:
    """Infer the ColSpec (type, name, required) for a single polars series."""
    inferred = infer_dtype(col.dtype, col.name, allow_unknown=allow_unknown)
    # A column with at least one non-null value is marked as required.
    has_values = col.count() > 0
    return ColSpec(type=inferred, name=col.name, required=has_values)
def infer_dtype(
    dtype: PolarsDataType | PolarsDataTypeClass, col_name: str, *, allow_unknown: bool
) -> ColSpecType:
    """Map a polars dtype (instance or class) to an MLflow ColSpec type.

    Args:
        dtype: The polars dtype to map; either an instantiated dtype or a dtype class.
        col_name: Column name, used for log messages and nested-field naming.
        allow_unknown: If False, raise ValueError on unmappable dtypes; if True,
            fall back to a string representation (or a placeholder for containers).

    Returns:
        A DataType for exact/close matches, Array/Object for container dtypes,
        or a string fallback for unknown dtypes.
    """
    cls: PolarsDataTypeClass = dtype if isinstance(dtype, PolarsDataTypeClass) else type(dtype)
    mapped = TYPE_MAP.get(cls)
    if mapped is not None:
        return mapped
    mapped = CLOSE_MAP.get(cls)
    if mapped is not None:
        # Fix: log through the module-level `_logger` (as the rest of this module
        # does) rather than the root `logging` module.
        _logger.warning(
            "Data type of Column '%s' contains dtype=%s which will be mapped to %s."
            " This is not an exact match but is close enough",
            col_name,
            dtype,
            mapped,
        )
        return mapped
    if not isinstance(dtype, PolarsDataType):
        return _handle_unknown_dtype(dtype=dtype, col_name=col_name, allow_unknown=allow_unknown)
    if isinstance(dtype, (pl.Array, pl.List)):
        # cannot check inner if not instantiated
        if isclass(dtype):
            if not allow_unknown:
                _raise_unknown_type(dtype)
            return Array("Unknown")
        inner = (
            "Unknown"
            if dtype.inner is None
            else infer_dtype(dtype.inner, f"{col_name}.[]", allow_unknown=allow_unknown)
        )
        return Array(inner)
    if isinstance(dtype, pl.Struct):
        # cannot check fields if not instantiated
        if isclass(dtype):
            if not allow_unknown:
                _raise_unknown_type(dtype)
            return Object([])
        return Object(
            [
                Property(
                    name=field.name,
                    dtype=infer_dtype(
                        field.dtype, f"{col_name}.{field.name}", allow_unknown=allow_unknown
                    ),
                )
                for field in dtype.fields
            ]
        )
    return _handle_unknown_dtype(dtype=dtype, col_name=col_name, allow_unknown=allow_unknown)
def _handle_unknown_dtype(dtype: Any, col_name: str, *, allow_unknown: bool) -> str:
    """Fall back to the string representation of an unmappable dtype.

    Raises ValueError (via _raise_unknown_type) when ``allow_unknown`` is False.
    """
    if not allow_unknown:
        _raise_unknown_type(dtype)
    # Fix: use the module-level `_logger` (was the root `logging` module) and
    # singular "Column" to match the wording used in infer_dtype.
    _logger.warning(
        "Data type of Column '%s' contains dtype=%s, which cannot be mapped to any DataType",
        col_name,
        dtype,
    )
    return str(dtype)
def _raise_unknown_type(dtype: Any) -> None:
msg = f"Unknown type: {dtype!r}"
raise ValueError(msg)
class PolarsDataset(Dataset, PyFuncConvertibleDatasetMixin):
    """A polars DataFrame for use with MLflow Tracking."""

    def __init__(
        self,
        df: pl.DataFrame,
        source: DatasetSource,
        targets: str | None = None,
        name: str | None = None,
        digest: str | None = None,
        predictions: str | None = None,
    ) -> None:
        """
        Args:
            df: A polars DataFrame.
            source: Source of the DataFrame.
            targets: Name of the target column. Optional.
            name: Name of the dataset. E.g. "wiki_train". If unspecified, a name is automatically
                generated.
            digest: Digest (hash, fingerprint) of the dataset. If unspecified, a digest is
                automatically computed.
            predictions: Name of the column containing model predictions, if the dataset contains
                model predictions. Optional. If specified, this column must be present in ``df``.

        Raises:
            MlflowException: If ``targets`` or ``predictions`` is specified but is not a
                column of ``df``.
        """
        if targets is not None and targets not in df.columns:
            raise MlflowException(
                f"DataFrame does not contain specified targets column: '{targets}'",
                INVALID_PARAMETER_VALUE,
            )
        if predictions is not None and predictions not in df.columns:
            raise MlflowException(
                f"DataFrame does not contain specified predictions column: '{predictions}'",
                INVALID_PARAMETER_VALUE,
            )
        # _df needs to be set before super init, as it is used in _compute_digest
        # see Dataset.__init__()
        self._df = df
        super().__init__(source=source, name=name, digest=digest)
        self._targets = targets
        self._predictions = predictions

    def _compute_digest(self) -> str:
        """Compute a digest for the dataset.

        Called if the user doesn't supply a digest when constructing the dataset.
        """
        return hash_polars_df(self._df)

    class PolarsDatasetConfig(TypedDict):
        # Shape of the dictionary returned by to_dict(); all values are strings.
        name: str
        digest: str
        source: str
        source_type: str
        schema: str
        profile: str

    def to_dict(self) -> PolarsDatasetConfig:
        """Create config dictionary for the dataset.

        Return a string dictionary containing the following fields: name, digest, source,
        source type, schema, and profile.
        """
        # Schema may be None if inference failed (see the `schema` property).
        schema = json.dumps({"mlflow_colspec": self.schema.to_dict()} if self.schema else None)
        return {
            "name": self.name,
            "digest": self.digest,
            "source": self.source.to_json(),
            "source_type": self.source._get_source_type(),
            "schema": schema,
            "profile": json.dumps(self.profile),
        }

    @property
    def df(self) -> pl.DataFrame:
        """Underlying DataFrame."""
        return self._df

    @property
    def source(self) -> DatasetSource:
        """Source of the dataset."""
        return self._source

    @property
    def targets(self) -> str | None:
        """Name of the target column.

        May be ``None`` if no target column is available.
        """
        return self._targets

    @property
    def predictions(self) -> str | None:
        """Name of the predictions column.

        May be ``None`` if no predictions column is available.
        """
        return self._predictions

    class PolarsDatasetProfile(TypedDict):
        # Shape of the dictionary returned by the `profile` property.
        num_rows: int
        num_elements: int

    @property
    def profile(self) -> PolarsDatasetProfile:
        """Profile of the dataset."""
        return {
            "num_rows": self._df.height,
            "num_elements": self._df.height * self._df.width,
        }

    @cached_property
    def schema(self) -> Schema | None:
        """Instance of :py:class:`mlflow.types.Schema` representing the tabular dataset.

        May be ``None`` if the schema cannot be inferred from the dataset.
        """
        try:
            return infer_schema(self._df)
        except Exception as e:
            _logger.warning("Failed to infer schema for PolarsDataset. Exception: %s", e)
            return None

    def to_pyfunc(self) -> PyFuncInputsOutputs:
        """Convert dataset to a collection of pyfunc inputs and outputs for model evaluation."""
        if self._targets:
            # Fix: pass the column name directly. The previous `drop(*self._targets)`
            # unpacked the targets *string* into individual characters, so any
            # multi-character column name (e.g. "label") would attempt to drop
            # columns "l", "a", "b", ...
            inputs = self._df.drop(self._targets)
            outputs = self._df.select(self._targets).to_series()
            return PyFuncInputsOutputs([inputs.to_pandas()], [outputs.to_pandas()])
        else:
            return PyFuncInputsOutputs([self._df.to_pandas()])

    def to_evaluation_dataset(self, path=None, feature_names=None) -> EvaluationDataset:
        """Convert dataset to an EvaluationDataset for model evaluation."""
        return EvaluationDataset(
            data=self._df.to_pandas(),
            targets=self._targets,
            path=path,
            feature_names=feature_names,
            predictions=self._predictions,
            name=self.name,
            digest=self.digest,
        )
def from_polars(
    df: pl.DataFrame,
    source: str | DatasetSource | None = None,
    targets: str | None = None,
    name: str | None = None,
    digest: str | None = None,
    predictions: str | None = None,
) -> PolarsDataset:
    """Construct a :py:class:`PolarsDataset <mlflow.data.polars_dataset.PolarsDataset>` instance.

    Args:
        df: A polars DataFrame.
        source: Source from which the DataFrame was derived, e.g. a filesystem
            path, an S3 URI, an HTTPS URL, a delta table name with version, or
            spark table etc. ``source`` may be specified as a URI, a path-like string,
            or an instance of
            :py:class:`DatasetSource <mlflow.data.dataset_source.DatasetSource>`.
            If unspecified, the source is assumed to be the code location
            (e.g. notebook cell, script, etc.) where
            :py:func:`from_polars <mlflow.data.from_polars>` is being called.
        targets: An optional target column name for supervised training. This column
            must be present in ``df``.
        name: Name of the dataset. If unspecified, a name is generated.
        digest: Dataset digest (hash). If unspecified, a digest is computed
            automatically.
        predictions: An optional predictions column name for model evaluation. This column
            must be present in ``df``.

    .. code-block:: python
        :test:
        :caption: Example

        import mlflow
        import polars as pl

        x = pl.DataFrame(
            [["tom", 10, 1, 1], ["nick", 15, 0, 1], ["julie", 14, 1, 1]],
            schema=["Name", "Age", "Label", "ModelOutput"],
        )
        dataset = mlflow.data.from_polars(x, targets="Label", predictions="ModelOutput")
    """
    from mlflow.data.code_dataset_source import CodeDatasetSource
    from mlflow.data.dataset_source_registry import resolve_dataset_source
    from mlflow.tracking.context import registry

    # Resolve the dataset source: no source means "the calling code location".
    if source is None:
        resolved_source: DatasetSource = CodeDatasetSource(tags=registry.resolve_tags())
    elif isinstance(source, DatasetSource):
        resolved_source = source
    else:
        resolved_source = resolve_dataset_source(source)
    return PolarsDataset(
        df=df,
        source=resolved_source,
        targets=targets,
        name=name,
        digest=digest,
        predictions=predictions,
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/data/polars_dataset.py",
"license": "Apache License 2.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/data/test_polars_dataset.py | from __future__ import annotations
import json
import re
from datetime import date, datetime
from pathlib import Path
import pandas as pd
import polars as pl
import pytest
from mlflow.data.code_dataset_source import CodeDatasetSource
from mlflow.data.evaluation_dataset import EvaluationDataset
from mlflow.data.filesystem_dataset_source import FileSystemDatasetSource
from mlflow.data.polars_dataset import PolarsDataset, from_polars, infer_schema
from mlflow.data.pyfunc_dataset_mixin import PyFuncInputsOutputs
from mlflow.exceptions import MlflowException
from mlflow.types.schema import Array, ColSpec, DataType, Object, Property, Schema
from tests.resources.data.dataset_source import SampleDatasetSource
@pytest.fixture(name="source", scope="module")
def sample_source() -> SampleDatasetSource:
    """Module-scoped sample dataset source shared by the tests below."""
    source_uri = "test:/my/test/uri"
    return SampleDatasetSource._resolve(source_uri)
def test_infer_schema() -> None:
    """infer_schema maps every supported polars dtype to the expected MLflow type.

    Covers exact mappings (e.g. Int64 -> long), close mappings (e.g. Date ->
    datetime, UInt32 -> long), and container types (List/Array/Struct).
    """
    data = [
        [
            b"asd",
            True,
            datetime(2024, 1, 1, 12, 34, 56, 789),
            10,
            10,
            10,
            10,
            10,
            10,
            "asd",
            "😆",
            "category",
            "val2",
            date(2024, 1, 1),
            10,
            10,
            10,
            [1, 2, 3],
            [1, 2, 3],
            {"col1": 1},
        ]
    ]
    schema = {
        "Binary": pl.Binary,
        "Boolean": pl.Boolean,
        "Datetime": pl.Datetime,
        "Float32": pl.Float32,
        "Float64": pl.Float64,
        "Int8": pl.Int8,
        "Int16": pl.Int16,
        "Int32": pl.Int32,
        "Int64": pl.Int64,
        "String": pl.String,
        "Utf8": pl.Utf8,
        "Categorical": pl.Categorical,
        "Enum": pl.Enum(["val1", "val2"]),
        "Date": pl.Date,
        "UInt8": pl.UInt8,
        "UInt16": pl.UInt16,
        "UInt32": pl.UInt32,
        "List": pl.List(pl.Int8),
        "Array": pl.Array(pl.Int8, 3),
        "Struct": pl.Struct({"col1": pl.Int8}),
    }
    df = pl.DataFrame(data=data, schema=schema)
    assert infer_schema(df) == Schema(
        [
            ColSpec(name="Binary", type=DataType.binary),
            ColSpec(name="Boolean", type=DataType.boolean),
            ColSpec(name="Datetime", type=DataType.datetime),
            ColSpec(name="Float32", type=DataType.float),
            ColSpec(name="Float64", type=DataType.double),
            ColSpec(name="Int8", type=DataType.integer),
            ColSpec(name="Int16", type=DataType.integer),
            ColSpec(name="Int32", type=DataType.integer),
            ColSpec(name="Int64", type=DataType.long),
            ColSpec(name="String", type=DataType.string),
            ColSpec(name="Utf8", type=DataType.string),
            ColSpec(name="Categorical", type=DataType.string),
            ColSpec(name="Enum", type=DataType.string),
            ColSpec(name="Date", type=DataType.datetime),
            ColSpec(name="UInt8", type=DataType.integer),
            ColSpec(name="UInt16", type=DataType.integer),
            ColSpec(name="UInt32", type=DataType.long),
            ColSpec(name="List", type=Array(DataType.integer)),
            ColSpec(name="Array", type=Array(DataType.integer)),
            ColSpec(name="Struct", type=Object([Property(name="col1", dtype=DataType.integer)])),
        ]
    )
def test_conversion_to_json(source: SampleDatasetSource) -> None:
    """to_json() serializes name/digest/source/profile and the colspec schema."""
    dataset = PolarsDataset(
        df=pl.DataFrame([1, 2, 3], schema=["Numbers"]), source=source, name="testname"
    )
    dataset_json = dataset.to_json()
    parsed_json = json.loads(dataset_json)
    assert parsed_json.keys() <= {"name", "digest", "source", "source_type", "schema", "profile"}
    assert parsed_json["name"] == dataset.name
    assert parsed_json["digest"] == dataset.digest
    assert parsed_json["source"] == dataset.source.to_json()
    assert parsed_json["source_type"] == dataset.source._get_source_type()
    assert parsed_json["profile"] == json.dumps(dataset.profile)
    # The schema round-trips through the "mlflow_colspec" wrapper.
    schema_json = json.dumps(json.loads(parsed_json["schema"])["mlflow_colspec"])
    assert Schema.from_json(schema_json) == dataset.schema


def test_digest_property_has_expected_value(source: SampleDatasetSource) -> None:
    """The auto-computed digest is a decimal string."""
    dataset = PolarsDataset(df=pl.DataFrame([1, 2, 3], schema=["Numbers"]), source=source)
    assert dataset.digest == dataset._compute_digest()
    # Digest value varies across Polars versions due to hash_rows() implementation changes
    assert re.match(r"^\d+$", dataset.digest)


def test_digest_consistent(source: SampleDatasetSource) -> None:
    """Row order does not affect the digest (per-row hashes are summed)."""
    dataset1 = PolarsDataset(
        df=pl.DataFrame({"numbers": [1, 2, 3], "strs": ["a", "b", "c"]}), source=source
    )
    dataset2 = PolarsDataset(
        df=pl.DataFrame({"numbers": [2, 3, 1], "strs": ["b", "c", "a"]}), source=source
    )
    assert dataset1.digest == dataset2.digest


def test_digest_change(source: SampleDatasetSource) -> None:
    """Different data yields a different digest."""
    dataset1 = PolarsDataset(
        df=pl.DataFrame({"numbers": [1, 2, 3], "strs": ["a", "b", "c"]}), source=source
    )
    dataset2 = PolarsDataset(
        df=pl.DataFrame({"numbers": [10, 20, 30], "strs": ["aa", "bb", "cc"]}), source=source
    )
    assert dataset1.digest != dataset2.digest


def test_df_property(source: SampleDatasetSource) -> None:
    """The `df` property exposes the wrapped DataFrame unchanged."""
    df = pl.DataFrame({"numbers": [1, 2, 3]})
    dataset = PolarsDataset(df=df, source=source)
    assert dataset.df.equals(df)


def test_targets_none(source: SampleDatasetSource) -> None:
    """targets defaults to None when not supplied."""
    df_no_targets = pl.DataFrame({"numbers": [1, 2, 3]})
    dataset_no_targets = PolarsDataset(df=df_no_targets, source=source)
    assert dataset_no_targets._targets is None


def test_targets_not_none(source: SampleDatasetSource) -> None:
    """A valid targets column name is stored as-is."""
    df_with_targets = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    dataset_with_targets = PolarsDataset(df=df_with_targets, source=source, targets="c")
    assert dataset_with_targets._targets == "c"


def test_targets_invalid(source: SampleDatasetSource) -> None:
    """A targets column missing from the DataFrame raises MlflowException."""
    df = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    with pytest.raises(
        MlflowException,
        match="DataFrame does not contain specified targets column: 'd'",
    ):
        PolarsDataset(df=df, source=source, targets="d")
def test_to_pyfunc_wo_outputs(source: SampleDatasetSource) -> None:
    """Without targets, to_pyfunc() yields only pandas inputs."""
    df = pl.DataFrame({"numbers": [1, 2, 3]})
    dataset = PolarsDataset(df=df, source=source)
    input_outputs = dataset.to_pyfunc()
    assert isinstance(input_outputs, PyFuncInputsOutputs)
    assert len(input_outputs.inputs) == 1
    assert isinstance(input_outputs.inputs[0], pd.DataFrame)
    assert input_outputs.inputs[0].equals(pd.DataFrame({"numbers": [1, 2, 3]}))


def test_to_pyfunc_with_outputs(source: SampleDatasetSource) -> None:
    """With targets, to_pyfunc() splits the target column into the outputs."""
    df = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    dataset = PolarsDataset(df=df, source=source, targets="c")
    input_outputs = dataset.to_pyfunc()
    assert isinstance(input_outputs, PyFuncInputsOutputs)
    assert len(input_outputs.inputs) == 1
    assert isinstance(input_outputs.inputs[0], pd.DataFrame)
    assert input_outputs.inputs[0].equals(pd.DataFrame({"a": [1, 1], "b": [2, 2]}))
    assert len(input_outputs.outputs) == 1
    assert isinstance(input_outputs.outputs[0], pd.Series)
    assert input_outputs.outputs[0].equals(pd.Series([3, 3], name="c"))


def test_from_polars_with_targets(tmp_path: Path) -> None:
    """from_polars() with targets produces a dataset whose pyfunc split is correct."""
    df = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    path = tmp_path / "temp.csv"
    df.write_csv(path)
    dataset = from_polars(df, targets="c", source=str(path))
    input_outputs = dataset.to_pyfunc()
    assert isinstance(input_outputs, PyFuncInputsOutputs)
    assert len(input_outputs.inputs) == 1
    assert isinstance(input_outputs.inputs[0], pd.DataFrame)
    assert input_outputs.inputs[0].equals(pd.DataFrame({"a": [1, 1], "b": [2, 2]}))
    assert len(input_outputs.outputs) == 1
    assert isinstance(input_outputs.outputs[0], pd.Series)
    assert input_outputs.outputs[0].equals(pd.Series([3, 3], name="c"))


def test_from_polars_file_system_datasource(tmp_path: Path) -> None:
    """A string path source resolves to a FileSystemDatasetSource."""
    df = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    path = tmp_path / "temp.csv"
    df.write_csv(path)
    mlflow_df = from_polars(df, source=str(path))
    assert isinstance(mlflow_df, PolarsDataset)
    assert mlflow_df.df.equals(df)
    assert mlflow_df.schema == infer_schema(df)
    assert mlflow_df.profile == {"num_rows": 2, "num_elements": 6}
    assert isinstance(mlflow_df.source, FileSystemDatasetSource)


def test_from_polars_no_source_specified() -> None:
    """Without a source, the calling code location becomes a CodeDatasetSource."""
    df = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    mlflow_df = from_polars(df)
    assert isinstance(mlflow_df, PolarsDataset)
    assert isinstance(mlflow_df.source, CodeDatasetSource)
    assert "mlflow.source.name" in mlflow_df.source.to_json()


def test_to_evaluation_dataset(source: SampleDatasetSource) -> None:
    """to_evaluation_dataset() converts to pandas features and numpy labels."""
    import numpy as np

    df = pl.DataFrame({"a": [1, 1], "b": [2, 2], "c": [3, 3]})
    dataset = PolarsDataset(df=df, source=source, targets="c", name="testname")
    evaluation_dataset = dataset.to_evaluation_dataset()
    assert evaluation_dataset.name is not None
    assert evaluation_dataset.digest is not None
    assert isinstance(evaluation_dataset, EvaluationDataset)
    assert isinstance(evaluation_dataset.features_data, pd.DataFrame)
    assert evaluation_dataset.features_data.equals(df.drop("c").to_pandas())
    assert isinstance(evaluation_dataset.labels_data, np.ndarray)
    assert np.array_equal(evaluation_dataset.labels_data, df["c"].to_numpy())
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/data/test_polars_dataset.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/utils/copy.py | from typing import Any
from mlflow.entities.span import LiveSpan, Span
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_STATE
from mlflow.tracing.trace_manager import InMemoryTraceManager
def copy_trace_to_experiment(trace_dict: dict[str, Any], experiment_id: str | None = None) -> str:
    """
    Copy the given trace to the current experiment.

    The copied trace will have a new trace ID and location metadata.

    Args:
        trace_dict: The trace dictionary returned from model serving endpoint.
            This can be either V2 or V3 trace.
        experiment_id: The ID of the experiment to copy the trace to.
            If not provided, the trace will be copied to the current experiment.

    Returns:
        The trace ID of the newly created copy.

    Raises:
        MlflowException: If the trace contains no root span (corrupted data).
    """
    new_trace_id = None
    new_root_span = None
    trace_manager = InMemoryTraceManager.get_instance()
    spans = [Span.from_dict(span_dict) for span_dict in trace_dict["data"]["spans"]]
    # Create a copy of spans in the current experiment
    for old_span in spans:
        new_span = LiveSpan.from_immutable_span(
            span=old_span,
            parent_span_id=old_span.parent_id,
            # NOTE(review): child spans rely on `new_trace_id` already being set by
            # the root span's iteration -- this assumes the root span appears
            # before its children in `spans`; confirm the serving payload ordering.
            trace_id=new_trace_id,
            # Only set the experiment ID for the root span.
            experiment_id=experiment_id if old_span.parent_id is None else None,
        )
        # we need to register the span to trace manager first before ending it
        # otherwise the span will not be correctly exported
        trace_manager.register_span(new_span)
        if old_span.parent_id is None:
            # Root span: keep it open until tags/metadata have been copied, since
            # ending the root span triggers the trace export below.
            new_root_span = new_span
            new_trace_id = new_span.trace_id
        else:
            new_span.end(end_time_ns=old_span.end_time_ns)
    if new_trace_id is None:
        raise MlflowException(
            "Root span not found in the trace. Perhaps the trace data is corrupted.",
            error_code=INVALID_STATE,
        )
    if info := trace_dict.get("info"):
        with trace_manager.get_trace(trace_id=new_trace_id) as trace:
            # Copy user tags (excluding mlflow internal tags)
            if all_tags := info.get("tags"):
                if user_tags := {k: v for k, v in all_tags.items() if not k.startswith("mlflow.")}:
                    trace.info.tags.update(user_tags)
            # Copy trace metadata
            if trace_metadata := info.get("trace_metadata"):
                trace.info.trace_metadata.update(trace_metadata)
    # Close the root span triggers the trace export.
    # NOTE(review): `spans[0]` is assumed to be the root span here; if not, this
    # end time belongs to a different span -- confirm the ordering guarantee.
    new_root_span.end(end_time_ns=spans[0].end_time_ns)
    return new_trace_id
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/utils/copy.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/src/clint/rules/multi_assign.py | import ast
from clint.rules.base import Rule
class MultiAssign(Rule):
    def _message(self) -> str:
        return (
            "Avoid multiple assignment (e.g., `x, y = func()`). Use separate assignments "
            "instead for better readability and easier debugging."
        )

    @staticmethod
    def check(node: ast.Assign) -> bool:
        """Flag tuple-to-tuple assignments such as `x, y = func1(), func2()`.

        Only fires when a single tuple target unpacks a tuple literal of the
        same length (>= 2) and at least one value is not a constant.

        Not flagged:
          - `x, y = 1, 1` (all constants)
          - `x, y = z` / `a, b = func()` (unpacking a single value)
        """
        if len(node.targets) != 1:
            return False
        target = node.targets[0]
        value = node.value
        # Both sides must be tuple literals.
        if not (isinstance(target, ast.Tuple) and isinstance(value, ast.Tuple)):
            return False
        # Require a matching element count of at least two.
        if len(target.elts) != len(value.elts) or len(target.elts) < 2:
            return False
        # All-constant right-hand sides (e.g. `x, y = 1, 1`) are allowed.
        return not all(isinstance(elt, ast.Constant) for elt in value.elts)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/multi_assign.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/base.py | import inspect
import itertools
import re
from abc import ABC, abstractmethod
from typing import Any
# Monotonic sequence used to assign each concrete rule a unique numeric ID.
_id_counter = itertools.count(start=1)
# Splits CamelCase class names at every interior uppercase letter.
_CLASS_NAME_TO_RULE_NAME_REGEX = re.compile(r"(?<!^)(?=[A-Z])")


class Rule(ABC):
    # Assigned automatically to every concrete subclass (see __init_subclass__).
    id: str
    name: str

    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)
        # Abstract intermediate classes are skipped; only concrete rules get IDs.
        if inspect.isabstract(cls):
            return
        sequence_number = next(_id_counter)
        cls.id = "MLF{:04d}".format(sequence_number)
        # e.g. "MultiAssign" -> "multi-assign"
        cls.name = _CLASS_NAME_TO_RULE_NAME_REGEX.sub("-", cls.__name__).lower()

    @abstractmethod
    def _message(self) -> str:
        """
        Return a message that explains this rule.
        """

    @property
    def message(self) -> str:
        """Public accessor for the rule's explanatory message."""
        return self._message()
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/base.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/do_not_disable.py | from typing_extensions import Self
from clint.rules.base import Rule
class DoNotDisable(Rule):
    # Lint rules that must never be suppressed, mapped to the suggested fix.
    RULES = {
        "B006": "Use None as default and set value in function body instead of mutable defaults",
        "F821": "Use typing.TYPE_CHECKING for forward references to optional dependencies",
    }

    def __init__(self, rules: set[str]) -> None:
        # The offending subset of disabled rules.
        self.rules = rules

    @classmethod
    def check(cls, rules: set[str]) -> Self | None:
        """Return an instance carrying the forbidden rules, or None if clean."""
        forbidden = rules.intersection(DoNotDisable.RULES.keys())
        return cls(forbidden) if forbidden else None

    def _message(self) -> str:
        # Build message for all rules (works for single and multiple rules)
        hints = []
        for rule_id in sorted(self.rules):
            hint = DoNotDisable.RULES.get(rule_id)
            hints.append(f"{rule_id}: {hint}" if hint else rule_id)
        return f"DO NOT DISABLE {', '.join(hints)}"
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/do_not_disable.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/docstring_param_order.py | from clint.rules.base import Rule
class DocstringParamOrder(Rule):
    """Reports docstring parameters documented out of order."""

    def __init__(self, params: list[str]) -> None:
        # Parameter names in the (incorrect) order found in the docstring.
        self.params = params

    def _message(self) -> str:
        return f"Unordered parameters in docstring: {self.params}"
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/docstring_param_order.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/extraneous_docstring_param.py | from clint.rules.base import Rule
class ExtraneousDocstringParam(Rule):
    """Reports docstring parameters that do not exist in the signature."""

    def __init__(self, params: set[str]) -> None:
        # Names documented in the docstring but absent from the signature.
        self.params = params

    def _message(self) -> str:
        return f"Extraneous parameters in docstring: {self.params}"
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/extraneous_docstring_param.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/forbidden_set_active_model_usage.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class ForbiddenSetActiveModelUsage(Rule):
    def _message(self) -> str:
        return (
            "Usage of `set_active_model` is not allowed in mlflow, use `_set_active_model` instead."
        )

    @staticmethod
    def check(node: ast.Call, resolver: Resolver) -> bool:
        """True when the call resolves to `mlflow...set_active_model`."""
        names = resolver.resolve(node)
        if not names:
            return False
        # Resolved path must start at `mlflow` and terminate at `set_active_model`,
        # with any number of intermediate attributes in between.
        return len(names) >= 2 and names[0] == "mlflow" and names[-1] == "set_active_model"
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/forbidden_set_active_model_usage.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/forbidden_top_level_import.py | from clint.rules.base import Rule
class ForbiddenTopLevelImport(Rule):
    """Reports a module that must be imported lazily but was imported at top level."""

    def __init__(self, module: str) -> None:
        # Fully qualified name of the offending module.
        self.module = module

    def _message(self) -> str:
        return (
            f"Importing module `{self.module}` at the top level is not allowed "
            "in this file. Use lazy import instead."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/forbidden_top_level_import.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/forbidden_trace_ui_in_notebook.py | from clint.rules.base import Rule
class ForbiddenTraceUIInNotebook(Rule):
    """Flags notebooks whose cell outputs embed the MLflow Trace UI iframe."""

    def _message(self) -> str:
        return (
            "Found the MLflow Trace UI iframe in the notebook. "
            "The trace UI in cell outputs will not render correctly in previews or the website. "
            "Please run `mlflow.tracing.disable_notebook_display()` and rerun the cell "
            "to remove the iframe."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/forbidden_trace_ui_in_notebook.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/implicit_optional.py | import ast
from clint.rules.base import Rule
class ImplicitOptional(Rule):
    def _message(self) -> str:
        return "Use `Optional` if default value is `None`"

    @staticmethod
    def check(node: ast.AnnAssign) -> bool:
        """
        Returns True if the value to assign is `None` but the type annotation does
        not admit `None` (neither `Optional[...]` nor a `|` union containing
        `None`). For example: `a: int = None`.
        """
        if not ImplicitOptional._is_none(node.value):
            return False
        # Parse stringified annotations
        if isinstance(node.annotation, ast.Constant) and isinstance(node.annotation.value, str):
            try:
                parsed = ast.parse(node.annotation.value, mode="eval")
                ann = parsed.body
            except (SyntaxError, ValueError):
                # If parsing fails, the annotation is invalid and we trigger the rule
                # since we cannot verify it contains Optional or | None
                return True
        else:
            ann = node.annotation
        return not (ImplicitOptional._is_optional(ann) or ImplicitOptional._is_bitor_none(ann))

    @staticmethod
    def _is_optional(ann: ast.expr) -> bool:
        """
        Returns True if `ann` looks like `Optional[...]`.
        """
        return (
            isinstance(ann, ast.Subscript)
            and isinstance(ann.value, ast.Name)
            and ann.value.id == "Optional"
        )

    @staticmethod
    def _is_bitor_none(ann: ast.expr) -> bool:
        """
        Returns True if `ann` is a PEP 604 union (`X | Y | ...`) containing `None`.

        Fix: the previous implementation only accepted `None` as the rightmost
        operand (`... | None`), so valid annotations such as `None | int` or
        `int | None | str` were incorrectly flagged.
        """
        if not (isinstance(ann, ast.BinOp) and isinstance(ann.op, ast.BitOr)):
            return False
        return ImplicitOptional._contains_none(ann.left) or ImplicitOptional._contains_none(
            ann.right
        )

    @staticmethod
    def _contains_none(ann: ast.expr) -> bool:
        """
        Recursively search a `|` union operand for a literal `None`.
        """
        if isinstance(ann, ast.BinOp) and isinstance(ann.op, ast.BitOr):
            return ImplicitOptional._contains_none(ann.left) or ImplicitOptional._contains_none(
                ann.right
            )
        return isinstance(ann, ast.Constant) and ann.value is None

    @staticmethod
    def _is_none(value: ast.expr | None) -> bool:
        """
        Returns True if `value` represents `None`.
        """
        return isinstance(value, ast.Constant) and value.value is None
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/implicit_optional.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/incorrect_type_annotation.py | import ast
from clint.rules.base import Rule
class IncorrectTypeAnnotation(Rule):
    # Lowercase spellings that are almost always typos for typing constructs.
    MAPPING = {
        "callable": "Callable",
        "any": "Any",
    }

    def __init__(self, type_hint: str) -> None:
        # The incorrect annotation text found in the source.
        self.type_hint = type_hint

    @staticmethod
    def check(node: ast.Name) -> bool:
        """True when the annotation name is a known-bad spelling."""
        return node.id in IncorrectTypeAnnotation.MAPPING

    def _message(self) -> str:
        correct_hint = self.MAPPING.get(self.type_hint)
        if correct_hint is None:
            raise ValueError(
                f"Unexpected type: {self.type_hint}. It must be one of {list(self.MAPPING)}."
            )
        return f"Did you mean `{correct_hint}` instead of `{self.type_hint}`?"
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/incorrect_type_annotation.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/invalid_abstract_method.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class InvalidAbstractMethod(Rule):
    """Ensures abstract methods have an empty body (`pass`, `...`, or a docstring only)."""

    def _message(self) -> str:
        return (
            "Abstract method should only contain a single statement/expression, "
            "and it must be `pass`, `...`, or a docstring."
        )

    @staticmethod
    def _is_abstract_method(
        node: ast.FunctionDef | ast.AsyncFunctionDef, resolver: Resolver
    ) -> bool:
        """Return True when any decorator resolves to `abc.abstractmethod`."""
        for deco in node.decorator_list:
            resolved = resolver.resolve(deco)
            if resolved and resolved == ["abc", "abstractmethod"]:
                return True
        return False

    @staticmethod
    def _has_invalid_body(node: ast.FunctionDef | ast.AsyncFunctionDef) -> bool:
        """Return True when the body is anything other than a lone `pass`/`...`/docstring."""
        if len(node.body) != 1:
            # More than one statement is never allowed in an abstract method.
            return True
        only_stmt = node.body[0]
        if isinstance(only_stmt, ast.Pass):
            return False
        if isinstance(only_stmt, ast.Expr) and isinstance(only_stmt.value, ast.Constant):
            literal = only_stmt.value.value
            # `...` or a docstring is acceptable; any other constant is not.
            return literal is not ... and not isinstance(literal, str)
        # Any other statement is invalid.
        return True

    @staticmethod
    def check(node: ast.FunctionDef | ast.AsyncFunctionDef, resolver: Resolver) -> bool:
        """Return True for abstract methods whose body is not empty."""
        if not InvalidAbstractMethod._is_abstract_method(node, resolver):
            return False
        return InvalidAbstractMethod._has_invalid_body(node)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/invalid_abstract_method.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/invalid_experimental_decorator.py | import ast
from packaging.version import InvalidVersion, Version
from clint.resolver import Resolver
from clint.rules.base import Rule
def _is_valid_version(version: str) -> bool:
    """Return True when `version` parses as a plain release (no dev/pre/post segment)."""
    try:
        parsed = Version(version)
    except InvalidVersion:
        return False
    return not parsed.is_devrelease and not parsed.is_prerelease and not parsed.is_postrelease
class InvalidExperimentalDecorator(Rule):
    """Validates usage of the `@experimental` decorator from `mlflow.utils.annotations`."""

    def _message(self) -> str:
        return (
            "Invalid usage of `@experimental` decorator. It must be used with a `version` "
            "argument that is a valid semantic version string."
        )

    @staticmethod
    def check(node: ast.expr, resolver: Resolver) -> bool:
        """
        Return True when the decorator is `mlflow.utils.annotations.experimental` and it
        is not called with a `version` keyword holding a valid version string literal.
        """
        # An unresolvable node compares unequal and is ignored, same as a different symbol.
        if resolver.resolve(node) != ["mlflow", "utils", "annotations", "experimental"]:
            return False
        if not isinstance(node, ast.Call):
            # Bare `@experimental` without a call is always invalid.
            return True
        version_kw = next((kw.value for kw in node.keywords if kw.arg == "version"), None)
        if not (isinstance(version_kw, ast.Constant) and isinstance(version_kw.value, str)):
            # `version` is missing, or is not a string literal.
            return True
        # Finally, the string must parse as a plain semantic version.
        return not _is_valid_version(version_kw.value)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/invalid_experimental_decorator.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/log_model_artifact_path.py | import ast
from typing import TYPE_CHECKING
from clint.rules.base import Rule
from clint.utils import resolve_expr
if TYPE_CHECKING:
from clint.index import SymbolIndex
class LogModelArtifactPath(Rule):
    """Flags usage of the deprecated `artifact_path` argument of `mlflow.<flavor>.log_model`."""

    def _message(self) -> str:
        return "`artifact_path` parameter of `log_model` is deprecated. Use `name` instead."

    @staticmethod
    def check(node: ast.Call, index: "SymbolIndex") -> bool:
        """
        Return True when the call is `mlflow.<flavor>.log_model(...)` and the deprecated
        `artifact_path` argument is supplied positionally or by keyword.
        """
        parts = resolve_expr(node.func)
        if not parts or len(parts) != 3:
            return False
        root, flavor, method = parts
        if root != "mlflow" or method != "log_model":
            return False
        # TODO: Remove this once spark flavor supports logging models as logged model artifacts
        if flavor == "spark":
            return False
        pos_idx = LogModelArtifactPath._find_artifact_path_index(
            index, f"{root}.{flavor}.log_model"
        )
        if pos_idx is None:
            return False
        if len(node.args) > pos_idx:
            # Enough positional arguments to cover `artifact_path`'s slot.
            return True
        return any(kw.arg == "artifact_path" for kw in node.keywords)

    @staticmethod
    def _find_artifact_path_index(index: "SymbolIndex", function_name: str) -> int | None:
        """
        Locate `artifact_path` in the signature of `function_name` via the SymbolIndex,
        returning None when the function or the parameter is unknown.
        """
        func_info = index.resolve(function_name)
        if func_info is None:
            return None
        try:
            return func_info.all_args.index("artifact_path")
        except ValueError:
            return None
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/log_model_artifact_path.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/markdown_link.py | from clint.rules.base import Rule
class MarkdownLink(Rule):
    """Disallows Markdown-style links in docstrings; Sphinx only renders reST links."""

    def _message(self) -> str:
        return (
            "Markdown link is not supported in docstring. "
            "Use reST link instead (e.g., `Link text <link URL>`_)."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/markdown_link.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/missing_docstring_param.py | from clint.rules.base import Rule
class MissingDocstringParam(Rule):
    """Reports function parameters that are absent from the function's docstring."""

    def __init__(self, params: set[str]) -> None:
        # Names of the parameters missing from the docstring.
        self.params = params

    def _message(self) -> str:
        return f"Missing parameters in docstring: {self.params}"
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/missing_docstring_param.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/os_environ_delete_in_test.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class OsEnvironDeleteInTest(Rule):
    """Forbids direct deletion of `os.environ` entries in tests."""

    def _message(self) -> str:
        return (
            "Do not delete `os.environ` in test directly (del os.environ[...] or "
            "os.environ.pop(...)). Use `monkeypatch.delenv` "
            "(https://docs.pytest.org/en/stable/reference/reference.html#pytest.MonkeyPatch.delenv)."
        )

    @staticmethod
    def check(node: ast.Delete | ast.Call, resolver: Resolver) -> bool:
        """
        Return True for `del os.environ[...]` statements and `os.environ.pop(...)` calls.
        """
        if isinstance(node, ast.Call):
            # Handle: os.environ.pop("KEY")
            return resolver.resolve(node) == ["os", "environ", "pop"]
        if isinstance(node, ast.Delete):
            # Handle: del os.environ["KEY"] (exactly one subscript target)
            if len(node.targets) == 1 and isinstance(node.targets[0], ast.Subscript):
                return resolver.resolve(node.targets[0].value) == ["os", "environ"]
        return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/os_environ_delete_in_test.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/os_environ_set_in_test.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class OsEnvironSetInTest(Rule):
    """Forbids directly assigning to `os.environ` entries in tests."""

    def _message(self) -> str:
        return "Do not set `os.environ` in test directly. Use `monkeypatch.setenv` (https://docs.pytest.org/en/stable/reference/reference.html#pytest.MonkeyPatch.setenv)."

    @staticmethod
    def check(node: ast.Assign, resolver: Resolver) -> bool:
        """Return True when the single assignment target is a subscript of `os.environ`."""
        if len(node.targets) != 1:
            return False
        target = node.targets[0]
        if not isinstance(target, ast.Subscript):
            return False
        return resolver.resolve(target.value) == ["os", "environ"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/os_environ_set_in_test.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/pytest_mark_repeat.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class PytestMarkRepeat(Rule):
    """Prevents `@pytest.mark.repeat` (a local flaky-test debugging aid) from being committed."""

    def _message(self) -> str:
        return (
            "@pytest.mark.repeat decorator should not be committed. "
            "This decorator is meant for local testing only to check for flaky tests."
        )

    @staticmethod
    def check(decorator_list: list[ast.expr], resolver: Resolver) -> ast.expr | None:
        """Return the `@pytest.mark.repeat` decorator node if present, else None."""
        return next(
            (
                deco
                for deco in decorator_list
                if resolver.resolve(deco) == ["pytest", "mark", "repeat"]
            ),
            None,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/pytest_mark_repeat.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/typing_extensions.py | from clint.rules.base import Rule
class TypingExtensions(Rule):
    """Restricts `typing_extensions` usage to an explicitly allowlisted set of names."""

    def __init__(self, *, full_name: str, allowlist: list[str]) -> None:
        # Fully-qualified name of the disallowed symbol (e.g. "typing_extensions.Self").
        self.full_name = full_name
        # Names permitted via `tool.clint.typing-extensions-allowlist` in pyproject.toml.
        self.allowlist = allowlist

    def _message(self) -> str:
        return (
            f"`{self.full_name}` is not allowed to use. Only {self.allowlist} are allowed. "
            "You can extend `tool.clint.typing-extensions-allowlist` in `pyproject.toml` if needed "
            "but make sure that the version requirement for `typing-extensions` is compatible with "
            "the added types."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/typing_extensions.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/unknown_mlflow_arguments.py | from clint.rules.base import Rule
class UnknownMlflowArguments(Rule):
    """Reports keyword arguments that do not exist in the target MLflow function's signature."""

    def __init__(self, function_name: str, unknown_args: set[str]) -> None:
        self.function_name = function_name
        self.unknown_args = unknown_args

    def _message(self) -> str:
        quoted = [f"`{arg}`" for arg in sorted(self.unknown_args)]
        return (
            f"Unknown arguments {', '.join(quoted)} passed to `{self.function_name}`. "
            "Check the function signature for valid parameter names."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/unknown_mlflow_arguments.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/unknown_mlflow_function.py | from clint.rules.base import Rule
class UnknownMlflowFunction(Rule):
    """Reports calls to MLflow functions that cannot be found in the symbol index."""

    def __init__(self, function_name: str) -> None:
        # Fully-qualified name of the function that could not be resolved.
        self.function_name = function_name

    def _message(self) -> str:
        return (
            f"Unknown MLflow function: `{self.function_name}`. "
            "This function may not exist or could be misspelled."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/unknown_mlflow_function.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/unnamed_thread.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class UnnamedThread(Rule):
    """Requires every `threading.Thread(...)` construction to pass a `name`."""

    def _message(self) -> str:
        return (
            "`threading.Thread()` must be called with a `name` argument to improve debugging "
            "and traceability of thread-related issues."
        )

    @staticmethod
    def check(node: ast.Call, resolver: Resolver) -> bool:
        """Return True for `threading.Thread(...)` calls that omit the `name` keyword."""
        if resolver.resolve(node) != ["threading", "Thread"]:
            return False
        return all(keyword.arg != "name" for keyword in node.keywords)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/unnamed_thread.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/unparameterized_generic_type.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class UnparameterizedGenericType(Rule):
    """Flags bare generic types (e.g. `list`) used as annotations without type parameters."""

    def __init__(self, type_hint: str) -> None:
        self.type_hint = type_hint

    @staticmethod
    def is_generic_type(node: ast.Name | ast.Attribute, resolver: Resolver) -> bool:
        """Return True when `node` refers to a generic type that requires parameters."""
        resolved = resolver.resolve(node)
        if resolved:
            return tuple(resolved) in {("typing", "Callable"), ("typing", "Sequence")}
        # Unresolved bare name: fall back to the builtin container types.
        return isinstance(node, ast.Name) and node.id in {
            "dict",
            "list",
            "set",
            "tuple",
            "frozenset",
        }

    def _message(self) -> str:
        return (
            f"Generic type `{self.type_hint}` must be parameterized "
            "(e.g., `list[str]` rather than `list`)."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/unparameterized_generic_type.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/rules/use_sys_executable.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class UseSysExecutable(Rule):
    """Requires `sys.executable -m mlflow` instead of the bare `mlflow` CLI in subprocesses."""

    def _message(self) -> str:
        return (
            "Use `[sys.executable, '-m', 'mlflow', ...]` when running mlflow CLI in a subprocess."
        )

    @staticmethod
    def check(node: ast.Call, resolver: Resolver) -> bool:
        """
        Return True for subprocess invocations whose command list starts with "mlflow",
        e.g. `subprocess.Popen(["mlflow", ...])`.
        """
        resolved = resolver.resolve(node)
        if not resolved or len(resolved) != 2:
            return False
        module, func = resolved
        if module != "subprocess" or func not in ("Popen", "run", "check_output", "check_call"):
            return False
        if not node.args:
            return False
        cmd = node.args[0]
        if not (isinstance(cmd, ast.List) and cmd.elts):
            return False
        head = cmd.elts[0]
        return (
            isinstance(head, ast.Constant)
            and isinstance(head.value, str)
            and head.value == "mlflow"
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/use_sys_executable.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/src/clint/utils.py | from __future__ import annotations
import ast
import re
import subprocess
from functools import lru_cache
from pathlib import Path
@lru_cache(maxsize=1)
def get_repo_root() -> Path:
    """Find the git repository root directory with caching."""
    cmd = ["git", "rev-parse", "--show-toplevel"]
    try:
        out = subprocess.check_output(cmd, text=True)
    except (OSError, subprocess.CalledProcessError) as e:
        # OSError covers a missing git binary; CalledProcessError covers "not a repo".
        raise RuntimeError("Failed to find git repository root") from e
    return Path(out.strip())
def resolve_expr(expr: ast.expr) -> list[str] | None:
"""
Resolves `expr` to a list of attribute names. For example, given `expr` like
`some.module.attribute`, ['some', 'module', 'attribute'] is returned.
If `expr` is not resolvable, `None` is returned.
"""
if isinstance(expr, ast.Attribute):
base = resolve_expr(expr.value)
if base is None:
return None
return base + [expr.attr]
elif isinstance(expr, ast.Name):
return [expr.id]
return None
def get_ignored_rules_for_file(
    file_path: Path, per_file_ignores: dict[re.Pattern[str], set[str]]
) -> set[str]:
    """
    Collect the rule names that should be skipped for `file_path`.

    Args:
        file_path: The file path to check.
        per_file_ignores: Mapping of compiled regex patterns to sets of rule names.

    Returns:
        Union of the rule sets whose pattern fully matches the POSIX form of the path.
    """
    posix_path = file_path.as_posix()
    return set().union(
        *(rules for pattern, rules in per_file_ignores.items() if pattern.fullmatch(posix_path))
    )
# File extensions that clint lints; everything else is filtered out in resolve_paths.
ALLOWED_EXTS = {".md", ".mdx", ".rst", ".py", ".ipynb"}
def _git_ls_files(pathspecs: list[Path]) -> list[Path]:
    """
    List git-tracked plus untracked-but-not-ignored files under the given pathspecs.

    Git does not filter by extension; that filtering happens in the caller.
    """
    cmd = ["git", "ls-files", "--cached", "--others", "--exclude-standard", "--", *pathspecs]
    try:
        out = subprocess.check_output(cmd, text=True)
    except (OSError, subprocess.CalledProcessError) as e:
        raise RuntimeError("Failed to list git files") from e
    return [Path(line) for line in out.splitlines() if line]
def resolve_paths(paths: list[Path]) -> list[Path]:
    """
    Expand CLI path arguments into the sorted list of files to lint.

    Includes git-tracked and untracked (non-ignored) files, restricted to the
    extensions in ALLOWED_EXTS, and drops paths that no longer exist on disk.
    """
    pathspecs = paths or [Path(".")]
    candidates = _git_ls_files(pathspecs)
    lintable = {p for p in candidates if p.suffix.lower() in ALLOWED_EXTS and p.exists()}
    return sorted(lintable)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/utils.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/src/clint/index.py | """Symbol indexing for MLflow codebase.
This module provides efficient indexing and lookup of Python symbols (functions, classes)
across the MLflow codebase using AST parsing and parallel processing.
Key components:
- FunctionInfo: Lightweight function signature information
- ModuleSymbolExtractor: AST visitor for extracting symbols from modules
- SymbolIndex: Main index class for symbol resolution and lookup
Example usage:
```python
# Build an index of all MLflow symbols
index = SymbolIndex.build()
# Look up function signature information
func_info = index.resolve("mlflow.log_metric")
print(f"Arguments: {func_info.args}") # -> ['key', 'value', 'step', ...]
```
"""
import ast
import multiprocessing
import pickle
import subprocess
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import dataclass, field
from pathlib import Path
from typing_extensions import Self
from clint.utils import get_repo_root
@dataclass
class FunctionInfo:
"""Lightweight function signature information for efficient serialization."""
has_vararg: bool # *args
has_kwarg: bool # **kwargs
args: list[str] = field(default_factory=list) # Regular arguments
kwonlyargs: list[str] = field(default_factory=list) # Keyword-only arguments
posonlyargs: list[str] = field(default_factory=list) # Positional-only arguments
@classmethod
def from_func_def(
cls, node: ast.FunctionDef | ast.AsyncFunctionDef, skip_self: bool = False
) -> Self:
"""Create FunctionInfo from an AST function definition node."""
args = node.args.args
if skip_self and args:
args = args[1:] # Skip 'self' for methods
return cls(
has_vararg=node.args.vararg is not None,
has_kwarg=node.args.kwarg is not None,
args=[arg.arg for arg in args],
kwonlyargs=[arg.arg for arg in node.args.kwonlyargs],
posonlyargs=[arg.arg for arg in node.args.posonlyargs],
)
@property
def all_args(self) -> list[str]:
return self.posonlyargs + self.args + self.kwonlyargs
class ModuleSymbolExtractor(ast.NodeVisitor):
    """Extracts function definitions and import mappings from a Python module."""

    def __init__(self, mod: str) -> None:
        # Dotted name of the module being scanned (e.g. "mlflow.sklearn").
        self.mod = mod
        # Maps "<mod>.<alias>" -> fully-qualified imported mlflow name.
        self.import_mapping: dict[str, str] = {}
        # Maps "<mod>.<name>" -> signature info for public functions/classes.
        self.func_mapping: dict[str, FunctionInfo] = {}

    def visit_Import(self, node: ast.Import) -> None:
        # Only aliased `import mlflow.x as y` introduces a new local name to track;
        # a plain `import mlflow.x` is addressed by its full path already.
        for alias in node.names:
            if not alias.name.startswith("mlflow."):
                continue
            if alias.asname:
                self.import_mapping[f"{self.mod}.{alias.asname}"] = alias.name

    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
        # Relative imports (module is None) and non-mlflow modules are ignored.
        if node.module is None or not node.module.startswith("mlflow."):
            return
        for alias in node.names:
            if alias.name.startswith("_"):
                continue
            if alias.asname:
                self.import_mapping[f"{self.mod}.{alias.asname}"] = f"{node.module}.{alias.name}"
            else:
                self.import_mapping[f"{self.mod}.{alias.name}"] = f"{node.module}.{alias.name}"

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        # Private functions are not part of the public API surface.
        if node.name.startswith("_"):
            return
        self.func_mapping[f"{self.mod}.{node.name}"] = FunctionInfo.from_func_def(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
        if node.name.startswith("_"):
            return
        self.func_mapping[f"{self.mod}.{node.name}"] = FunctionInfo.from_func_def(node)

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        """Register the class under its `__init__` signature and record its
        class/static methods.

        BUG FIX: the previous `for ... else` had no `break`, so the else branch
        always ran and overwrote a recorded `__init__` signature with the
        permissive fallback. Track whether `__init__` was seen explicitly.
        """
        has_init = False
        for stmt in node.body:
            if not isinstance(stmt, ast.FunctionDef):
                continue
            if stmt.name == "__init__":
                has_init = True
                info = FunctionInfo.from_func_def(stmt, skip_self=True)
                self.func_mapping[f"{self.mod}.{node.name}"] = info
            elif any(
                isinstance(deco, ast.Name) and deco.id in ("classmethod", "staticmethod")
                for deco in stmt.decorator_list
            ):
                info = FunctionInfo.from_func_def(stmt, skip_self=True)
                self.func_mapping[f"{self.mod}.{node.name}.{stmt.name}"] = info
        if not has_init:
            # If no __init__ found, still add the class with *args and **kwargs so
            # call-site checks do not produce false positives.
            self.func_mapping[f"{self.mod}.{node.name}"] = FunctionInfo(
                has_vararg=True, has_kwarg=True
            )
def extract_symbols_from_file(
    rel_path: str, content: str
) -> tuple[dict[str, str], dict[str, FunctionInfo]] | None:
    """Extract function definitions and import mappings from a Python file.

    Returns None for files outside the `mlflow` package and files that fail to parse.
    """
    path = Path(rel_path)
    if not path.parts or path.parts[0] != "mlflow":
        return None
    try:
        tree = ast.parse(content)
    except (SyntaxError, UnicodeDecodeError):
        return None
    # `mlflow/foo/__init__.py` -> "mlflow.foo"; `mlflow/foo/bar.py` -> "mlflow.foo.bar"
    if path.name == "__init__.py":
        module_name = ".".join(path.parts[:-1])
    else:
        module_name = ".".join([*path.parts[:-1], path.stem])
    extractor = ModuleSymbolExtractor(module_name)
    extractor.visit(tree)
    return extractor.import_mapping, extractor.func_mapping
class SymbolIndex:
    """Index of all symbols (functions, classes) in the MLflow codebase."""

    def __init__(
        self,
        import_mapping: dict[str, str],
        func_mapping: dict[str, FunctionInfo],
    ) -> None:
        # Maps an import alias (e.g. "mlflow.foo.bar") to the name it refers to.
        self.import_mapping = import_mapping
        # Maps a fully-qualified symbol name to its signature information.
        self.func_mapping = func_mapping

    def save(self, path: Path) -> None:
        """Serialize the index to `path` using pickle."""
        with path.open("wb") as f:
            pickle.dump((self.import_mapping, self.func_mapping), f)

    @classmethod
    def load(cls, path: Path) -> Self:
        """Load an index previously written by `save`.

        NOTE: pickle is acceptable here because the file is generated locally by
        clint itself, never received from untrusted input.
        """
        with path.open("rb") as f:
            import_mapping, func_mapping = pickle.load(f)
        return cls(import_mapping, func_mapping)

    @classmethod
    def build(cls) -> Self:
        """Build the index by parsing all git-tracked `mlflow/*.py` files in parallel."""
        repo_root = get_repo_root()
        py_files = subprocess.check_output(
            ["git", "-C", repo_root, "ls-files", "mlflow/*.py"], text=True
        ).splitlines()
        mapping: dict[str, str] = {}
        func_mapping: dict[str, FunctionInfo] = {}
        # Ensure at least 1 worker to avoid ProcessPoolExecutor ValueError
        max_workers = max(1, min(multiprocessing.cpu_count(), len(py_files)))
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            futures = {}
            for py_file in py_files:
                abs_file_path = repo_root / py_file
                # Files may be deleted locally while still listed by git.
                if not abs_file_path.exists():
                    continue
                content = abs_file_path.read_text()
                f = executor.submit(extract_symbols_from_file, py_file, content)
                futures[f] = py_file
            for future in as_completed(futures):
                if result := future.result():
                    file_imports, file_functions = result
                    mapping.update(file_imports)
                    func_mapping.update(file_functions)
        return cls(mapping, func_mapping)

    def _resolve_import(self, target: str) -> str:
        """Follow the import-alias chain from `target` to its final name."""
        resolved = target
        seen = {resolved}
        while v := self.import_mapping.get(resolved):
            if v in seen:
                # Circular import detected, break to avoid infinite loop
                break
            seen.add(v)
            resolved = v
        return resolved

    def resolve(self, target: str) -> FunctionInfo | None:
        """Resolve a symbol to its actual definition, following import chains."""
        if f := self.func_mapping.get(target):
            return f
        resolved = self._resolve_import(target)
        if f := self.func_mapping.get(resolved):
            return f
        if "." not in target:
            # BUG FIX: a bare name (e.g. "mlflow") has no attribute to split off;
            # without this guard rsplit(".", 1) yields one part and the two-name
            # unpacking below raises ValueError.
            return None
        # Fall back: resolve the parent module, then re-append the attribute.
        target, tail = target.rsplit(".", 1)
        resolved = self._resolve_import(target)
        if f := self.func_mapping.get(f"{resolved}.{tail}"):
            return f
        return None
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/index.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/tracing/config.py | from dataclasses import dataclass, field, replace
from typing import TYPE_CHECKING, Any, Callable
from mlflow.tracing.utils.processor import validate_span_processors
if TYPE_CHECKING:
from mlflow.entities.span import LiveSpan
@dataclass
class TracingConfig:
    """Configuration for MLflow tracing behavior."""

    # TODO: Move more configuration options here, such as async logging, display, etc.
    # A list of functions to process spans before export.
    span_processors: list[Callable[["LiveSpan"], None]] = field(default_factory=list)

    def __post_init__(self):
        # Validate eagerly so misconfigured processors fail at configuration time,
        # not when the first span is exported.
        self.span_processors = validate_span_processors(self.span_processors)
# Global configuration instance for tracing. Mutated by `configure` and swapped
# back by `TracingConfigContext.__exit__` / `reset_config`.
_MLFLOW_TRACING_CONFIG = TracingConfig()
class TracingConfigContext:
    """Context manager for temporary tracing configuration changes.

    Note: the updates are applied eagerly in `__init__` (not in `__enter__`) so
    that `mlflow.tracing.configure(...)` also works as a plain function call;
    entering the `with` block only arms restoration of the previous config.
    """

    def __init__(self, config_updates: dict[str, Any]):
        self.config_updates = config_updates
        # Create a shallow copy of the current config
        self.previous_config = replace(_MLFLOW_TRACING_CONFIG)
        # Apply immediately; plain function-call usage relies on this.
        for key, value in self.config_updates.items():
            setattr(_MLFLOW_TRACING_CONFIG, key, value)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the snapshot taken in __init__, even when the block raised.
        global _MLFLOW_TRACING_CONFIG
        _MLFLOW_TRACING_CONFIG = self.previous_config
def get_config() -> TracingConfig:
    """
    Get the current tracing configuration.

    Returns:
        The current TracingConfig instance (the live global object, not a copy).
    """
    return _MLFLOW_TRACING_CONFIG
def reset_config():
    """
    Reset the tracing configuration to defaults.
    """
    # Rebind rather than mutate so snapshots held by any outstanding
    # TracingConfigContext remain unaffected.
    global _MLFLOW_TRACING_CONFIG
    _MLFLOW_TRACING_CONFIG = TracingConfig()
def configure(
    span_processors: list[Callable[["LiveSpan"], None]] | None = None,
) -> TracingConfigContext:
    """
    Configure MLflow tracing. Can be used as function or context manager.

    Only updates explicitly provided arguments, leaving others unchanged.

    Args:
        span_processors: List of functions to process spans before export.
            This is helpful for filtering/masking particular attributes from the span to
            prevent sensitive data from being logged or for reducing the size of the span.
            Each function must accept a single argument of type LiveSpan and should not
            return any value. When multiple functions are provided, they are applied
            sequentially in the order they are provided.

    Returns:
        TracingConfigContext: Context manager for temporary configuration changes.
        When used as a function, the configuration changes persist.
        When used as a context manager, changes are reverted on exit.

    Examples:
        .. code-block:: python

            def pii_filter(span):
                \"\"\"Example PII filter that masks sensitive data in span attributes.\"\"\"
                # Mask sensitive inputs
                if inputs := span.inputs:
                    for key, value in inputs.items():
                        if "password" in key.lower() or "token" in key.lower():
                            span.set_inputs({**inputs, key: "[REDACTED]"})

                # Mask sensitive outputs
                if outputs := span.outputs:
                    if isinstance(outputs, dict):
                        for key in outputs:
                            if "secret" in key.lower():
                                outputs[key] = "[REDACTED]"
                        span.set_outputs(outputs)

                # Mask sensitive attributes
                for attr_key in list(span.attributes.keys()):
                    if "api_key" in attr_key.lower():
                        span.set_attribute(attr_key, "[REDACTED]")


            # Permanent configuration change
            mlflow.tracing.configure(span_processors=[pii_filter])

            # Temporary configuration change
            with mlflow.tracing.configure(span_processors=[pii_filter]):
                # PII filtering enabled only in this block
                pass
    """
    # Collect only the arguments that were explicitly provided
    config_updates = {}
    if span_processors is not None:
        config_updates["span_processors"] = span_processors

    # Return TracingConfigContext which handles both function and context manager usage
    return TracingConfigContext(config_updates)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/config.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/tracing/utils/processor.py | import logging
from mlflow.exceptions import MlflowException
_logger = logging.getLogger(__name__)
def apply_span_processors(span):
    """Apply configured span processors sequentially to the span."""
    # Imported lazily to avoid a circular import with mlflow.tracing.config.
    from mlflow.tracing.config import get_config

    processors = get_config().span_processors
    if not processors:
        return

    non_null_return_processors = []
    for processor in processors:
        try:
            if processor(span) is not None:
                non_null_return_processors.append(processor.__name__)
        except Exception as e:
            # A failing processor must not break tracing; log and keep going.
            _logger.warning(
                f"Span processor {processor.__name__} failed: {e}",
                exc_info=_logger.isEnabledFor(logging.DEBUG),
            )
    if non_null_return_processors:
        _logger.warning(
            f"Span processors {non_null_return_processors} returned a non-null value, "
            "but it will be ignored. Span processors should not return a value."
        )
def validate_span_processors(span_processors):
    """Validate that each span processor is a callable taking exactly one argument.

    Returns the (possibly empty) list of processors; raises MlflowException for
    non-callables or callables with the wrong arity.
    """
    processors = span_processors or []
    for processor in processors:
        if not callable(processor):
            raise MlflowException.invalid_parameter_value(
                "Span processor must be a callable function."
            )
        code = getattr(processor, "__code__", None)
        if code is None:
            # Builtins and partials lack __code__; skip the arity check for them.
            continue
        if code.co_argcount != 1:
            raise MlflowException.invalid_parameter_value(
                "Span processor must take exactly one argument that accepts a LiveSpan object."
            )
    return processors
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/utils/processor.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/tracing/utils/test_config.py | import pytest
import mlflow
from mlflow.tracing.config import TracingConfig, get_config
@pytest.fixture(autouse=True)
def reset_tracing_config():
    """Ensure each test starts and ends with the default tracing configuration."""
    mlflow.tracing.reset()
    yield
    # Also reset on teardown so changes made by the last test in this module
    # never leak into other test modules.
    mlflow.tracing.reset()
def test_tracing_config_default_values():
    """A freshly constructed TracingConfig has no span processors."""
    config = TracingConfig()
    assert config.span_processors == []
def test_configure():
    """configure() replaces the span processor list; an empty list clears it."""
    # Default config
    assert get_config().span_processors == []

    def dummy_filter(span):
        pass

    mlflow.tracing.configure(span_processors=[dummy_filter])
    assert get_config().span_processors == [dummy_filter]

    mlflow.tracing.configure(span_processors=[])
    assert get_config().span_processors == []
def test_configure_empty_call():
    """configure() with no arguments leaves the existing configuration untouched."""

    def dummy_filter(span):
        pass

    mlflow.tracing.configure(span_processors=[dummy_filter])
    assert get_config().span_processors == [dummy_filter]

    # No-op
    mlflow.tracing.configure()
    assert get_config().span_processors == [dummy_filter]
def test_reset_config():
    """reset() restores the default (empty) span processor list."""

    def filter1(span):
        pass

    assert get_config().span_processors == []

    mlflow.tracing.configure(span_processors=[filter1])
    assert get_config().span_processors == [filter1]

    mlflow.tracing.reset()
    assert get_config().span_processors == []
def test_configure_context_manager():
def filter1(span):
return
def filter2(span):
return
# Set initial config
mlflow.tracing.configure(span_processors=[filter1])
assert get_config().span_processors == [filter1]
with mlflow.tracing.configure(span_processors=[filter2]):
assert get_config().span_processors == [filter2]
with mlflow.tracing.configure(span_processors=[filter1, filter2]):
assert get_config().span_processors == [filter1, filter2]
# Config should be restored after context exit
assert get_config().span_processors == [filter2]
assert get_config().span_processors == [filter1]
def test_context_manager_with_exception():
def filter1(span):
pass
def filter2(span):
pass
mlflow.tracing.configure(span_processors=[filter1])
with pytest.raises(ValueError, match="test error"): # noqa: PT012
with mlflow.tracing.configure(span_processors=[filter2]):
assert get_config().span_processors == [filter2]
raise ValueError("test error")
# Config should be restored despite exception
assert get_config().span_processors == [filter1]
def test_context_manager_with_non_copyable_callable():
# Lambda functions are not deepcopyable
lambda_filter = lambda span: None # noqa: E731
# Configure with a lambda function
mlflow.tracing.configure(span_processors=[lambda_filter])
assert get_config().span_processors == [lambda_filter]
def regular_filter(span):
pass
# Context manager should still work with non-copyable callables
with mlflow.tracing.configure(span_processors=[regular_filter]):
assert get_config().span_processors == [regular_filter]
# Config should be restored
assert get_config().span_processors == [lambda_filter]
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/utils/test_config.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/utils/test_processor.py | from unittest.mock import patch
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.tracing.utils.processor import validate_span_processors
from tests.tracing.helper import get_traces
@mlflow.trace
def predict(text: str):
return "Answer: " + text
@pytest.fixture(autouse=True)
def reset_tracing_config():
"""Reset tracing configuration before each test."""
mlflow.tracing.reset()
def test_span_processors_no_processors_configured():
mlflow.tracing.configure(span_processors=[])
predict("test")
span = get_traces()[0].data.spans[0]
assert span.inputs == {"text": "test"}
assert span.outputs == "Answer: test"
def test_span_processors_single_processor_success():
def test_processor(span):
span.set_outputs("overridden_output")
span.set_attribute("test_attribute", "test_value")
mlflow.tracing.configure(span_processors=[test_processor])
predict("test")
span = get_traces()[0].data.spans[0]
assert span.inputs == {"text": "test"}
assert span.outputs == "overridden_output"
assert span.attributes["test_attribute"] == "test_value"
def test_apply_span_processors_multiple_processors_success():
def processor1(span):
span.set_outputs("overridden_output_1")
span.set_attribute("attr_1", "value_1")
def processor2(span):
span.set_outputs("overridden_output_2")
span.set_attribute("attr_2", "value_2")
mlflow.tracing.configure(span_processors=[processor1, processor2])
predict("test")
span = get_traces()[0].data.spans[0]
assert span.inputs == {"text": "test"}
assert span.outputs == "overridden_output_2"
assert span.attributes["attr_1"] == "value_1"
assert span.attributes["attr_2"] == "value_2"
def test_apply_span_processors_returns_non_none_warning():
def bad_processor(span):
return "some_value" # Should return nothing
def good_processor(span):
span.set_outputs("overridden_output")
with patch("mlflow.tracing.utils.processor._logger") as mock_logger:
mlflow.tracing.configure(span_processors=[bad_processor, good_processor])
predict("test")
mock_logger.warning.assert_called_once()
message = mock_logger.warning.call_args[0][0]
assert message.startswith("Span processors ['bad_processor'] returned a non-null value")
# Other processors should still be applied
span = get_traces()[0].data.spans[0]
assert span.outputs == "overridden_output"
def test_apply_span_processors_exception_handling():
def failing_processor(span):
raise ValueError("Test error")
def good_processor(span):
span.set_outputs("overridden_output")
with patch("mlflow.tracing.utils.processor._logger") as mock_logger:
mlflow.tracing.configure(span_processors=[failing_processor, good_processor])
predict("test")
span = get_traces()[0].data.spans[0]
assert span.outputs == "overridden_output"
mock_logger.warning.assert_called_once()
message = mock_logger.warning.call_args[0][0]
assert message.startswith("Span processor failing_processor failed")
def test_validate_span_processors_empty_input():
assert validate_span_processors(None) == []
assert validate_span_processors([]) == []
def test_validate_span_processors_valid_processors():
def processor1(span):
return None
def processor2(span):
return None
result = validate_span_processors([processor1, processor2])
assert result == [processor1, processor2]
def test_validate_span_processors_non_callable_raises_exception():
non_callable_processor = "not_a_function"
with pytest.raises(MlflowException, match=r"Span processor must be"):
validate_span_processors([non_callable_processor])
def test_validate_span_processors_invalid_arguments_raises_exception():
def processor_no_args():
return None
with pytest.raises(MlflowException, match=r"Span processor must take"):
validate_span_processors([processor_no_args])
def processor_extra_args(span, extra_arg):
return None
with pytest.raises(MlflowException, match=r"Span processor must take"):
validate_span_processors([processor_extra_args])
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/utils/test_processor.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/comments.py | import io
import re
import tokenize
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterator
from typing_extensions import Self
if TYPE_CHECKING:
from clint.linter import Position
NOQA_REGEX = re.compile(r"#\s*noqa\s*:\s*([A-Z]\d+(?:\s*,\s*[A-Z]\d+)*)", re.IGNORECASE)
@dataclass
class Noqa:
start: "Position"
end: "Position"
rules: set[str]
@classmethod
def from_token(cls, token: tokenize.TokenInfo) -> Self | None:
# Import here to avoid circular dependency
from clint.linter import Position
if match := NOQA_REGEX.match(token.string):
rules = {r.strip() for r in match.group(1).upper().split(",")}
start = Position(token.start[0], token.start[1])
end = Position(token.end[0], token.end[1])
return cls(start=start, end=end, rules=rules)
return None
def iter_comments(code: str) -> Iterator[tokenize.TokenInfo]:
readline = io.StringIO(code).readline
try:
tokens = tokenize.generate_tokens(readline)
for token in tokens:
if token.type == tokenize.COMMENT:
yield token
except tokenize.TokenError:
# Handle incomplete tokens at end of file
pass
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/comments.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/server/graphql/graphql_no_batching.py | from typing import NamedTuple
from graphql.error import GraphQLError
from graphql.execution import ExecutionResult
from graphql.language.ast import DocumentNode, FieldNode
from mlflow.environment_variables import (
MLFLOW_SERVER_GRAPHQL_MAX_ALIASES,
MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS,
)
_MAX_DEPTH = 10
_MAX_SELECTIONS = 1000
class QueryInfo(NamedTuple):
root_fields: int
max_aliases: int
def scan_query(ast_node: DocumentNode) -> QueryInfo:
"""
Scan a GraphQL query and return its information.
"""
root_fields = 0
max_aliases = 0
total_selections = 0
for definition in ast_node.definitions:
if selection_set := getattr(definition, "selection_set", None):
stack = [(selection_set, 1)]
while stack:
selection_set, depth = stack.pop()
# check current level depth
if depth > _MAX_DEPTH:
raise GraphQLError(f"Query exceeds maximum depth of {_MAX_DEPTH}")
selections = getattr(selection_set, "selections", [])
# check current level aliases
current_aliases = 0
for selection in selections:
if isinstance(selection, FieldNode):
if depth == 1:
root_fields += 1
if selection.alias:
current_aliases += 1
if selection.selection_set:
stack.append((selection.selection_set, depth + 1))
total_selections += 1
if total_selections > _MAX_SELECTIONS:
raise GraphQLError(
f"Query exceeds maximum total selections of {_MAX_SELECTIONS}"
)
max_aliases = max(max_aliases, current_aliases)
return QueryInfo(root_fields, max_aliases)
def check_query_safety(ast_node: DocumentNode) -> ExecutionResult | None:
try:
query_info = scan_query(ast_node)
except GraphQLError as e:
return ExecutionResult(
data=None,
errors=[e],
)
if query_info.root_fields > MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS.get():
msg = "root fields"
env_var = MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS
value = query_info.root_fields
elif query_info.max_aliases > MLFLOW_SERVER_GRAPHQL_MAX_ALIASES.get():
msg = "aliases"
env_var = MLFLOW_SERVER_GRAPHQL_MAX_ALIASES
value = query_info.max_aliases
else:
return None
return ExecutionResult(
data=None,
errors=[
GraphQLError(
f"GraphQL queries should have at most {env_var.get()} {msg}, "
f"got {value} {msg}. To increase the limit, set the "
f"{env_var.name} environment variable."
)
],
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/graphql/graphql_no_batching.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/src/clint/resolver.py | import ast
from collections.abc import Iterator
from contextlib import contextmanager
class Resolver:
def __init__(self) -> None:
self.name_map: dict[str, list[str]] = {}
self._scope_stack: list[dict[str, list[str]]] = []
def clear(self) -> None:
"""Clear all name mappings. Useful when starting to process a new file."""
self.name_map.clear()
self._scope_stack.clear()
def enter_scope(self) -> None:
"""Enter a new scope by taking a snapshot of current mappings."""
self._scope_stack.append(self.name_map.copy())
def exit_scope(self) -> None:
"""Exit current scope by restoring the previous snapshot."""
if self._scope_stack:
self.name_map = self._scope_stack.pop()
@contextmanager
def scope(self) -> Iterator[None]:
"""Context manager for automatic scope management."""
self.enter_scope()
try:
yield
finally:
self.exit_scope()
def add_import(self, node: ast.Import) -> None:
for alias in node.names:
if alias.asname:
self.name_map[alias.asname] = alias.name.split(".")
else:
toplevel = alias.name.split(".", 1)[0]
self.name_map[toplevel] = [toplevel]
def add_import_from(self, node: ast.ImportFrom) -> None:
if node.module is None:
return
for alias in node.names:
name = alias.asname or alias.name
module_parts = node.module.split(".")
self.name_map[name] = module_parts + [alias.name]
def resolve(self, node: ast.expr) -> list[str] | None:
"""
Resolve a node to its fully qualified name parts.
Args:
node: AST node to resolve, typically a Call, Name, or Attribute.
Returns:
List of name parts (e.g., ["threading", "Thread"]) or None if unresolvable
"""
if isinstance(node, ast.Call):
parts = self._extract_call_parts(node.func)
elif isinstance(node, ast.Name):
parts = [node.id]
elif isinstance(node, ast.Attribute):
parts = self._extract_call_parts(node)
else:
return None
return self._resolve_parts(parts) if parts else None
def _extract_call_parts(self, node: ast.expr) -> list[str]:
if isinstance(node, ast.Name):
return [node.id]
elif isinstance(node, ast.Attribute) and (
base_parts := self._extract_call_parts(node.value)
):
return base_parts + [node.attr]
return []
def _resolve_parts(self, parts: list[str]) -> list[str] | None:
if not parts:
return None
# Check if the first part is in our name mapping
if resolved_base := self.name_map.get(parts[0]):
return resolved_base + parts[1:]
return None
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/resolver.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/remove_experimental_decorators.py | """
Script to automatically remove @experimental decorators from functions
that have been experimental for more than a configurable cutoff period (default: 6 months).
"""
import argparse
import ast
import json
import subprocess
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from urllib.request import urlopen
@dataclass
class ExperimentalDecorator:
version: str
line_number: int
end_line_number: int
column: int
age_days: int
content: str
def get_tracked_python_files() -> list[Path]:
"""Get all tracked Python files in the repository."""
result = subprocess.check_output(["git", "ls-files", "*.py"], text=True)
return [Path(f) for f in result.strip().split("\n") if f]
def get_mlflow_release_dates() -> dict[str, datetime]:
"""Fetch MLflow release dates from PyPI API."""
with urlopen("https://pypi.org/pypi/mlflow/json") as response:
data = json.loads(response.read().decode())
release_dates: dict[str, datetime] = {}
for version, releases in data["releases"].items():
if releases: # Some versions might have empty release lists
# Get the earliest release date for this version
upload_times: list[str] = [r["upload_time"] for r in releases if "upload_time" in r]
if upload_times:
earliest_time = min(upload_times)
# Parse ISO format datetime and convert to UTC
release_date = datetime.fromisoformat(earliest_time.replace("Z", "+00:00"))
if release_date.tzinfo is None:
release_date = release_date.replace(tzinfo=timezone.utc)
release_dates[version] = release_date
return release_dates
def find_experimental_decorators(
file_path: Path, release_dates: dict[str, datetime], now: datetime
) -> list[ExperimentalDecorator]:
"""
Find all @experimental decorators in a Python file using AST and return their information
with computed age.
"""
content = file_path.read_text()
tree = ast.parse(content)
decorators: list[ExperimentalDecorator] = []
for node in ast.walk(tree):
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
continue
for decorator in node.decorator_list:
if not isinstance(decorator, ast.Call):
continue
if not (isinstance(decorator.func, ast.Name) and decorator.func.id == "experimental"):
continue
version = _extract_version_from_ast_decorator(decorator)
if not version or version not in release_dates:
continue
release_date = release_dates[version]
age_days = (now - release_date).days
decorator_info = ExperimentalDecorator(
version=version,
line_number=decorator.lineno,
end_line_number=decorator.end_lineno or decorator.lineno,
column=decorator.col_offset + 1, # 1-indexed
age_days=age_days,
content=ast.unparse(decorator),
)
decorators.append(decorator_info)
return decorators
def _extract_version_from_ast_decorator(decorator: ast.Call) -> str | None:
"""Extract version string from AST decorator node."""
for keyword in decorator.keywords:
if keyword.arg == "version" and isinstance(keyword.value, ast.Constant):
return str(keyword.value.value)
return None
def remove_decorators_from_file(
file_path: Path,
decorators_to_remove: list[ExperimentalDecorator],
dry_run: bool,
) -> list[ExperimentalDecorator]:
if not decorators_to_remove:
return []
lines = file_path.read_text().splitlines(keepends=True)
# Create a set of line numbers to remove for quick lookup (handle ranges)
lines_to_remove: set[int] = set()
for decorator in decorators_to_remove:
lines_to_remove.update(range(decorator.line_number, decorator.end_line_number + 1))
new_lines: list[str] = []
for line_num, line in enumerate(lines, 1):
if line_num not in lines_to_remove:
new_lines.append(line)
if not dry_run:
file_path.write_text("".join(new_lines))
return decorators_to_remove
def main() -> None:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Remove @experimental decorators older than specified cutoff period"
)
parser.add_argument(
"--dry-run", action="store_true", help="Show what would be removed without making changes"
)
parser.add_argument(
"--cutoff-days",
type=int,
default=180,
help="Number of days after which to remove decorators (default: 180)",
)
parser.add_argument(
"files", nargs="*", help="Python files to process (defaults to all tracked Python files)"
)
args = parser.parse_args()
release_dates = get_mlflow_release_dates()
# Calculate cutoff date using configurable cutoff days
now = datetime.now(timezone.utc)
cutoff_date = now - timedelta(days=args.cutoff_days)
print(f"Cutoff date: {cutoff_date.strftime('%Y-%m-%d %H:%M:%S UTC')}")
python_files = [Path(f) for f in args.files] if args.files else get_tracked_python_files()
for file_path in python_files:
if not file_path.exists():
continue
# First, find all experimental decorators in the file with computed ages
decorators = find_experimental_decorators(file_path, release_dates, now)
if not decorators:
continue
# Filter to only decorators that should be removed (older than cutoff days)
old_decorators = [d for d in decorators if d.age_days > args.cutoff_days]
if not old_decorators:
continue
# Remove old decorators
if removed := remove_decorators_from_file(file_path, old_decorators, args.dry_run):
for decorator in removed:
action = "Would remove" if args.dry_run else "Removed"
print(
f"{file_path}:{decorator.line_number}:{decorator.column}: "
f"{action} {decorator.content} (age: {decorator.age_days} days)"
)
if __name__ == "__main__":
main()
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/remove_experimental_decorators.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/dev/test_remove_experimental_decorators.py | import subprocess
import sys
from pathlib import Path
SCRIPT_PATH = "dev/remove_experimental_decorators.py"
def test_script_with_specific_file(tmp_path: Path) -> None:
test_file = tmp_path / "test.py"
test_file.write_text("""
@experimental(version="1.0.0")
def func():
pass
""")
output = subprocess.check_output(
[sys.executable, SCRIPT_PATH, "--dry-run", test_file], text=True
)
assert "Would remove" in output
assert "experimental(version='1.0.0')" in output
assert (
test_file.read_text()
== """
@experimental(version="1.0.0")
def func():
pass
"""
)
def test_script_without_files() -> None:
subprocess.check_call([sys.executable, SCRIPT_PATH, "--dry-run"])
def test_script_removes_decorator_without_dry_run(tmp_path: Path) -> None:
test_file = tmp_path / "test.py"
test_file.write_text("""
@experimental(version="1.0.0")
def func():
pass
""")
subprocess.check_call([sys.executable, SCRIPT_PATH, test_file])
content = test_file.read_text()
assert (
content
== """
def func():
pass
"""
)
def test_script_with_multiline_decorator(tmp_path: Path) -> None:
test_file = tmp_path / "test.py"
test_file.write_text("""
@experimental(
version="1.0.0",
)
def func():
pass
""")
output = subprocess.check_output([sys.executable, SCRIPT_PATH, test_file], text=True)
assert "Removed" in output
assert (
test_file.read_text()
== """
def func():
pass
"""
)
def test_script_with_multiple_decorators(tmp_path: Path) -> None:
test_file = tmp_path / "test.py"
test_file.write_text("""
@experimental(version="1.0.0")
def func1():
pass
@experimental(version="1.1.0")
class MyClass:
@experimental(version="1.2.0")
def method(self):
pass
def regular_func():
pass
""")
output = subprocess.check_output([sys.executable, SCRIPT_PATH, test_file], text=True)
assert output.count("Removed") == 3 # Should remove all 3 decorators
content = test_file.read_text()
assert (
content
== """
def func1():
pass
class MyClass:
def method(self):
pass
def regular_func():
pass
"""
)
def test_script_with_cutoff_days_argument(tmp_path: Path) -> None:
test_file = tmp_path / "test.py"
test_file.write_text("""
@experimental(version="1.0.0")
def func():
pass
""")
# Test with a very large cutoff (should not remove anything)
output = subprocess.check_output(
[sys.executable, SCRIPT_PATH, "--cutoff-days", "9999", "--dry-run", test_file], text=True
)
assert "Would remove" not in output
# Test with default cutoff (180 days, should remove old decorators)
output = subprocess.check_output(
[sys.executable, SCRIPT_PATH, "--dry-run", test_file], text=True
)
assert "Would remove" in output
# Test with explicit cutoff of 180 days
output = subprocess.check_output(
[sys.executable, SCRIPT_PATH, "--cutoff-days", "180", "--dry-run", test_file], text=True
)
assert "Would remove" in output
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/dev/test_remove_experimental_decorators.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/export/utils.py | """
Utility functions for prompt linking in trace exporters.
"""
import logging
import threading
import uuid
from typing import Sequence
from mlflow.entities.model_registry import PromptVersion
from mlflow.tracing.client import TracingClient
_logger = logging.getLogger(__name__)
def try_link_prompts_to_trace(
client: TracingClient,
trace_id: str,
prompts: Sequence[PromptVersion],
synchronous: bool = True,
) -> None:
"""
Attempt to link prompt versions to a trace with graceful error handling.
This function provides a reusable way to link prompts to traces with consistent
error handling across different exporters. Errors are caught and logged but do
not propagate, ensuring that prompt linking failures don't affect trace export.
Args:
client: The TracingClient instance to use for linking.
trace_id: The ID of the trace to link prompts to.
prompts: Sequence of PromptVersion objects to link.
synchronous: If True, run the linking synchronously. If False, run in a separate thread.
"""
if not prompts:
return
if synchronous:
_link_prompts_sync(client, trace_id, prompts)
else:
threading.Thread(
target=_link_prompts_sync,
args=(client, trace_id, prompts),
name=f"link_prompts_from_exporter-{uuid.uuid4().hex[:8]}",
).start()
def _link_prompts_sync(
client: TracingClient,
trace_id: str,
prompts: Sequence[PromptVersion],
) -> None:
"""
Synchronously link prompt versions to a trace with error handling.
This is the core implementation that handles the actual API call and error logging.
Args:
client: The TracingClient instance to use for linking.
trace_id: The ID of the trace to link prompts to.
prompts: Sequence of PromptVersion objects to link.
"""
try:
client.link_prompt_versions_to_trace(
trace_id=trace_id,
prompts=prompts,
)
_logger.debug(f"Successfully linked {len(prompts)} prompts to trace {trace_id}")
except Exception as e:
_logger.warning(f"Failed to link prompts to trace {trace_id}: {e}")
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/export/utils.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/tracing/test_tracing_client.py | import json
import uuid
from unittest.mock import Mock, patch
import pytest
from opentelemetry import trace as trace_api
from opentelemetry.sdk.trace import ReadableSpan as OTelReadableSpan
import mlflow
from mlflow.entities.span import create_mlflow_span
from mlflow.environment_variables import MLFLOW_TRACING_SQL_WAREHOUSE_ID
from mlflow.exceptions import MlflowException
from mlflow.store.tracking import SEARCH_TRACES_DEFAULT_MAX_RESULTS
from mlflow.tracing.analysis import TraceFilterCorrelationResult
from mlflow.tracing.client import TracingClient
from mlflow.tracing.constant import SpansLocation, TraceMetadataKey, TraceSizeStatsKey, TraceTagKey
from mlflow.tracing.utils import TraceJSONEncoder
from tests.tracing.helper import skip_when_testing_trace_sdk
def test_get_trace_v4():
mock_store = Mock()
mock_store.batch_get_traces.return_value = ["dummy_trace"]
with patch("mlflow.tracing.client._get_store", return_value=mock_store):
client = TracingClient()
trace = client.get_trace("trace:/catalog.schema/1234567890")
assert trace == "dummy_trace"
mock_store.batch_get_traces.assert_called_once_with(
["trace:/catalog.schema/1234567890"], "catalog.schema"
)
def test_get_trace_v4_retry():
mock_store = Mock()
mock_store.batch_get_traces.side_effect = [[], ["dummy_trace"]]
with patch("mlflow.tracing.client._get_store", return_value=mock_store):
client = TracingClient()
trace = client.get_trace("trace:/catalog.schema/1234567890")
assert trace == "dummy_trace"
assert mock_store.batch_get_traces.call_count == 2
@skip_when_testing_trace_sdk
def test_tracing_client_link_prompt_versions_to_trace():
with mlflow.start_run():
# Register a prompt
prompt_version = mlflow.register_prompt(name="test_prompt", template="Hello, {{name}}!")
# Create a trace
with mlflow.start_span("test_span"):
trace_id = mlflow.get_active_trace_id()
# Link prompts to trace
client = TracingClient()
client.link_prompt_versions_to_trace(trace_id, [prompt_version])
# Verify the linked prompts tag was set
trace = mlflow.get_trace(trace_id)
assert "mlflow.linkedPrompts" in trace.info.tags
# Parse and verify the linked prompts
linked_prompts = json.loads(trace.info.tags["mlflow.linkedPrompts"])
assert len(linked_prompts) == 1
assert linked_prompts[0]["name"] == "test_prompt"
assert linked_prompts[0]["version"] == "1"
def test_tracing_client_calculate_trace_filter_correlation():
mock_store = Mock()
expected_result = TraceFilterCorrelationResult(
npmi=0.456,
npmi_smoothed=0.445,
filter1_count=100,
filter2_count=80,
joint_count=50,
total_count=200,
)
mock_store.calculate_trace_filter_correlation.return_value = expected_result
with patch("mlflow.tracing.client._get_store", return_value=mock_store):
client = TracingClient()
result = client.calculate_trace_filter_correlation(
experiment_ids=["123", "456"],
filter_string1="span.type = 'LLM'",
filter_string2="feedback.quality > 0.8",
base_filter="request_time > 1000",
)
mock_store.calculate_trace_filter_correlation.assert_called_once_with(
experiment_ids=["123", "456"],
filter_string1="span.type = 'LLM'",
filter_string2="feedback.quality > 0.8",
base_filter="request_time > 1000",
)
assert result == expected_result
assert result.npmi == 0.456
assert result.npmi_smoothed == 0.445
assert result.filter1_count == 100
assert result.filter2_count == 80
assert result.joint_count == 50
assert result.total_count == 200
def test_tracing_client_calculate_trace_filter_correlation_without_base_filter():
mock_store = Mock()
expected_result = TraceFilterCorrelationResult(
npmi=float("nan"),
npmi_smoothed=None,
filter1_count=0,
filter2_count=0,
joint_count=0,
total_count=100,
)
mock_store.calculate_trace_filter_correlation.return_value = expected_result
with patch("mlflow.tracing.client._get_store", return_value=mock_store):
client = TracingClient()
result = client.calculate_trace_filter_correlation(
experiment_ids=["789"],
filter_string1="error = true",
filter_string2="duration > 5000",
)
mock_store.calculate_trace_filter_correlation.assert_called_once_with(
experiment_ids=["789"],
filter_string1="error = true",
filter_string2="duration > 5000",
base_filter=None,
)
assert result == expected_result
assert result.filter1_count == 0
assert result.filter2_count == 0
assert result.joint_count == 0
assert result.total_count == 100
@pytest.mark.parametrize("sql_warehouse_id", [None, "some-warehouse-id"])
def test_tracing_client_search_traces_with_model_id(monkeypatch, sql_warehouse_id: str | None):
if sql_warehouse_id:
monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, sql_warehouse_id)
else:
monkeypatch.delenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, raising=False)
mock_store = Mock()
mock_store.search_traces.return_value = ([], None)
with patch("mlflow.tracing.client._get_store", return_value=mock_store):
client = TracingClient()
client.search_traces(model_id="model_id")
mock_store.search_traces.assert_called_once_with(
experiment_ids=None,
filter_string="request_metadata.`mlflow.modelId` = 'model_id'"
if sql_warehouse_id is None
else None,
max_results=SEARCH_TRACES_DEFAULT_MAX_RESULTS,
order_by=None,
page_token=None,
model_id="model_id" if sql_warehouse_id else None,
locations=None,
)
@skip_when_testing_trace_sdk
def test_tracing_client_get_trace_with_database_stored_spans():
client = TracingClient()
experiment_id = mlflow.create_experiment("test")
trace_id = f"tr-{uuid.uuid4().hex}"
store = client.store
otel_span = OTelReadableSpan(
name="test_span",
context=trace_api.SpanContext(
trace_id=12345,
span_id=111,
is_remote=False,
trace_flags=trace_api.TraceFlags(1),
),
parent=None,
attributes={
"mlflow.traceRequestId": json.dumps(trace_id, cls=TraceJSONEncoder),
"llm.model_name": "test-model",
"custom.attribute": "test-value",
},
start_time=1_000_000_000,
end_time=2_000_000_000,
resource=None,
)
span = create_mlflow_span(otel_span, trace_id, "LLM")
store.log_spans(experiment_id, [span])
trace = client.get_trace(trace_id)
assert trace.info.trace_id == trace_id
assert trace.info.tags.get(TraceTagKey.SPANS_LOCATION) == SpansLocation.TRACKING_STORE
assert len(trace.data.spans) == 1
loaded_span = trace.data.spans[0]
assert loaded_span.name == "test_span"
assert loaded_span.trace_id == trace_id
assert loaded_span.start_time_ns == 1_000_000_000
assert loaded_span.end_time_ns == 2_000_000_000
assert loaded_span.attributes.get("llm.model_name") == "test-model"
assert loaded_span.attributes.get("custom.attribute") == "test-value"
@skip_when_testing_trace_sdk
def test_tracing_client_get_trace_error_handling():
client = TracingClient()
experiment_id = mlflow.create_experiment("test")
trace_id = f"tr-{uuid.uuid4().hex}"
store = client.store
otel_span = OTelReadableSpan(
name="test_span",
context=trace_api.SpanContext(
trace_id=12345,
span_id=111,
is_remote=False,
trace_flags=trace_api.TraceFlags(1),
),
parent=None,
attributes={
"mlflow.traceRequestId": json.dumps(trace_id, cls=TraceJSONEncoder),
"llm.model_name": "test-model",
"custom.attribute": "test-value",
},
start_time=1_000_000_000,
end_time=2_000_000_000,
resource=None,
)
span = create_mlflow_span(otel_span, trace_id, "LLM")
store.log_spans(experiment_id, [span])
trace = client.get_trace(trace_id)
trace_info = trace.info
trace_info.trace_metadata[TraceMetadataKey.SIZE_STATS] = json.dumps(
{TraceSizeStatsKey.NUM_SPANS: 2}
)
store.start_trace(trace_info)
with pytest.raises(
MlflowException, match=rf"Trace with ID {trace_id} is not fully exported yet"
):
client.get_trace(trace_id)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/test_tracing_client.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/prompts/test_prompts.py | import json
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from unittest import mock
import jinja2
import pytest
from pydantic import BaseModel, ValidationError
import mlflow
from mlflow import MlflowClient
from mlflow.entities.model_registry import PromptModelConfig, PromptVersion
from mlflow.environment_variables import MLFLOW_EXPERIMENT_ID
from mlflow.exceptions import MlflowException
from mlflow.genai.prompts.utils import format_prompt
from mlflow.prompt.constants import PROMPT_EXPERIMENT_IDS_TAG_KEY, PROMPT_TYPE_TEXT
from mlflow.prompt.registry_utils import PromptCache, PromptCacheKey
from mlflow.tracing.constant import SpanAttributeKey, TraceTagKey
from mlflow.tracking import fluent
def join_thread_by_name_prefix(prefix: str):
    """Block until every live thread whose name starts with *prefix* has finished.

    Raises:
        TimeoutError: if a matching thread is still alive after 5 seconds.
    """
    matching = [t for t in threading.enumerate() if t.name.startswith(prefix)]
    for t in matching:
        t.join(timeout=5.0)
        if t.is_alive():
            raise TimeoutError(f"Thread {t.name} did not complete within timeout.")
@pytest.fixture(autouse=True)
def wait_for_linkage_threads_to_complete():
    """After each test, wait for all background prompt-linkage threads to finish."""
    yield
    thread_name_prefixes = (
        "link_prompt_thread",
        "link_prompt_to_experiment_thread",
        "link_prompts_from_exporter",
    )
    for thread_name_prefix in thread_name_prefixes:
        join_thread_by_name_prefix(thread_name_prefix)
def test_prompt_api_migration_warning():
    """Every legacy top-level prompt API emits a FutureWarning (migrate to mlflow.genai)."""
    deprecated_api_calls = [
        (
            "The `mlflow.register_prompt` API is",
            lambda: mlflow.register_prompt("test_prompt", "test_template"),
        ),
        ("The `mlflow.search_prompts` API is", lambda: mlflow.search_prompts()),
        (
            "The `mlflow.load_prompt` API is",
            lambda: mlflow.load_prompt("prompts:/test_prompt/1"),
        ),
        (
            "The `mlflow.set_prompt_alias` API is",
            lambda: mlflow.set_prompt_alias("test_prompt", "test_alias", 1),
        ),
        (
            "The `mlflow.delete_prompt_alias` API is",
            lambda: mlflow.delete_prompt_alias("test_prompt", "test_alias"),
        ),
    ]
    for warning_match, invoke in deprecated_api_calls:
        with pytest.warns(FutureWarning, match=warning_match):
            invoke()
def test_crud_prompts(tmp_path):
    """End-to-end CRUD: register two versions, load by version/URI/latest, check tags."""
    mlflow.genai.register_prompt(
        name="prompt_1",
        template="Hi, {title} {name}! How are you today?",
        commit_message="A friendly greeting",
        tags={"model": "my-model"},
    )
    # Wait for background prompt linking thread to complete
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    prompt = mlflow.genai.load_prompt("prompt_1", version=1)
    assert prompt.name == "prompt_1"
    assert prompt.template == "Hi, {title} {name}! How are you today?"
    assert prompt.commit_message == "A friendly greeting"
    # Currently, the tags from register_prompt become version tags
    assert prompt.tags == {"model": "my-model"}
    # Wait for background prompt linking thread from load_prompt
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    # Check prompt-level tags separately (if needed for test completeness)
    from mlflow import MlflowClient

    client = MlflowClient()
    prompt_entity = client.get_prompt("prompt_1")
    # The linking thread records experiment "0" in the internal experiment-ids tag.
    assert prompt_entity.tags == {"model": "my-model", "_mlflow_experiment_ids": ",0,"}
    mlflow.genai.register_prompt(
        name="prompt_1",
        template="Hi, {title} {name}! What's up?",
        commit_message="New greeting",
    )
    prompt = mlflow.genai.load_prompt("prompt_1", version=2)
    assert prompt.template == "Hi, {title} {name}! What's up?"
    prompt = mlflow.genai.load_prompt("prompt_1", version=1)
    assert prompt.template == "Hi, {title} {name}! How are you today?"
    # Loading by "prompts:/" URI resolves the same versions.
    prompt = mlflow.genai.load_prompt("prompts:/prompt_1/2")
    assert prompt.template == "Hi, {title} {name}! What's up?"
    # No version = latest
    prompt = mlflow.genai.load_prompt("prompt_1")
    assert prompt.template == "Hi, {title} {name}! What's up?"
    # Test load_prompt with allow_missing for non-existent prompts
    assert mlflow.genai.load_prompt("does_not_exist", version=1, allow_missing=True) is None
def test_prompt_alias(tmp_path):
    """Aliases can be set, reassigned, deleted, and resolved via prompts:/name@alias URIs."""
    # Reset cache to ensure clean state
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="p1", template="Hi, there!")
    mlflow.genai.register_prompt(name="p1", template="Hi, {{name}}!")
    mlflow.genai.set_prompt_alias("p1", alias="production", version=1)
    prompt = mlflow.genai.load_prompt("prompts:/p1@production")
    assert prompt.template == "Hi, there!"
    assert prompt.aliases == ["production"]
    # Reassign alias to a different version
    # Need to bypass cache to see the updated alias
    mlflow.genai.set_prompt_alias("p1", alias="production", version=2)
    assert (
        mlflow.genai.load_prompt("prompts:/p1@production", cache_ttl_seconds=0).template
        == "Hi, {{name}}!"
    )
    mlflow.genai.delete_prompt_alias("p1", alias="production")
    # After deletion the alias no longer resolves; error message differs by backend.
    with pytest.raises(
        MlflowException,
        match=(r"Prompt (.*) does not exist.|Prompt alias (.*) not found."),
    ):
        mlflow.genai.load_prompt("prompts:/p1@production", cache_ttl_seconds=0)
    # Latest alias - bypass cache
    assert (
        mlflow.genai.load_prompt("prompts:/p1@latest", cache_ttl_seconds=0).template
        == "Hi, {{name}}!"
    )
def test_prompt_associate_with_run(tmp_path):
    """Loading a prompt inside an active run links it via the linkedPrompts run tag.

    Covers both loading on the main thread and loading from worker threads, which
    should inherit the active run. Fix: the original parsed the tag JSON four times
    and repeated the identical assertions twice; the checks are deduplicated into
    one helper.
    """
    mlflow.genai.register_prompt(name="prompt_1", template="Hi, {title} {name}! How are you today?")
    client = MlflowClient()

    def assert_prompt_linked(run_id):
        # The linkedPrompts tag must contain exactly one entry for prompt_1 v1.
        run_data = client.get_run(run_id)
        linked_prompts_tag = run_data.data.tags.get(TraceTagKey.LINKED_PROMPTS)
        assert linked_prompts_tag is not None
        assert json.loads(linked_prompts_tag) == [{"name": "prompt_1", "version": "1"}]

    # mlflow.genai.load_prompt() call during the run should associate the prompt with the run
    with mlflow.start_run() as run:
        mlflow.genai.load_prompt("prompt_1", version=1)
    assert_prompt_linked(run.info.run_id)

    with mlflow.start_run() as run:
        run_id_2 = run.info.run_id

        # Prompt should be linked to the run even if it is loaded in a child thread
        def task():
            mlflow.genai.load_prompt("prompt_1", version=1)

        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = [executor.submit(task) for _ in range(10)]
            for future in futures:
                future.result()
    assert_prompt_linked(run_id_2)
def test_register_chat_prompt_with_messages():
    """register_prompt accepts a list-of-messages template and marks it as a chat prompt."""
    chat_template = [
        {"role": "system", "content": "You are a {{style}} assistant."},
        {"role": "user", "content": "{{question}}"},
    ]
    prompt = mlflow.genai.register_prompt(
        name="test_chat", template=chat_template, commit_message="Test chat prompt"
    )
    # Fix: this line was previously the bare expression `not prompt.is_text_prompt`,
    # a no-op that asserted nothing.
    assert not prompt.is_text_prompt
    assert prompt.template == chat_template
    assert prompt.commit_message == "Test chat prompt"
def test_register_prompt_with_pydantic_response_format():
    """A pydantic model passed as response_format is stored as its JSON schema."""

    class ResponseSchema(BaseModel):
        answer: str
        confidence: float

    registered = mlflow.genai.register_prompt(
        name="test_response",
        template="What is {{question}}?",
        response_format=ResponseSchema,
    )
    # The stored format is the model's generated JSON schema, not the class itself.
    assert registered.response_format == ResponseSchema.model_json_schema()
def test_register_prompt_with_dict_response_format():
    """A plain JSON-schema dict passed as response_format is stored verbatim."""
    schema = {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
            "confidence": {"type": "number"},
        },
    }
    registered = mlflow.genai.register_prompt(
        name="test_dict_response",
        template="What is {{question}}?",
        response_format=schema,
    )
    assert registered.response_format == schema
def test_register_prompt_error_handling_invalid_chat_format():
    """A chat message missing its role key is rejected with a ValueError."""
    template_missing_role = [{"content": "Hello"}]
    with pytest.raises(ValueError, match="Template must be a list of dicts with role and content"):
        mlflow.genai.register_prompt(name="test_invalid", template=template_missing_role)
def test_register_and_load_chat_prompt_integration():
    """Round-trip: a chat prompt can be registered, loaded back, and formatted."""
    messages = [
        {"role": "system", "content": "You are a {{style}} assistant."},
        {"role": "user", "content": "{{question}}"},
    ]
    mlflow.genai.register_prompt(name="test_integration", template=messages)
    loaded = mlflow.genai.load_prompt("test_integration", version=1)
    assert not loaded.is_text_prompt
    assert loaded.template == messages
    # Formatting substitutes variables in every message of the template.
    assert loaded.format(style="helpful", question="How are you?") == [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "How are you?"},
    ]
def test_register_and_load_jinja2_prompt():
    """Text prompts support jinja2 conditionals on load and format."""
    source = "Hello {% if name %}{{ name }}{% else %}Guest{% endif %}"
    mlflow.genai.register_prompt(name="jinja-basic", template=source)
    loaded = mlflow.genai.load_prompt("jinja-basic", version=1)
    assert loaded.template == source
    assert loaded._prompt_type == PROMPT_TYPE_TEXT
    # Both branches of the conditional render correctly.
    assert loaded.format(name="Alice") == "Hello Alice"
    assert loaded.format() == "Hello Guest"
def test_register_and_load_jinja2_prompt_without_sandbox():
    """Sandboxed rendering blocks dunder access; use_jinja_sandbox=False permits it."""
    # Accessing private attributes to trigger unsafe operation
    unsafe_source = "{% if ''.__class__.__name__ == 'str' %}Yes{% else %}No{% endif %}"
    mlflow.genai.register_prompt(name="jinja-nosandbox", template=unsafe_source)
    loaded = mlflow.genai.load_prompt("jinja-nosandbox", version=1)
    # Unsafe operation should be banned by default
    with pytest.raises(jinja2.exceptions.SecurityError, match="access to attribute '__class__'"):
        loaded.format()
    # Render without sandbox
    assert loaded.format(use_jinja_sandbox=False) == "Yes"
def test_register_and_load_jinja2_chat_prompt():
    """Chat prompts support jinja2 conditionals inside message content."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": "{% if formal %}Dear Sir{% else %}Hey{% endif %}, {{question}}",
        },
    ]
    mlflow.genai.register_prompt(name="jinja-chat", template=messages)
    loaded = mlflow.genai.load_prompt("jinja-chat", version=1)
    assert loaded.template == messages
    assert not loaded.is_text_prompt
    # Each branch of the conditional produces the expected greeting.
    assert loaded.format(formal=True, question="How are you?") == [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Dear Sir, How are you?"},
    ]
    assert loaded.format(formal=False, question="What's up?") == [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hey, What's up?"},
    ]
def test_jinja2_chat_prompt_with_loops():
    """Chat prompts support jinja2 for-loops inside message content."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": (
                "My friends are: {% for friend in friends %}{{ friend }}{% if not loop.last %}, "
                "{% endif %}{% endfor %}."
            ),
        },
    ]
    mlflow.genai.register_prompt(name="jinja-chat-loop", template=messages)
    loaded = mlflow.genai.load_prompt("jinja-chat-loop", version=1)
    rendered = loaded.format(friends=["Alice", "Bob", "Charlie"])
    # The loop joins list items with ", " and skips the separator after the last.
    assert rendered == [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "My friends are: Alice, Bob, Charlie."},
    ]
def test_register_text_prompt_backward_compatibility():
    """Plain string templates still register as text prompts."""
    registered = mlflow.genai.register_prompt(
        name="test_text_backward",
        template="Hello {{name}}!",
        commit_message="Test backward compatibility",
    )
    assert registered.is_text_prompt
    assert registered.template == "Hello {{name}}!"
    assert registered.commit_message == "Test backward compatibility"
def test_register_prompt_with_tags():
    """Tags supplied at registration are attached to the prompt version."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "{{question}}"},
    ]
    registered = mlflow.genai.register_prompt(
        name="test_with_tags",
        template=messages,
        tags={"author": "test_user", "model": "gpt-5"},
    )
    assert registered.tags["author"] == "test_user"
    assert registered.tags["model"] == "gpt-5"
def test_register_prompt_with_complex_response_format():
    """A nested pydantic response model is stored as its full JSON schema."""

    class ComplexResponse(BaseModel):
        summary: str
        key_points: list[str]
        confidence: float
        metadata: dict[str, str] = {}

    messages = [
        {"role": "system", "content": "You are a data analyst."},
        {"role": "user", "content": "Analyze this data: {{data}}"},
    ]
    registered = mlflow.genai.register_prompt(
        name="test_complex_response",
        template=messages,
        response_format=ComplexResponse,
    )
    assert registered.response_format == ComplexResponse.model_json_schema()
    # Every declared model field must appear in the stored schema's properties.
    assert "properties" in registered.response_format
    for field_name in ("summary", "key_points", "confidence", "metadata"):
        assert field_name in registered.response_format["properties"]
def test_register_prompt_with_none_response_format():
    """Passing response_format=None leaves it unset on the registered prompt."""
    registered = mlflow.genai.register_prompt(
        name="test_none_response", template="Hello {{name}}!", response_format=None
    )
    assert registered.response_format is None
def test_register_prompt_with_single_message_chat():
    """A one-message list is still a chat template and its variables are extracted."""
    messages = [{"role": "user", "content": "Hello {{name}}!"}]
    registered = mlflow.genai.register_prompt(name="test_single_message", template=messages)
    assert registered.template == messages
    assert registered.variables == {"name"}
def test_register_prompt_with_multiple_variables_in_chat():
    """Variables are collected across all messages of a chat template."""
    messages = [
        {
            "role": "system",
            "content": "You are a {{style}} assistant named {{name}}.",
        },
        {"role": "user", "content": "{{greeting}}! {{question}}"},
        {
            "role": "assistant",
            "content": "I understand you're asking about {{topic}}.",
        },
    ]
    registered = mlflow.genai.register_prompt(name="test_multiple_variables", template=messages)
    assert registered.variables == {"style", "name", "greeting", "question", "topic"}
def test_register_prompt_with_mixed_content_types():
    """Messages without variables coexist with templated ones in one chat prompt."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello {{name}}!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
    ]
    registered = mlflow.genai.register_prompt(name="test_mixed_content", template=messages)
    assert registered.template == messages
    # Only the single templated message contributes a variable.
    assert registered.variables == {"name"}
def test_register_prompt_with_nested_variables():
    """Dotted variable names are captured verbatim, not split into path parts."""
    messages = [
        {
            "role": "system",
            "content": "You are a {{user.preferences.style}} assistant.",
        },
        {
            "role": "user",
            "content": "Hello {{user.name}}! {{user.preferences.greeting}}",
        },
    ]
    registered = mlflow.genai.register_prompt(name="test_nested_variables", template=messages)
    assert registered.variables == {
        "user.preferences.style",
        "user.name",
        "user.preferences.greeting",
    }
def test_set_and_delete_prompt_tag_genai():
    """Prompt-level and version-level tags can be set and deleted independently."""
    mlflow.genai.register_prompt(name="tag_prompt", template="Hi")
    # Wait for background prompt linking thread to complete
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    mlflow.genai.set_prompt_tag("tag_prompt", "env", "prod")
    mlflow.genai.set_prompt_version_tag("tag_prompt", 1, "env", "prod")
    # Prompt-level tags include the internal experiment-ids tag set by the linker.
    assert mlflow.genai.get_prompt_tags("tag_prompt") == {
        "env": "prod",
        "_mlflow_experiment_ids": ",0,",
    }
    assert mlflow.genai.load_prompt("tag_prompt", version=1).tags == {"env": "prod"}
    mlflow.genai.delete_prompt_tag("tag_prompt", "env")
    # After deleting 'env' tag, only the experiment IDs tag should remain
    assert mlflow.genai.get_prompt_tags("tag_prompt") == {"_mlflow_experiment_ids": ",0,"}
    mlflow.genai.delete_prompt_version_tag("tag_prompt", 1, "env")
    assert "env" not in mlflow.genai.load_prompt("tag_prompt", version=1).tags
@pytest.mark.parametrize(
    ("prompt_template", "values", "expected"),
    [
        # Test with Unicode escape-like sequences
        (
            "User input: {{ user_text }}",
            {"user_text": r"Path is C:\users\john"},
            r"User input: Path is C:\users\john",
        ),
        # Test with newlines and tabs
        (
            "Data: {{ data }}",
            {"data": "Line1\\nLine2\\tTabbed"},
            "Data: Line1\\nLine2\\tTabbed",
        ),
        # Test with multiple variables
        (
            "Path: {{ path }}, Command: {{ cmd }}",
            {"path": r"C:\temp", "cmd": r"echo \u0041"},
            r"Path: C:\temp, Command: echo \u0041",
        ),
    ],
)
def test_format_prompt_with_backslashes(
    prompt_template: str, values: dict[str, str], expected: str
):
    """Backslashes in substituted values pass through format_prompt unchanged."""
    result = format_prompt(prompt_template, **values)
    assert result == expected
def test_load_prompt_with_link_to_model_disabled():
    """link_to_model=False suppresses linking even when an active model is set."""
    # Register a prompt
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    # Create a logged model and set it as active
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            python_model=lambda x: x,
            name="model",
            pip_requirements=["mlflow"],
        )
        mlflow.set_active_model(model_id=model_info.model_id)
        # Load prompt with link_to_model=False - should not link despite active model
        prompt = mlflow.genai.load_prompt("test_prompt", version=1, link_to_model=False)
        # Verify prompt was loaded correctly
        assert prompt.name == "test_prompt"
        assert prompt.version == 1
        assert prompt.template == "Hello, {{name}}!"
    # Join any potential background linking thread (it shouldn't run)
    join_thread_by_name_prefix("link_prompt_thread")
    # Verify the model does NOT have any linked prompts tag
    client = mlflow.MlflowClient()
    model = client.get_logged_model(model_info.model_id)
    linked_prompts_tag = model.tags.get("mlflow.linkedPrompts")
    assert linked_prompts_tag is None, (
        "Model should not have linkedPrompts tag when link_to_model=False"
    )
def test_load_prompt_with_explicit_model_id():
    """Passing model_id explicitly links the loaded prompt to that model in the background."""
    # Register a prompt
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    # Create a logged model to link to
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            python_model=lambda x: x,
            name="model",
            pip_requirements=["mlflow"],
        )
    # Load prompt with explicit model_id - should link successfully
    prompt = mlflow.genai.load_prompt(
        "test_prompt", version=1, link_to_model=True, model_id=model_info.model_id
    )
    # Verify prompt was loaded correctly
    assert prompt.name == "test_prompt"
    assert prompt.version == 1
    assert prompt.template == "Hello, {{name}}!"
    # Join background linking thread to wait for completion
    join_thread_by_name_prefix("link_prompt_thread")
    # Verify the model has the linked prompt in its tags
    client = mlflow.MlflowClient()
    model = client.get_logged_model(model_info.model_id)
    linked_prompts_tag = model.tags.get("mlflow.linkedPrompts")
    assert linked_prompts_tag is not None
    # Parse the JSON tag value
    linked_prompts = json.loads(linked_prompts_tag)
    assert len(linked_prompts) == 1
    assert linked_prompts[0]["name"] == "test_prompt"
    assert linked_prompts[0]["version"] == "1"
def test_load_prompt_with_active_model_integration():
    """With an active model set, link_to_model=True links the prompt to that model."""
    # Register a prompt
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    # Test loading prompt with active model context
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            python_model=lambda x: x,
            name="model",
            pip_requirements=["mlflow"],
        )
        mlflow.set_active_model(model_id=model_info.model_id)
        # Load prompt with link_to_model=True - should use active model
        prompt = mlflow.genai.load_prompt("test_prompt", version=1, link_to_model=True)
        # Verify prompt was loaded correctly
        assert prompt.name == "test_prompt"
        assert prompt.version == 1
        assert prompt.template == "Hello, {{name}}!"
    # Join background linking thread to wait for completion
    join_thread_by_name_prefix("link_prompt_thread")
    # Verify the model has the linked prompt in its tags
    client = mlflow.MlflowClient()
    model = client.get_logged_model(model_info.model_id)
    linked_prompts_tag = model.tags.get("mlflow.linkedPrompts")
    assert linked_prompts_tag is not None
    # Parse the JSON tag value
    linked_prompts = json.loads(linked_prompts_tag)
    assert len(linked_prompts) == 1
    assert linked_prompts[0]["name"] == "test_prompt"
    assert linked_prompts[0]["version"] == "1"
def test_load_prompt_with_no_active_model():
    """link_to_model=True is a silent no-op when there is no active model."""
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    # Simulate the absence of an active model; linking should simply be skipped.
    patch_target = "mlflow.tracking._model_registry.fluent.get_active_model_id"
    with mock.patch(patch_target, return_value=None):
        loaded = mlflow.genai.load_prompt("test_prompt", version=1, link_to_model=True)
        # The prompt itself still loads normally.
        assert loaded.name == "test_prompt"
        assert loaded.version == 1
        assert loaded.template == "Hello, {{name}}!"
def test_load_prompt_linking_error_handling():
    """A linking failure (bad model id) must not break prompt loading."""
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    patch_target = "mlflow.tracking._model_registry.fluent.get_active_model_id"
    # Linking happens in the background, so a bogus model id cannot surface
    # as an error from load_prompt itself.
    with mock.patch(patch_target, return_value="invalid_model_id"):
        loaded = mlflow.genai.load_prompt("test_prompt", version=1, link_to_model=True)
        assert loaded.name == "test_prompt"
        assert loaded.version == 1
        assert loaded.template == "Hello, {{name}}!"
def test_load_prompt_explicit_model_id_overrides_active_model():
    """An explicit model_id takes precedence over the active model for linking."""
    # Register a prompt
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    # Create models to test override behavior
    with mlflow.start_run():
        active_model = mlflow.pyfunc.log_model(
            python_model=lambda x: x,
            name="active_model",
            pip_requirements=["mlflow"],
        )
        explicit_model = mlflow.pyfunc.log_model(
            python_model=lambda x: x,
            name="explicit_model",
            pip_requirements=["mlflow"],
        )
        # Set active model context but provide explicit model_id - explicit should win
        mlflow.set_active_model(model_id=active_model.model_id)
        prompt = mlflow.genai.load_prompt(
            "test_prompt", version=1, link_to_model=True, model_id=explicit_model.model_id
        )
        # Verify prompt was loaded correctly (explicit model_id should be used)
        assert prompt.name == "test_prompt"
        assert prompt.version == 1
        assert prompt.template == "Hello, {{name}}!"
    # Join background linking thread to wait for completion
    join_thread_by_name_prefix("link_prompt_thread")
    # Verify the EXPLICIT model (not active model) has the linked prompt in its tags
    client = mlflow.MlflowClient()
    explicit_model_data = client.get_logged_model(explicit_model.model_id)
    linked_prompts_tag = explicit_model_data.tags.get("mlflow.linkedPrompts")
    assert linked_prompts_tag is not None
    # Parse the JSON tag value
    linked_prompts = json.loads(linked_prompts_tag)
    assert len(linked_prompts) == 1
    assert linked_prompts[0]["name"] == "test_prompt"
    assert linked_prompts[0]["version"] == "1"
    # Verify the active model does NOT have the linked prompt
    active_model_data = client.get_logged_model(active_model.model_id)
    active_linked_prompts_tag = active_model_data.tags.get("mlflow.linkedPrompts")
    assert active_linked_prompts_tag is None
def test_load_prompt_with_tracing_single_prompt():
    """Linking one prompt version to a trace creates one EntityAssociation row."""
    # Register a prompt
    mlflow.genai.register_prompt(name="test_prompt", template="Hello, {{name}}!")
    # Start tracing and load prompt
    with mlflow.start_span("test_operation") as span:
        prompt = mlflow.genai.load_prompt("test_prompt", version=1)
        # Verify prompt was loaded correctly
        assert prompt.name == "test_prompt"
        assert prompt.version == 1
        assert prompt.template == "Hello, {{name}}!"
    # Manually trigger prompt linking to trace since in test environment
    # the trace export may not happen automatically
    client = mlflow.MlflowClient()
    prompt_version = PromptVersion(
        name="test_prompt",
        version=1,
        template="Hello, {{name}}!",
        commit_message=None,
        creation_timestamp=None,
    )
    client.link_prompt_versions_to_trace(trace_id=span.trace_id, prompt_versions=[prompt_version])
    # Verify the prompt was linked to the trace by checking EntityAssociation
    trace = mlflow.get_trace(span.trace_id)
    assert trace is not None
    # Query EntityAssociation to verify the linkage
    from mlflow.tracking import _get_store

    store = _get_store()
    # NOTE(review): assumes the tracking store is SQL-backed (ManagedSessionMaker) — confirm.
    with store.ManagedSessionMaker() as session:
        from mlflow.entities.entity_type import EntityAssociationType
        from mlflow.store.tracking.dbmodels.models import SqlEntityAssociation

        associations = (
            session.query(SqlEntityAssociation)
            .filter(
                SqlEntityAssociation.source_type == EntityAssociationType.TRACE,
                SqlEntityAssociation.source_id == span.trace_id,
                SqlEntityAssociation.destination_type == EntityAssociationType.PROMPT_VERSION,
            )
            .all()
        )
        assert len(associations) == 1
        # destination_id encodes "name/version".
        assert associations[0].destination_id == "test_prompt/1"
def test_load_prompt_with_tracing_multiple_prompts():
    """Multiple versions of the same prompt can all be linked to one trace."""
    # Register one prompt with multiple versions
    mlflow.genai.register_prompt(name="my_prompt", template="Hello, {{name}}!")
    mlflow.genai.register_prompt(name="my_prompt", template="Hi there, {{name}}! How are you?")
    # Start tracing and load multiple versions of the same prompt
    with mlflow.start_span("multi_version_prompt_operation") as span:
        prompt_v1 = mlflow.genai.load_prompt("my_prompt", version=1)
        prompt_v2 = mlflow.genai.load_prompt("my_prompt", version=2)
        # Verify prompts were loaded correctly
        assert prompt_v1.name == "my_prompt"
        assert prompt_v1.version == 1
        assert prompt_v1.template == "Hello, {{name}}!"
        assert prompt_v2.name == "my_prompt"
        assert prompt_v2.version == 2
        assert prompt_v2.template == "Hi there, {{name}}! How are you?"
    # Manually trigger prompt linking to trace since in test environment
    # the trace export may not happen automatically
    client = mlflow.MlflowClient()
    prompt_versions = [
        PromptVersion(
            name="my_prompt",
            version=1,
            template="Hello, {{name}}!",
            commit_message=None,
            creation_timestamp=None,
        ),
        PromptVersion(
            name="my_prompt",
            version=2,
            template="Hi there, {{name}}! How are you?",
            commit_message=None,
            creation_timestamp=None,
        ),
    ]
    client.link_prompt_versions_to_trace(trace_id=span.trace_id, prompt_versions=prompt_versions)
    # Verify both versions were linked to the same trace by checking EntityAssociation
    trace = mlflow.get_trace(span.trace_id)
    assert trace is not None
    # Query EntityAssociation to verify the linkages
    from mlflow.tracking import _get_store

    store = _get_store()
    # NOTE(review): assumes a SQL-backed tracking store (ManagedSessionMaker) — confirm.
    with store.ManagedSessionMaker() as session:
        from mlflow.entities.entity_type import EntityAssociationType
        from mlflow.store.tracking.dbmodels.models import SqlEntityAssociation

        associations = (
            session.query(SqlEntityAssociation)
            .filter(
                SqlEntityAssociation.source_type == EntityAssociationType.TRACE,
                SqlEntityAssociation.source_id == span.trace_id,
                SqlEntityAssociation.destination_type == EntityAssociationType.PROMPT_VERSION,
            )
            .all()
        )
        assert len(associations) == 2
        # Check that both versions of the same prompt are present
        prompt_ids = {assoc.destination_id for assoc in associations}
        expected_ids = {"my_prompt/1", "my_prompt/2"}
        assert prompt_ids == expected_ids
def test_load_prompt_with_tracing_no_active_trace():
    """Loading a prompt outside any span succeeds without linking errors."""
    mlflow.genai.register_prompt(name="no_trace_prompt", template="Hello, {{name}}!")
    loaded = mlflow.genai.load_prompt("no_trace_prompt", version=1)
    assert loaded.name == "no_trace_prompt"
    assert loaded.version == 1
    assert loaded.template == "Hello, {{name}}!"
    # With no active trace there is nothing to link to; completing without
    # raising is the behavior under test — the internal trace manager state
    # is not directly observable here.
def test_load_prompt_with_tracing_nested_spans():
    """Prompts loaded in nested spans both link to the single shared trace."""
    # Register prompts
    mlflow.genai.register_prompt(name="outer_prompt", template="Outer: {{msg}}")
    mlflow.genai.register_prompt(name="inner_prompt", template="Inner: {{msg}}")
    # Start nested spans (same trace, different spans)
    with mlflow.start_span("outer_operation") as outer_span:
        mlflow.genai.load_prompt("outer_prompt", version=1)
        with mlflow.start_span("inner_operation") as inner_span:
            # Verify both spans belong to the same trace
            assert inner_span.trace_id == outer_span.trace_id
            mlflow.genai.load_prompt("inner_prompt", version=1)
    # Manually trigger prompt linking to trace since in test environment
    # the trace export may not happen automatically
    client = mlflow.MlflowClient()
    prompt_versions = [
        PromptVersion(
            name="outer_prompt",
            version=1,
            template="Outer: {{msg}}",
            commit_message=None,
            creation_timestamp=None,
        ),
        PromptVersion(
            name="inner_prompt",
            version=1,
            template="Inner: {{msg}}",
            commit_message=None,
            creation_timestamp=None,
        ),
    ]
    client.link_prompt_versions_to_trace(
        trace_id=outer_span.trace_id, prompt_versions=prompt_versions
    )
    # Check trace now has both prompts (same trace, different spans)
    trace = mlflow.get_trace(outer_span.trace_id)
    assert trace is not None
    # Query EntityAssociation to verify both prompts are linked
    from mlflow.tracking import _get_store

    store = _get_store()
    # NOTE(review): assumes a SQL-backed tracking store (ManagedSessionMaker) — confirm.
    with store.ManagedSessionMaker() as session:
        from mlflow.entities.entity_type import EntityAssociationType
        from mlflow.store.tracking.dbmodels.models import SqlEntityAssociation

        associations = (
            session.query(SqlEntityAssociation)
            .filter(
                SqlEntityAssociation.source_type == EntityAssociationType.TRACE,
                SqlEntityAssociation.source_id == outer_span.trace_id,
                SqlEntityAssociation.destination_type == EntityAssociationType.PROMPT_VERSION,
            )
            .all()
        )
        assert len(associations) == 2
        # Check that both prompts are present (order may vary)
        prompt_ids = {assoc.destination_id for assoc in associations}
        expected_ids = {"outer_prompt/1", "inner_prompt/1"}
        assert prompt_ids == expected_ids
def test_load_prompt_caching_works():
    """Repeated loads of the same name/version are served from the in-process cache."""
    # Mock the client load_prompt method to count calls
    # NOTE(review): the load path below is verified via the ModelRegistryClient
    # patches; this outer patch appears not to be consulted — confirm it is
    # still needed.
    with mock.patch("mlflow.MlflowClient.load_prompt") as mock_client_load:
        # Configure mock to return a prompt
        mock_prompt = PromptVersion(
            name="cached_prompt",
            version=1,
            template="Hello, {{name}}!",
            creation_timestamp=123456789,
        )
        mock_client_load.return_value = mock_prompt
        # Reset cache
        PromptCache._reset_instance()
        # Register prompts
        mlflow.genai.register_prompt(name="cached_prompt", template="Hello, {{name}}!")
        mlflow.genai.register_prompt(name="cached_prompt", template="Hi, {{name}}!")
        # First call should hit the registry
        prompt1 = mlflow.genai.load_prompt("cached_prompt", version=1, link_to_model=False)
        assert prompt1.name == "cached_prompt"
        # Second call with same parameters should use cache (not call registry again)
        with mock.patch(
            "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
        ) as mock_load:
            prompt2 = mlflow.genai.load_prompt("cached_prompt", version=1, link_to_model=False)
            assert prompt2.name == "cached_prompt"
            assert mock_load.call_count == 0  # Cache hit
        # Call with different version should hit the registry again
        prompt3 = mlflow.genai.load_prompt("cached_prompt", version=2, link_to_model=False)
        assert prompt3.version == 2
        # But subsequent calls to version 2 should use cache
        with mock.patch(
            "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
        ) as mock_load:
            prompt4 = mlflow.genai.load_prompt("cached_prompt", version=2, link_to_model=False)
            assert prompt4.version == 2
            assert mock_load.call_count == 0  # Cache hit
def test_load_prompt_caching_respects_ttl_env_var():
    """Cached prompt versions expire after cache_ttl_seconds and are re-fetched."""
    # Reset the process-wide cache singleton so earlier tests cannot interfere.
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="ttl_test_prompt", template="Hello!")
    # Populate the cache with a very short TTL.
    mlflow.genai.load_prompt(
        "ttl_test_prompt", version=1, cache_ttl_seconds=0.2, link_to_model=False
    )
    # Immediate second load should hit cache
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        mlflow.genai.load_prompt(
            "ttl_test_prompt", version=1, cache_ttl_seconds=0.2, link_to_model=False
        )
        assert mock_load.call_count == 0  # Cache hit
    # Fix: sleep strictly longer than the 0.2s TTL. Sleeping exactly the TTL
    # made expiry a race at the boundary and the test flaky.
    time.sleep(0.3)
    # Load after expiration should miss cache
    prompt = mlflow.genai.load_prompt(
        "ttl_test_prompt", version=1, cache_ttl_seconds=1, link_to_model=False
    )
    assert prompt is not None
    assert prompt.template == "Hello!"
def test_load_prompt_skip_cache_for_allow_missing_none():
    """None results under allow_missing are not cached; real results are."""
    # Mock the client load_prompt method to return None (prompt not found)
    # NOTE(review): the load path exercised below is verified via the
    # ModelRegistryClient patch; this outer patch appears not to be consulted
    # (prompt2 is found despite it) — confirm it is still needed.
    with mock.patch("mlflow.MlflowClient.load_prompt") as mock_client_load:
        mock_client_load.return_value = None  # Simulate prompt not found
        # Reset cache
        PromptCache._reset_instance()
        # First call with allow_missing=True for non-existent prompt
        prompt1 = mlflow.genai.load_prompt(
            "nonexistent_prompt", version=1, allow_missing=True, link_to_model=False
        )
        assert prompt1 is None
        # Now create the prompt
        mlflow.genai.register_prompt(name="nonexistent_prompt", template="Now I exist!")
        # Should find it now (None results are not cached)
        prompt2 = mlflow.genai.load_prompt(
            "nonexistent_prompt", version=1, allow_missing=True, link_to_model=False
        )
        assert prompt2 is not None
        assert prompt2.template == "Now I exist!"
        # Subsequent calls should use cache
        with mock.patch(
            "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
        ) as mock_load:
            prompt3 = mlflow.genai.load_prompt(
                "nonexistent_prompt", version=1, allow_missing=True, link_to_model=False
            )
            assert prompt3.template == "Now I exist!"
            assert mock_load.call_count == 0  # Cache hit
def test_load_prompt_missing_then_created_then_found():
    """A None result is not cached, so a later load sees the new prompt."""
    # Loading before creation returns None when allow_missing is set.
    missing = mlflow.genai.load_prompt(
        "will_be_created", version=1, allow_missing=True, link_to_model=False
    )
    assert missing is None
    # Create the prompt.
    registered = mlflow.genai.register_prompt(name="will_be_created", template="Now I exist!")
    assert registered.name == "will_be_created"
    assert registered.version == 1
    # The None result was not cached, so this load fetches the real prompt.
    found = mlflow.genai.load_prompt(
        "will_be_created", version=1, allow_missing=True, link_to_model=False
    )
    assert found is not None
    assert found.name == "will_be_created"
    assert found.version == 1
    assert found.template == "Now I exist!"
    # A third load is served from the cache (no extra network call expected).
    cached = mlflow.genai.load_prompt(
        "will_be_created", version=1, allow_missing=True, link_to_model=False
    )
    assert cached.template == "Now I exist!"
def test_load_prompt_none_result_no_linking():
    """A missing prompt must not trigger any run/model linking."""
    with (
        mock.patch("mlflow.MlflowClient.load_prompt") as mock_client_load,
        mock.patch("mlflow.MlflowClient.link_prompt_version_to_run") as mock_link_run,
        mock.patch("mlflow.MlflowClient.link_prompt_version_to_model") as mock_link_model,
    ):
        # The client reports the prompt as missing.
        mock_client_load.return_value = None
        loaded = mlflow.genai.load_prompt(
            "nonexistent", version=1, allow_missing=True, link_to_model=True
        )
        assert loaded is None
        # No linking should have happened for a None result.
        mock_link_run.assert_not_called()
        mock_link_model.assert_not_called()
def test_load_prompt_caching_with_different_parameters():
    """Cache keys ignore allow_missing but distinguish versions."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="param_test", template="Hello, {{name}}!")
    # Populate the cache with version 1.
    mlflow.genai.load_prompt("param_test", version=1, link_to_model=False)
    # allow_missing is not part of the cache key: both calls are hits.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        mlflow.genai.load_prompt("param_test", version=1, allow_missing=False, link_to_model=False)
        mlflow.genai.load_prompt("param_test", version=1, allow_missing=True, link_to_model=False)
        assert mock_load.call_count == 0
    # A new version is a different key, so it is fetched first...
    mlflow.genai.register_prompt(name="param_test", template="Version 2")
    second = mlflow.genai.load_prompt("param_test", version=2, link_to_model=False)
    assert second.version == 2
    # ...and cached afterwards.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        mlflow.genai.load_prompt("param_test", version=2, link_to_model=False)
        assert mock_load.call_count == 0
def test_register_prompt_chat_format_integration():
    """Chat prompts round-trip through register/load with metadata intact."""
    messages = [
        {"role": "system", "content": "You are a {{style}} assistant."},
        {"role": "user", "content": "{{question}}"},
    ]
    schema = {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
            "confidence": {"type": "number"},
        },
    }
    mlflow.genai.register_prompt(
        name="test_chat_integration",
        template=messages,
        response_format=schema,
        commit_message="Test chat prompt integration",
        tags={"model": "test-model"},
    )
    loaded = mlflow.genai.load_prompt("test_chat_integration", version=1)
    assert loaded.template == messages
    assert loaded.response_format == schema
    assert loaded.commit_message == "Test chat prompt integration"
    assert loaded.tags["model"] == "test-model"
    # Variables are substituted in every message.
    rendered = loaded.format(style="helpful", question="How are you?")
    assert rendered == [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "How are you?"},
    ]
def test_prompt_associate_with_run_chat_format():
    """Loading a chat prompt inside a run links it to that run."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
    mlflow.genai.register_prompt(name="test_chat_run", template=messages)
    with mlflow.start_run() as run:
        mlflow.genai.load_prompt("test_chat_run", version=1)
    # The run should carry a tag recording the linked prompt version.
    run_data = MlflowClient().get_run(run.info.run_id)
    tag_value = run_data.data.tags.get(TraceTagKey.LINKED_PROMPTS)
    assert tag_value is not None
    entries = json.loads(tag_value)
    assert len(entries) == 1
    assert entries[0]["name"] == "test_chat_run"
    assert entries[0]["version"] == "1"
def test_register_prompt_with_pydantic_response_format():
    """A Pydantic model response_format is stored as its JSON schema."""
    from pydantic import BaseModel

    class ResponseSchema(BaseModel):
        answer: str
        confidence: float

    mlflow.genai.register_prompt(
        name="test_pydantic_response",
        template="What is {{question}}?",
        response_format=ResponseSchema,
        commit_message="Test Pydantic response format",
    )
    loaded = mlflow.genai.load_prompt("test_pydantic_response", version=1)
    # The class is persisted as its JSON schema, not the class itself.
    assert loaded.response_format == ResponseSchema.model_json_schema()
    assert loaded.commit_message == "Test Pydantic response format"
def test_register_prompt_with_dict_response_format():
    """A plain-dict response_format round-trips unchanged."""
    schema = {
        "type": "object",
        "properties": {
            "summary": {"type": "string"},
            "key_points": {"type": "array", "items": {"type": "string"}},
        },
    }
    mlflow.genai.register_prompt(
        name="test_dict_response",
        template="Analyze this: {{text}}",
        response_format=schema,
        tags={"analysis_type": "text"},
    )
    loaded = mlflow.genai.load_prompt("test_dict_response", version=1)
    assert loaded.response_format == schema
    assert loaded.tags["analysis_type"] == "text"
def test_register_prompt_text_backward_compatibility():
    """Plain-text templates keep working alongside chat templates."""
    mlflow.genai.register_prompt(
        name="test_text_backward",
        template="Hello {{name}}!",
        commit_message="Test backward compatibility",
    )
    loaded = mlflow.genai.load_prompt("test_text_backward", version=1)
    assert loaded.is_text_prompt
    assert loaded.template == "Hello {{name}}!"
    assert loaded.commit_message == "Test backward compatibility"
    # Text prompts render to a plain string.
    assert loaded.format(name="Alice") == "Hello Alice!"
def test_register_prompt_complex_chat_template():
    """Multi-message chat templates format all roles and variables."""
    messages = [
        {
            "role": "system",
            "content": "You are a {{style}} assistant named {{name}}.",
        },
        {"role": "user", "content": "{{greeting}}! {{question}}"},
        {
            "role": "assistant",
            "content": "I understand you're asking about {{topic}}.",
        },
    ]
    mlflow.genai.register_prompt(
        name="test_complex_chat",
        template=messages,
        tags={"complexity": "high"},
    )
    loaded = mlflow.genai.load_prompt("test_complex_chat", version=1)
    assert not loaded.is_text_prompt
    assert loaded.template == messages
    assert loaded.tags["complexity"] == "high"
    # Every placeholder across all three messages gets substituted.
    rendered = loaded.format(
        style="friendly",
        name="Alice",
        greeting="Hello",
        question="How are you?",
        topic="wellbeing",
    )
    assert rendered == [
        {"role": "system", "content": "You are a friendly assistant named Alice."},
        {"role": "user", "content": "Hello! How are you?"},
        {
            "role": "assistant",
            "content": "I understand you're asking about wellbeing.",
        },
    ]
def test_register_prompt_with_none_response_format():
    """Omitting a response format stores None."""
    mlflow.genai.register_prompt(
        name="test_none_response", template="Hello {{name}}!", response_format=None
    )
    loaded = mlflow.genai.load_prompt("test_none_response", version=1)
    assert loaded.response_format is None
def test_register_prompt_with_single_message_chat():
    """A single-message list is treated as a chat prompt, not a text prompt."""
    chat_template = [{"role": "user", "content": "Hello {{name}}!"}]
    mlflow.genai.register_prompt(name="test_single_message", template=chat_template)
    prompt = mlflow.genai.load_prompt("test_single_message", version=1)
    # BUG FIX: the original line `not prompt.is_text_prompt` was a bare
    # expression without `assert`, so the check never actually ran.
    assert not prompt.is_text_prompt
    assert prompt.template == chat_template
    assert prompt.variables == {"name"}
def test_register_prompt_with_multiple_variables_in_chat():
    """Variables are collected across every message in the template."""
    messages = [
        {
            "role": "system",
            "content": "You are a {{style}} assistant named {{name}}.",
        },
        {"role": "user", "content": "{{greeting}}! {{question}}"},
        {
            "role": "assistant",
            "content": "I understand you're asking about {{topic}}.",
        },
    ]
    mlflow.genai.register_prompt(name="test_multiple_variables", template=messages)
    loaded = mlflow.genai.load_prompt("test_multiple_variables", version=1)
    # The union of placeholders from all messages is reported.
    assert loaded.variables == {"style", "name", "greeting", "question", "topic"}
def test_register_prompt_with_mixed_content_types():
    """Messages with and without variables coexist in one chat template."""
    chat_template = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello {{name}}!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
    ]
    mlflow.genai.register_prompt(name="test_mixed_content", template=chat_template)
    prompt = mlflow.genai.load_prompt("test_mixed_content", version=1)
    # BUG FIX: the original line `not prompt.is_text_prompt` was a bare
    # expression without `assert`, so the check never actually ran.
    assert not prompt.is_text_prompt
    assert prompt.template == chat_template
    assert prompt.variables == {"name"}
def test_register_prompt_with_nested_variables():
    """Dotted (nested) variable names are extracted verbatim."""
    messages = [
        {
            "role": "system",
            "content": "You are a {{user.preferences.style}} assistant.",
        },
        {
            "role": "user",
            "content": "Hello {{user.name}}! {{user.preferences.greeting}}",
        },
    ]
    mlflow.genai.register_prompt(name="test_nested_variables", template=messages)
    loaded = mlflow.genai.load_prompt("test_nested_variables", version=1)
    # The dotted paths are preserved as-is, not split on ".".
    assert loaded.variables == {
        "user.preferences.style",
        "user.name",
        "user.preferences.greeting",
    }
def test_register_prompt_invalidates_latest_cache():
    """Registering a new version evicts the cached @latest entry."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="latest_cache_test", template="Version 1")
    # Cache the @latest resolution.
    first = mlflow.genai.load_prompt("prompts:/latest_cache_test@latest")
    assert first.template == "Version 1"
    cache = PromptCache.get_instance()
    key = PromptCacheKey.from_parts("latest_cache_test", alias="latest")
    assert cache.get(key) is not None
    # A new registration must drop the stale @latest entry...
    mlflow.genai.register_prompt(name="latest_cache_test", template="Version 2")
    assert cache.get(key) is None
    # ...so the next @latest load resolves to the new version.
    second = mlflow.genai.load_prompt("prompts:/latest_cache_test@latest")
    assert second.template == "Version 2"
    assert second.version == 2
def test_set_prompt_alias_invalidates_alias_cache():
    """Repointing an alias evicts the cached entry for that alias."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="alias_cache_test", template="Version 1")
    mlflow.genai.register_prompt(name="alias_cache_test", template="Version 2")
    mlflow.genai.set_prompt_alias("alias_cache_test", alias="production", version=1)
    # Cache the alias resolution.
    first = mlflow.genai.load_prompt("prompts:/alias_cache_test@production")
    assert first.template == "Version 1"
    cache = PromptCache.get_instance()
    key = PromptCacheKey.from_parts("alias_cache_test", alias="production")
    assert cache.get(key) is not None
    # Moving the alias must drop the stale entry...
    mlflow.genai.set_prompt_alias("alias_cache_test", alias="production", version=2)
    assert cache.get(key) is None
    # ...so the next load resolves to the new target.
    second = mlflow.genai.load_prompt("prompts:/alias_cache_test@production")
    assert second.template == "Version 2"
    assert second.version == 2
def test_prompt_cache_hit():
    """A second load of the same version is served from the cache."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="cached_prompt", template="Hello {{name}}!")
    # Cache miss: fetched from the registry.
    first = mlflow.genai.load_prompt("cached_prompt", version=1)
    # Cache hit: the registry client must not be called.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        second = mlflow.genai.load_prompt("cached_prompt", version=1)
        assert mock_load.call_count == 0
        assert first.template == second.template
        assert first.name == second.name
def test_prompt_cache_ttl_expiration():
    """Entries expire after cache_ttl_seconds and are refetched."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="expiring_prompt", template="Hello {{name}}!")
    mlflow.genai.load_prompt("expiring_prompt", version=1, cache_ttl_seconds=1)
    # Within the TTL the entry is still served from the cache.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        mlflow.genai.load_prompt("expiring_prompt", version=1, cache_ttl_seconds=1)
        assert mock_load.call_count == 0
    # After the TTL the entry is gone and the prompt is fetched again.
    time.sleep(1.1)
    refreshed = mlflow.genai.load_prompt("expiring_prompt", version=1, cache_ttl_seconds=1)
    assert refreshed is not None
    assert refreshed.template == "Hello {{name}}!"
def test_prompt_cache_bypass_with_zero_ttl():
    """cache_ttl_seconds=0 bypasses the cache on every call."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="bypass_prompt", template="Hello {{name}}!")
    # Populate the cache first so a bypass is observable.
    mlflow.genai.load_prompt("bypass_prompt", version=1)
    # Count real registry fetches while still delegating to the original.
    call_count = 0
    original_get = mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version

    def counting_get(self, *args, **kwargs):
        nonlocal call_count
        call_count += 1
        return original_get(self, *args, **kwargs)

    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
        counting_get,
    ):
        for _ in range(3):
            mlflow.genai.load_prompt("bypass_prompt", version=1, cache_ttl_seconds=0)
    # Every load went to the registry despite the warm cache.
    assert call_count == 3
def test_prompt_cache_alias_cached():
    """Alias-based loads are cached like version-based loads."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="alias_prompt", template="Version 1")
    mlflow.genai.set_prompt_alias("alias_prompt", alias="production", version=1)
    # First alias load populates the cache.
    mlflow.genai.load_prompt("prompts:/alias_prompt@production")
    # Second alias load must not hit the registry.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version_by_alias",
    ) as mock_load:
        mlflow.genai.load_prompt("prompts:/alias_prompt@production")
        assert mock_load.call_count == 0
def test_prompt_cache_different_versions():
    """Distinct versions get distinct cache entries."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="multi_version", template="Version 1")
    mlflow.genai.register_prompt(name="multi_version", template="Version 2")
    v1 = mlflow.genai.load_prompt("multi_version", version=1)
    v2 = mlflow.genai.load_prompt("multi_version", version=2)
    assert v1.template == "Version 1"
    assert v2.template == "Version 2"
    # Both versions are now cached, so no further registry calls happen.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        mlflow.genai.load_prompt("multi_version", version=1)
        mlflow.genai.load_prompt("multi_version", version=2)
        assert mock_load.call_count == 0
def test_prompt_cache_custom_ttl():
    """Integer and float TTL values both populate the cache."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="custom_ttl_prompt", template="Hello!")
    # Integer TTL.
    mlflow.genai.load_prompt("custom_ttl_prompt", version=1, cache_ttl_seconds=300)
    cache = PromptCache.get_instance()
    entry = cache.get(PromptCacheKey.from_parts("custom_ttl_prompt", version=1))
    assert entry is not None
    assert entry.template == "Hello!"
    # Float TTL.
    mlflow.genai.register_prompt(name="custom_ttl_prompt_float", template="Hello float!")
    mlflow.genai.load_prompt("custom_ttl_prompt_float", version=1, cache_ttl_seconds=300.5)
    float_entry = cache.get(PromptCacheKey.from_parts("custom_ttl_prompt_float", version=1))
    assert float_entry is not None
    assert float_entry.template == "Hello float!"
def test_prompt_cache_invalidation():
    """cache.delete removes a specific version entry."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="invalidate_prompt", template="Hello!")
    mlflow.genai.load_prompt("invalidate_prompt", version=1)
    cache = PromptCache.get_instance()
    key = PromptCacheKey.from_parts("invalidate_prompt", version=1)
    # Entry exists, then is deleted.
    assert cache.get(key) is not None
    cache.delete("invalidate_prompt", version=1)
    assert cache.get(key) is None
    # A fresh load still works (fetched from the registry again).
    reloaded = mlflow.genai.load_prompt("invalidate_prompt", version=1)
    assert reloaded is not None
    assert reloaded.template == "Hello!"
def test_prompt_cache_uri_format():
    """URI-style loads share the cache with subsequent URI loads."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="uri_prompt", template="Hello!")
    first = mlflow.genai.load_prompt("prompts:/uri_prompt/1")
    # The second URI load must be a cache hit.
    with mock.patch(
        "mlflow.tracking._model_registry.client.ModelRegistryClient.get_prompt_version",
    ) as mock_load:
        second = mlflow.genai.load_prompt("prompts:/uri_prompt/1")
        assert mock_load.call_count == 0
        assert first.template == second.template
def test_prompt_cache_clear():
    """cache.clear drops every cached entry at once."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="clear_test_1", template="Hello 1!")
    mlflow.genai.register_prompt(name="clear_test_2", template="Hello 2!")
    mlflow.genai.load_prompt("clear_test_1", version=1)
    mlflow.genai.load_prompt("clear_test_2", version=1)
    # Wipe the cache.
    PromptCache.get_instance().clear()
    # Both prompts are still loadable after the wipe (refetched).
    first = mlflow.genai.load_prompt("clear_test_1", version=1)
    second = mlflow.genai.load_prompt("clear_test_2", version=1)
    assert first.template == "Hello 1!"
    assert second.template == "Hello 2!"
def test_prompt_cache_env_variable(monkeypatch):
    """MLFLOW_PROMPT_CACHE_TTL_SECONDS controls the default TTL."""
    PromptCache._reset_instance()
    mlflow.genai.register_prompt(name="env_var_prompt", template="Hello!")
    # A 1-second TTL taken from the environment.
    monkeypatch.setenv("MLFLOW_PROMPT_CACHE_TTL_SECONDS", "1")
    mlflow.genai.load_prompt("env_var_prompt", version=1)
    # Once expired, the prompt is fetched from the registry again.
    time.sleep(1.1)
    reloaded = mlflow.genai.load_prompt("env_var_prompt", version=1)
    assert reloaded is not None
    assert reloaded.template == "Hello!"
def test_load_prompt_links_to_experiment():
    """load_prompt tags the prompt with the active experiment id."""
    mlflow.genai.register_prompt(name="test_exp_link", template="Hello {{name}}!")
    experiment = mlflow.set_experiment("test_experiment_link")
    mlflow.genai.load_prompt("test_exp_link", version=1)
    # Linking happens on a background thread; wait for it to finish.
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    prompt_info = MlflowClient().get_prompt("test_exp_link")
    assert experiment.experiment_id in prompt_info.tags.get(PROMPT_EXPERIMENT_IDS_TAG_KEY)
def test_register_prompt_links_to_experiment():
    """register_prompt tags the prompt with the active experiment id."""
    experiment = mlflow.set_experiment("test_experiment_register")
    mlflow.genai.register_prompt(name="test_exp_register", template="Greetings {{name}}!")
    # Linking happens on a background thread; wait for it to finish.
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    prompt_info = MlflowClient().get_prompt("test_exp_register")
    assert experiment.experiment_id in prompt_info.tags.get(PROMPT_EXPERIMENT_IDS_TAG_KEY)
def test_link_prompt_to_experiment_no_duplicate():
    """Repeated loads do not break the experiment link."""
    mlflow.genai.register_prompt(name="no_dup_prompt", template="Test {{x}}!")
    experiment = mlflow.set_experiment("test_no_dup")
    # Load the same version several times.
    for _ in range(3):
        mlflow.genai.load_prompt("no_dup_prompt", version=1)
    # Linking happens on a background thread; wait for it to finish.
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    prompt_info = MlflowClient().get_prompt("no_dup_prompt")
    assert experiment.experiment_id in prompt_info.tags.get(PROMPT_EXPERIMENT_IDS_TAG_KEY)
def test_prompt_links_to_default_experiment():
    """Without an active experiment, prompts link to Default (id "0")."""
    client = MlflowClient()

    def _clear_active_experiment():
        # Force the Default experiment to be resolved on the next call.
        fluent._active_experiment_id = None
        MLFLOW_EXPERIMENT_ID.unset()

    def _linked_experiment_ids(prompt_name):
        return client.get_prompt(prompt_name).tags.get(PROMPT_EXPERIMENT_IDS_TAG_KEY, "")

    # register_prompt should link to the Default experiment.
    _clear_active_experiment()
    mlflow.genai.register_prompt(name="default_exp_prompt", template="Hello {{name}}!")
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    default_experiment = client.get_experiment("0")
    experiment_ids_tag = _linked_experiment_ids("default_exp_prompt")
    assert default_experiment.experiment_id in experiment_ids_tag, (
        f"Expected Default experiment ID '{default_experiment.experiment_id}' "
        f"in prompt tags, but got: {experiment_ids_tag!r}"
    )
    # load_prompt should also link to the Default experiment.
    _clear_active_experiment()
    mlflow.genai.register_prompt(name="default_exp_load_prompt", template="Test {{x}}!")
    # Wait only after the load, not after the register.
    mlflow.genai.load_prompt("default_exp_load_prompt", version=1)
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    experiment_ids_tag = _linked_experiment_ids("default_exp_load_prompt")
    assert default_experiment.experiment_id in experiment_ids_tag, (
        f"Expected Default experiment ID '{default_experiment.experiment_id}' "
        f"in prompt tags after load_prompt, but got: {experiment_ids_tag!r}"
    )
def test_search_prompts_by_experiment_id():
    """search_prompts filters by the linked experiment id."""
    experiment = mlflow.set_experiment("test_search_by_exp")
    mlflow.genai.register_prompt(name="exp_prompt_1", template="Template 1: {{x}}")
    mlflow.genai.register_prompt(name="exp_prompt_2", template="Template 2: {{y}}")
    # Linking happens on a background thread; wait for it to finish.
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    results = MlflowClient().search_prompts(
        filter_string=f'experiment_id = "{experiment.experiment_id}"'
    )
    assert len(results) == 2
    names = {p.name for p in results}
    assert "exp_prompt_1" in names
    assert "exp_prompt_2" in names
def test_search_prompts_by_experiment_id_empty():
    """An experiment with no linked prompts yields no search results."""
    experiment = mlflow.set_experiment("test_empty_exp")
    results = MlflowClient().search_prompts(
        filter_string=f'experiment_id = "{experiment.experiment_id}"'
    )
    assert len(results) == 0
def test_search_prompts_same_prompt_multiple_experiments():
    """One prompt linked from two experiments is found under both."""
    exp_id_1 = mlflow.create_experiment("test_multi_exp_1")
    exp_id_2 = mlflow.create_experiment("test_multi_exp_2")
    # Register under the first experiment, load under the second.
    mlflow.set_experiment(experiment_id=exp_id_1)
    mlflow.genai.register_prompt(name="shared_search_prompt", template="Shared: {{x}}")
    mlflow.set_experiment(experiment_id=exp_id_2)
    mlflow.genai.load_prompt("shared_search_prompt", version=1)
    # Linking happens on a background thread; wait for it to finish.
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    client = MlflowClient()
    # Both experiments now resolve to the same prompt.
    for exp_id in (exp_id_1, exp_id_2):
        matches = client.search_prompts(filter_string=f'experiment_id = "{exp_id}"')
        assert len(matches) == 1
        assert matches[0].name == "shared_search_prompt"
def test_search_prompts_with_combined_filters():
    """experiment_id can be combined with name equality and LIKE filters."""
    experiment = mlflow.set_experiment("test_combined_filters")
    mlflow.genai.register_prompt(name="alpha_prompt", template="Alpha: {{x}}")
    mlflow.genai.register_prompt(name="beta_prompt", template="Beta: {{y}}")
    mlflow.genai.register_prompt(name="gamma_prompt", template="Gamma: {{z}}")
    client = MlflowClient()
    # Linking happens on a background thread; wait for it to finish.
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    # experiment_id AND exact name.
    matches = client.search_prompts(
        filter_string=f'experiment_id = "{experiment.experiment_id}" AND name = "alpha_prompt"'
    )
    assert len(matches) == 1
    assert matches[0].name == "alpha_prompt"
    # experiment_id AND name LIKE.
    matches = client.search_prompts(
        filter_string=f'experiment_id = "{experiment.experiment_id}" AND name LIKE "a%"'
    )
    assert len(matches) == 1
    assert matches[0].name == "alpha_prompt"
    # A name-only filter still works without experiment_id.
    matches = client.search_prompts(filter_string='name = "gamma_prompt"')
    assert len(matches) == 1
    assert matches[0].name == "gamma_prompt"
def test_load_prompt_sets_span_attributes():
    """load_prompt records the prompt version on the active span."""
    mlflow.genai.register_prompt(name="span_test_prompt", template="Hello, {{name}}!")
    with mlflow.start_span("test_span") as span:
        prompt = mlflow.genai.load_prompt("span_test_prompt", version=1)
    entries = json.loads(span.attributes.get(SpanAttributeKey.LINKED_PROMPTS))
    assert entries == [{"name": "span_test_prompt", "version": "1"}]
    assert prompt.name == "span_test_prompt"
    assert prompt.version == 1
def test_load_prompt_multiple_prompts_in_same_span():
    """All prompts loaded within one span are recorded on it."""
    mlflow.genai.register_prompt(name="prompt_1", template="First {{var1}}")
    mlflow.genai.register_prompt(name="prompt_2", template="Second {{var2}}")
    with mlflow.start_span("multi_prompt_span") as span:
        first = mlflow.genai.load_prompt("prompt_1", version=1)
        second = mlflow.genai.load_prompt("prompt_2", version=1)
    entries = json.loads(span.attributes.get(SpanAttributeKey.LINKED_PROMPTS))
    assert len(entries) == 2
    assert {"name": "prompt_1", "version": "1"} in entries
    assert {"name": "prompt_2", "version": "1"} in entries
    assert first.name == "prompt_1"
    assert second.name == "prompt_2"
def test_load_prompt_same_prompt_twice_in_span():
    """Loading the same version twice records a single span entry."""
    mlflow.genai.register_prompt(name="duplicate_test", template="Test {{var}}")
    with mlflow.start_span("duplicate_span") as span:
        mlflow.genai.load_prompt("duplicate_test", version=1)
        mlflow.genai.load_prompt("duplicate_test", version=1)
    entries = json.loads(span.attributes.get(SpanAttributeKey.LINKED_PROMPTS))
    assert isinstance(entries, list)
    # No duplicate entry for the repeated load.
    assert entries == [{"name": "duplicate_test", "version": "1"}]
def test_register_and_load_prompt_with_model_config():
    """model_config is stored per version and round-trips unchanged."""
    config = {
        "model_name": "gpt-5",
        "temperature": 0.7,
        "top_p": 0.9,
        "max_tokens": 1000,
    }
    mlflow.genai.register_prompt(
        name="config_prompt",
        template="Hello, {{name}}!",
        model_config=config,
        commit_message="Prompt with model config",
    )
    loaded = mlflow.genai.load_prompt("config_prompt", version=1)
    assert loaded.name == "config_prompt"
    assert loaded.template == "Hello, {{name}}!"
    assert loaded.model_config == config
    # A second version without a config must not inherit the first's.
    mlflow.genai.register_prompt(
        name="config_prompt",
        template="Hi, {{name}}!",
        commit_message="No model config",
    )
    assert mlflow.genai.load_prompt("config_prompt", version=2).model_config is None
    # Version 1 keeps its config.
    assert mlflow.genai.load_prompt("config_prompt", version=1).model_config == config
def test_register_and_load_prompt_with_model_config_instance():
    """A PromptModelConfig instance is flattened (extra_params merged)."""
    config = PromptModelConfig(
        model_name="gpt-5",
        temperature=0.6,
        max_tokens=1500,
        top_p=0.95,
        extra_params={"stream": True, "n": 1},
    )
    mlflow.genai.register_prompt(
        name="config_instance_prompt",
        template="Summarize: {{text}}",
        model_config=config,
    )
    loaded = mlflow.genai.load_prompt("config_instance_prompt", version=1)
    # extra_params keys appear at the top level of the stored dict.
    assert loaded.model_config == {
        "model_name": "gpt-5",
        "temperature": 0.6,
        "max_tokens": 1500,
        "top_p": 0.95,
        "stream": True,
        "n": 1,
    }
def test_model_config_validation_on_register():
    """Invalid model configs are rejected before registration."""
    # Temperature must be >= 0.
    with pytest.raises(ValidationError, match="Input should be greater than or equal to 0"):
        mlflow.genai.register_prompt(
            name="invalid_prompt",
            template="Test",
            model_config=PromptModelConfig(temperature=-1.0),
        )
    # max_tokens must be strictly positive.
    with pytest.raises(ValidationError, match="Input should be greater than 0"):
        mlflow.genai.register_prompt(
            name="invalid_prompt",
            template="Test",
            model_config=PromptModelConfig(max_tokens=0),
        )
def test_set_prompt_model_config_with_dict():
    """set_prompt_model_config attaches a dict config to a version."""
    mlflow.genai.register_prompt(
        name="test_set_config",
        template="Hello, {{name}}!",
        commit_message="Initial version",
    )
    # No config is present right after registration.
    assert mlflow.genai.load_prompt("test_set_config", version=1).model_config is None
    config = {
        "model_name": "gpt-5",
        "temperature": 0.7,
        "max_tokens": 1000,
        "top_p": 0.9,
    }
    mlflow.genai.set_prompt_model_config(
        name="test_set_config", version=1, model_config=config
    )
    # The config is now readable on the version.
    assert mlflow.genai.load_prompt("test_set_config", version=1).model_config == {
        "model_name": "gpt-5",
        "temperature": 0.7,
        "max_tokens": 1000,
        "top_p": 0.9,
    }
def test_set_prompt_model_config_with_instance():
    """set_prompt_model_config accepts a PromptModelConfig instance."""
    mlflow.genai.register_prompt(
        name="test_set_config_instance",
        template="Summarize: {{text}}",
    )
    config = PromptModelConfig(
        model_name="gpt-5",
        temperature=0.5,
        max_tokens=2000,
        top_p=0.95,
        extra_params={"stream": True},
    )
    mlflow.genai.set_prompt_model_config(
        name="test_set_config_instance", version=1, model_config=config
    )
    # The stored config is flattened, with extra_params merged in.
    loaded = mlflow.genai.load_prompt("test_set_config_instance", version=1)
    assert loaded.model_config == {
        "model_name": "gpt-5",
        "temperature": 0.5,
        "max_tokens": 2000,
        "top_p": 0.95,
        "stream": True,
    }
def test_set_prompt_model_config_updates_existing():
    """set_prompt_model_config replaces a previously stored config."""
    mlflow.genai.register_prompt(
        name="test_update_config",
        template="Question: {{question}}",
        model_config={"model_name": "gpt-5", "temperature": 0.3},
    )
    before = mlflow.genai.load_prompt("test_update_config", version=1)
    assert before.model_config == {
        "model_name": "gpt-5",
        "temperature": 0.3,
    }
    # Overwrite with a new config.
    mlflow.genai.set_prompt_model_config(
        name="test_update_config",
        version=1,
        model_config={"model_name": "gpt-5", "temperature": 0.7, "max_tokens": 1500},
    )
    after = mlflow.genai.load_prompt("test_update_config", version=1)
    assert after.model_config == {
        "model_name": "gpt-5",
        "temperature": 0.7,
        "max_tokens": 1500,
    }
def test_set_prompt_model_config_validation():
    """set_prompt_model_config rejects invalid configs."""
    mlflow.genai.register_prompt(name="test_validation", template="Test")
    # Temperature must be >= 0 (instance input).
    with pytest.raises(ValidationError, match="Input should be greater than or equal to 0"):
        mlflow.genai.set_prompt_model_config(
            name="test_validation",
            version=1,
            model_config=PromptModelConfig(temperature=-1.0),
        )
    # max_tokens must be > 0 (plain-dict input is validated too).
    with pytest.raises(ValidationError, match="Input should be greater than 0"):
        mlflow.genai.set_prompt_model_config(
            name="test_validation",
            version=1,
            model_config={"max_tokens": 0},
        )
def test_delete_prompt_model_config():
    """delete_prompt_model_config removes a stored config."""
    mlflow.genai.register_prompt(
        name="test_delete_config",
        template="Analyze: {{data}}",
        model_config={"model_name": "gpt-5", "temperature": 0.7},
    )
    # A config is present after registration...
    assert mlflow.genai.load_prompt("test_delete_config", version=1).model_config is not None
    mlflow.genai.delete_prompt_model_config(name="test_delete_config", version=1)
    # ...and gone after deletion.
    assert mlflow.genai.load_prompt("test_delete_config", version=1).model_config is None
def test_concurrent_prompt_linking_to_run_and_trace():
    """Loading a prompt inside an active run and trace links it to both.

    NOTE(review): `join_thread_by_name_prefix` suggests prompt-to-experiment
    linking runs on a background thread; draining it first lets the
    assertions below observe a settled state — confirm against the helper.
    """
    mlflow.genai.register_prompt(name="test", template="Run prompt: {{x}}")
    join_thread_by_name_prefix("link_prompt_to_experiment_thread")
    client = mlflow.MlflowClient()
    with mlflow.start_run() as run:
        run_id = run.info.run_id
        @mlflow.trace
        def traced_function():
            # load_prompt() under an active run + trace should trigger linking
            mlflow.genai.load_prompt("test", version=1)
        traced_function()
    # Verify the prompt was linked to the run
    run_data = client.get_run(run_id)
    linked_prompts_tag = run_data.data.tags.get(TraceTagKey.LINKED_PROMPTS)
    assert linked_prompts_tag is not None
    linked_prompts = json.loads(linked_prompts_tag)
    assert any(p["name"] == "test" for p in linked_prompts)
    # Verify the prompt was linked to the trace
    trace_id = mlflow.get_last_active_trace_id()
    trace = mlflow.get_trace(trace_id)
    trace_linked_prompts = trace.info.tags.get(TraceTagKey.LINKED_PROMPTS)
    assert trace_linked_prompts is not None
    trace_prompts = json.loads(trace_linked_prompts)
    assert any(p["name"] == "test" for p in trace_prompts)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/prompts/test_prompts.py",
"license": "Apache License 2.0",
"lines": 1628,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/evaluate/test_to_predict_fn.py | import importlib.metadata
from unittest import mock
import pytest
import mlflow
from mlflow.entities.trace_info import TraceInfo
from mlflow.environment_variables import MLFLOW_ENABLE_ASYNC_TRACE_LOGGING
from mlflow.exceptions import MlflowException
from mlflow.genai.evaluation.base import to_predict_fn
from mlflow.genai.utils.trace_utils import convert_predict_fn
from tests.tracing.helper import V2_TRACE_DICT
# Canned OpenAI-style chat-completion payload returned by the mocked
# deployment client throughout these tests.
_DUMMY_CHAT_RESPONSE = {
    "id": "1",
    "object": "text_completion",
    "created": "2021-10-01T00:00:00.000000Z",
    "model": "gpt-4o-mini",
    "choices": [
        {
            "index": 0,
            "message": {
                "content": "This is a response",
                "role": "assistant",
            },
            "finish_reason": "length",
        }
    ],
    "usage": {
        "prompt_tokens": 1,
        "completion_tokens": 1,
        "total_tokens": 2,
    },
}
@pytest.fixture
def mock_deploy_client():
    """Patch get_deploy_client() and yield the mocked deployment client."""
    with mock.patch("mlflow.deployments.get_deploy_client") as patched:
        client = patched.return_value
        yield client
# TODO: Remove this once OSS backend is migrated to V3.
@pytest.fixture
def mock_tracing_client(monkeypatch):
    """Route trace export to a mocked Databricks TracingClient.

    Async trace logging is disabled so spans are exported synchronously,
    letting tests assert on the mock right after the traced call returns.
    """
    # Mock the TracingClient
    with mock.patch("mlflow.tracing.export.mlflow_v3.TracingClient") as mock_get:
        tracing_client = mock_get.return_value
        tracing_client.tracking_uri = "databricks"
        # Set up trace exporter to Databricks.
        monkeypatch.setenv(MLFLOW_ENABLE_ASYNC_TRACE_LOGGING.name, "false")
        mlflow.set_tracking_uri("databricks")
        mlflow.tracing.enable()  # Set up trace exporter again
        yield tracing_client
def test_to_predict_fn_return_trace(sample_rag_trace, mock_deploy_client, mock_tracing_client):
    """A trace returned by the endpoint is copied into the current experiment."""
    mock_deploy_client.predict.return_value = {
        **_DUMMY_CHAT_RESPONSE,
        "databricks_output": {"trace": sample_rag_trace.to_dict()},
    }
    messages = [
        {"content": "You are a helpful assistant.", "role": "system"},
        {"content": "What is Spark?", "role": "user"},
    ]
    response = to_predict_fn("endpoints:/chat")(messages=messages)
    mock_deploy_client.predict.assert_called_once_with(
        endpoint="chat",
        inputs={"messages": messages, "databricks_options": {"return_trace": True}},
    )
    # The databricks_output wrapper must be stripped from what the caller sees
    assert response == _DUMMY_CHAT_RESPONSE
    # The endpoint trace (sample_rag_trace) is re-logged under a fresh trace ID
    mock_tracing_client.start_trace.assert_called_once()
    copied_info = mock_tracing_client.start_trace.call_args[0][0]
    assert copied_info.trace_id != sample_rag_trace.info.trace_id
    assert copied_info.request_preview == '{"question": "query"}'
    assert copied_info.response_preview == '"answer"'
    copied_data = mock_tracing_client._upload_trace_data.call_args[0][1]
    assert len(copied_data.spans) == 3
    # Span contents carry over unchanged, field by field
    for original, copied in zip(sample_rag_trace.data.spans, copied_data.spans):
        for attr in (
            "name",
            "inputs",
            "outputs",
            "start_time_ns",
            "end_time_ns",
            "parent_id",
            "span_id",
        ):
            assert getattr(original, attr) == getattr(copied, attr)
    mock_tracing_client._upload_trace_data.assert_called_once_with(mock.ANY, copied_data)
@pytest.mark.parametrize(
    "databricks_output",
    [
        {},
        {"databricks_output": {}},
        {"databricks_output": {"trace": None}},
    ],
)
def test_to_predict_fn_does_not_return_trace(
    databricks_output, mock_deploy_client, mock_tracing_client
):
    """Without an endpoint trace, a minimal single-span trace is synthesized."""
    mock_deploy_client.predict.return_value = _DUMMY_CHAT_RESPONSE | databricks_output
    messages = [
        {"content": "You are a helpful assistant.", "role": "system"},
        {"content": "What is Spark?", "role": "user"},
    ]
    response = to_predict_fn("endpoints:/chat")(messages=messages)
    mock_deploy_client.predict.assert_called_once_with(
        endpoint="chat",
        inputs={"messages": messages, "databricks_options": {"return_trace": True}},
    )
    # Caller never sees the databricks_output wrapper
    assert response == _DUMMY_CHAT_RESPONSE
    # A bare-minimum trace is created in lieu of one from the endpoint
    mock_tracing_client.start_trace.assert_called_once()
    assert mock_tracing_client.start_trace.call_args[0][0].request_preview == "What is Spark?"
    uploaded = mock_tracing_client._upload_trace_data.call_args[0][1]
    assert [span.name for span in uploaded.spans] == ["predict"]
def test_to_predict_fn_pass_tracing_check(
    sample_rag_trace, mock_deploy_client, mock_tracing_client
):
    """
    The function produced by to_predict_fn() is guaranteed to create a trace.
    Therefore it should not be wrapped by @mlflow.trace by convert_predict_fn().
    """
    mock_deploy_client.predict.side_effect = lambda **kwargs: {
        **_DUMMY_CHAT_RESPONSE,
        "databricks_output": {"trace": sample_rag_trace.to_dict()},
    }
    sample_input = {"messages": [{"role": "user", "content": "Hi"}]}
    predict_fn = to_predict_fn("endpoints:/chat")
    converted = convert_predict_fn(predict_fn, sample_input)
    # The tracing check should pass, so convert_predict_fn must hand back the
    # original function unchanged instead of wrapping it with @mlflow.trace.
    # (The previous assertion compared the bool from hasattr() against the
    # function object -- `wrapped != predict_fn` -- which could never fail.)
    assert converted is predict_fn
    # The function should not produce a trace during the check
    mock_tracing_client.start_trace.assert_not_called()
    # The function should produce a trace when invoked
    converted(sample_input)
    mock_tracing_client.start_trace.assert_called_once()
    trace_info = mock_tracing_client.start_trace.call_args[0][0]
    assert trace_info.request_preview == '{"question": "query"}'
    assert trace_info.response_preview == '"answer"'
    # The produced trace should be the one returned from the endpoint (sample_rag_trace)
    trace_data = mock_tracing_client._upload_trace_data.call_args[0][1]
    assert trace_data.spans[0].name == "rag"
    assert trace_data.spans[0].inputs == {"question": "query"}
    assert trace_data.spans[0].outputs == "answer"
def test_to_predict_fn_return_v2_trace(mock_deploy_client, mock_tracing_client):
    """A legacy V2 trace returned by the endpoint is upgraded to V3 and copied."""
    mlflow.tracing.reset()
    mock_deploy_client.predict.return_value = {
        **_DUMMY_CHAT_RESPONSE,
        "databricks_output": {"trace": V2_TRACE_DICT},
    }
    messages = [
        {"content": "You are a helpful assistant.", "role": "system"},
        {"content": "What is Spark?", "role": "user"},
    ]
    predict_fn = to_predict_fn("endpoints:/chat")
    response = predict_fn(messages=messages)
    mock_deploy_client.predict.assert_called_once_with(
        endpoint="chat",
        inputs={
            "messages": messages,
            "databricks_options": {"return_trace": True},
        },
    )
    assert response == _DUMMY_CHAT_RESPONSE  # Response should not contain databricks_output
    # Trace from endpoint should be copied to the current experiment
    mock_tracing_client.start_trace.assert_called_once()
    trace_info = mock_tracing_client.start_trace.call_args[0][0]
    # Copied trace should be a V3 TraceInfo with a new trace ID.
    # (This isinstance check was previously missing its `assert`, so it was a no-op.)
    assert isinstance(trace_info, TraceInfo)
    assert trace_info.trace_id != V2_TRACE_DICT["info"]["request_id"]
    assert trace_info.request_preview == '{"x": 2, "y": 5}'
    assert trace_info.response_preview == "8"
    trace_data = mock_tracing_client._upload_trace_data.call_args[0][1]
    assert len(trace_data.spans) == 2
    assert trace_data.spans[0].name == "predict"
    assert trace_data.spans[0].inputs == {"x": 2, "y": 5}
    assert trace_data.spans[0].outputs == 8
    mock_tracing_client._upload_trace_data.assert_called_once_with(mock.ANY, trace_data)
def test_to_predict_fn_should_not_pass_databricks_options_to_fmapi(
    mock_deploy_client, mock_tracing_client
):
    """Foundation Model API endpoints are called without databricks_options."""
    mock_deploy_client.get_endpoint.return_value = {"endpoint_type": "FOUNDATION_MODEL_API"}
    mock_deploy_client.predict.return_value = _DUMMY_CHAT_RESPONSE
    messages = [
        {"content": "You are a helpful assistant.", "role": "system"},
        {"content": "What is Spark?", "role": "user"},
    ]
    response = to_predict_fn("endpoints:/foundation-model-api")(messages=messages)
    # The payload contains only the messages -- no databricks_options key
    mock_deploy_client.predict.assert_called_once_with(
        endpoint="foundation-model-api",
        inputs={"messages": messages},
    )
    assert response == _DUMMY_CHAT_RESPONSE
    # No trace comes back from FMAPI, so a minimal one-span trace is synthesized
    mock_tracing_client.start_trace.assert_called_once()
    assert mock_tracing_client.start_trace.call_args[0][0].request_preview == "What is Spark?"
    uploaded = mock_tracing_client._upload_trace_data.call_args[0][1]
    assert [span.name for span in uploaded.spans] == ["predict"]
def test_to_predict_fn_handles_trace_without_tags(
    sample_rag_trace, mock_deploy_client, mock_tracing_client
):
    """Copying an endpoint trace works even when its info has no `tags` field."""
    tagless_trace = sample_rag_trace.to_dict()
    tagless_trace["info"].pop("tags", None)  # Drop the tags field entirely
    mock_deploy_client.predict.return_value = {
        **_DUMMY_CHAT_RESPONSE,
        "databricks_output": {"trace": tagless_trace},
    }
    messages = [
        {"content": "You are a helpful assistant.", "role": "system"},
        {"content": "What is Spark?", "role": "user"},
    ]
    response = to_predict_fn("endpoints:/chat")(messages=messages)
    mock_deploy_client.predict.assert_called_once_with(
        endpoint="chat",
        inputs={"messages": messages, "databricks_options": {"return_trace": True}},
    )
    assert response == _DUMMY_CHAT_RESPONSE
    # The copy still succeeds and gets a fresh trace ID
    mock_tracing_client.start_trace.assert_called_once()
    copied_info = mock_tracing_client.start_trace.call_args[0][0]
    assert copied_info.trace_id != sample_rag_trace.info.trace_id
    assert copied_info.request_preview == '{"question": "query"}'
    assert copied_info.response_preview == '"answer"'
    copied_data = mock_tracing_client._upload_trace_data.call_args[0][1]
    assert len(copied_data.spans) == 3
    mock_tracing_client._upload_trace_data.assert_called_once_with(mock.ANY, copied_data)
def test_to_predict_fn_reuses_trace_in_dual_write_mode(
    sample_rag_trace, mock_deploy_client, mock_tracing_client
):
    """
    Test that when an endpoint logs traces to both inference table and MLflow experiment
    (dual-write mode), the trace is reused instead of being re-logged.
    This happens when MLFLOW_EXPERIMENT_ID env var is set in the serving endpoint.
    """
    experiment_id = "test-experiment-123"
    # Endpoint trace already lives in the experiment the evaluation runs in
    endpoint_trace = sample_rag_trace.to_dict()
    endpoint_trace["info"]["trace_location"] = {
        "mlflow_experiment": {"experiment_id": experiment_id}
    }
    with mock.patch(
        "mlflow.genai.evaluation.base._get_experiment_id", return_value=experiment_id
    ) as mock_get_experiment_id:
        mock_deploy_client.predict.return_value = {
            **_DUMMY_CHAT_RESPONSE,
            "databricks_output": {"trace": endpoint_trace},
        }
        messages = [
            {"content": "You are a helpful assistant.", "role": "system"},
            {"content": "What is Spark?", "role": "user"},
        ]
        response = to_predict_fn("endpoints:/chat")(messages=messages)
        mock_deploy_client.predict.assert_called_once_with(
            endpoint="chat",
            inputs={"messages": messages, "databricks_options": {"return_trace": True}},
        )
        assert response == _DUMMY_CHAT_RESPONSE
        # Same experiment -> no copy: neither start_trace nor data upload happens
        mock_tracing_client.start_trace.assert_not_called()
        mock_tracing_client._upload_trace_data.assert_not_called()
        mock_get_experiment_id.assert_called_once()
# ========== Databricks Apps Tests ==========
def test_to_predict_fn_apps_uri_with_app_name(mock_tracing_client):
    """apps:/<name> URIs resolve the app via the SDK and POST to /invocations."""
    app = mock.MagicMock()
    app.url = "https://agent-app-123.staging.aws.databricksapps.com"
    token = mock.MagicMock()
    token.access_token = "oauth-token-123"
    sdk_config = mock.MagicMock()
    sdk_config.oauth_token.return_value = token
    ws_client = mock.MagicMock()
    ws_client.apps.get.return_value = app
    ws_client.config = sdk_config
    http_response = mock.MagicMock()
    http_response.json.return_value = {"response": "test response"}
    with (
        mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client),
        mock.patch(
            "mlflow.utils.databricks_utils.http_request", return_value=http_response
        ) as mock_http_request,
    ):
        predict_fn = to_predict_fn("apps:/agent-app")
        result = predict_fn(input=[{"role": "user", "content": "test"}])
    # App lookup used the bare app name from the URI
    ws_client.apps.get.assert_called_once_with(name="agent-app")
    # The HTTP call targets the app's /invocations endpoint with the raw payload
    mock_http_request.assert_called_once()
    call_kwargs = mock_http_request.call_args[1]
    assert call_kwargs["endpoint"] == "/invocations"
    assert call_kwargs["method"] == "POST"
    assert call_kwargs["json"] == {"input": [{"role": "user", "content": "test"}]}
    # Credentials carry the app URL and the OAuth token
    creds = call_kwargs["host_creds"]
    assert creds.host == "https://agent-app-123.staging.aws.databricksapps.com"
    assert creds.token == "oauth-token-123"
    assert result == {"response": "test response"}
def test_to_predict_fn_apps_not_found():
    """An SDK failure while resolving the app surfaces as MlflowException."""
    ws_client = mock.MagicMock()
    ws_client.apps.get.side_effect = Exception("App not found: nonexistent-app")
    with (
        mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client),
        pytest.raises(MlflowException, match="Failed to get Databricks App"),
    ):
        to_predict_fn("apps:/nonexistent-app")
def test_to_predict_fn_apps_no_url():
    """An app that exists but has no deployment URL is rejected."""
    undeployed_app = mock.MagicMock()
    undeployed_app.url = None  # App exists but was never deployed
    ws_client = mock.MagicMock()
    ws_client.apps.get.return_value = undeployed_app
    with (
        mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client),
        pytest.raises(MlflowException, match="does not have a URL"),
    ):
        to_predict_fn("apps:/undeployed-app")
def test_to_predict_fn_apps_no_oauth_raises_error():
    """Non-OAuth workspace auth (e.g. PAT) fails at call time with a clear error."""
    app = mock.MagicMock()
    app.url = "https://my-app-123.staging.aws.databricksapps.com"
    sdk_config = mock.MagicMock()
    # oauth_token() blowing up simulates a workspace configured for PAT auth
    sdk_config.oauth_token.side_effect = Exception(
        "OAuth tokens are not available for pat authentication"
    )
    ws_client = mock.MagicMock()
    ws_client.apps.get.return_value = app
    ws_client.config = sdk_config
    with mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client):
        predict_fn = to_predict_fn("apps:/my-app")
        # Building the fn succeeds; invoking it is what needs the token
        with pytest.raises(MlflowException, match="Databricks Apps require OAuth authentication"):
            predict_fn(input=[{"role": "user", "content": "test"}])
def test_to_predict_fn_apps_old_sdk_version_error():
    """apps:/ support requires databricks-sdk >= 0.74.0."""
    true_version = importlib.metadata.version
    def fake_version(package):
        # Report an outdated SDK while leaving every other package untouched
        return "0.73.0" if package == "databricks-sdk" else true_version(package)
    with (
        mock.patch("importlib.metadata.version", side_effect=fake_version),
        pytest.raises(MlflowException, match="databricks-sdk>=0.74.0"),
    ):
        to_predict_fn("apps:/my-app")
def test_to_predict_fn_apps_http_error_handling():
    """Errors raised by http_request propagate to the caller."""
    app = mock.MagicMock()
    app.url = "https://my-app-123.staging.aws.databricksapps.com"
    token = mock.MagicMock()
    token.access_token = "oauth-token"
    sdk_config = mock.MagicMock()
    sdk_config.oauth_token.return_value = token
    ws_client = mock.MagicMock()
    ws_client.apps.get.return_value = app
    ws_client.config = sdk_config
    with (
        mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client),
        # http_request raises MlflowException on errors
        mock.patch(
            "mlflow.utils.databricks_utils.http_request",
            side_effect=MlflowException("Request failed: 403 Forbidden"),
        ),
    ):
        predict_fn = to_predict_fn("apps:/my-app")
        with pytest.raises(MlflowException, match="Request failed"):
            predict_fn(input=[{"role": "user", "content": "test"}])
def test_to_predict_fn_apps_payload_passthrough():
    """Keyword arguments are forwarded verbatim as the JSON request body."""
    app = mock.MagicMock()
    app.url = "https://my-app-123.staging.aws.databricksapps.com"
    token = mock.MagicMock()
    token.access_token = "oauth-token"
    sdk_config = mock.MagicMock()
    sdk_config.oauth_token.return_value = token
    ws_client = mock.MagicMock()
    ws_client.apps.get.return_value = app
    ws_client.config = sdk_config
    http_response = mock.MagicMock()
    http_response.json.return_value = {"output": "ok"}
    with (
        mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client),
        mock.patch(
            "mlflow.utils.databricks_utils.http_request", return_value=http_response
        ) as mock_http_request,
    ):
        predict_fn = to_predict_fn("apps:/my-app")
        payloads = [
            # Standard format with input only
            {"input": [{"role": "user", "content": "test"}]},
            # With custom_inputs
            {
                "input": [{"role": "user", "content": "test2"}],
                "custom_inputs": {"session_id": "123"},
            },
            # With stream parameter
            {"input": [{"role": "user", "content": "test3"}], "stream": True},
        ]
        for payload in payloads:
            predict_fn(**payload)
            # Each call's JSON body is exactly the kwargs it was given
            assert mock_http_request.call_args[1]["json"] == payload
            mock_http_request.reset_mock()
def test_to_predict_fn_apps_creates_trace():
    """Calling an apps:/ predict_fn records a single 'predict' span."""
    app = mock.MagicMock()
    app.url = "https://my-app-123.staging.aws.databricksapps.com"
    token = mock.MagicMock()
    token.access_token = "oauth-token"
    sdk_config = mock.MagicMock()
    sdk_config.oauth_token.return_value = token
    ws_client = mock.MagicMock()
    ws_client.apps.get.return_value = app
    ws_client.config = sdk_config
    http_response = mock.MagicMock()
    http_response.json.return_value = {"output": "test output"}
    with (
        mock.patch("databricks.sdk.WorkspaceClient", return_value=ws_client),
        mock.patch("mlflow.utils.databricks_utils.http_request", return_value=http_response),
    ):
        result = to_predict_fn("apps:/my-app")(input=[{"role": "user", "content": "test"}])
        # A trace with exactly one span named "predict" should have been recorded
        trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
        assert trace is not None
        assert [span.name for span in trace.data.spans] == ["predict"]
        assert result == {"output": "test output"}
def test_to_predict_fn_apps_invalid_uri():
    """URIs with an unrecognized scheme are rejected up front."""
    with pytest.raises(ValueError, match="Invalid endpoint URI"):
        to_predict_fn("invalid:/my-app")
def test_to_predict_fn_copies_trace_when_experiment_differs(
    sample_rag_trace, mock_deploy_client, mock_tracing_client
):
    """
    Test that when an endpoint returns a trace from a different experiment,
    the trace is still copied to the current experiment.
    """
    current_experiment_id = "current-experiment-123"
    # Endpoint trace belongs to a *different* experiment than the current one
    endpoint_trace = sample_rag_trace.to_dict()
    endpoint_trace["info"]["trace_location"] = {
        "mlflow_experiment": {"experiment_id": "different-experiment-456"}
    }
    with mock.patch(
        "mlflow.genai.evaluation.base._get_experiment_id", return_value=current_experiment_id
    ) as mock_get_experiment_id:
        mock_deploy_client.predict.return_value = {
            **_DUMMY_CHAT_RESPONSE,
            "databricks_output": {"trace": endpoint_trace},
        }
        messages = [
            {"content": "You are a helpful assistant.", "role": "system"},
            {"content": "What is Spark?", "role": "user"},
        ]
        response = to_predict_fn("endpoints:/chat")(messages=messages)
        mock_deploy_client.predict.assert_called_once_with(
            endpoint="chat",
            inputs={"messages": messages, "databricks_options": {"return_trace": True}},
        )
        assert response == _DUMMY_CHAT_RESPONSE
        # Different experiment -> the trace IS copied, under a new trace ID
        mock_tracing_client.start_trace.assert_called_once()
        copied_info = mock_tracing_client.start_trace.call_args[0][0]
        assert copied_info.trace_id != sample_rag_trace.info.trace_id
        mock_tracing_client._upload_trace_data.assert_called_once()
        mock_get_experiment_id.assert_called_once()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/evaluate/test_to_predict_fn.py",
"license": "Apache License 2.0",
"lines": 482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/store/model_registry/test_abstract_store.py | import json
import threading
import time
from unittest import mock
import pytest
from mlflow.entities.logged_model import LoggedModel
from mlflow.entities.logged_model_tag import LoggedModelTag
from mlflow.entities.model_registry.model_version import ModelVersion
from mlflow.entities.model_registry.model_version_tag import ModelVersionTag
from mlflow.entities.model_registry.prompt_version import PromptVersion
from mlflow.entities.run import Run
from mlflow.entities.run_data import RunData
from mlflow.entities.run_info import RunInfo
from mlflow.entities.run_tag import RunTag
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, ErrorCode
from mlflow.store.model_registry.abstract_store import AbstractStore
from mlflow.tracing.constant import TraceTagKey
class MockAbstractStore(AbstractStore):
    """In-memory AbstractStore used to exercise prompt-linking logic."""

    def __init__(self):
        super().__init__()
        # Both maps are keyed by "name:version" strings; prompt versions are
        # additionally stored under the "v<N>" spelling (see add_prompt_version).
        self.prompt_versions = {}
        self.model_versions = {}

    def get_prompt_version(self, name: str, version: str) -> PromptVersion:
        """Return a registered prompt version or raise RESOURCE_DOES_NOT_EXIST."""
        try:
            return self.prompt_versions[f"{name}:{version}"]
        except KeyError:
            raise MlflowException(
                f"Prompt version '{name}' version '{version}' not found",
                error_code=ErrorCode.Name(RESOURCE_DOES_NOT_EXIST),
            )

    def get_model_version(self, name: str, version: int) -> ModelVersion:
        """Return the model version, lazily creating a default one on first access."""
        return self.model_versions.setdefault(
            f"{name}:{version}",
            ModelVersion(
                name=name,
                version=str(version),
                creation_timestamp=1234567890,
                last_updated_timestamp=1234567890,
                description="Test model version",
                tags={},
            ),
        )

    def set_model_version_tag(self, name: str, version: int, tag: ModelVersionTag):
        """Record a tag on the (possibly auto-created) model version."""
        mv = self.get_model_version(name, version)
        if not isinstance(mv.tags, dict):
            # Normalize a non-dict tags container to a plain dict first
            mv.tags = {}
        mv.tags[tag.key] = tag.value

    def add_prompt_version(self, name: str, version: str):
        """Register a prompt version, indexed under both 'v<N>' and '<N>' keys."""
        number = int(version[1:]) if version.startswith("v") else int(version)
        prompt_version = PromptVersion(
            name=name,
            version=number,
            template="Test template",
            creation_timestamp=1234567890,
        )
        for key in (f"{name}:v{number}", f"{name}:{number}"):
            self.prompt_versions[key] = prompt_version
@pytest.fixture
def store():
    """Fresh in-memory mock store per test."""
    return MockAbstractStore()
@pytest.fixture
def mock_tracking_store():
    """Patch the tracking-store factory and yield the mocked store it returns."""
    with mock.patch("mlflow.tracking._get_store") as factory:
        tracking_store = mock.Mock()
        factory.return_value = tracking_store
        yield tracking_store
def test_link_prompt_version_to_model_success(store, mock_tracking_store):
    """Linking a prompt to a model writes one linked-prompts LoggedModelTag."""
    store.add_prompt_version("test_prompt", "1")
    model_id = "model_123"
    # Logged model starts with no linked-prompts tag
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )
    store.link_prompt_version_to_model("test_prompt", "v1", model_id)
    # Exactly one tag carrying the linked-prompts payload is written
    mock_tracking_store.set_logged_model_tags.assert_called_once()
    (tagged_model_id, tags), _ = mock_tracking_store.set_logged_model_tags.call_args
    assert tagged_model_id == model_id
    assert len(tags) == 1
    (tag,) = tags
    assert isinstance(tag, LoggedModelTag)
    assert tag.key == TraceTagKey.LINKED_PROMPTS
    assert json.loads(tag.value) == [{"name": "test_prompt", "version": "1"}]
def test_link_prompt_version_to_model_append_to_existing(store, mock_tracking_store):
    """A new link is appended after entries already present in the tag."""
    store.add_prompt_version("test_prompt", "1")
    model_id = "model_123"
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        # Pre-existing linked prompt recorded in the tag
        tags={
            TraceTagKey.LINKED_PROMPTS: json.dumps(
                [{"name": "existing_prompt", "version": "v1"}]
            )
        },
    )
    store.link_prompt_version_to_model("test_prompt", "v1", model_id)
    # The new entry is appended after the existing one
    (_, tags), _ = mock_tracking_store.set_logged_model_tags.call_args
    assert len(tags) == 1
    assert json.loads(tags[0].value) == [
        {"name": "existing_prompt", "version": "v1"},
        {"name": "test_prompt", "version": "1"},
    ]
def test_link_prompt_version_to_model_no_model_found(store, mock_tracking_store):
    """Linking against an unknown model ID raises."""
    store.add_prompt_version("test_prompt", "1")
    # The tracking store cannot resolve the model
    mock_tracking_store.get_logged_model.return_value = None
    with pytest.raises(MlflowException, match="Could not find model with ID 'nonexistent_model'"):
        store.link_prompt_version_to_model("test_prompt", "v1", "nonexistent_model")
def test_link_prompt_version_to_model_prompt_not_found(store, mock_tracking_store):
    """Linking an unregistered prompt raises."""
    model_id = "model_123"
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )
    # No prompt was registered, so the lookup inside the store must fail
    with pytest.raises(
        MlflowException, match="Prompt version 'nonexistent_prompt' version 'v1' not found"
    ):
        store.link_prompt_version_to_model("nonexistent_prompt", "v1", model_id)
def test_link_prompt_version_to_model_invalid_json_tag(store, mock_tracking_store):
    """A linked-prompts tag holding malformed JSON is reported as an error."""
    store.add_prompt_version("test_prompt", "1")
    model_id = "model_123"
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        # Corrupt tag value: not parseable as JSON at all
        tags={TraceTagKey.LINKED_PROMPTS: "invalid json"},
    )
    with pytest.raises(MlflowException, match="Invalid JSON format for 'mlflow.linkedPrompts' tag"):
        store.link_prompt_version_to_model("test_prompt", "v1", model_id)
def test_link_prompt_version_to_model_invalid_format_tag(store, mock_tracking_store):
    """Valid JSON of the wrong shape (not a list) is rejected."""
    store.add_prompt_version("test_prompt", "1")
    model_id = "model_123"
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        # Parseable JSON, but a dict where a list is expected
        tags={TraceTagKey.LINKED_PROMPTS: json.dumps({"not": "a list"})},
    )
    with pytest.raises(MlflowException, match="Invalid format for 'mlflow.linkedPrompts' tag"):
        store.link_prompt_version_to_model("test_prompt", "v1", model_id)
def test_link_prompt_version_to_model_duplicate_prevention(store, mock_tracking_store):
    """Re-linking the same prompt version is a no-op (idempotent)."""
    store.add_prompt_version("test_prompt", "1")
    model_id = "model_123"
    logged_model = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )
    # Have set_logged_model_tags actually mutate the model's tags so the
    # second link observes the result of the first.
    def apply_tags(_model_id, tags):
        logged_model.tags.update({t.key: t.value for t in tags})
    mock_tracking_store.get_logged_model.return_value = logged_model
    mock_tracking_store.set_logged_model_tags.side_effect = apply_tags
    store.link_prompt_version_to_model("test_prompt", "v1", model_id)
    store.link_prompt_version_to_model("test_prompt", "v1", model_id)  # Should be idempotent
    # Only the first call writes; the duplicate returns early
    assert mock_tracking_store.set_logged_model_tags.call_count == 1
    # The tag holds exactly one entry
    assert json.loads(logged_model.tags[TraceTagKey.LINKED_PROMPTS]) == [
        {"name": "test_prompt", "version": "1"}
    ]
def test_link_prompt_version_to_model_thread_safety(store, mock_tracking_store):
    """Concurrent links of different prompts to one model must not lose entries.

    NOTE(review): the mocked `set_logged_model_tags` sleeps briefly to widen
    the race window; the store is expected to serialize the read-modify-write
    of the linked-prompts tag so both entries survive.
    """
    # Setup
    store.add_prompt_version("test_prompt_1", "1")
    store.add_prompt_version("test_prompt_2", "1")
    model_id = "model_123"
    # Create a shared logged model that will be updated
    logged_model = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )
    # Mock behavior to simulate updating the model's tags
    def mock_set_tags(model_id, tags):
        # Simulate concurrent access with small delay
        time.sleep(0.01)
        for tag in tags:
            logged_model.tags[tag.key] = tag.value
    mock_tracking_store.get_logged_model.return_value = logged_model
    mock_tracking_store.set_logged_model_tags.side_effect = mock_set_tags
    # Define thread worker function
    def link_prompt(prompt_name):
        try:
            store.link_prompt_version_to_model(prompt_name, "v1", model_id)
        except Exception as e:
            # Store any exceptions for later verification
            # (`exceptions` is bound below, before any thread starts)
            exceptions.append(e)
    # Track exceptions from threads
    exceptions = []
    # Create and start threads
    threads = []
    for i in range(2):
        thread = threading.Thread(target=link_prompt, args=[f"test_prompt_{i + 1}"])
        threads.append(thread)
        thread.start()
    # Wait for all threads to complete
    for thread in threads:
        thread.join()
    # Verify no exceptions occurred
    assert len(exceptions) == 0, f"Thread exceptions: {exceptions}"
    # Verify final state contains both prompts (order may vary due to threading)
    final_tag_value = json.loads(logged_model.tags[TraceTagKey.LINKED_PROMPTS])
    expected_prompts = [
        {"name": "test_prompt_1", "version": "1"},
        {"name": "test_prompt_2", "version": "1"},
    ]
    assert len(final_tag_value) == 2
    for expected_prompt in expected_prompts:
        assert expected_prompt in final_tag_value
# Tests for link_prompt_version_to_run
def test_link_prompt_version_to_run_success(store, mock_tracking_store):
    """Linking a prompt version to a run with no prior links writes a new tag."""
    store.add_prompt_version("test_prompt", "1")
    run_id = "run_123"

    # Build a run that carries no linked-prompts tag yet.
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    data = RunData(metrics=[], params=[], tags={})
    mock_tracking_store.get_run.return_value = Run(run_info=info, run_data=data)

    store.link_prompt_version_to_run("test_prompt", "1", run_id)

    # Exactly one tag write, targeted at the run, carrying the expected payload.
    mock_tracking_store.set_tag.assert_called_once()
    target_run_id = mock_tracking_store.set_tag.call_args[0][0]
    written_tag = mock_tracking_store.set_tag.call_args[0][1]
    assert target_run_id == run_id
    assert isinstance(written_tag, RunTag)
    assert written_tag.key == TraceTagKey.LINKED_PROMPTS
    assert json.loads(written_tag.value) == [{"name": "test_prompt", "version": "1"}]
def test_link_prompt_version_to_run_append_to_existing(store, mock_tracking_store):
    """A new link is appended to a run's pre-existing linked-prompts tag."""
    store.add_prompt_version("test_prompt_1", "1")
    store.add_prompt_version("test_prompt_2", "1")
    run_id = "run_123"

    # The run already carries one linked prompt.
    prior_links = [{"name": "existing_prompt", "version": "1"}]
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    data = RunData(
        metrics=[],
        params=[],
        tags=[RunTag(TraceTagKey.LINKED_PROMPTS, json.dumps(prior_links))],
    )
    mock_tracking_store.get_run.return_value = Run(run_info=info, run_data=data)

    store.link_prompt_version_to_run("test_prompt_1", "1", run_id)

    # The written tag must hold both the old and the new link, in any order.
    mock_tracking_store.set_tag.assert_called_once()
    written_tag = mock_tracking_store.set_tag.call_args[0][1]
    linked = json.loads(written_tag.value)
    assert len(linked) == 2
    assert {"name": "existing_prompt", "version": "1"} in linked
    assert {"name": "test_prompt_1", "version": "1"} in linked
def test_link_prompt_version_to_run_no_run_found(store, mock_tracking_store):
    """Linking against a missing run surfaces a 'Could not find run' error."""
    store.add_prompt_version("test_prompt", "1")
    mock_tracking_store.get_run.return_value = None

    with pytest.raises(MlflowException, match="Could not find run"):
        store.link_prompt_version_to_run("test_prompt", "1", "nonexistent_run")
def test_link_prompt_version_to_run_prompt_not_found(store, mock_tracking_store):
    """Linking an unregistered prompt raises a 'not found' error."""
    run_id = "run_123"
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    data = RunData(metrics=[], params=[], tags={})
    mock_tracking_store.get_run.return_value = Run(run_info=info, run_data=data)

    with pytest.raises(MlflowException, match="not found"):
        store.link_prompt_version_to_run("nonexistent_prompt", "1", run_id)
def test_link_prompt_version_to_run_duplicate_prevention(store, mock_tracking_store):
    """Re-linking an already-linked prompt is a no-op: no tag write happens."""
    store.add_prompt_version("test_prompt", "1")
    run_id = "run_123"

    # The run already links exactly this prompt version.
    already_linked = [{"name": "test_prompt", "version": "1"}]
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    data = RunData(
        metrics=[],
        params=[],
        tags=[RunTag(TraceTagKey.LINKED_PROMPTS, json.dumps(already_linked))],
    )
    mock_tracking_store.get_run.return_value = Run(run_info=info, run_data=data)

    # Linking the same prompt again must not trigger any write.
    store.link_prompt_version_to_run("test_prompt", "1", run_id)
    mock_tracking_store.set_tag.assert_not_called()
def test_link_prompt_version_to_run_thread_safety(store, mock_tracking_store):
    """Two threads linking different prompts to one run both land in the tag."""
    store.add_prompt_version("test_prompt_1", "1")
    store.add_prompt_version("test_prompt_2", "1")
    run_id = "run_123"

    # One shared run instance that both threads read and update.
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    shared_run = Run(run_info=info, run_data=RunData(metrics=[], params=[], tags={}))

    def mock_set_tag(_run_id, tag):
        # Delay widens the read-modify-write window to expose races.
        time.sleep(0.01)
        shared_run.data.tags[tag.key] = tag.value

    mock_tracking_store.get_run.return_value = shared_run
    mock_tracking_store.set_tag.side_effect = mock_set_tag

    # Fan out one linking call per prompt and wait for both to finish.
    workers = []
    for prompt_name in ("test_prompt_1", "test_prompt_2"):
        worker = threading.Thread(
            target=store.link_prompt_version_to_run, args=(prompt_name, "1", run_id)
        )
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()

    # Both prompts must appear in the final tag, in any order.
    linked = json.loads(shared_run.data.tags[TraceTagKey.LINKED_PROMPTS])
    assert len(linked) == 2
    assert {"name": "test_prompt_1", "version": "1"} in linked
    assert {"name": "test_prompt_2", "version": "1"} in linked
def test_link_chat_prompt_to_model(store, mock_tracking_store):
    """A chat-style prompt version links to a logged model like a text one."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello {{name}}!"},
    ]
    store.prompt_versions["test_chat:1"] = PromptVersion("test_chat", 1, messages)

    model_id = "model_123"
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )

    store.link_prompt_version_to_model("test_chat", "1", model_id)

    # Exactly one tag write holding exactly this prompt link.
    mock_tracking_store.set_logged_model_tags.assert_called_once()
    tags_written = mock_tracking_store.set_logged_model_tags.call_args[0][1]
    assert len(tags_written) == 1
    assert json.loads(tags_written[0].value) == [{"name": "test_chat", "version": "1"}]
def test_link_prompt_with_response_format_to_model(store, mock_tracking_store):
    """A prompt carrying a response_format links to a logged model normally."""
    store.prompt_versions["test_response:1"] = PromptVersion(
        "test_response",
        1,
        "Hello {{name}}!",
        response_format={"type": "string", "description": "A response"},
    )

    model_id = "model_123"
    mock_tracking_store.get_logged_model.return_value = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )

    store.link_prompt_version_to_model("test_response", "1", model_id)

    # Exactly one tag write holding exactly this prompt link.
    mock_tracking_store.set_logged_model_tags.assert_called_once()
    tags_written = mock_tracking_store.set_logged_model_tags.call_args[0][1]
    assert len(tags_written) == 1
    assert json.loads(tags_written[0].value) == [
        {"name": "test_response", "version": "1"}
    ]
def test_link_chat_prompt_to_run(store, mock_tracking_store):
    """A chat-style prompt version links to a run via the linked-prompts tag."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello {{name}}!"},
    ]
    store.prompt_versions["test_chat:1"] = PromptVersion("test_chat", 1, messages)

    run_id = "run_123"
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    mock_tracking_store.get_run.return_value = Run(
        run_info=info, run_data=RunData(metrics=[], params=[], tags={})
    )

    store.link_prompt_version_to_run("test_chat", "1", run_id)

    # A single tag write under the linked-prompts key with this one link.
    mock_tracking_store.set_tag.assert_called_once()
    written_tag = mock_tracking_store.set_tag.call_args[0][1]
    assert written_tag.key == TraceTagKey.LINKED_PROMPTS
    assert json.loads(written_tag.value) == [{"name": "test_chat", "version": "1"}]
def test_link_prompt_with_response_format_to_run(store, mock_tracking_store):
    """A prompt carrying a structured response_format links to a run normally."""
    store.prompt_versions["test_response:1"] = PromptVersion(
        "test_response",
        1,
        "What is {{question}}?",
        response_format={
            "type": "object",
            "properties": {"answer": {"type": "string"}},
        },
    )

    run_id = "run_123"
    info = RunInfo(
        run_id=run_id,
        experiment_id="exp_123",
        user_id="user_123",
        status="FINISHED",
        start_time=1234567890,
        end_time=1234567890,
        lifecycle_stage="active",
    )
    mock_tracking_store.get_run.return_value = Run(
        run_info=info, run_data=RunData(metrics=[], params=[], tags={})
    )

    store.link_prompt_version_to_run("test_response", "1", run_id)

    # A single tag write under the linked-prompts key with this one link.
    mock_tracking_store.set_tag.assert_called_once()
    written_tag = mock_tracking_store.set_tag.call_args[0][1]
    assert written_tag.key == TraceTagKey.LINKED_PROMPTS
    assert json.loads(written_tag.value) == [{"name": "test_response", "version": "1"}]
def test_link_multiple_prompt_types_to_model(store, mock_tracking_store):
    """Text and chat prompts can both be linked to the same logged model."""
    store.prompt_versions["test_text:1"] = PromptVersion("test_text", 1, "Hello {{name}}!")
    store.prompt_versions["test_chat:1"] = PromptVersion(
        "test_chat",
        1,
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "{{question}}"},
        ],
    )

    model_id = "model_123"
    logged_model = LoggedModel(
        experiment_id="exp_123",
        model_id=model_id,
        name="test_model",
        artifact_location="/path/to/model",
        creation_timestamp=1234567890,
        last_updated_timestamp=1234567890,
        tags={},
    )

    def mock_set_tags(model_id, tags):
        # Persist writes onto the shared model so the second link sees the first.
        for tag in tags:
            logged_model.tags[tag.key] = tag.value

    mock_tracking_store.get_logged_model.return_value = logged_model
    mock_tracking_store.set_logged_model_tags.side_effect = mock_set_tags

    # Link both prompts one after the other.
    store.link_prompt_version_to_model("test_text", "1", model_id)
    store.link_prompt_version_to_model("test_chat", "1", model_id)

    # Two writes total; the final write carries both links, in any order.
    assert mock_tracking_store.set_logged_model_tags.call_count == 2
    last_call = mock_tracking_store.set_logged_model_tags.call_args_list[-1]
    linked = json.loads(last_call[0][1][0].value)
    assert len(linked) == 2
    assert {"name": "test_text", "version": "1"} in linked
    assert {"name": "test_chat", "version": "1"} in linked
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/store/model_registry/test_abstract_store.py",
"license": "Apache License 2.0",
"lines": 590,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/entities/model_registry/prompt_version.py | from __future__ import annotations
import json
import re
from typing import Any
from pydantic import BaseModel, Field, ValidationError
from mlflow.entities.model_registry._model_registry_entity import _ModelRegistryEntity
from mlflow.entities.model_registry.model_version_tag import ModelVersionTag
from mlflow.exceptions import MlflowException
from mlflow.prompt.constants import (
IS_PROMPT_TAG_KEY,
PROMPT_MODEL_CONFIG_TAG_KEY,
PROMPT_TEMPLATE_VARIABLE_PATTERN,
PROMPT_TEXT_DISPLAY_LIMIT,
PROMPT_TEXT_TAG_KEY,
PROMPT_TYPE_CHAT,
PROMPT_TYPE_TAG_KEY,
PROMPT_TYPE_TEXT,
RESPONSE_FORMAT_TAG_KEY,
)
# Alias type: prompt-version tags reuse the ModelVersionTag entity unchanged.
PromptVersionTag = ModelVersionTag
def _is_jinja2_template(template: str | list[dict[str, Any]]) -> bool:
"""Check if template uses Jinja2 control flow syntax ({% %})."""
if isinstance(template, str):
return "{%" in template and "%}" in template
return any(
"{%" in msg.get("content", "") and "%}" in msg.get("content", "") for msg in template
)
class PromptModelConfig(BaseModel):
    """
    Model settings stored alongside a prompt version.

    Captures which model a prompt was authored for (``provider`` /
    ``model_name``) together with common inference parameters
    (``temperature``, ``max_tokens``, ``top_p``, ``top_k``,
    ``frequency_penalty``, ``presence_penalty``, ``stop_sequences``), so a
    prompt version records exactly which model and settings it was used with.
    Provider-specific or experimental settings with no dedicated field go
    into ``extra_params`` and are merged to the top level by :meth:`to_dict`.

    Example:
        .. code-block:: python

            from mlflow.entities.model_registry import PromptModelConfig

            # Basic configuration
            config = PromptModelConfig(
                model_name="gpt-4",
                temperature=0.7,
                max_tokens=1000,
            )

            # Configuration with extra provider-specific params
            config = PromptModelConfig(
                model_name="claude-3-opus",
                temperature=0.5,
                max_tokens=2000,
                extra_params={
                    "anthropic_version": "2023-06-01",
                    "response_metadata": {"cache_control": True},
                },
            )

            # Use with prompt registration
            import mlflow

            mlflow.genai.register_prompt(
                name="my_prompt",
                template="Analyze this: {{text}}",
                model_config=config,
            )
    """

    provider: str | None = None
    model_name: str | None = None
    temperature: float | None = Field(None, ge=0)
    max_tokens: int | None = Field(None, gt=0)
    top_p: float | None = Field(None, ge=0, le=1)
    top_k: int | None = Field(None, gt=0)
    frequency_penalty: float | None = None
    presence_penalty: float | None = None
    stop_sequences: list[str] | None = None
    extra_params: dict[str, Any] = Field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """
        Serialize to a flat dictionary.

        None-valued fields are dropped, and ``extra_params`` entries are
        merged into the top level of the result.
        """
        flattened = self.model_dump(exclude_none=True)
        flattened.pop("extra_params", None)
        if self.extra_params:
            flattened.update(self.extra_params)
        return flattened

    @classmethod
    def from_dict(cls, config_dict: dict[str, Any]) -> PromptModelConfig:
        """
        Build a config from a flat dictionary.

        Keys matching declared model fields populate those fields; any other
        keys are collected into ``extra_params``.
        """
        # Derive the known field names from the Pydantic model itself so this
        # split stays in sync with the class definition.
        declared = set(cls.model_fields.keys()) - {"extra_params"}
        field_values: dict[str, Any] = {}
        extras: dict[str, Any] = {}
        for key, value in config_dict.items():
            bucket = field_values if key in declared else extras
            bucket[key] = value
        return cls(**field_values, extra_params=extras)
def _is_reserved_tag(key: str) -> bool:
    """Return True if ``key`` is a tag key MLflow reserves for prompt internals."""
    reserved_keys = (
        IS_PROMPT_TAG_KEY,
        PROMPT_TEXT_TAG_KEY,
        PROMPT_TYPE_TAG_KEY,
        RESPONSE_FORMAT_TAG_KEY,
        PROMPT_MODEL_CONFIG_TAG_KEY,
    )
    return key in reserved_keys
class PromptVersion(_ModelRegistryEntity):
    """
    An entity representing a specific version of a prompt with its template content.

    Args:
        name: The name of the prompt.
        version: The version number of the prompt.
        template: The template content of the prompt. Can be either:
            - A string containing text with variables enclosed in double curly braces,
            e.g. {{variable}}, which will be replaced with actual values by the `format` method.
            MLflow uses the same variable naming rules as Jinja2:
            https://jinja.palletsprojects.com/en/stable/api/#notes-on-identifiers
            - A list of dictionaries representing chat messages, where each message has
            'role' and 'content' keys (e.g., [{"role": "user", "content": "Hello {{name}}"}])
        response_format: Optional Pydantic class or dictionary defining the expected response
            structure. This can be used to specify the schema for structured outputs.
        model_config: Optional PromptModelConfig instance or dictionary containing model-specific
            configuration including model name and settings like temperature, top_p, max_tokens.
            Using a PromptModelConfig instance provides validation and type safety for common
            parameters.
            Example (dict): {"model_name": "gpt-4", "temperature": 0.7}
            Example (PromptModelConfig): PromptModelConfig(model_name="gpt-4", temperature=0.7)
        commit_message: The commit message for the prompt version. Optional.
        creation_timestamp: Timestamp of the prompt creation. Optional.
        tags: A dictionary of tags associated with the **prompt version**.
            This is useful for storing version-specific information, such as the author of
            the changes. Optional.
        aliases: List of aliases for this prompt version. Optional.
        last_updated_timestamp: Timestamp of last update. Optional.
        user_id: User ID that created this prompt version. Optional.
    """

    def __init__(
        self,
        name: str,
        version: int,
        template: str | list[dict[str, Any]],
        commit_message: str | None = None,
        creation_timestamp: int | None = None,
        tags: dict[str, str] | None = None,
        aliases: list[str] | None = None,
        last_updated_timestamp: int | None = None,
        user_id: str | None = None,
        response_format: type[BaseModel] | dict[str, Any] | None = None,
        model_config: PromptModelConfig | dict[str, Any] | None = None,
    ):
        # Local import to avoid a circular import at module load time —
        # TODO confirm that is the reason; mlflow.types.chat is only needed here.
        from mlflow.types.chat import ChatMessage
        super().__init__()
        # Core PromptVersion attributes
        self._name: str = name
        self._version: str = str(version)  # Store as string internally
        self._creation_time: int = creation_timestamp or 0
        # Initialize tags first
        # NOTE(review): a caller-supplied tags dict is mutated in place below
        # with the reserved prompt keys.
        tags = tags or {}
        # Determine prompt type and set it: a non-empty list is treated as a
        # chat prompt, and every entry must validate as a ChatMessage.
        if isinstance(template, list) and len(template) > 0:
            try:
                for msg in template:
                    ChatMessage.model_validate(msg)
            except ValidationError as e:
                raise ValueError("Template must be a list of dicts with role and content") from e
            self._prompt_type = PROMPT_TYPE_CHAT
            tags[PROMPT_TYPE_TAG_KEY] = PROMPT_TYPE_CHAT
        else:
            self._prompt_type = PROMPT_TYPE_TEXT
            tags[PROMPT_TYPE_TAG_KEY] = PROMPT_TYPE_TEXT
        # Store template text as a tag (chat templates are JSON-serialized)
        tags[PROMPT_TEXT_TAG_KEY] = template if isinstance(template, str) else json.dumps(template)
        tags[IS_PROMPT_TAG_KEY] = "true"
        if response_format:
            tags[RESPONSE_FORMAT_TAG_KEY] = json.dumps(
                self.convert_response_format_to_dict(response_format)
            )
        if model_config:
            # Convert PromptModelConfig to dict if needed
            if isinstance(model_config, PromptModelConfig):
                config_dict = model_config.to_dict()
            else:
                # Validate dict by converting through PromptModelConfig
                config_dict = PromptModelConfig.from_dict(model_config).to_dict()
            tags[PROMPT_MODEL_CONFIG_TAG_KEY] = json.dumps(config_dict)
        # Store the tags dict
        self._tags: dict[str, str] = tags
        # Extract the {{variable}} names once, from the serialized template text.
        template_text = template if isinstance(template, str) else json.dumps(template)
        self._variables = set(PROMPT_TEMPLATE_VARIABLE_PATTERN.findall(template_text))
        self._last_updated_timestamp: int | None = last_updated_timestamp
        self._description: str | None = commit_message
        self._user_id: str | None = user_id
        self._aliases: list[str] = aliases or []

    def __repr__(self) -> str:
        # Truncate long templates so the repr stays readable.
        if self.is_text_prompt:
            text = (
                self.template[:PROMPT_TEXT_DISPLAY_LIMIT] + "..."
                if len(self.template) > PROMPT_TEXT_DISPLAY_LIMIT
                else self.template
            )
        else:
            message = json.dumps(self.template)
            text = (
                message[:PROMPT_TEXT_DISPLAY_LIMIT] + "..."
                if len(message) > PROMPT_TEXT_DISPLAY_LIMIT
                else message
            )
        return f"PromptVersion(name={self.name}, version={self.version}, template={text})"

    # Core PromptVersion properties
    @property
    def template(self) -> str | list[dict[str, Any]]:
        """
        Return the template content of the prompt.

        Returns:
            Either a string (for text prompts) or a list of chat message dictionaries
            (for chat prompts) with 'role' and 'content' keys.
        """
        if self.is_text_prompt:
            return self._tags[PROMPT_TEXT_TAG_KEY]
        else:
            # Chat templates are stored JSON-serialized in the tag.
            return json.loads(self._tags[PROMPT_TEXT_TAG_KEY])

    @property
    def is_text_prompt(self) -> bool:
        """
        Return True if the prompt is a text prompt, False if it's a chat prompt.

        Returns:
            True for text prompts (string templates), False for chat prompts (list of messages).
        """
        return self._prompt_type == PROMPT_TYPE_TEXT

    @property
    def response_format(self) -> dict[str, Any] | None:
        """
        Return the response format specification for the prompt.

        Returns:
            A dictionary defining the expected response structure, or None if no
            response format is specified. This can be used to validate or structure
            the output from LLM calls.
        """
        if RESPONSE_FORMAT_TAG_KEY not in self._tags:
            return None
        return json.loads(self._tags[RESPONSE_FORMAT_TAG_KEY])

    @property
    def model_config(self) -> dict[str, Any] | None:
        """
        Return the model configuration for the prompt.

        Returns:
            A dictionary containing model-specific configuration including model name
            and settings like temperature, top_p, max_tokens, etc., or None if no
            model config is specified.
        """
        if PROMPT_MODEL_CONFIG_TAG_KEY not in self._tags:
            return None
        return json.loads(self._tags[PROMPT_MODEL_CONFIG_TAG_KEY])

    def to_single_brace_format(self) -> str | list[dict[str, Any]]:
        """
        Convert the template to single brace format. This is useful for integrating with other
        systems that use single curly braces for variable replacement, such as LangChain's prompt
        template.

        Returns:
            The template with variables converted from {{variable}} to {variable} format.
            For text prompts, returns a string. For chat prompts, returns a list of messages.
        """
        # Chat templates are round-tripped through JSON so the same regex
        # substitution works on both prompt types.
        t = self.template if self.is_text_prompt else json.dumps(self.template)
        for var in self.variables:
            t = re.sub(r"\{\{\s*" + var + r"\s*\}\}", "{" + var + "}", t)
        return t if self.is_text_prompt else json.loads(t)

    @staticmethod
    def convert_response_format_to_dict(
        response_format: type[BaseModel] | dict[str, Any],
    ) -> dict[str, Any]:
        """
        Convert a response format specification to a dictionary representation.

        Args:
            response_format: Either a Pydantic BaseModel class or a dictionary defining
                the response structure.

        Returns:
            A dictionary representation of the response format. If a Pydantic class is
            provided, returns its JSON schema. If a dictionary is provided, returns it as-is.
        """
        if isinstance(response_format, type) and issubclass(response_format, BaseModel):
            return response_format.model_json_schema()
        else:
            return response_format

    @property
    def variables(self) -> set[str]:
        """
        Return a list of variables in the template text.
        The value must be enclosed in double curly braces, e.g. {{variable}}.
        """
        return self._variables

    @property
    def commit_message(self) -> str | None:
        """
        Return the commit message of the prompt version.
        """
        return self.description

    @property
    def tags(self) -> dict[str, str]:
        """
        Return the version-level tags.
        """
        # Reserved internal keys (template text, type markers, etc.) are hidden.
        return {key: value for key, value in self._tags.items() if not _is_reserved_tag(key)}

    @property
    def uri(self) -> str:
        """Return the URI of the prompt."""
        return f"prompts:/{self.name}/{self.version}"

    @property
    def name(self) -> str:
        """String. Unique name within Model Registry."""
        return self._name

    @name.setter
    def name(self, new_name: str):
        self._name = new_name

    @property
    def version(self) -> int:
        """Version"""
        # Stored internally as a string; exposed to callers as an int.
        return int(self._version)

    @property
    def creation_timestamp(self) -> int:
        """Integer. Prompt version creation timestamp (milliseconds since the Unix epoch)."""
        return self._creation_time

    @property
    def last_updated_timestamp(self) -> int | None:
        """Integer. Timestamp of last update for this prompt version (milliseconds since the Unix
        epoch).
        """
        return self._last_updated_timestamp

    @last_updated_timestamp.setter
    def last_updated_timestamp(self, updated_timestamp: int):
        self._last_updated_timestamp = updated_timestamp

    @property
    def description(self) -> str | None:
        """String. Description"""
        return self._description

    @description.setter
    def description(self, description: str):
        self._description = description

    @property
    def user_id(self) -> str | None:
        """String. User ID that created this prompt version."""
        return self._user_id

    @property
    def aliases(self) -> list[str]:
        """List of aliases (string) for the current prompt version."""
        return self._aliases

    @aliases.setter
    def aliases(self, aliases: list[str]):
        self._aliases = aliases

    # Methods
    @classmethod
    def _properties(cls) -> list[str]:
        # aggregate with base class properties since cls.__dict__ does not do it automatically
        return sorted(cls._get_properties_helper())

    def _add_tag(self, tag: ModelVersionTag):
        # Internal: write the raw tag value, including reserved keys.
        self._tags[tag.key] = tag.value

    def format(
        self,
        allow_partial: bool = False,
        use_jinja_sandbox: bool = True,
        **kwargs,
    ) -> PromptVersion | str | list[dict[str, Any]]:
        """
        Format the template with the given keyword arguments.
        By default, it raises an error if there are missing variables. To format
        the prompt partially, set `allow_partial=True`.

        Example:

        .. code-block:: python

            # Text prompt formatting
            prompt = PromptVersion("my-prompt", 1, "Hello, {{title}} {{name}}!")
            formatted = prompt.format(title="Ms", name="Alice")
            print(formatted)
            # Output: "Hello, Ms Alice!"

            # Chat prompt formatting
            chat_prompt = PromptVersion(
                "assistant",
                1,
                [
                    {"role": "system", "content": "You are a {{style}} assistant."},
                    {"role": "user", "content": "{{question}}"},
                ],
            )
            formatted = chat_prompt.format(style="friendly", question="How are you?")
            print(formatted)
            # Output: [{"role": "system", "content": "You are a friendly assistant."},
            #          {"role": "user", "content": "How are you?"}]

            # Partial formatting
            formatted = prompt.format(title="Ms", allow_partial=True)
            print(formatted)
            # Output: PromptVersion(name=my-prompt, version=1, template="Hello, Ms {{name}}!")

            # Jinja2 template formatting (with conditionals and loops)
            jinja_prompt = PromptVersion(
                "jinja-prompt",
                1,
                "Hello {% if name %}{{ name }}{% else %}Guest{% endif %}!",
            )
            formatted = jinja_prompt.format(name="Alice")
            print(formatted)
            # Output: "Hello Alice!"

        Args:
            allow_partial: If True, allow partial formatting of the prompt text.
                If False, raise an error if there are missing variables.
            use_jinja_sandbox: If True (default), use Jinja2's SandboxedEnvironment
                for safe rendering. Set to False to use unrestricted Environment.
                Only applies to Jinja2 templates (those containing {% %} syntax).
            kwargs: Keyword arguments to replace the variables in the template.
        """
        from mlflow.genai.prompts.utils import format_prompt
        # Jinja2 template rendering
        if _is_jinja2_template(self.template):
            try:
                from jinja2 import Environment, Undefined
                from jinja2.sandbox import SandboxedEnvironment
            except ImportError:
                raise MlflowException.invalid_parameter_value(
                    "The prompt is a Jinja2 template. To format the prompt, "
                    "install Jinja2 with `pip install jinja2`."
                )
            env_cls = SandboxedEnvironment if use_jinja_sandbox else Environment
            env = env_cls(undefined=Undefined)
            if self.is_text_prompt:
                tmpl = env.from_string(self.template)
                return tmpl.render(**kwargs)
            else:
                # Jinja2 rendering for chat prompts
                return [
                    {
                        "role": message["role"],
                        "content": env.from_string(message.get("content", "")).render(**kwargs),
                    }
                    for message in self.template
                ]
        # Double-brace template formatting (native MLflow format)
        if self.is_text_prompt:
            template = format_prompt(self.template, **kwargs)
        else:
            # For chat prompts, we need to handle JSON properly
            # Instead of working with JSON strings, work with the Python objects directly
            template = [
                {
                    "role": message["role"],
                    "content": format_prompt(message.get("content"), **kwargs),
                }
                for message in self.template
            ]
        # Any declared variable not supplied is either an error or, with
        # allow_partial=True, left in place by returning a new PromptVersion.
        input_keys = set(kwargs.keys())
        if missing_keys := self.variables - input_keys:
            if not allow_partial:
                raise MlflowException.invalid_parameter_value(
                    f"Missing variables: {missing_keys}. To partially format the prompt, "
                    "set `allow_partial=True`."
                )
            else:
                return PromptVersion(
                    name=self.name,
                    version=int(self.version),
                    template=template,
                    response_format=self.response_format,
                    model_config=self.model_config,
                    commit_message=self.commit_message,
                    creation_timestamp=self.creation_timestamp,
                    tags=self.tags,
                    aliases=self.aliases,
                    last_updated_timestamp=self.last_updated_timestamp,
                    user_id=self.user_id,
                )
        return template
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/model_registry/prompt_version.py",
"license": "Apache License 2.0",
"lines": 490,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/label_schemas/label_schemas.py | from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, TypeVar
from mlflow.genai.utils.enum_utils import StrEnum
if TYPE_CHECKING:
from databricks.agents.review_app import label_schemas as _label_schemas
_InputCategorical = _label_schemas.InputCategorical
_InputCategoricalList = _label_schemas.InputCategoricalList
_InputNumeric = _label_schemas.InputNumeric
_InputText = _label_schemas.InputText
_InputTextList = _label_schemas.InputTextList
_LabelSchema = _label_schemas.LabelSchema
# DatabricksInputType stands in for any of the databricks review-app input
# classes; _InputType is the matching mlflow-side InputType subclass returned
# by _from_databricks_input.
DatabricksInputType = TypeVar("DatabricksInputType")
_InputType = TypeVar("_InputType", bound="InputType")
class InputType(ABC):
    """Base class for all input types.

    Concrete subclasses are serializable mirrors of the corresponding
    Databricks review-app input types and must implement the two conversion
    hooks below.
    """

    @abstractmethod
    def _to_databricks_input(self) -> DatabricksInputType:
        """Convert to the internal Databricks input type."""

    @classmethod
    @abstractmethod
    def _from_databricks_input(cls, input_obj: DatabricksInputType) -> _InputType:
        """Create from the internal Databricks input type."""
@dataclass
class InputCategorical(InputType):
    """A single-select dropdown for collecting assessments from stakeholders.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    options: list[str]
    """List of available options for the categorical selection."""

    def _to_databricks_input(self) -> "_InputCategorical":
        """Convert to the internal Databricks input type."""
        # Imported lazily so the module loads without the databricks extra.
        from databricks.agents.review_app import label_schemas as db_schemas

        return db_schemas.InputCategorical(options=self.options)

    @classmethod
    def _from_databricks_input(cls, input_obj: "_InputCategorical") -> "InputCategorical":
        """Create from the internal Databricks input type."""
        return cls(options=input_obj.options)
@dataclass
class InputCategoricalList(InputType):
    """A multi-select dropdown for collecting assessments from stakeholders.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    options: list[str]
    """List of available options for the multi-select categorical (dropdown)."""

    def _to_databricks_input(self) -> "_InputCategoricalList":
        """Convert to the internal Databricks input type."""
        # Imported lazily so the module loads without the databricks extra.
        from databricks.agents.review_app import label_schemas as db_schemas

        return db_schemas.InputCategoricalList(options=self.options)

    @classmethod
    def _from_databricks_input(cls, input_obj: "_InputCategoricalList") -> "InputCategoricalList":
        """Create from the internal Databricks input type."""
        return cls(options=input_obj.options)
@dataclass
class InputTextList(InputType):
    """Like `Text`, but allows multiple entries.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    max_length_each: int | None = None
    """Maximum character length for each individual text entry. None means no limit."""

    max_count: int | None = None
    """Maximum number of text entries allowed. None means no limit."""

    def _to_databricks_input(self) -> "_InputTextList":
        """Convert to the internal Databricks input type."""
        # Imported lazily so the module loads without the databricks extra.
        from databricks.agents.review_app import label_schemas as db_schemas

        return db_schemas.InputTextList(
            max_length_each=self.max_length_each, max_count=self.max_count
        )

    @classmethod
    def _from_databricks_input(cls, input_obj: "_InputTextList") -> "InputTextList":
        """Create from the internal Databricks input type."""
        return cls(max_length_each=input_obj.max_length_each, max_count=input_obj.max_count)
@dataclass
class InputText(InputType):
    """A free-form text box for collecting assessments from stakeholders.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    max_length: int | None = None
    """Character limit for the text box; ``None`` disables the limit."""

    def _to_databricks_input(self) -> "_InputText":
        """Translate into the internal Databricks representation."""
        from databricks.agents.review_app import label_schemas as databricks_schemas

        return databricks_schemas.InputText(max_length=self.max_length)

    @classmethod
    def _from_databricks_input(cls, input_obj: "_InputText") -> "InputText":
        """Build an instance from the internal Databricks representation."""
        return cls(max_length=input_obj.max_length)
@dataclass
class InputNumeric(InputType):
    """A numeric input for collecting assessments from stakeholders.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    min_value: float | None = None
    """Lower bound on the accepted value; ``None`` means unbounded below."""

    max_value: float | None = None
    """Upper bound on the accepted value; ``None`` means unbounded above."""

    def _to_databricks_input(self) -> "_InputNumeric":
        """Translate into the internal Databricks representation."""
        from databricks.agents.review_app import label_schemas as databricks_schemas

        return databricks_schemas.InputNumeric(min_value=self.min_value, max_value=self.max_value)

    @classmethod
    def _from_databricks_input(cls, input_obj: "_InputNumeric") -> "InputNumeric":
        """Build an instance from the internal Databricks representation."""
        return cls(min_value=input_obj.min_value, max_value=input_obj.max_value)
class LabelSchemaType(StrEnum):
"""Type of label schema."""
FEEDBACK = "feedback"
EXPECTATION = "expectation"
@dataclass(frozen=True)
class LabelSchema:
    """A label schema for collecting input from stakeholders.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    name: str
    """Unique name identifier for the label schema."""

    type: LabelSchemaType
    """Whether the schema collects 'feedback' or an 'expectation'."""

    title: str
    """Display title shown to stakeholders in the labeling review UI."""

    input: InputCategorical | InputCategoricalList | InputText | InputTextList | InputNumeric
    """Widget specification describing how reviewers enter their assessment."""

    instruction: str | None = None
    """Optional detailed guidance displayed alongside the widget."""

    enable_comment: bool = False
    """Whether reviewers may attach an additional free-form comment."""

    @classmethod
    def _convert_databricks_input(cls, input_obj):
        """Map a Databricks input object onto the matching MLflow input type."""
        from databricks.agents.review_app import label_schemas as databricks_schemas

        converters = {
            databricks_schemas.InputCategorical: InputCategorical,
            databricks_schemas.InputCategoricalList: InputCategoricalList,
            databricks_schemas.InputText: InputText,
            databricks_schemas.InputTextList: InputTextList,
            databricks_schemas.InputNumeric: InputNumeric,
        }
        # Dispatch on the exact runtime type of the Databricks object.
        if (target := converters.get(type(input_obj))) is None:
            raise ValueError(f"Unknown input type: {type(input_obj)}")
        return target._from_databricks_input(input_obj)

    @classmethod
    def _from_databricks_label_schema(cls, schema: "_LabelSchema") -> "LabelSchema":
        """Construct a :class:`LabelSchema` from the internal Databricks schema."""
        return cls(
            name=schema.name,
            type=schema.type,
            title=schema.title,
            input=cls._convert_databricks_input(schema.input),
            instruction=schema.instruction,
            enable_comment=schema.enable_comment,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/label_schemas/label_schemas.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/labeling/labeling.py | from typing import TYPE_CHECKING, Any, Iterable, Union
from mlflow.entities import Trace
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
if TYPE_CHECKING:
import pandas as pd
from databricks.agents.review_app import (
LabelSchema as _LabelSchema,
)
from databricks.agents.review_app import (
ReviewApp as _ReviewApp,
)
from databricks.agents.review_app.labeling import Agent as _Agent
class Agent:
    """Configuration of an agent that generates responses in the review app.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    def __init__(self, agent: "_Agent"):
        # Hold the underlying Databricks agent object; its fields are
        # surfaced read-only through the properties below.
        self._agent = agent

    @property
    def agent_name(self) -> str:
        """Name of the agent."""
        return self._agent.agent_name

    @property
    def model_serving_endpoint(self) -> str:
        """Model serving endpoint the agent uses to generate responses."""
        return self._agent.model_serving_endpoint
class LabelingSession:
    """A session for labeling items in the review app.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    def __init__(
        self,
        *,
        name: str,
        assigned_users: list[str],
        agent: str | None,
        label_schemas: list[str],
        labeling_session_id: str,
        mlflow_run_id: str,
        review_app_id: str,
        experiment_id: str,
        url: str,
        enable_multi_turn_chat: bool,
        custom_inputs: dict[str, Any] | None,
    ):
        # Plain value-object initialization; all state is surfaced through
        # the read-only properties below.
        self._name = name
        self._assigned_users = assigned_users
        self._agent = agent
        self._label_schemas = label_schemas
        self._labeling_session_id = labeling_session_id
        self._mlflow_run_id = mlflow_run_id
        self._review_app_id = review_app_id
        self._experiment_id = experiment_id
        self._url = url
        self._enable_multi_turn_chat = enable_multi_turn_chat
        self._custom_inputs = custom_inputs

    @property
    def name(self) -> str:
        """Name of the labeling session."""
        return self._name

    @property
    def assigned_users(self) -> list[str]:
        """Users assigned to label items in this session."""
        return self._assigned_users

    @property
    def agent(self) -> str | None:
        """Agent used to generate responses for items in this session, if any."""
        return self._agent

    @property
    def label_schemas(self) -> list[str]:
        """Label schemas used in this session."""
        return self._label_schemas

    @property
    def labeling_session_id(self) -> str:
        """Unique identifier of this labeling session."""
        return self._labeling_session_id

    @property
    def mlflow_run_id(self) -> str:
        """MLflow run ID backing this session."""
        return self._mlflow_run_id

    @property
    def review_app_id(self) -> str:
        """Review app ID this session belongs to."""
        return self._review_app_id

    @property
    def experiment_id(self) -> str:
        """Experiment ID this session belongs to."""
        return self._experiment_id

    @property
    def url(self) -> str:
        """URL of this session in the review app."""
        return self._url

    @property
    def enable_multi_turn_chat(self) -> bool:
        """Whether multi-turn chat is enabled for this session."""
        return self._enable_multi_turn_chat

    @property
    def custom_inputs(self) -> dict[str, Any] | None:
        """Custom inputs configured for this session, if any."""
        return self._custom_inputs

    def _get_store(self):
        """Return a labeling store instance.

        Imported lazily here to avoid circular imports.
        """
        from mlflow.genai.labeling.stores import _get_labeling_store

        return _get_labeling_store()

    def add_dataset(
        self, dataset_name: str, record_ids: list[str] | None = None
    ) -> "LabelingSession":
        """Add a dataset to the labeling session.

        .. note::
            This functionality is only available in Databricks. Please run
            `pip install mlflow[databricks]` to use it.

        Args:
            dataset_name: The name of the dataset.
            record_ids: Optional. The individual record ids to be added to the session. If not
                provided, all records in the dataset will be added.

        Returns:
            LabelingSession: The updated labeling session.
        """
        return self._get_store().add_dataset_to_session(self, dataset_name, record_ids)

    def add_traces(
        self,
        traces: Union[Iterable[Trace], Iterable[str], "pd.DataFrame"],
    ) -> "LabelingSession":
        """Add traces to the labeling session.

        .. note::
            This functionality is only available in Databricks. Please run
            `pip install mlflow[databricks]` to use it.

        Args:
            traces: Can be either:
                a) a pandas DataFrame with a 'trace' column. The 'trace' column should contain
                   either `mlflow.entities.Trace` objects or their json string representations.
                b) an iterable of `mlflow.entities.Trace` objects.
                c) an iterable of json string representations of `mlflow.entities.Trace` objects.

        Returns:
            LabelingSession: The updated labeling session.
        """
        import pandas as pd

        if isinstance(traces, pd.DataFrame):
            # Accept the output shape of mlflow.search_traces() directly.
            if "trace" not in traces.columns:
                raise MlflowException(
                    "traces must have a 'trace' column like the result of mlflow.search_traces()",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            traces = traces["trace"].to_list()

        # Normalize every entry to an mlflow.entities.Trace, rejecting
        # None and any unsupported type explicitly.
        normalized: list[Trace] = []
        for item in traces:
            if isinstance(item, Trace):
                normalized.append(item)
            elif isinstance(item, str):
                normalized.append(Trace.from_json(item))
            elif item is None:
                raise MlflowException(
                    "trace cannot be None. Must be mlflow.entities.Trace or its json string "
                    "representation.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            else:
                raise MlflowException(
                    f"Expected mlflow.entities.Trace or json string, got {type(item).__name__}",
                    error_code=INVALID_PARAMETER_VALUE,
                )
        return self._get_store().add_traces_to_session(self, normalized)

    def sync(self, to_dataset: str) -> None:
        """Sync the traces and expectations from the labeling session to a dataset.

        .. note::
            This functionality is only available in Databricks. Please run
            `pip install mlflow[databricks]` to use it.

        Args:
            to_dataset: The name of the dataset to sync traces and expectations to.
        """
        return self._get_store().sync_session_expectations(self, to_dataset)

    def set_assigned_users(self, assigned_users: list[str]) -> "LabelingSession":
        """Set the assigned users for the labeling session.

        .. note::
            This functionality is only available in Databricks. Please run
            `pip install mlflow[databricks]` to use it.

        Args:
            assigned_users: The list of users to assign to the session.

        Returns:
            LabelingSession: The updated labeling session.
        """
        return self._get_store().set_session_assigned_users(self, assigned_users)
class ReviewApp:
    """A review app is used to collect feedback from stakeholders for a given experiment.

    .. note::
        This functionality is only available in Databricks. Please run
        `pip install mlflow[databricks]` to use it.
    """

    def __init__(self, app: "_ReviewApp"):
        # Wrap the underlying Databricks review-app object.
        self._app = app

    @property
    def review_app_id(self) -> str:
        """ID of the review app."""
        return self._app.review_app_id

    @property
    def experiment_id(self) -> str:
        """ID of the experiment the review app belongs to."""
        return self._app.experiment_id

    @property
    def url(self) -> str:
        """URL where stakeholders provide feedback."""
        return self._app.url

    @property
    def agents(self) -> list[Agent]:
        """Agents available for generating responses, wrapped as :class:`Agent`."""
        return [Agent(raw_agent) for raw_agent in self._app.agents]

    @property
    def label_schemas(self) -> list["_LabelSchema"]:
        """Label schemas configured for the review app."""
        return self._app.label_schemas

    def add_agent(
        self, *, agent_name: str, model_serving_endpoint: str, overwrite: bool = False
    ) -> "ReviewApp":
        """Add an agent to the review app to be used to generate responses.

        .. note::
            This functionality is only available in Databricks. Please run
            `pip install mlflow[databricks]` to use it.

        Args:
            agent_name: The name of the agent.
            model_serving_endpoint: The model serving endpoint to be used by the agent.
            overwrite: Whether to overwrite an existing agent with the same name.

        Returns:
            ReviewApp: The updated review app.
        """
        updated = self._app.add_agent(
            agent_name=agent_name,
            model_serving_endpoint=model_serving_endpoint,
            overwrite=overwrite,
        )
        return ReviewApp(updated)

    def remove_agent(self, agent_name: str) -> "ReviewApp":
        """Remove an agent from the review app.

        .. note::
            This functionality is only available in Databricks. Please run
            `pip install mlflow[databricks]` to use it.

        Args:
            agent_name: The name of the agent to remove.

        Returns:
            ReviewApp: The updated review app.
        """
        return ReviewApp(self._app.remove_agent(agent_name))
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/labeling/labeling.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/label_schemas/test_label_schemas.py | import dataclasses
from contextlib import contextmanager
from unittest.mock import MagicMock, patch
import pytest
from mlflow.genai import label_schemas
from mlflow.genai.label_schemas import (
EXPECTED_FACTS,
EXPECTED_RESPONSE,
GUIDELINES,
InputCategorical,
InputCategoricalList,
InputNumeric,
InputText,
InputTextList,
LabelSchema,
LabelSchemaType,
create_label_schema,
delete_label_schema,
get_label_schema,
)
from mlflow.genai.label_schemas.label_schemas import (
InputType,
)
from mlflow.genai.scorers.validation import IS_DBX_AGENTS_INSTALLED
# This module exercises Databricks-only label-schema APIs; skip it entirely
# when the `databricks-agents` integration is not installed.
if not IS_DBX_AGENTS_INSTALLED:
    pytest.skip("Skipping Databricks only test.", allow_module_level=True)
@pytest.fixture
def mock_databricks_labeling_store():
    """
    Fixture providing a fully mocked Databricks labeling store environment.

    Returns:
        A context manager that provides mocked store, review app, and databricks modules.
    """

    @contextmanager
    def _mock_context():
        # Patch the store factory so tests never reach a real Databricks backend.
        with patch("mlflow.genai.labeling.stores._get_labeling_store") as mock_get_store:
            from mlflow.genai.labeling.stores import DatabricksLabelingStore

            mock_store = DatabricksLabelingStore()
            mock_get_store.return_value = mock_store
            # Mock the databricks modules and review app
            with patch("databricks.agents.review_app.get_review_app") as mock_get_app:
                mock_app = MagicMock()
                mock_get_app.return_value = mock_app
                yield {
                    "store": mock_store,
                    "get_store": mock_get_store,
                    "app": mock_app,
                    "get_app": mock_get_app,
                }

    return _mock_context
@pytest.fixture
def mock_review_app():
    """
    Fixture providing just the review app mock for simpler test cases.

    Returns:
        A context manager that provides a mocked review app.
    """

    @contextmanager
    def _mock_context():
        # Only the review-app accessor is patched; no store mocking here.
        with patch("databricks.agents.review_app.get_review_app") as mock_get_app:
            mock_app = MagicMock()
            mock_get_app.return_value = mock_app
            yield {
                "app": mock_app,
                "get_app": mock_get_app,
            }

    return _mock_context
# InputCategorical tests: construction plus round-tripping to/from the
# Databricks input type.
def test_input_categorical_init():
    options = ["yes", "no", "maybe"]
    input_cat = InputCategorical(options=options)
    assert input_cat.options == options


def test_input_categorical_to_databricks_input():
    options = ["good", "bad", "neutral"]
    input_cat = InputCategorical(options=options)
    mock_databricks_input = MagicMock()
    with patch(
        "databricks.agents.review_app.label_schemas.InputCategorical",
        return_value=mock_databricks_input,
    ) as mock_class:
        result = input_cat._to_databricks_input()
        mock_class.assert_called_once_with(options=options)
        assert result == mock_databricks_input


def test_input_categorical_from_databricks_input():
    options = ["excellent", "good", "poor"]
    mock_databricks_input = MagicMock()
    mock_databricks_input.options = options
    result = InputCategorical._from_databricks_input(mock_databricks_input)
    assert isinstance(result, InputCategorical)
    assert result.options == options


def test_input_categorical_empty_options():
    input_cat = InputCategorical(options=[])
    assert input_cat.options == []


def test_input_categorical_single_option():
    input_cat = InputCategorical(options=["only_option"])
    assert input_cat.options == ["only_option"]
# InputCategoricalList tests: same coverage as InputCategorical, for the
# multi-select variant.
def test_input_categorical_list_init():
    options = ["red", "green", "blue"]
    input_cat_list = InputCategoricalList(options=options)
    assert input_cat_list.options == options


def test_input_categorical_list_to_databricks_input():
    options = ["python", "java", "javascript"]
    input_cat_list = InputCategoricalList(options=options)
    mock_databricks_input = MagicMock()
    with patch(
        "databricks.agents.review_app.label_schemas.InputCategoricalList",
        return_value=mock_databricks_input,
    ) as mock_class:
        result = input_cat_list._to_databricks_input()
        mock_class.assert_called_once_with(options=options)
        assert result == mock_databricks_input


def test_input_categorical_list_from_databricks_input():
    options = ["feature1", "feature2", "feature3"]
    mock_databricks_input = MagicMock()
    mock_databricks_input.options = options
    result = InputCategoricalList._from_databricks_input(mock_databricks_input)
    assert isinstance(result, InputCategoricalList)
    assert result.options == options
# InputText tests: optional max_length handling and Databricks round-trip.
def test_input_text_init_with_max_length():
    input_text = InputText(max_length=100)
    assert input_text.max_length == 100


def test_input_text_init_without_max_length():
    input_text = InputText()
    assert input_text.max_length is None


def test_input_text_to_databricks_input():
    max_length = 500
    input_text = InputText(max_length=max_length)
    mock_databricks_input = MagicMock()
    with patch(
        "databricks.agents.review_app.label_schemas.InputText",
        return_value=mock_databricks_input,
    ) as mock_class:
        result = input_text._to_databricks_input()
        mock_class.assert_called_once_with(max_length=max_length)
        assert result == mock_databricks_input


def test_input_text_from_databricks_input():
    max_length = 250
    mock_databricks_input = MagicMock()
    mock_databricks_input.max_length = max_length
    result = InputText._from_databricks_input(mock_databricks_input)
    assert isinstance(result, InputText)
    assert result.max_length == max_length


def test_input_text_from_databricks_input_none_max_length():
    mock_databricks_input = MagicMock()
    mock_databricks_input.max_length = None
    result = InputText._from_databricks_input(mock_databricks_input)
    assert isinstance(result, InputText)
    assert result.max_length is None
# InputTextList tests: per-entry length / entry-count limits and round-trip.
def test_input_text_list_init_with_all_params():
    input_text_list = InputTextList(max_length_each=50, max_count=5)
    assert input_text_list.max_length_each == 50
    assert input_text_list.max_count == 5


def test_input_text_list_init_with_partial_params():
    input_text_list = InputTextList(max_count=3)
    assert input_text_list.max_length_each is None
    assert input_text_list.max_count == 3


def test_input_text_list_init_with_no_params():
    input_text_list = InputTextList()
    assert input_text_list.max_length_each is None
    assert input_text_list.max_count is None


def test_input_text_list_to_databricks_input():
    max_length_each = 100
    max_count = 10
    input_text_list = InputTextList(max_length_each=max_length_each, max_count=max_count)
    mock_databricks_input = MagicMock()
    with patch(
        "databricks.agents.review_app.label_schemas.InputTextList",
        return_value=mock_databricks_input,
    ) as mock_class:
        result = input_text_list._to_databricks_input()
        mock_class.assert_called_once_with(max_length_each=max_length_each, max_count=max_count)
        assert result == mock_databricks_input


def test_input_text_list_from_databricks_input():
    max_length_each = 75
    max_count = 8
    mock_databricks_input = MagicMock()
    mock_databricks_input.max_length_each = max_length_each
    mock_databricks_input.max_count = max_count
    result = InputTextList._from_databricks_input(mock_databricks_input)
    assert isinstance(result, InputTextList)
    assert result.max_length_each == max_length_each
    assert result.max_count == max_count
# InputNumeric tests: min/max bounds (including negatives and ints) and
# Databricks round-trip.
def test_input_numeric_init_with_all_params():
    input_numeric = InputNumeric(min_value=0.0, max_value=10.0)
    assert input_numeric.min_value == 0.0
    assert input_numeric.max_value == 10.0


def test_input_numeric_init_with_partial_params():
    input_numeric = InputNumeric(min_value=5.0)
    assert input_numeric.min_value == 5.0
    assert input_numeric.max_value is None


def test_input_numeric_init_with_no_params():
    input_numeric = InputNumeric()
    assert input_numeric.min_value is None
    assert input_numeric.max_value is None


def test_input_numeric_to_databricks_input():
    min_value = 1.5
    max_value = 9.5
    input_numeric = InputNumeric(min_value=min_value, max_value=max_value)
    mock_databricks_input = MagicMock()
    with patch(
        "databricks.agents.review_app.label_schemas.InputNumeric",
        return_value=mock_databricks_input,
    ) as mock_class:
        result = input_numeric._to_databricks_input()
        mock_class.assert_called_once_with(min_value=min_value, max_value=max_value)
        assert result == mock_databricks_input


def test_input_numeric_from_databricks_input():
    min_value = -5.0
    max_value = 15.0
    mock_databricks_input = MagicMock()
    mock_databricks_input.min_value = min_value
    mock_databricks_input.max_value = max_value
    result = InputNumeric._from_databricks_input(mock_databricks_input)
    assert isinstance(result, InputNumeric)
    assert result.min_value == min_value
    assert result.max_value == max_value


def test_input_numeric_negative_values():
    input_numeric = InputNumeric(min_value=-100.0, max_value=-10.0)
    assert input_numeric.min_value == -100.0
    assert input_numeric.max_value == -10.0


def test_input_numeric_integer_values():
    input_numeric = InputNumeric(min_value=1, max_value=100)
    assert input_numeric.min_value == 1
    assert input_numeric.max_value == 100
# InputType tests: the ABC cannot be instantiated, and every concrete input
# class both inherits from it and implements the conversion hooks.
def test_input_type_abstract_methods():
    with pytest.raises(TypeError, match="Can't instantiate abstract class InputType"):
        InputType()


@pytest.mark.parametrize(
    "input_class",
    [
        InputCategorical,
        InputCategoricalList,
        InputText,
        InputTextList,
        InputNumeric,
    ],
)
def test_input_type_all_inputs_inherit_from_input_type(input_class):
    assert issubclass(input_class, InputType)


@pytest.mark.parametrize(
    "input_obj",
    [
        InputCategorical(options=["test"]),
        InputCategoricalList(options=["test"]),
        InputText(),
        InputTextList(),
        InputNumeric(),
    ],
)
def test_input_type_all_inputs_implement_required_methods(input_obj):
    assert hasattr(input_obj, "_to_databricks_input")
    assert callable(getattr(input_obj, "_to_databricks_input"))
    assert hasattr(input_obj.__class__, "_from_databricks_input")
    assert callable(getattr(input_obj.__class__, "_from_databricks_input"))
# LabelSchemaType tests: enum values and (case-sensitive) membership.
@pytest.mark.parametrize(
    ("enum_member", "expected_value"),
    [
        (LabelSchemaType.FEEDBACK, "feedback"),
        (LabelSchemaType.EXPECTATION, "expectation"),
    ],
)
def test_label_schema_type_enum_values(enum_member, expected_value):
    assert enum_member == expected_value


@pytest.mark.parametrize(
    ("value", "should_be_member"),
    [
        ("feedback", True),
        ("expectation", True),
        ("invalid", False),
        ("", False),
        ("FEEDBACK", False),
    ],
)
def test_label_schema_type_enum_membership(value, should_be_member):
    if should_be_member:
        assert value in LabelSchemaType
    else:
        assert value not in LabelSchemaType
# LabelSchema tests: construction with each input widget type, default field
# values, and frozen-dataclass immutability.
def test_label_schema_init_with_categorical_input():
    input_cat = InputCategorical(options=["good", "bad"])
    schema = LabelSchema(
        name="quality",
        type=LabelSchemaType.FEEDBACK,
        title="Rate the quality",
        input=input_cat,
    )
    assert schema.name == "quality"
    assert schema.type == LabelSchemaType.FEEDBACK
    assert schema.title == "Rate the quality"
    assert schema.input == input_cat
    assert schema.instruction is None
    assert schema.enable_comment is False


def test_label_schema_init_with_all_params():
    input_text = InputText(max_length=200)
    schema = LabelSchema(
        name="feedback_schema",
        type=LabelSchemaType.EXPECTATION,
        title="Provide feedback",
        input=input_text,
        instruction="Please be detailed",
        enable_comment=True,
    )
    assert schema.name == "feedback_schema"
    assert schema.type == LabelSchemaType.EXPECTATION
    assert schema.title == "Provide feedback"
    assert schema.input == input_text
    assert schema.instruction == "Please be detailed"
    assert schema.enable_comment is True


def test_label_schema_init_with_numeric_input():
    input_numeric = InputNumeric(min_value=1.0, max_value=5.0)
    schema = LabelSchema(
        name="rating",
        type=LabelSchemaType.FEEDBACK,
        title="Rate from 1 to 5",
        input=input_numeric,
    )
    assert schema.input == input_numeric


def test_label_schema_init_with_text_list_input():
    input_text_list = InputTextList(max_length_each=50, max_count=3)
    schema = LabelSchema(
        name="suggestions",
        type=LabelSchemaType.EXPECTATION,
        title="Provide suggestions",
        input=input_text_list,
    )
    assert schema.input == input_text_list


def test_label_schema_init_with_categorical_list_input():
    input_cat_list = InputCategoricalList(options=["tag1", "tag2", "tag3"])
    schema = LabelSchema(
        name="tags",
        type=LabelSchemaType.FEEDBACK,
        title="Select relevant tags",
        input=input_cat_list,
    )
    assert schema.input == input_cat_list


def test_label_schema_frozen_dataclass():
    input_cat = InputCategorical(options=["test"])
    schema = LabelSchema(
        name="test",
        type=LabelSchemaType.FEEDBACK,
        title="Test",
        input=input_cat,
    )
    # frozen=True must reject attribute assignment after construction.
    with pytest.raises(dataclasses.FrozenInstanceError, match="cannot assign to field"):
        schema.name = "new_name"
# Conversion from the internal Databricks schema, including the type-dispatch
# helper and its unknown-type error path.
def test_label_schema_from_databricks_label_schema():
    # Create a mock databricks input object
    mock_databricks_input = MagicMock()
    # Mock Databricks schema
    mock_databricks_schema = MagicMock()
    mock_databricks_schema.name = "test_schema"
    mock_databricks_schema.type = LabelSchemaType.FEEDBACK
    mock_databricks_schema.title = "Test Schema"
    mock_databricks_schema.instruction = "Test instruction"
    mock_databricks_schema.enable_comment = True
    mock_databricks_schema.input = mock_databricks_input
    expected_input = InputText(max_length=100)
    with patch("databricks.agents.review_app.label_schemas") as mock_label_schemas:
        mock_label_schemas.InputText = type(mock_databricks_input)
        # Mock the _from_databricks_input method
        with patch.object(
            InputText, "_from_databricks_input", return_value=expected_input
        ) as mock_from_db:
            result = LabelSchema._from_databricks_label_schema(mock_databricks_schema)
            assert isinstance(result, LabelSchema)
            assert result.name == "test_schema"
            assert result.type == LabelSchemaType.FEEDBACK
            assert result.title == "Test Schema"
            assert result.instruction == "Test instruction"
            assert result.enable_comment is True
            assert result.input == expected_input
            mock_from_db.assert_called_once_with(mock_databricks_input)


def test_convert_databricks_input():
    # Create a simple mock that can be used as dict key
    class MockInputTextList:
        pass

    mock_input = MockInputTextList()
    expected = InputTextList(max_count=5)
    # Patch the import and the method
    with patch("databricks.agents.review_app.label_schemas") as mock_schemas:
        mock_schemas.InputTextList = MockInputTextList
        with patch.object(
            InputTextList, "_from_databricks_input", return_value=expected
        ) as mock_from_db:
            result = LabelSchema._convert_databricks_input(mock_input)
            assert result == expected
            mock_from_db.assert_called_once_with(mock_input)


def test_convert_databricks_input_unknown_type():
    with patch("databricks.agents.review_app.label_schemas"):
        unknown_input = MagicMock()
        unknown_input.__class__ = MagicMock()  # Unknown type
        with pytest.raises(ValueError, match="Unknown input type"):
            LabelSchema._convert_databricks_input(unknown_input)


def test_from_databricks_label_schema_uses_convert_input():
    mock_schema = MagicMock()
    mock_schema.name = "test"
    mock_schema.type = LabelSchemaType.FEEDBACK
    mock_schema.title = "Test"
    expected_input = InputTextList(max_count=3)
    with patch.object(
        LabelSchema, "_convert_databricks_input", return_value=expected_input
    ) as mock_convert:
        result = LabelSchema._from_databricks_label_schema(mock_schema)
        assert result.input == expected_input
        mock_convert.assert_called_once_with(mock_schema.input)
# Integration tests: full to/from-Databricks round-trips and LabelSchema
# composition with every input widget type.
def test_integration_complete_workflow_categorical():
    # Create InputCategorical
    options = ["excellent", "good", "fair", "poor"]
    input_cat = InputCategorical(options=options)
    # Convert to Databricks input and back
    with patch("databricks.agents.review_app.label_schemas.InputCategorical") as mock_class:
        mock_databricks_input = MagicMock()
        mock_databricks_input.options = options
        mock_class.return_value = mock_databricks_input
        # To Databricks
        databricks_input = input_cat._to_databricks_input()
        # From Databricks
        result = InputCategorical._from_databricks_input(databricks_input)
        assert isinstance(result, InputCategorical)
        assert result.options == options


def test_integration_complete_workflow_numeric():
    # Create InputNumeric
    min_val = 0.0
    max_val = 10.0
    input_numeric = InputNumeric(min_value=min_val, max_value=max_val)
    # Convert to Databricks input and back
    with patch("databricks.agents.review_app.label_schemas.InputNumeric") as mock_class:
        mock_databricks_input = MagicMock()
        mock_databricks_input.min_value = min_val
        mock_databricks_input.max_value = max_val
        mock_class.return_value = mock_databricks_input
        # To Databricks
        databricks_input = input_numeric._to_databricks_input()
        # From Databricks
        result = InputNumeric._from_databricks_input(databricks_input)
        assert isinstance(result, InputNumeric)
        assert result.min_value == min_val
        assert result.max_value == max_val


@pytest.mark.parametrize(
    ("input_type", "schema_name"),
    [
        (InputCategorical(options=["yes", "no"]), "categorical_schema"),
        (InputCategoricalList(options=["a", "b", "c"]), "categorical_list_schema"),
        (InputText(max_length=100), "text_schema"),
        (InputTextList(max_count=5), "text_list_schema"),
        (InputNumeric(min_value=1, max_value=10), "numeric_schema"),
    ],
)
def test_integration_label_schema_with_different_input_types(input_type, schema_name):
    schema = LabelSchema(
        name=schema_name,
        type=LabelSchemaType.FEEDBACK,
        title=f"Schema for {schema_name}",
        input=input_type,
    )
    assert schema.input == input_type
    assert isinstance(schema.input, InputType)
# Edge case tests: empty strings, extreme numeric ranges, zero limits, and
# special/unicode characters in categorical options.
def test_edge_cases_empty_string_values():
    schema = LabelSchema(
        name="",
        type=LabelSchemaType.FEEDBACK,
        title="",
        input=InputCategorical(options=[]),
        instruction="",
    )
    assert schema.name == ""
    assert schema.title == ""
    assert schema.instruction == ""


@pytest.mark.parametrize(
    ("min_value", "max_value", "description"),
    [
        (1e10, 1e20, "very_large_values"),
        (-1000.5, -0.1, "negative_range"),
        (0.0, 0.0, "zero_range"),
        (-float("inf"), float("inf"), "infinite_range"),
        (1.123456789, 2.987654321, "high_precision_decimals"),
    ],
)
def test_edge_cases_numeric_value_ranges(min_value, max_value, description):
    input_numeric = InputNumeric(min_value=min_value, max_value=max_value)
    assert input_numeric.min_value == min_value
    assert input_numeric.max_value == max_value


def test_edge_cases_zero_max_length_text():
    input_text = InputText(max_length=0)
    assert input_text.max_length == 0


def test_edge_cases_zero_max_count_text_list():
    input_text_list = InputTextList(max_count=0)
    assert input_text_list.max_count == 0


@pytest.mark.parametrize(
    "options",
    [
        [
            "option with spaces",
            "option-with-dashes",
            "option_with_underscores",
            "option@with$pecial!chars",
        ],
        ["🙂", "😢", "🤔", "αβγ", "中文"],
        ["", "empty_and_normal", ""],
        ["UPPERCASE", "lowercase", "MiXeD_CaSe"],
    ],
)
def test_edge_cases_special_and_unicode_characters_in_options(options):
    input_cat = InputCategorical(options=options)
    assert input_cat.options == options
# API integration tests: verify the public create/get APIs route through the
# MLflow<->Databricks conversion hooks against a mocked store.
def test_create_label_schema_calls_to_databricks_input(mock_databricks_labeling_store):
    input_cat = InputCategorical(options=["good", "bad"])
    with mock_databricks_labeling_store() as mocks:
        # Configure the mock app for this test
        mocks["app"].create_label_schema.return_value = MagicMock()
        # Mock the _to_databricks_input method
        with patch.object(input_cat, "_to_databricks_input") as mock_to_databricks:
            mock_databricks_input = MagicMock()
            mock_to_databricks.return_value = mock_databricks_input
            # Import here to avoid early import errors
            from mlflow.genai.label_schemas import create_label_schema

            create_label_schema(
                name="test_schema",
                type="feedback",
                title="Test Schema",
                input=input_cat,
            )
            # Verify _to_databricks_input was called
            mock_to_databricks.assert_called_once()
            # Verify the result was passed to create_label_schema
            mocks["app"].create_label_schema.assert_called_once_with(
                name="test_schema",
                type="feedback",
                title="Test Schema",
                input=mock_databricks_input,
                instruction=None,
                enable_comment=False,
                overwrite=False,
            )


def test_get_label_schema_calls_from_databricks_label_schema(mock_databricks_labeling_store):
    # Mock databricks label schema
    mock_databricks_schema = MagicMock()
    mock_databricks_schema.name = "test_schema"
    with mock_databricks_labeling_store() as mocks:
        # Configure the mock app for this test
        mocks["app"].label_schemas = [mock_databricks_schema]
        # Mock the _from_databricks_label_schema method
        with patch.object(LabelSchema, "_from_databricks_label_schema") as mock_from_databricks:
            mock_label_schema = MagicMock()
            mock_from_databricks.return_value = mock_label_schema
            # Import here to avoid early import errors
            from mlflow.genai.label_schemas import get_label_schema

            result = get_label_schema("test_schema")
            # Verify _from_databricks_label_schema was called
            mock_from_databricks.assert_called_once_with(mock_databricks_schema)
            # Verify the result was returned
            assert result == mock_label_schema
@pytest.mark.parametrize(
("input_type", "schema_name"),
[
(InputCategorical(options=["yes", "no"]), "categorical_api_test"),
(InputCategoricalList(options=["a", "b", "c"]), "categorical_list_api_test"),
(InputText(max_length=100), "text_api_test"),
(InputTextList(max_count=5), "text_list_api_test"),
(InputNumeric(min_value=1, max_value=10), "numeric_api_test"),
],
)
def test_api_integration_with_all_input_types(
input_type, schema_name, mock_databricks_labeling_store
):
with mock_databricks_labeling_store() as mocks:
# Configure the mock app for this test
mocks["app"].create_label_schema.return_value = MagicMock()
# Mock the _to_databricks_input method
with patch.object(input_type, "_to_databricks_input") as mock_to_databricks:
mock_databricks_input = MagicMock()
mock_to_databricks.return_value = mock_databricks_input
# Import here to avoid early import errors
from mlflow.genai.label_schemas import create_label_schema
create_label_schema(
name=schema_name,
type="feedback",
title=f"Test Schema for {schema_name}",
input=input_type,
)
# Verify _to_databricks_input was called for each type
mock_to_databricks.assert_called_once()
# Import tests
def test_databricks_label_schemas_is_importable():
    """The label_schemas package re-exports the same objects as direct imports."""
    reexports = [
        # Constants
        (label_schemas.EXPECTED_FACTS, EXPECTED_FACTS),
        (label_schemas.GUIDELINES, GUIDELINES),
        (label_schemas.EXPECTED_RESPONSE, EXPECTED_RESPONSE),
        # Classes
        (label_schemas.LabelSchemaType, LabelSchemaType),
        (label_schemas.LabelSchema, LabelSchema),
        (label_schemas.InputCategorical, InputCategorical),
        (label_schemas.InputCategoricalList, InputCategoricalList),
        (label_schemas.InputNumeric, InputNumeric),
        (label_schemas.InputText, InputText),
        (label_schemas.InputTextList, InputTextList),
        # Functions
        (label_schemas.create_label_schema, create_label_schema),
        (label_schemas.get_label_schema, get_label_schema),
        (label_schemas.delete_label_schema, delete_label_schema),
    ]
    for module_attr, direct_import in reexports:
        assert module_attr == direct_import
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/label_schemas/test_label_schemas.py",
"license": "Apache License 2.0",
"lines": 610,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/labeling/test_labeling.py | from mlflow.genai import labeling
from mlflow.genai.labeling import (
Agent,
LabelingSession,
ReviewApp,
create_labeling_session,
delete_labeling_session,
get_labeling_session,
get_labeling_sessions,
get_review_app,
)
from tests.genai.conftest import databricks_only
@databricks_only
def test_databricks_labeling_is_importable():
    """The labeling package re-exports the same objects as direct imports."""
    reexports = [
        (labeling.Agent, Agent),
        (labeling.LabelingSession, LabelingSession),
        (labeling.ReviewApp, ReviewApp),
        (labeling.get_review_app, get_review_app),
        (labeling.create_labeling_session, create_labeling_session),
        (labeling.get_labeling_sessions, get_labeling_sessions),
        (labeling.get_labeling_session, get_labeling_session),
        (labeling.delete_labeling_session, delete_labeling_session),
    ]
    for module_attr, direct_import in reexports:
        assert module_attr == direct_import
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/labeling/test_labeling.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/scorer_utils.py | # This file contains utility functions for scorer functionality.
import ast
import inspect
import json
import logging
import re
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable
from mlflow.exceptions import INVALID_PARAMETER_VALUE, MlflowException
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
_logger = logging.getLogger(__name__)

# Model-URI scheme for MLflow AI Gateway endpoints (e.g. "gateway:/my-endpoint").
GATEWAY_PROVIDER = "gateway"
# Keys in a serialized scorer dict holding the pydantic payload of an
# instructions judge / builtin scorer respectively.
INSTRUCTIONS_JUDGE_PYDANTIC_DATA = "instructions_judge_pydantic_data"
BUILTIN_SCORER_PYDANTIC_DATA = "builtin_scorer_pydantic_data"
# FunctionBodyExtractor class is forked from https://github.com/unitycatalog/unitycatalog/blob/20dd3820be332ac04deec4e063099fb863eb3392/ai/core/src/unitycatalog/ai/core/utils/callable_utils.py
class FunctionBodyExtractor(ast.NodeVisitor):
    """
    AST visitor that locates a function definition by name and captures its
    dedented body source (minus any docstring) plus its indentation unit.
    """

    def __init__(self, func_name: str, source_code: str):
        self.func_name = func_name
        self.source_code = source_code
        self.function_body = ""
        # Default indent unit; overwritten once a body is extracted.
        self.indent_unit = 4
        self.found = False

    def visit_FunctionDef(self, node: ast.FunctionDef):
        # Only the first match is extracted. Nested defs are never reached
        # because generic_visit() is intentionally not called.
        if self.found or node.name != self.func_name:
            return
        self.found = True
        self.extract_body(node)

    def extract_body(self, node: ast.FunctionDef):
        statements = node.body
        # Skip a leading docstring expression, if present.
        if (
            statements
            and isinstance(statements[0], ast.Expr)
            and isinstance(statements[0].value, ast.Constant)
            and isinstance(statements[0].value.value, str)
        ):
            statements = statements[1:]
        if not statements:
            return
        all_lines = self.source_code.splitlines(keepends=True)
        snippet = "".join(all_lines[statements[0].lineno - 1 : statements[-1].end_lineno])
        self.function_body = dedent(snippet).rstrip("\n")
        offsets = [stmt.col_offset for stmt in statements if stmt.col_offset is not None]
        if offsets:
            self.indent_unit = min(offsets)
# extract_function_body function is forked from https://github.com/unitycatalog/unitycatalog/blob/20dd3820be332ac04deec4e063099fb863eb3392/ai/core/src/unitycatalog/ai/core/utils/callable_utils.py
def extract_function_body(func: Callable[..., Any]) -> tuple[str, int]:
    """
    Extracts the body of a function as a string without the signature or docstring,
    dedents the code, and returns the indentation unit used in the function (e.g., 2 or 4 spaces).
    """
    raw_lines, _ = inspect.getsourcelines(func)
    normalized_source = dedent("".join(raw_lines))
    visitor = FunctionBodyExtractor(func.__name__, normalized_source)
    visitor.visit(ast.parse(normalized_source))
    return visitor.function_body, visitor.indent_unit
def recreate_function(source: str, signature: str, func_name: str) -> Callable[..., Any]:
    """
    Recreate a function from its source code, signature, and name.

    Args:
        source: The function body source code.
        signature: The function signature string (e.g., "(inputs, outputs)").
        func_name: The name of the function.

    Returns:
        The recreated function.

    Raises:
        MlflowException: If ``signature`` does not start with a parenthesized
            parameter list.
    """
    import mlflow

    # Parse the signature to build the function definition.
    # NOTE(review): the non-greedy match stops at the first ')', so a default
    # value containing parentheses (e.g. "(x=(1, 2))") would be truncated —
    # confirm serialized signatures never contain nested parentheses.
    sig_match = re.match(r"\((.*?)\)", signature)
    if not sig_match:
        raise MlflowException(
            f"Invalid signature format: '{signature}'", error_code=INVALID_PARAMETER_VALUE
        )
    params_str = sig_match.group(1).strip()
    # Build the function definition with future annotations to defer type hint evaluation
    func_def = "from __future__ import annotations\n"
    func_def += f"def {func_name}({params_str}):\n"
    # Indent the source code
    indented_source = "\n".join(f"    {line}" for line in source.split("\n"))
    func_def += indented_source
    # Create a namespace with common MLflow imports that scorer functions might use
    # Include mlflow module so type hints like "mlflow.entities.Trace" can be resolved
    import_namespace = {
        "mlflow": mlflow,
    }
    # Import commonly used MLflow classes
    try:
        from mlflow.entities import (
            Assessment,
            AssessmentError,
            AssessmentSource,
            AssessmentSourceType,
            Feedback,
            Trace,
        )
        from mlflow.genai.judges import CategoricalRating

        import_namespace.update(
            {
                "Feedback": Feedback,
                "Assessment": Assessment,
                "AssessmentSource": AssessmentSource,
                "AssessmentError": AssessmentError,
                "AssessmentSourceType": AssessmentSourceType,
                "Trace": Trace,
                "CategoricalRating": CategoricalRating,
            }
        )
    except ImportError:
        pass  # Some imports might not be available in all contexts
    # Local namespace will capture the created function
    local_namespace = {}
    # Execute the function definition with MLflow imports available.
    # exec is acceptable here only because `source` originates from a
    # previously registered scorer, not arbitrary user input.
    exec(func_def, import_namespace, local_namespace)  # noqa: S102
    # Return the recreated function
    return local_namespace[func_name]
def is_gateway_model(model: str | None) -> bool:
if model is None:
return False
from mlflow.metrics.genai.model_utils import _parse_model_uri
try:
provider, _ = _parse_model_uri(model)
return provider == GATEWAY_PROVIDER
except MlflowException:
return False
def extract_endpoint_ref(model: str) -> str:
    """Return the endpoint-reference portion of a parsed model URI."""
    from mlflow.metrics.genai.model_utils import _parse_model_uri

    parsed = _parse_model_uri(model)
    return parsed[1]
def build_gateway_model(endpoint_ref: str) -> str:
    """Build a gateway model URI ("gateway:/<endpoint>") from an endpoint name."""
    return GATEWAY_PROVIDER + ":/" + endpoint_ref
def extract_model_from_serialized_scorer(serialized_data: dict[str, Any]) -> str | None:
    """Return the judge model URI stored in a serialized scorer dict, if any.

    Checks instructions-judge data first, then builtin-scorer data, then
    recurses into a memory-augmented judge's wrapped base judge. Returns
    None when no model is recorded.
    """
    for payload_key in (INSTRUCTIONS_JUDGE_PYDANTIC_DATA, BUILTIN_SCORER_PYDANTIC_DATA):
        if payload := serialized_data.get(payload_key):
            return payload.get("model")
    if mem_payload := serialized_data.get("memory_augmented_judge_data"):
        return extract_model_from_serialized_scorer(mem_payload.get("base_judge", {}))
    return None
def update_model_in_serialized_scorer(
    serialized_data: dict[str, Any], new_model: str | None
) -> dict[str, Any]:
    """Return a shallow copy of ``serialized_data`` with its model replaced.

    Updates whichever payload is present (instructions judge, builtin scorer,
    or a memory-augmented judge's base judge, recursively). The input dict is
    not mutated.
    """
    updated = dict(serialized_data)
    if ij_payload := updated.get(INSTRUCTIONS_JUDGE_PYDANTIC_DATA):
        updated[INSTRUCTIONS_JUDGE_PYDANTIC_DATA] = {**ij_payload, "model": new_model}
    elif bs_payload := updated.get(BUILTIN_SCORER_PYDANTIC_DATA):
        updated[BUILTIN_SCORER_PYDANTIC_DATA] = {**bs_payload, "model": new_model}
    elif mem_payload := updated.get("memory_augmented_judge_data"):
        updated["memory_augmented_judge_data"] = {
            **mem_payload,
            "base_judge": update_model_in_serialized_scorer(
                mem_payload.get("base_judge", {}), new_model
            ),
        }
    return updated
def validate_scorer_name(name: str | None) -> None:
"""
Validate the scorer name.
Args:
name: The scorer name to validate.
Raises:
MlflowException: If the name is invalid.
"""
if name is None:
raise MlflowException.invalid_parameter_value("Scorer name cannot be None.")
if not isinstance(name, str):
raise MlflowException.invalid_parameter_value(
f"Scorer name must be a string, got {type(name).__name__}."
)
if not name.strip():
raise MlflowException.invalid_parameter_value(
"Scorer name cannot be empty or contain only whitespace."
)
def validate_scorer_model(model: str | None) -> None:
"""
Validate the scorer model string if present.
Args:
model: The model string to validate.
Raises:
MlflowException: If the model is invalid.
"""
if model is None:
return
if not isinstance(model, str):
raise MlflowException.invalid_parameter_value(
f"Scorer model must be a string, got {type(model).__name__}."
)
if not model.strip():
raise MlflowException.invalid_parameter_value(
"Scorer model cannot be empty or contain only whitespace."
)
def parse_tool_call_expectations(
    expectations: dict[str, Any] | None,
) -> list["FunctionCall"] | None:
    """Normalize ``expected_tool_calls`` entries into FunctionCall objects.

    Returns None when no expectations (or an empty list) are provided.
    Raises MlflowException for malformed entries.
    """
    from mlflow.genai.utils.type import FunctionCall

    if not expectations or "expected_tool_calls" not in expectations:
        return None
    raw_calls = expectations["expected_tool_calls"]
    if not raw_calls:
        return None
    normalized = []
    for raw_call in raw_calls:
        if isinstance(raw_call, FunctionCall):
            normalized.append(raw_call)
            continue
        if isinstance(raw_call, dict):
            arguments = raw_call.get("arguments")
            # Arguments, when present, must already be a mapping.
            if arguments is not None and not isinstance(arguments, dict):
                raise MlflowException(
                    f"Invalid arguments type: {type(arguments)}. Arguments must be a dict."
                )
            normalized.append(FunctionCall(name=raw_call.get("name"), arguments=arguments))
            continue
        raise MlflowException(
            f"Invalid expected tool call format: {type(raw_call)}. "
            "Expected dict with 'name' and optional 'arguments', or FunctionCall object."
        )
    return normalized
def normalize_tool_call_arguments(args: dict[str, Any] | None) -> dict[str, Any]:
if args is None:
return {}
if isinstance(args, dict):
return args
raise MlflowException(f"Invalid arguments type: {type(args)}. Arguments must be a dict.")
def get_tool_call_signature(call: "FunctionCall", include_arguments: bool) -> str | None:
if include_arguments:
args = json.dumps(normalize_tool_call_arguments(call.arguments), sort_keys=True)
return f"{call.name}({args})"
return call.name
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/scorer_utils.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/test_scorer_utils.py | import json
import pytest
from mlflow.entities import Assessment, Feedback, Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers.scorer_utils import (
BUILTIN_SCORER_PYDANTIC_DATA,
INSTRUCTIONS_JUDGE_PYDANTIC_DATA,
build_gateway_model,
extract_endpoint_ref,
extract_model_from_serialized_scorer,
get_tool_call_signature,
is_gateway_model,
normalize_tool_call_arguments,
parse_tool_call_expectations,
recreate_function,
update_model_in_serialized_scorer,
)
from mlflow.genai.utils.type import FunctionCall
# ============================================================================
# HAPPY PATH TESTS
# ============================================================================
def test_simple_function_recreation():
    # A plain expression body round-trips through recreate_function.
    source = "return x + y"
    signature = "(x, y)"
    func_name = "add_func"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated.__name__ == func_name
    assert recreated(3, 4) == 7
    assert recreated(10, -5) == 5


def test_function_with_control_flow():
    # Multi-line bodies with if/else keep their branch behavior.
    source = """if x > 0:
    return "positive"
else:
    return "non-positive" """
    signature = "(x)"
    func_name = "classify_number"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated(5) == "positive"
    assert recreated(-3) == "non-positive"
    assert recreated(0) == "non-positive"


def test_function_with_loop():
    # Loop constructs in the body are preserved.
    source = """total = 0
for i in range(n):
    total += i
return total"""
    signature = "(n)"
    func_name = "sum_range"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated(5) == 10  # 0+1+2+3+4
    assert recreated(3) == 3  # 0+1+2
    assert recreated(0) == 0


def test_function_with_multiple_parameters():
    # Default parameter values from the signature string are honored.
    source = """if threshold is None:
    threshold = 5
return len(text) > threshold"""
    signature = "(text, threshold=None)"
    func_name = "length_check"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated("hello") is False  # len=5, not > 5
    assert recreated("hello world") is True  # len=11, > 5
    assert recreated("hi", 1) is True  # len=2, > 1


def test_function_creating_feedback_object():
    # Feedback is injected into the exec namespace by recreate_function.
    source = """import re
words = re.findall(r'\\b\\w+\\b', text)
return Feedback(value=len(words), rationale=f"Found {len(words)} words")"""
    signature = "(text)"
    func_name = "word_counter"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    result = recreated("hello world test")
    assert isinstance(result, Feedback)
    assert "Found 3 words" in result.rationale
    assert result.value == 3


def test_function_creating_assessment_object():
    # Note: Assessment constructor doesn't take 'value' directly - it's an abstract base
    # Use Feedback instead, which is a concrete subclass of Assessment
    source = """score = 1 if "good" in response else 0
return Feedback(name=name, value=score, rationale="Assessment result")"""
    signature = "(response, name='test_assessment')"
    func_name = "assess_response"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    result = recreated("This is good")
    assert isinstance(result, Feedback)
    assert isinstance(result, Assessment)  # Feedback is a subclass of Assessment
    assert result.value == 1
    assert result.name == "test_assessment"


def test_complex_logic_function():
    # Nested conditionals and dict building survive recreation.
    source = """result = {}
for item in items:
    if isinstance(item, str):
        result[item] = len(item)
    elif isinstance(item, (int, float)):
        result[str(item)] = item * 2
return result"""
    signature = "(items)"
    func_name = "process_items"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    result = recreated(["hello", 5, "world", 3.5])
    expected = {"hello": 5, "5": 10, "world": 5, "3.5": 7.0}
    assert result == expected
# ============================================================================
# SIGNATURE PARSING TESTS
# ============================================================================
def test_empty_signature():
    # "()" produces a zero-argument function.
    source = "return 42"
    signature = "()"
    func_name = "get_answer"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated() == 42


def test_single_parameter_signature():
    source = "return x * 2"
    signature = "(x)"
    func_name = "double"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated(5) == 10


def test_signature_with_whitespace():
    # Extra whitespace inside the parentheses is tolerated.
    source = "return a + b"
    signature = "( a , b )"
    func_name = "add_with_spaces"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated(3, 7) == 10


def test_signature_with_defaults():
    source = "return base ** exponent"
    signature = "(base, exponent=2)"
    func_name = "power"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated(3) == 9  # 3^2
    assert recreated(2, 3) == 8  # 2^3


def test_complex_signature():
    # Multiple defaults, including None defaults, are supported.
    source = """if data is None:
    data = []
return f"{prefix}: {len(data)} items" + (suffix or "")"""
    signature = "(data=None, prefix='Result', suffix=None)"
    func_name = "format_result"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated() == "Result: 0 items"
    assert recreated([1, 2, 3]) == "Result: 3 items"
    assert recreated([1, 2], "Count", "!") == "Count: 2 items!"
def test_empty_signature_string():
    """An empty signature string must be rejected with a clear error."""
    # MlflowException is already imported at module scope; the previous local
    # re-import was redundant and shadowed it.
    source = "return 1"
    signature = ""
    func_name = "empty_sig"
    with pytest.raises(MlflowException, match="Invalid signature format"):
        recreate_function(source, signature, func_name)
# ============================================================================
# IMPORT NAMESPACE TESTS
# ============================================================================
def test_function_with_unavailable_import():
    # Import errors occur at execution time, not definition time
    source = """from some_nonexistent_module import NonExistentClass
return NonExistentClass()"""
    signature = "()"
    func_name = "use_bad_import"
    recreated = recreate_function(source, signature, func_name)
    # Function should be created successfully
    assert recreated is not None
    # But should fail when called due to import error
    with pytest.raises(ModuleNotFoundError, match="some_nonexistent_module"):
        recreated()


def test_function_with_undefined_variable():
    source = "return undefined_variable * 2"
    signature = "()"
    func_name = "use_undefined"
    recreated = recreate_function(source, signature, func_name)
    # Function is created but will fail when called
    assert recreated is not None
    # Should raise NameError when called
    with pytest.raises(NameError, match="undefined_variable"):
        recreated()


def test_function_with_syntax_error():
    # Invalid body source fails at definition (exec) time.
    source = "if x > 0\n    return True"  # Missing colon
    signature = "(x)"
    func_name = "syntax_error_func"
    with pytest.raises(SyntaxError, match="expected ':'"):
        recreate_function(source, signature, func_name)


def test_function_using_builtin_modules():
    # Standard-library imports inside the body work normally.
    source = """import json
import re
data = {"count": len(re.findall(r'\\w+', text))}
return json.dumps(data)"""
    signature = "(text)"
    func_name = "json_word_count"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    result = recreated("hello world test")
    parsed = json.loads(result)
    assert parsed["count"] == 3


def test_mlflow_imports_available():
    # recreate_function pre-populates its exec namespace with common MLflow
    # entities (Feedback, Trace, ...) so bodies can use them unqualified.
    source = """# Test all available MLflow imports
feedback = Feedback(value=True, rationale="test")
# AssessmentSource should be available too
from mlflow.entities.assessment_source import AssessmentSourceType
source_obj = AssessmentSourceType.CODE  # Use the default source type
# Test that Trace is available
from mlflow.entities import TraceInfo, TraceState, TraceData
from mlflow.entities.trace_location import (
    TraceLocation,
    TraceLocationType,
    MlflowExperimentLocation,
)
from mlflow.entities.trace import Trace
mlflow_exp_location = MlflowExperimentLocation(experiment_id="0")
trace_location = TraceLocation(
    type=TraceLocationType.MLFLOW_EXPERIMENT,
    mlflow_experiment=mlflow_exp_location
)
trace_info = TraceInfo(
    trace_id="test_trace_id",
    trace_location=trace_location,
    request_time=1000,
    state=TraceState.OK
)
trace = Trace(info=trace_info, data=TraceData())
return {"feedback": feedback, "source": source_obj, "trace": trace}"""
    signature = "()"
    func_name = "test_mlflow_imports"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    result = recreated()
    assert isinstance(result["feedback"], Feedback)
    # AssessmentSourceType should be available (it's an enum/class)
    assert result["source"] is not None
    assert result["source"] == "CODE"
    # Check that Trace is available and can be instantiated
    assert isinstance(result["trace"], Trace)
def test_function_name_in_namespace():
    # The recreated function carries the requested __name__.
    source = "return 'success'"
    signature = "()"
    func_name = "test_name_func"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated.__name__ == func_name


def test_indentation_handling():
    # Source without indentation - should be indented by the function
    source = """x = 1
y = 2
return x + y"""
    signature = "()"
    func_name = "indentation_test"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated() == 3


def test_empty_source_code():
    source = ""
    signature = "()"
    func_name = "empty_func"
    # Empty source code should cause syntax error during function definition
    with pytest.raises(SyntaxError, match="expected an indented block"):
        recreate_function(source, signature, func_name)


def test_function_with_import_error_at_runtime():
    # Import that doesn't exist is referenced but not imported in the function
    source = """try:
    return NonExistentClass()
except NameError:
    return "import_failed" """
    signature = "()"
    func_name = "runtime_import_error"
    recreated = recreate_function(source, signature, func_name)
    # Function should be created successfully
    assert recreated is not None
    # But calling it should handle the missing import gracefully
    result = recreated()
    assert result == "import_failed"


def test_function_with_mlflow_trace_type_hint():
    """
    Test that a function with mlflow.entities.Trace type hints can be recreated.

    This reproduces the issue where scorers with type hints like mlflow.entities.Trace
    would fail to register because the mlflow module wasn't available in the namespace
    during function recreation.
    """
    source = """return Feedback(
    value=trace.info.trace_id is not None,
    rationale=f"Trace ID: {trace.info.trace_id}"
)"""
    signature = "(trace: mlflow.entities.Trace) -> mlflow.entities.Feedback"
    func_name = "scorer_with_trace_type_hint"
    recreated = recreate_function(source, signature, func_name)
    assert recreated is not None
    assert recreated.__name__ == func_name
    # Test that it can be called with a Trace object
    from mlflow.entities import TraceData, TraceInfo, TraceState
    from mlflow.entities.trace_location import (
        MlflowExperimentLocation,
        TraceLocation,
        TraceLocationType,
    )

    mlflow_exp_location = MlflowExperimentLocation(experiment_id="0")
    trace_location = TraceLocation(
        type=TraceLocationType.MLFLOW_EXPERIMENT, mlflow_experiment=mlflow_exp_location
    )
    trace_info = TraceInfo(
        trace_id="test_trace_id",
        trace_location=trace_location,
        request_time=1000,
        state=TraceState.OK,
    )
    trace = Trace(info=trace_info, data=TraceData())
    result = recreated(trace)
    assert isinstance(result, Feedback)
    assert result.value is True
    assert "test_trace_id" in result.rationale
# ============================================================================
# GATEWAY MODEL UTILITY TESTS
# ============================================================================
def test_is_gateway_model():
    assert is_gateway_model("gateway:/my-endpoint") is True
    assert is_gateway_model("openai:/gpt-4") is False
    assert is_gateway_model(None) is False


def test_extract_and_build_gateway_model():
    # extract/build are inverse operations on the endpoint reference.
    assert extract_endpoint_ref("gateway:/my-endpoint") == "my-endpoint"
    assert build_gateway_model("my-endpoint") == "gateway:/my-endpoint"
    assert extract_endpoint_ref(build_gateway_model("test")) == "test"


def test_extract_model_from_serialized_scorer():
    # Instructions-judge payloads take precedence for the model lookup.
    instructions_judge_scorer = {
        "mlflow_version": "3.3.2",
        "serialization_version": 1,
        "name": "quality_scorer",
        "description": "Evaluates response quality",
        "aggregations": [],
        "is_session_level_scorer": False,
        "builtin_scorer_class": None,
        "builtin_scorer_pydantic_data": None,
        "call_source": None,
        "call_signature": None,
        "original_func_name": None,
        INSTRUCTIONS_JUDGE_PYDANTIC_DATA: {
            "instructions": "Evaluate the response quality",
            "model": "gateway:/my-endpoint",
        },
    }
    assert extract_model_from_serialized_scorer(instructions_judge_scorer) == "gateway:/my-endpoint"
    # Builtin-scorer payloads are consulted next.
    builtin_scorer = {
        "mlflow_version": "3.3.2",
        "serialization_version": 1,
        "name": "guidelines_scorer",
        "description": None,
        "aggregations": [],
        "is_session_level_scorer": False,
        "builtin_scorer_class": "Guidelines",
        BUILTIN_SCORER_PYDANTIC_DATA: {
            "name": "guidelines_scorer",
            "required_columns": ["outputs", "inputs"],
            "guidelines": ["Be helpful", "Be accurate"],
            "model": "openai:/gpt-4",
        },
        "call_source": None,
        "call_signature": None,
        "original_func_name": None,
        "instructions_judge_pydantic_data": None,
    }
    assert extract_model_from_serialized_scorer(builtin_scorer) == "openai:/gpt-4"
    assert extract_model_from_serialized_scorer({}) is None


def test_update_model_in_serialized_scorer():
    data = {
        "mlflow_version": "3.3.2",
        "serialization_version": 1,
        "name": "quality_scorer",
        INSTRUCTIONS_JUDGE_PYDANTIC_DATA: {
            "instructions": "Evaluate quality",
            "model": "gateway:/old-endpoint",
        },
    }
    result = update_model_in_serialized_scorer(data, "gateway:/new-endpoint")
    assert result[INSTRUCTIONS_JUDGE_PYDANTIC_DATA]["model"] == "gateway:/new-endpoint"
    assert result[INSTRUCTIONS_JUDGE_PYDANTIC_DATA]["instructions"] == "Evaluate quality"
    # The original dict must not be mutated (copy-on-update semantics).
    assert data[INSTRUCTIONS_JUDGE_PYDANTIC_DATA]["model"] == "gateway:/old-endpoint"
# ============================================================================
# TOOL CALL HELPER FUNCTION TESTS
# ============================================================================
@pytest.mark.parametrize(
    "expectations",
    [None, {}, {"expected_tool_calls": []}],
)
def test_parse_tool_call_expectations_returns_none_for_empty(expectations):
    assert parse_tool_call_expectations(expectations) is None


def test_parse_tool_call_expectations_parses_dict():
    # Dict entries are normalized into FunctionCall objects; a missing
    # "arguments" key maps to arguments=None.
    expectations = {
        "expected_tool_calls": [
            {"name": "search", "arguments": {"query": "test"}},
            {"name": "summarize"},
        ]
    }
    result = parse_tool_call_expectations(expectations)
    assert len(result) == 2
    assert result[0].name == "search"
    assert result[0].arguments == {"query": "test"}
    assert result[1].name == "summarize"
    assert result[1].arguments is None


def test_parse_tool_call_expectations_parses_function_call_objects():
    # Already-normalized FunctionCall objects pass through unchanged.
    expectations = {
        "expected_tool_calls": [
            FunctionCall(name="search", arguments={"query": "test"}),
            FunctionCall(name="summarize"),
        ]
    }
    result = parse_tool_call_expectations(expectations)
    assert len(result) == 2
    assert result[0].name == "search"
    assert result[1].name == "summarize"


@pytest.mark.parametrize(
    ("expectations", "expected_error"),
    [
        (
            {"expected_tool_calls": [{"name": "search", "arguments": "invalid"}]},
            "Arguments must be a dict",
        ),
        ({"expected_tool_calls": ["invalid_string"]}, "Invalid expected tool call format"),
    ],
)
def test_parse_tool_call_expectations_raises_for_invalid_input(expectations, expected_error):
    with pytest.raises(MlflowException, match=expected_error):
        parse_tool_call_expectations(expectations)


@pytest.mark.parametrize(
    ("args", "expected"),
    [
        (None, {}),
        ({"query": "test", "limit": 10}, {"query": "test", "limit": 10}),
    ],
)
def test_normalize_tool_call_arguments(args, expected):
    assert normalize_tool_call_arguments(args) == expected


def test_normalize_tool_call_arguments_raises_for_invalid_type():
    with pytest.raises(MlflowException, match="Arguments must be a dict"):
        normalize_tool_call_arguments("invalid")


@pytest.mark.parametrize(
    ("call", "include_arguments", "expected"),
    [
        (FunctionCall(name="search", arguments={"query": "test"}), False, "search"),
        (
            FunctionCall(name="search", arguments={"query": "test"}),
            True,
            'search({"query": "test"})',
        ),
        (FunctionCall(name="search"), True, "search({})"),
        (FunctionCall(name=None), False, None),
    ],
)
def test_get_tool_call_signature(call, include_arguments, expected):
    assert get_tool_call_signature(call, include_arguments) == expected


def test_get_tool_call_signature_sorts_arguments():
    # Key order in the arguments dict must not affect the signature.
    call1 = FunctionCall(name="search", arguments={"b": 2, "a": 1})
    call2 = FunctionCall(name="search", arguments={"a": 1, "b": 2})
    sig1 = get_tool_call_signature(call1, include_arguments=True)
    sig2 = get_tool_call_signature(call2, include_arguments=True)
    assert sig1 == sig2
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/test_scorer_utils.py",
"license": "Apache License 2.0",
"lines": 457,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/test_serialization.py | import json
from unittest.mock import patch
import pytest
from mlflow.entities import Feedback
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers import Scorer, scorer
from mlflow.genai.scorers.builtin_scorers import Guidelines
@pytest.fixture(autouse=True)
def mock_databricks_runtime():
    # Every test in this module runs as if the tracking URI were Databricks,
    # since scorer serialization branches on is_databricks_uri.
    with patch("mlflow.genai.scorers.base.is_databricks_uri", return_value=True):
        yield
# ============================================================================
# FORMAT VALIDATION TESTS (Minimal - just check serialization structure)
# ============================================================================
def test_decorator_scorer_serialization_format():
@scorer(name="test_scorer", aggregations=["mean"])
def test_scorer(outputs):
return outputs == "correct"
serialized = test_scorer.model_dump()
# Check required fields for decorator scorers
assert serialized["name"] == "test_scorer"
assert serialized["aggregations"] == ["mean"]
assert "call_source" in serialized
assert "original_func_name" in serialized
assert serialized["original_func_name"] == "test_scorer"
assert "call_signature" in serialized
# Check version metadata
assert "mlflow_version" in serialized
assert "serialization_version" in serialized
assert serialized["serialization_version"] == 1
# Builtin scorer fields should be None (not populated for decorator scorers)
assert serialized["builtin_scorer_class"] is None
assert serialized["builtin_scorer_pydantic_data"] is None
def test_builtin_scorer_serialization_format():
    """A builtin scorer serializes its class name plus its pydantic payload."""
    from mlflow.genai.scorers.builtin_scorers import RelevanceToQuery

    dumped = RelevanceToQuery().model_dump()

    # Builtin scorers record which class to rebuild and the state to rebuild it with.
    assert dumped["name"] == "relevance_to_query"
    assert "builtin_scorer_class" in dumped
    assert dumped["builtin_scorer_class"] == "RelevanceToQuery"
    assert "builtin_scorer_pydantic_data" in dumped
    assert "required_columns" in dumped["builtin_scorer_pydantic_data"]

    # Version metadata is always present.
    assert "mlflow_version" in dumped
    assert "serialization_version" in dumped
    assert dumped["serialization_version"] == 1

    # Decorator-scorer-only fields stay empty for builtin scorers.
    assert dumped["call_source"] is None
    assert dumped["call_signature"] is None
    assert dumped["original_func_name"] is None
# ============================================================================
# ROUND-TRIP FUNCTIONALITY TESTS (Comprehensive - test complete cycles)
# ============================================================================
def test_simple_scorer_round_trip():
    """A trivial scorer behaves identically before and after a dump/load cycle."""

    @scorer
    def simple_scorer(outputs):
        return outputs == "correct"

    # Sanity-check the original before serializing.
    assert simple_scorer(outputs="correct") is True
    assert simple_scorer(outputs="wrong") is False

    restored = Scorer.model_validate(simple_scorer.model_dump())

    # The rebuilt scorer keeps its name and its behavior.
    assert restored.name == "simple_scorer"
    assert restored(outputs="correct") is True
    assert restored(outputs="wrong") is False
def test_custom_name_and_aggregations_round_trip():
    """Custom name and aggregations survive serialization along with behavior."""

    @scorer(name="length_check", aggregations=["mean", "max"])
    def my_scorer(inputs, outputs):
        return len(outputs) > len(inputs)

    # Sanity-check the original before serializing.
    assert my_scorer(inputs="hi", outputs="hello world") is True
    assert my_scorer(inputs="hello", outputs="hi") is False

    restored = Scorer.model_validate(my_scorer.model_dump())

    # Properties and behavior are both preserved.
    assert restored.name == "length_check"
    assert restored.aggregations == ["mean", "max"]
    assert restored(inputs="hi", outputs="hello world") is True
    assert restored(inputs="hello", outputs="hi") is False
def test_multiple_parameters_round_trip():
    """A scorer taking inputs/outputs/expectations round-trips intact."""

    @scorer
    def multi_param_scorer(inputs, outputs, expectations):
        return outputs.startswith(inputs) and len(outputs) > expectations.get("min_length", 0)

    call_kwargs = {
        "inputs": "Hello",
        "outputs": "Hello world!",
        "expectations": {"min_length": 5},
    }
    assert multi_param_scorer(**call_kwargs) is True

    restored = Scorer.model_validate(multi_param_scorer.model_dump())

    # Same positive case, plus a non-matching prefix that must still be rejected.
    assert restored(**call_kwargs) is True
    assert (
        restored(inputs="Hi", outputs="Hello world!", expectations={"min_length": 5}) is False
    )
def test_complex_logic_round_trip():
    """Multi-branch scoring logic is preserved through serialization."""

    @scorer
    def complex_scorer(outputs):
        if not outputs:
            return 0
        words = outputs.split()
        score = 0
        for word in words:
            if word.isupper():
                score += 2
            elif word.islower():
                score += 1
        return score

    cases = [
        ("", 0),
        ("hello world", 2),  # two lowercase words
        ("HELLO WORLD", 4),  # two uppercase words
        ("Hello WORLD", 2),  # mixed-case "Hello" scores 0, "WORLD" scores 2
    ]
    # Verify the original, then verify the round-tripped copy on the same cases.
    for text, expected in cases:
        assert complex_scorer(outputs=text) == expected

    restored = Scorer.model_validate(complex_scorer.model_dump())

    for text, expected in cases:
        assert restored(outputs=text) == expected
def test_imports_and_feedback_round_trip():
    """A scorer with a local import and a Feedback return round-trips correctly."""

    @scorer
    def feedback_scorer(outputs):
        import re  # clint: disable=lazy-import

        pattern = r"\b\w+\b"
        words = re.findall(pattern, outputs)
        return Feedback(value=len(words), rationale=f"Found {len(words)} words")

    def check(candidate):
        # Shared expectations for both the original and the restored scorer.
        result = candidate(outputs="hello world test")
        assert isinstance(result, Feedback)
        assert result.value == 3
        assert "Found 3 words" in result.rationale

    check(feedback_scorer)
    check(Scorer.model_validate(feedback_scorer.model_dump()))
def test_default_parameters_round_trip():
    """Default parameter values keep working after deserialization."""

    @scorer
    def default_scorer(outputs, threshold=5):
        return len(outputs) > threshold

    # Default threshold and an explicit override both behave as expected.
    assert default_scorer(outputs="short") is False  # len 5 is not > 5
    assert default_scorer(outputs="longer") is True  # len 6 is > 5
    assert default_scorer(outputs="hi", threshold=1) is True  # len 2 is > 1

    restored = Scorer.model_validate(default_scorer.model_dump())

    # The default still applies on the restored copy.
    assert restored(outputs="short") is False
    assert restored(outputs="longer") is True
def test_json_workflow_round_trip():
    """The dump survives an actual JSON encode/decode cycle."""

    @scorer(name="json_test", aggregations=["mean"])
    def json_scorer(outputs):
        return len(outputs.split()) > 3

    assert json_scorer(outputs="one two three four") is True
    assert json_scorer(outputs="one two") is False

    # Push the serialized form through a real JSON string before validating.
    revived = json.loads(json.dumps(json_scorer.model_dump()))
    restored = Scorer.model_validate(revived)

    assert restored.name == "json_test"
    assert restored.aggregations == ["mean"]
    assert restored(outputs="one two three four") is True
    assert restored(outputs="one two") is False
def test_end_to_end_complex_round_trip():
    """Name, aggregations, and multi-argument logic all survive together."""

    @scorer(name="complete_test", aggregations=["mean", "max"])
    def complete_scorer(inputs, outputs, expectations):
        input_words = len(inputs.split())
        output_words = len(outputs.split())
        expected_ratio = expectations.get("word_ratio", 1.0)
        actual_ratio = output_words / input_words if input_words > 0 else 0
        return actual_ratio >= expected_ratio

    call_kwargs = {
        "inputs": "hello world",
        "outputs": "hello beautiful world today",
        "expectations": {"word_ratio": 1.5},
    }
    original_result = complete_scorer(**call_kwargs)
    assert original_result is True

    restored = Scorer.model_validate(complete_scorer.model_dump())

    # Metadata is intact and both copies agree on the (True) verdict.
    assert restored.name == "complete_test"
    assert restored.aggregations == ["mean", "max"]
    restored_result = restored(**call_kwargs)
    assert original_result is True
    assert restored_result is True
def test_deserialized_scorer_runs_without_global_context():
    """A deserialized scorer must execute in a namespace with no outer context."""

    # The scorer touches nothing but builtins and its own parameter.
    @scorer(name="isolated_test")
    def simple_scorer(outputs):
        # Only use built-in functions and the parameter - no external dependencies
        return len(outputs.split()) > 2

    assert simple_scorer(outputs="one two three") is True
    assert simple_scorer(outputs="one two") is False

    serialized_data = simple_scorer.model_dump()

    # Rebuild and run the scorer inside a fresh exec() namespace that contains
    # only the serialized payload - no module globals leak in.
    test_code = """
# Import required modules in isolated namespace
from mlflow.genai.scorers import Scorer

# Deserialize the scorer (no external context available)
deserialized = Scorer.model_validate(serialized_data)

# Test that it can run successfully in isolation
result1 = deserialized(outputs="one two three")
result2 = deserialized(outputs="one two")
result3 = deserialized(outputs="hello world test case")

# Store results for verification
test_results = {
    "result1": result1,
    "result2": result2,
    "result3": result3,
    "name": deserialized.name,
    "aggregations": deserialized.aggregations,
}
"""
    isolated_namespace = {"serialized_data": serialized_data}
    exec(test_code, isolated_namespace)  # noqa: S102

    outcome = isolated_namespace["test_results"]
    assert outcome["result1"] is True  # "one two three" has 3 words > 2
    assert outcome["result2"] is False  # "one two" has 2 words, not > 2
    assert outcome["result3"] is True  # "hello world test case" has 4 words > 2
    assert outcome["name"] == "isolated_test"
    assert outcome["aggregations"] is None
def test_builtin_scorer_round_trip():
    """RelevanceToQuery round-trips and still delegates to the judge correctly."""
    from mlflow.genai.scorers.builtin_scorers import RelevanceToQuery

    restored = Scorer.model_validate(RelevanceToQuery().model_dump())

    # The concrete class and its column requirements come back intact.
    assert isinstance(restored, RelevanceToQuery)
    assert restored.name == "relevance_to_query"
    assert hasattr(restored, "required_columns")
    assert restored.required_columns == {"inputs", "outputs"}

    answer = (
        "Machine learning is a subset of AI that enables computers to learn without "
        "explicit programming."
    )
    with patch(
        "mlflow.genai.judges.is_context_relevant",
        return_value=Feedback(name="relevance_to_query", value="yes", metadata={"chunk_index": 0}),
    ) as mock_judge:
        result = restored(
            inputs={"question": "What is machine learning?"},
            outputs=answer,
        )

    # The judge received the stringified request plus the raw answer text.
    mock_judge.assert_called_once_with(
        request="{'question': 'What is machine learning?'}",
        context=answer,
        name="relevance_to_query",
        model=None,
    )
    assert isinstance(result, Feedback)
    assert result.name == "relevance_to_query"
    assert result.value == "yes"
    assert result.metadata == {"chunk_index": 0}  # chunk_index should be preserved
def test_builtin_scorer_with_parameters_round_trip():
    """Guidelines with a custom name and guideline text round-trips fully."""
    from mlflow.genai.scorers.builtin_scorers import Guidelines

    tone = (
        "The response must maintain a courteous, respectful tone throughout. "
        "It must show empathy for customer concerns."
    )
    tone_scorer = Guidelines(name="tone", guidelines=[tone])

    # Original instance carries the custom parameters.
    assert isinstance(tone_scorer, Guidelines)
    assert tone_scorer.name == "tone"
    assert tone_scorer.guidelines == [tone]

    dumped = tone_scorer.model_dump()

    # The dump records the class name and the full pydantic payload.
    assert "builtin_scorer_class" in dumped
    assert dumped["builtin_scorer_class"] == "Guidelines"
    assert "builtin_scorer_pydantic_data" in dumped
    payload = dumped["builtin_scorer_pydantic_data"]
    assert "guidelines" in payload
    assert payload["guidelines"] == [tone]
    assert payload["name"] == "tone"

    restored = Scorer.model_validate(dumped)

    # Type and every property are preserved.
    assert isinstance(restored, Guidelines)
    assert restored.name == "tone"
    assert restored.guidelines == [tone]
    assert hasattr(restored, "required_columns")
    assert restored.required_columns == {"inputs", "outputs"}

    reply = (
        "Thank you for bringing this to my attention. I understand your concern and "
        "will help resolve this issue promptly."
    )
    with patch(
        "mlflow.genai.judges.meets_guidelines",
        return_value=Feedback(
            name="tone", value=True, rationale="Response is appropriately courteous"
        ),
    ) as mock_judge:
        result = restored(
            inputs={"question": "What is the issue?"},
            outputs=reply,
        )

    # The judge received the custom guidelines and the request/response context.
    mock_judge.assert_called_once_with(
        guidelines=[tone],
        context={
            "request": "{'question': 'What is the issue?'}",
            "response": reply,
        },
        name="tone",
        model=None,
    )
    assert isinstance(result, Feedback)
    assert result.name == "tone"
    assert result.value is True
def test_direct_subclass_scorer_rejected():
    """Direct Scorer subclasses are callable but rejected at serialization time."""

    class DirectSubclassScorer(Scorer):
        """An unsupported direct subclass of Scorer."""

        def __init__(self, **data):
            super().__init__(name="direct_subclass", **data)

        def __call__(self, *, outputs):
            return len(outputs) > 5

    # Instantiating and calling the subclass works fine.
    direct_scorer = DirectSubclassScorer()
    assert direct_scorer(outputs="hello world") is True
    assert direct_scorer(outputs="hi") is False

    # Serialization must fail with an informative message. Capture the
    # exception via `pytest.raises(...) as exc_info` instead of a second
    # try/except block: the old pattern silently skipped all the message
    # assertions if model_dump() ever stopped raising.
    with pytest.raises(
        MlflowException, match="Unsupported scorer type: DirectSubclassScorer"
    ) as exc_info:
        direct_scorer.model_dump()

    error_msg = str(exc_info.value)
    assert "Builtin scorers" in error_msg
    assert "Decorator-created scorers" in error_msg
    assert "@scorer decorator" in error_msg
    assert "Direct subclassing of Scorer is not supported" in error_msg
def test_builtin_scorer_with_aggregations_round_trip():
    """Aggregations on a builtin scorer survive the round trip; both copies run."""
    from mlflow.genai.scorers.builtin_scorers import RelevanceToQuery

    scorer_with_aggs = RelevanceToQuery(name="relevance_with_aggs", aggregations=["mean", "max"])

    # The constructor arguments took effect.
    assert scorer_with_aggs.name == "relevance_with_aggs"
    assert scorer_with_aggs.aggregations == ["mean", "max"]

    restored = Scorer.model_validate(scorer_with_aggs.model_dump())

    # Type, name, aggregations, and column requirements are all preserved.
    assert isinstance(restored, RelevanceToQuery)
    assert restored.name == "relevance_with_aggs"
    assert restored.aggregations == ["mean", "max"]
    assert hasattr(restored, "required_columns")
    assert restored.required_columns == {"inputs", "outputs"}

    call_kwargs = {
        "inputs": {"question": "What is machine learning?"},
        "outputs": "Machine learning is a subset of AI.",
    }
    with patch(
        "mlflow.genai.judges.is_context_relevant",
        return_value=Feedback(name="relevance_with_aggs", value="yes"),
    ) as mock_judge:
        original_result = scorer_with_aggs(**call_kwargs)
        restored_result = restored(**call_kwargs)

    # Both invocations agree, and each one hit the judge exactly once.
    assert original_result.name == restored_result.name == "relevance_with_aggs"
    assert original_result.value == restored_result.value == "yes"
    assert mock_judge.call_count == 2
# ============================================================================
# COMPATIBILITY TESTS (Fixed serialized strings for backward compatibility)
# ============================================================================
def test_builtin_scorer_with_custom_name_compatibility():
    """A payload frozen at serialization_version 1 still loads into Guidelines."""
    from mlflow.genai.scorers.builtin_scorers import Guidelines

    # Serialized form captured from an earlier MLflow release; must keep loading.
    legacy_payload = {
        "name": "custom_guidelines",
        "aggregations": ["mean", "max"],
        "mlflow_version": "3.1.0",
        "serialization_version": 1,
        "builtin_scorer_class": "Guidelines",
        "builtin_scorer_pydantic_data": {
            "name": "custom_guidelines",
            "aggregations": ["mean", "max"],
            "required_columns": ["inputs", "outputs"],
            "guidelines": [
                "Be polite and professional",
                "Provide accurate information",
            ],
        },
        "call_source": None,
        "call_signature": None,
        "original_func_name": None,
    }

    restored = Scorer.model_validate(legacy_payload)

    # Everything the payload encodes is reflected on the rebuilt instance.
    assert isinstance(restored, Guidelines)
    assert restored.name == "custom_guidelines"
    assert restored.aggregations == ["mean", "max"]
    assert restored.guidelines == [
        "Be polite and professional",
        "Provide accurate information",
    ]
    assert restored.required_columns == {"inputs", "outputs"}
def test_custom_scorer_compatibility_from_fixed_string():
    """A frozen decorator-scorer payload deserializes and still executes."""
    # Serialized form of a simple word-count scorer from an older release.
    legacy_payload = {
        "name": "word_count_scorer",
        "aggregations": ["mean"],
        "mlflow_version": "3.1.0",
        "serialization_version": 1,
        "builtin_scorer_class": None,
        "builtin_scorer_pydantic_data": None,
        "call_source": "return len(outputs.split())",
        "call_signature": "(outputs)",
        "original_func_name": "word_count_scorer",
    }

    restored = Scorer.model_validate(legacy_payload)

    assert restored.name == "word_count_scorer"
    assert restored.aggregations == ["mean"]

    # The reconstructed function body runs exactly as it used to.
    assert restored(outputs="hello world test") == 3
    assert restored(outputs="single") == 1
    assert restored(outputs="") == 0
def test_complex_custom_scorer_compatibility():
    """A multi-line, multi-argument frozen payload still deserializes and runs."""
    legacy_payload = {
        "name": "length_comparison",
        "aggregations": None,
        "mlflow_version": "2.9.0",
        "serialization_version": 1,
        "builtin_scorer_class": None,
        "builtin_scorer_pydantic_data": None,
        "call_source": (
            "input_len = len(inputs) if inputs else 0\n"
            "output_len = len(outputs) if outputs else 0\n"
            "min_ratio = expectations.get('min_ratio', 1.0) if expectations else 1.0\n"
            "return output_len >= input_len * min_ratio"
        ),
        "call_signature": "(inputs, outputs, expectations)",
        "original_func_name": "length_comparison",
    }

    restored = Scorer.model_validate(legacy_payload)

    assert restored.name == "length_comparison"
    assert restored.aggregations is None

    # 11 >= 5 * 1.5 (7.5) -> True
    assert (
        restored(inputs="hello", outputs="hello world", expectations={"min_ratio": 1.5}) is True
    )
    # 2 < 5 * 1.5 (7.5) -> False
    assert restored(inputs="hello", outputs="hi", expectations={"min_ratio": 1.5}) is False
    # 4 >= 4 * 1.0 -> True
    assert restored(inputs="test", outputs="test", expectations={}) is True
def test_decorator_scorer_multiple_serialization_round_trips():
    """A deserialized scorer can be re-serialized repeatedly with identical output."""

    @scorer
    def multi_round_scorer(outputs):
        return len(outputs) > 5

    first_dump = multi_round_scorer.model_dump()
    recovered = Scorer.model_validate(first_dump)

    # Re-dumping the recovered scorer (twice) must reproduce the first dump;
    # this only works when the call source is cached on deserialization.
    assert recovered.model_dump() == first_dump
    assert recovered.model_dump() == first_dump

    # Behavior is unaffected by the extra serialization cycles.
    assert recovered(outputs="hello world") is True
    assert recovered(outputs="hi") is False
def test_builtin_scorer_instructions_preserved_through_serialization():
    """The computed `instructions` text is stored in the dump and restored on load."""
    guidelines_scorer = Guidelines(name="test_guidelines", guidelines=["Be helpful"])
    original_instructions = guidelines_scorer.instructions

    dumped = guidelines_scorer.model_dump()

    # The pydantic payload carries the instructions verbatim.
    assert "builtin_scorer_pydantic_data" in dumped
    payload = dumped["builtin_scorer_pydantic_data"]
    assert "instructions" in payload
    assert payload["instructions"] == original_instructions

    restored = Scorer.model_validate(dumped)

    assert isinstance(restored, Guidelines)
    assert restored.instructions == original_instructions
    assert restored.name == "test_guidelines"
    assert restored.guidelines == ["Be helpful"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/test_serialization.py",
"license": "Apache License 2.0",
"lines": 520,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scheduled_scorers.py | from dataclasses import dataclass
from mlflow.genai.scorers.base import Scorer
# Shared user-facing message raised when the optional `databricks-agents`
# dependency is missing; scheduled scorers cannot function without it.
_ERROR_MSG = (
    "The `databricks-agents` package is required to use `mlflow.genai.scheduled_scorers`. "
    "Please install it with `pip install databricks-agents`."
)
@dataclass()
class ScorerScheduleConfig:
    """
    A scheduled scorer configuration for automated monitoring of generative AI applications.

    Scheduled scorers are used to automatically evaluate traces logged to MLflow experiments
    by production applications. They are part of `Databricks Lakehouse Monitoring for GenAI
    <https://docs.databricks.com/aws/en/generative-ai/agent-evaluation/monitoring>`_,
    which helps track quality metrics like groundedness, safety, and guideline adherence
    alongside operational metrics like volume, latency, and cost.

    When configured, scheduled scorers run automatically in the background to evaluate
    a sample of traces based on the specified sampling rate and filter criteria. The
    Assessments are displayed in the Traces tab of the MLflow experiment and can be used to
    identify quality issues in production.

    Args:
        scorer: The scorer function to run on sampled traces. Must be either a built-in
            scorer (e.g., Safety, Correctness) or a function decorated with @scorer.
            Subclasses of Scorer are not supported.
        scheduled_scorer_name: The name for this scheduled scorer configuration
            within the experiment. This name must be unique among all scheduled scorers
            in the same experiment.
            We recommend using the scorer's name (e.g., scorer.name) for consistency.
        sample_rate: The fraction of traces to evaluate, between 0.0 and 1.0. For example,
            0.1 means 10% of traces will be randomly selected for evaluation.
        filter_string: An optional MLflow search_traces compatible filter string to apply
            before sampling traces. Only traces matching this filter will be considered
            for evaluation. Uses the same syntax as mlflow.search_traces().

    Example:
        .. code-block:: python

            from mlflow.genai.scorers import Safety, scorer
            from mlflow.genai.scheduled_scorers import ScorerScheduleConfig

            # Using a built-in scorer
            safety_config = ScorerScheduleConfig(
                scorer=Safety(),
                scheduled_scorer_name="production_safety",
                sample_rate=0.2,  # Evaluate 20% of traces
                filter_string="trace.status = 'OK'",
            )

            # Using a custom scorer
            @scorer
            def response_length(outputs):
                return len(str(outputs)) > 100

            length_config = ScorerScheduleConfig(
                scorer=response_length,
                scheduled_scorer_name="adequate_length",
                sample_rate=0.1,  # Evaluate 10% of traces
                filter_string="trace.status = 'OK'",
            )

    Note:
        Scheduled scorers are executed automatically by Databricks and do not need to be
        manually triggered. The Assessments appear in the Traces tab of the MLflow
        experiment. Only traces logged directly to the experiment are monitored; traces
        logged to individual runs within the experiment are not evaluated.

    .. warning::
        This API is in Beta and may change or be removed in a future release without warning.
    """

    # The scorer to run on sampled traces (builtin or @scorer-decorated).
    scorer: Scorer
    # Unique (per experiment) name for this scheduled configuration.
    scheduled_scorer_name: str
    # Fraction of traces to evaluate, in [0.0, 1.0].
    sample_rate: float
    # Optional search_traces-style filter applied before sampling.
    filter_string: str | None = None
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scheduled_scorers.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/genai/test_scheduled_scorers.py | from mlflow.genai.scheduled_scorers import (
ScorerScheduleConfig,
)
from mlflow.genai.scorers.base import Scorer
class MockScorer(Scorer):
    """Mock scorer for testing purposes.

    Returns a constant perfect score so tests can exercise scheduling
    configuration without any real judge logic.
    """

    # Pydantic field: default scorer name referenced by the tests below.
    name: str = "mock_scorer"

    def __call__(self, *, outputs=None, **kwargs):
        # Constant result keeps tests deterministic and dependency-free.
        return {"score": 1.0}
def test_scheduled_scorer_class_instantiation():
    """Every constructor argument lands on the matching dataclass attribute."""
    mock_scorer = MockScorer()

    config = ScorerScheduleConfig(
        scorer=mock_scorer,
        scheduled_scorer_name="test_scorer",
        sample_rate=0.5,
        filter_string="test_filter",
    )

    assert config.scorer == mock_scorer
    assert config.scheduled_scorer_name == "test_scorer"
    assert config.sample_rate == 0.5
    assert config.filter_string == "test_filter"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/test_scheduled_scorers.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/utils/enum_utils.py | from enum import Enum, EnumMeta
class MetaEnum(EnumMeta):
    """Metaclass that makes ``value in EnumClass`` work for raw member values."""

    def __contains__(cls, item):
        # Delegate to the Enum constructor, which raises ValueError for
        # anything that is not a valid member (or member value).
        try:
            cls(item)
            return True
        except ValueError:
            return False
class StrEnum(str, Enum, metaclass=MetaEnum):
    """String-valued Enum whose members print as their plain string value."""

    def __str__(self):
        # Show the underlying value instead of the default "Class.MEMBER" form.
        return self.value

    @classmethod
    def values(cls) -> list[str]:
        """Return every member's string value, in definition order."""
        return list(map(str, cls))
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/utils/enum_utils.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.