sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
mlflow/mlflow:mlflow/genai/judges/tools/get_traces_in_session.py | """
Get traces in session tool for MLflow GenAI judges.
This module provides a tool for retrieving traces from the same session
to enable multi-turn evaluation capabilities.
"""
from mlflow.entities.trace import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.genai.judges.tools.search_traces import SearchTracesTool
from mlflow.genai.judges.tools.types import JudgeToolTraceInfo
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.tracing.constant import TraceMetadataKey
from mlflow.types.llm import FunctionToolDefinition, ToolDefinition, ToolParamsSchema
from mlflow.utils.annotations import experimental
@experimental(version="3.5.0")
class GetTracesInSession(JudgeTool):
    """
    Tool for retrieving traces from the same session for multi-turn evaluation.

    This tool extracts the session ID from the current trace and searches for other traces
    within the same session to provide conversational context to judges.
    """

    @property
    def name(self) -> str:
        return ToolNames._GET_TRACES_IN_SESSION

    def get_definition(self) -> ToolDefinition:
        # Parameter schema mirrors SearchTracesTool's optional knobs; both are optional.
        max_results_schema = {
            "type": "integer",
            "description": "Maximum number of traces to return (default: 20)",
            "default": 20,
        }
        order_by_schema = {
            "type": "array",
            "items": {"type": "string"},
            "description": (
                "List of order by clauses for sorting results "
                "(default: ['timestamp ASC'] for chronological order)"
            ),
        }
        function_definition = FunctionToolDefinition(
            name=ToolNames._GET_TRACES_IN_SESSION,
            description=(
                "Retrieve traces from the same session for multi-turn evaluation. "
                "Extracts the session ID from the current trace and searches for other "
                "traces in the same session to provide conversational context. "
                "Returns a list of JudgeToolTraceInfo objects containing trace metadata, "
                "request, and response."
            ),
            parameters=ToolParamsSchema(
                type="object",
                properties={
                    "max_results": max_results_schema,
                    "order_by": order_by_schema,
                },
                required=[],
            ),
        )
        return ToolDefinition(function=function_definition, type="function")

    def invoke(
        self,
        trace: Trace,
        max_results: int = 20,
        order_by: list[str] | None = None,
    ) -> list[JudgeToolTraceInfo]:
        """
        Retrieve traces from the same session.

        Args:
            trace: The current MLflow trace object
            max_results: Maximum number of traces to return
            order_by: List of order by clauses for sorting results

        Returns:
            List of JudgeToolTraceInfo objects containing trace metadata, request, and response

        Raises:
            MlflowException: If session ID is not found or has invalid format
        """
        session_id = trace.info.trace_metadata.get(TraceMetadataKey.TRACE_SESSION)
        if not session_id:
            raise MlflowException(
                "No session ID found in trace metadata. Traces in session require a session ID "
                "to identify related traces within the same conversation session.",
                error_code=INVALID_PARAMETER_VALUE,
            )
        # The session ID is interpolated into a filter string below, so reject anything
        # that is not purely alphanumeric plus hyphens/underscores.
        normalized = session_id.replace("-", "").replace("_", "")
        if not normalized.isalnum():
            raise MlflowException(
                (
                    f"Invalid session ID format: {session_id}. Session IDs should contain only "
                    "alphanumeric characters, hyphens, and underscores."
                ),
                error_code=INVALID_PARAMETER_VALUE,
            )
        # Restrict results to earlier traces in the same session (conversation history).
        session_clause = f"metadata.`{TraceMetadataKey.TRACE_SESSION}` = '{session_id}'"
        time_clause = f"trace.timestamp < {trace.info.request_time}"
        filter_string = f"{session_clause} AND {time_clause}"
        return SearchTracesTool().invoke(
            trace=trace, filter_string=filter_string, order_by=order_by, max_results=max_results
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/get_traces_in_session.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/tools/search_traces.py | """
Search traces tool for MLflow GenAI judges.
This module provides a tool for searching and retrieving traces from an MLflow experiment
based on filter criteria, ordering, and result limits. It enables judges to analyze
traces within the same experiment context.
"""
import logging

import mlflow
from mlflow.entities.assessment import Assessment, Expectation, Feedback
from mlflow.entities.trace import Trace
from mlflow.entities.trace_location import TraceLocationType
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.genai.judges.tools.types import (
    JudgeToolExpectation,
    JudgeToolFeedback,
    JudgeToolTraceInfo,
)
from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE
from mlflow.types.llm import (
    FunctionToolDefinition,
    ToolDefinition,
    ToolParamsSchema,
)
from mlflow.utils.annotations import experimental
_logger = logging.getLogger(__name__)
def _convert_assessments_to_tool_types(
    assessments: list[Assessment],
) -> list[JudgeToolExpectation | JudgeToolFeedback]:
    """Convert MLflow assessment entities into their judge-tool counterparts.

    Expectations map to ``JudgeToolExpectation`` and feedback to ``JudgeToolFeedback``;
    any other assessment subtype is silently skipped.
    """
    converted: list[JudgeToolExpectation | JudgeToolFeedback] = []
    for item in assessments:
        if isinstance(item, Expectation):
            converted.append(
                JudgeToolExpectation(
                    name=item.name,
                    source=item.source.source_type,
                    rationale=item.rationale,
                    span_id=item.span_id,
                    assessment_id=item.assessment_id,
                    value=item.value,
                )
            )
        elif isinstance(item, Feedback):
            # Error details are optional; flatten them to None when absent.
            err = item.error
            converted.append(
                JudgeToolFeedback(
                    name=item.name,
                    source=item.source.source_type,
                    rationale=item.rationale,
                    span_id=item.span_id,
                    assessment_id=item.assessment_id,
                    value=item.value,
                    error_code=err.error_code if err else None,
                    error_message=err.error_message if err else None,
                    stack_trace=err.stack_trace if err else None,
                    overrides=item.overrides,
                    valid=item.valid,
                )
            )
    return converted
def _get_experiment_id(trace: Trace) -> str:
    """
    Get and validate experiment ID from trace.

    Args:
        trace: The MLflow trace object

    Returns:
        Experiment ID

    Raises:
        MlflowException: If trace is not from MLflow experiment or has no experiment ID
    """
    location = trace.info.trace_location
    # Guard clauses: location must exist, be an MLflow experiment, and carry an ID.
    if not location:
        raise MlflowException(
            "Current trace has no trace location. Cannot determine experiment context.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    if location.type != TraceLocationType.MLFLOW_EXPERIMENT:
        raise MlflowException(
            f"Current trace is not from an MLflow experiment "
            f"(type: {location.type}). "
            "Traces can only be retrieved for traces within MLflow experiments.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    experiment = location.mlflow_experiment
    if not (experiment and experiment.experiment_id):
        raise MlflowException(
            "Current trace has no experiment_id. Cannot search for traces.",
            error_code=INVALID_PARAMETER_VALUE,
        )
    return experiment.experiment_id
@experimental(version="3.5.0")
class SearchTracesTool(JudgeTool):
    """
    Tool for searching and retrieving traces from an MLflow experiment.

    This tool enables judges to search for traces within the same experiment context
    as the current trace being evaluated. It supports filtering, ordering, and
    pagination of results. The tool returns trace metadata including request/response
    data, execution metrics, and assessments for analysis.
    """

    @property
    def name(self) -> str:
        return ToolNames._SEARCH_TRACES

    def get_definition(self) -> ToolDefinition:
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames._SEARCH_TRACES,
                description=(
                    "Search for traces within the same MLflow experiment as the current trace. "
                    "Returns trace metadata including trace_id, request_time, state, request, "
                    "response, execution_duration, and assessments. Supports filtering with "
                    "MLflow search syntax (e.g., 'attributes.status = \"OK\"'), custom ordering "
                    "(e.g., ['timestamp DESC']), and result limits. Use this to analyze patterns "
                    "across traces or find specific traces matching criteria."
                ),
                parameters=ToolParamsSchema(
                    type="object",
                    properties={
                        "filter_string": {
                            "type": "string",
                            "description": (
                                "Optional filter string using SQL-like search syntax. "
                                "If not specified, all traces are returned.\n\n"
                                "SUPPORTED FIELDS:\n"
                                "- Attributes: request_id, timestamp_ms, execution_time_ms, "
                                "status, name, run_id\n"
                                "- Tags: Use 'tags.' or 'tag.' prefix "
                                "(e.g., tags.operation_type, tag.model_name)\n"
                                "- Metadata: Use 'metadata.' prefix (e.g., metadata.run_id)\n"
                                "- Use backticks for special characters: tags.`model-name`\n\n"
                                "VALUE SYNTAX:\n"
                                "- String values MUST be quoted: status = 'OK'\n"
                                "- Numeric values don't need quotes: execution_time_ms > 1000\n"
                                "- Tag and metadata values MUST be quoted as strings\n\n"
                                "COMPARATORS:\n"
                                "- Numeric (timestamp_ms, execution_time_ms): "
                                ">, >=, =, !=, <, <=\n"
                                "- String (name, status, request_id): =, !=, IN, NOT IN\n"
                                "- Tags/Metadata: =, !=\n\n"
                                "STATUS VALUES: 'OK', 'ERROR', 'IN_PROGRESS'\n\n"
                                "EXAMPLES:\n"
                                "- status = 'OK'\n"
                                "- execution_time_ms > 1000\n"
                                "- tags.model_name = 'gpt-4'\n"
                                "- tags.`model-version` = 'v2' AND status = 'OK'\n"
                                "- timestamp_ms >= 1234567890000 AND execution_time_ms < 5000\n"
                                "- status IN ('OK', 'ERROR')\n"
                                "- tags.environment = 'production' AND status = 'ERROR' "
                                "AND execution_time_ms > 500\n"
                                "- status = 'OK' AND tag.importance = 'high'"
                            ),
                            "default": None,
                        },
                        "order_by": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": (
                                "Optional list of order by expressions (e.g., ['timestamp DESC']). "
                                "Defaults to ['timestamp ASC'] for chronological order."
                            ),
                            "default": ["timestamp ASC"],
                        },
                        "max_results": {
                            "type": "integer",
                            "description": "Maximum number of traces to return (default: 20)",
                            "default": 20,
                        },
                    },
                    required=[],
                ),
            ),
            type="function",
        )

    def invoke(
        self,
        trace: Trace,
        filter_string: str | None = None,
        order_by: list[str] | None = None,
        max_results: int = 20,
    ) -> list[JudgeToolTraceInfo]:
        """
        Search for traces within the same experiment as the current trace.

        Args:
            trace: The current MLflow trace object (used to determine experiment context)
            filter_string: Optional filter using MLflow search syntax
                (e.g., 'attributes.status = "OK"')
            order_by: Optional list of order by expressions (e.g., ['timestamp DESC'])
            max_results: Maximum number of traces to return (default: 20)

        Returns:
            List of JudgeToolTraceInfo objects containing trace metadata, request/response data,
            and assessments for each matching trace

        Raises:
            MlflowException: If trace has no experiment context or search fails
        """
        # Extract and validate experiment ID from trace
        experiment_id = _get_experiment_id(trace)
        locations = [experiment_id]
        # Default to chronological order
        if order_by is None:
            order_by = ["timestamp ASC"]
        _logger.debug(
            "Searching for traces with properties:\n\n"
            + "\n".join(
                [
                    f"* experiment_id={experiment_id}",
                    f"* filter_string={filter_string}",
                    f"* order_by={order_by}",
                    f"* max_results={max_results}",
                ]
            )
        )
        try:
            trace_objs = mlflow.search_traces(
                locations=locations,
                filter_string=filter_string,
                order_by=order_by,
                max_results=max_results,
                return_type="list",
            )
        except Exception as e:
            raise MlflowException(
                f"Failed to search traces: {e!s}",
                # Use the proto constant for consistency with the INVALID_PARAMETER_VALUE
                # usages in this module; a bare string only works through
                # MlflowException's fallback error-code handling.
                error_code=INTERNAL_ERROR,
            ) from e
        traces = []
        for trace_obj in trace_objs:
            try:
                trace_info = JudgeToolTraceInfo(
                    trace_id=trace_obj.info.trace_id,
                    request_time=trace_obj.info.request_time,
                    state=trace_obj.info.state,
                    request=trace_obj.data.request,
                    response=trace_obj.data.response,
                    execution_duration=trace_obj.info.execution_duration,
                    assessments=_convert_assessments_to_tool_types(trace_obj.info.assessments),
                )
                traces.append(trace_info)
            except Exception as e:
                # Best-effort conversion: skip traces that cannot be processed rather
                # than failing the entire search.
                _logger.warning("Failed to process trace %s: %s", trace_obj.info.trace_id, e)
                continue
        _logger.debug("Retrieved %d traces", len(traces))
        return traces
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/search_traces.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/judges/test_judge_tool_get_traces_in_session.py | from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities.trace import Trace, TraceData
from mlflow.entities.trace_info import TraceInfo as MlflowTraceInfo
from mlflow.entities.trace_location import TraceLocation
from mlflow.entities.trace_state import TraceState
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.tools.get_traces_in_session import GetTracesInSession
from mlflow.genai.judges.tools.types import JudgeToolTraceInfo
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, ErrorCode
from mlflow.tracing.utils import TraceMetadataKey
from mlflow.types.llm import ToolDefinition
def test_get_traces_in_session_tool_name() -> None:
    # The tool advertises the internal (underscore-prefixed) tool name.
    assert GetTracesInSession().name == "_get_traces_in_session"
def test_get_traces_in_session_tool_get_definition() -> None:
    definition = GetTracesInSession().get_definition()
    assert isinstance(definition, ToolDefinition)
    assert definition.type == "function"
    function = definition.function
    assert function.name == "_get_traces_in_session"
    description = function.description.lower()
    assert "session" in description
    assert "multi-turn evaluation" in description
    schema = function.parameters
    assert schema.type == "object"
    assert len(schema.required) == 0
    properties = schema.properties
    assert "max_results" in properties
    assert "order_by" in properties
    assert properties["max_results"].type == "integer"
    assert properties["order_by"].type == "array"
def create_mock_trace(session_id: str | None = None) -> Trace:
    """Build a minimal trace, optionally tagged with a session ID in its metadata."""
    trace_metadata = {TraceMetadataKey.TRACE_SESSION: session_id} if session_id else {}
    info = MlflowTraceInfo(
        trace_id="current-trace",
        trace_location=TraceLocation.from_experiment_id("exp-123"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=250,
        trace_metadata=trace_metadata,
    )
    return Trace(info=info, data=TraceData(spans=[]))
def test_get_traces_in_session_tool_invoke_success() -> None:
    patch_target = "mlflow.genai.judges.tools.get_traces_in_session.SearchTracesTool"
    with patch(patch_target) as search_tool_cls:
        expected = [
            JudgeToolTraceInfo(
                trace_id="trace-1",
                request_time=1000,
                state=TraceState.OK,
                request="What is machine learning?",
                response="Machine learning is a subset of AI...",
                execution_duration=100,
                assessments=[],
            ),
            JudgeToolTraceInfo(
                trace_id="trace-2",
                request_time=2000,
                state=TraceState.OK,
                request="Can you give an example?",
                response="Sure! A common example is...",
                execution_duration=150,
                assessments=[],
            ),
        ]
        delegate = MagicMock()
        delegate.invoke.return_value = expected
        search_tool_cls.return_value = delegate
        current_trace = create_mock_trace("session-123")
        result = GetTracesInSession().invoke(current_trace)
        assert len(result) == 2
        assert all(isinstance(entry, JudgeToolTraceInfo) for entry in result)
        assert result[0].trace_id == "trace-1"
        assert result[0].request == "What is machine learning?"
        assert result[1].trace_id == "trace-2"
        expected_filter = (
            f"metadata.`{TraceMetadataKey.TRACE_SESSION}` = 'session-123' "
            "AND trace.timestamp < 1234567890"
        )
        delegate.invoke.assert_called_once_with(
            trace=current_trace,
            filter_string=expected_filter,
            order_by=None,
            max_results=20,
        )
def test_get_traces_in_session_tool_invoke_custom_parameters() -> None:
    patch_target = "mlflow.genai.judges.tools.get_traces_in_session.SearchTracesTool"
    with patch(patch_target) as search_tool_cls:
        delegate = MagicMock()
        delegate.invoke.return_value = []
        search_tool_cls.return_value = delegate
        current_trace = create_mock_trace("session-456")
        GetTracesInSession().invoke(current_trace, max_results=50, order_by=["timestamp DESC"])
        expected_filter = (
            f"metadata.`{TraceMetadataKey.TRACE_SESSION}` = 'session-456' "
            "AND trace.timestamp < 1234567890"
        )
        delegate.invoke.assert_called_once_with(
            trace=current_trace,
            filter_string=expected_filter,
            order_by=["timestamp DESC"],
            max_results=50,
        )
def test_get_traces_in_session_tool_invoke_no_session_id() -> None:
    trace_without_session = create_mock_trace(session_id=None)
    with pytest.raises(MlflowException, match="No session ID found in trace metadata") as exc_info:
        GetTracesInSession().invoke(trace_without_session)
    assert exc_info.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_get_traces_in_session_tool_invoke_invalid_session_id() -> None:
    trace_with_bad_session = create_mock_trace("session@123!invalid")
    with pytest.raises(MlflowException, match="Invalid session ID format") as exc_info:
        GetTracesInSession().invoke(trace_with_bad_session)
    assert exc_info.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_get_traces_in_session_tool_invoke_empty_result() -> None:
    patch_target = "mlflow.genai.judges.tools.get_traces_in_session.SearchTracesTool"
    with patch(patch_target) as search_tool_cls:
        delegate = MagicMock()
        delegate.invoke.return_value = []
        search_tool_cls.return_value = delegate
        result = GetTracesInSession().invoke(create_mock_trace("session-123"))
        assert result == []
        assert len(result) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/test_judge_tool_get_traces_in_session.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/judges/test_judge_tool_search_traces.py | from unittest import mock
import pytest
from mlflow.entities.assessment import Expectation, Feedback
from mlflow.entities.assessment_error import AssessmentError
from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType
from mlflow.entities.span import Span
from mlflow.entities.trace import Trace
from mlflow.entities.trace_data import TraceData
from mlflow.entities.trace_info import TraceInfo
from mlflow.entities.trace_location import TraceLocation, TraceLocationType
from mlflow.entities.trace_state import TraceState
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.tools.search_traces import (
SearchTracesTool,
_convert_assessments_to_tool_types,
_get_experiment_id,
)
from mlflow.genai.judges.tools.types import (
JudgeToolExpectation,
JudgeToolFeedback,
JudgeToolTraceInfo,
)
from mlflow.tracing.constant import SpanAttributeKey
from mlflow.types.llm import ToolDefinition
from tests.tracing.helper import create_mock_otel_span
def test_search_traces_tool_name() -> None:
    # The tool advertises the internal (underscore-prefixed) tool name.
    assert SearchTracesTool().name == "_search_traces"
def test_search_traces_tool_get_definition() -> None:
    definition = SearchTracesTool().get_definition()
    assert isinstance(definition, ToolDefinition)
    assert definition.type == "function"
    function = definition.function
    assert function.name == "_search_traces"
    assert "Search for traces within the same MLflow experiment" in function.description
    schema = function.parameters
    assert schema.type == "object"
    assert schema.required == []
    for expected_param in ("filter_string", "order_by", "max_results"):
        assert expected_param in schema.properties
def test_convert_assessments_to_tool_types_with_expectations() -> None:
    human_source = AssessmentSource(source_type=AssessmentSourceType.HUMAN, source_id="user123")
    expectation = Expectation(
        name="test_expectation",
        source=human_source,
        span_id="span-1",
        value=True,
    )
    expectation.rationale = "Expected to be true"
    expectation.assessment_id = "assess-1"
    converted = _convert_assessments_to_tool_types([expectation])
    assert len(converted) == 1
    entry = converted[0]
    assert isinstance(entry, JudgeToolExpectation)
    assert entry.name == "test_expectation"
    assert entry.source == AssessmentSourceType.HUMAN
    assert entry.rationale == "Expected to be true"
    assert entry.span_id == "span-1"
    assert entry.assessment_id == "assess-1"
    assert entry.value is True
def test_convert_assessments_to_tool_types_with_feedback() -> None:
    judge_source = AssessmentSource(source_type=AssessmentSourceType.LLM_JUDGE, source_id="judge-1")
    assessment_error = AssessmentError(
        error_code="VALIDATION_ERROR",
        error_message="Invalid input",
        stack_trace="Stack trace here",
    )
    feedback = Feedback(
        name="test_feedback",
        source=judge_source,
        span_id="span-2",
        value="positive",
        error=assessment_error,
        overrides="old-assess-id",
        valid=False,
    )
    feedback.rationale = "Feedback rationale"
    feedback.assessment_id = "assess-2"
    converted = _convert_assessments_to_tool_types([feedback])
    assert len(converted) == 1
    entry = converted[0]
    assert isinstance(entry, JudgeToolFeedback)
    assert entry.name == "test_feedback"
    assert entry.source == AssessmentSourceType.LLM_JUDGE
    assert entry.rationale == "Feedback rationale"
    assert entry.span_id == "span-2"
    assert entry.assessment_id == "assess-2"
    assert entry.value == "positive"
    assert entry.error_code == "VALIDATION_ERROR"
    assert entry.error_message == "Invalid input"
    assert entry.stack_trace == "Stack trace here"
    assert entry.overrides == "old-assess-id"
    assert entry.valid is False
def test_convert_assessments_to_tool_types_with_feedback_no_error() -> None:
    human_source = AssessmentSource(source_type=AssessmentSourceType.HUMAN, source_id="user456")
    feedback = Feedback(
        name="feedback_no_error",
        source=human_source,
        span_id=None,
        value="negative",
        error=None,
    )
    feedback.assessment_id = "assess-3"
    converted = _convert_assessments_to_tool_types([feedback])
    assert len(converted) == 1
    entry = converted[0]
    assert isinstance(entry, JudgeToolFeedback)
    # All error-related fields collapse to None when no error is attached.
    assert entry.error_code is None
    assert entry.error_message is None
    assert entry.stack_trace is None
    assert entry.overrides is None
    assert entry.valid is True
def test_convert_assessments_to_tool_types_mixed() -> None:
    shared_source = AssessmentSource(source_type=AssessmentSourceType.HUMAN)
    converted = _convert_assessments_to_tool_types(
        [
            Expectation(name="exp1", source=shared_source, value=True),
            Feedback(name="fb1", source=shared_source, value="positive"),
        ]
    )
    assert len(converted) == 2
    # Order is preserved and each assessment maps to its matching tool type.
    assert isinstance(converted[0], JudgeToolExpectation)
    assert isinstance(converted[1], JudgeToolFeedback)
def test_get_experiment_id_success() -> None:
    info = TraceInfo(
        trace_id="trace-1",
        trace_location=TraceLocation.from_experiment_id("exp-123"),
        request_time=1234567890,
        state=TraceState.OK,
    )
    assert _get_experiment_id(Trace(info=info, data=None)) == "exp-123"
def test_get_experiment_id_no_trace_location() -> None:
    info = TraceInfo(
        trace_id="trace-1",
        trace_location=None,
        request_time=1234567890,
        state=TraceState.OK,
    )
    with pytest.raises(MlflowException, match="Current trace has no trace location"):
        _get_experiment_id(Trace(info=info, data=None))
def test_get_experiment_id_not_mlflow_experiment() -> None:
    info = TraceInfo(
        trace_id="trace-1",
        trace_location=TraceLocation(type=TraceLocationType.INFERENCE_TABLE),
        request_time=1234567890,
        state=TraceState.OK,
    )
    with pytest.raises(MlflowException, match="Current trace is not from an MLflow experiment"):
        _get_experiment_id(Trace(info=info, data=None))
def test_get_experiment_id_no_experiment_id() -> None:
    # A location of the right type but with no experiment payload attached.
    info = TraceInfo(
        trace_id="trace-1",
        trace_location=TraceLocation(type=TraceLocationType.MLFLOW_EXPERIMENT),
        request_time=1234567890,
        state=TraceState.OK,
    )
    with pytest.raises(MlflowException, match="Current trace has no experiment_id"):
        _get_experiment_id(Trace(info=info, data=None))
@pytest.fixture
def mock_trace() -> Trace:
    """A minimal trace located in experiment exp-456, used as the search anchor."""
    info = TraceInfo(
        trace_id="trace-current",
        trace_location=TraceLocation.from_experiment_id("exp-456"),
        request_time=1234567890,
        state=TraceState.OK,
    )
    return Trace(info=info, data=None)
@pytest.fixture
def mock_search_traces_list() -> list[Trace]:
    """Two fully-populated traces (one OK, one ERROR) to return from search."""
    human_source = AssessmentSource(source_type=AssessmentSourceType.HUMAN)

    def build_trace(
        span_id: int,
        start_time: int,
        end_time: int,
        request: str,
        response: str,
        trace_id: str,
        state: TraceState,
        duration: int,
        assessments: list,
    ) -> Trace:
        # Root span carries the request/response via span attributes.
        otel_span = create_mock_otel_span(
            trace_id=12345,
            span_id=span_id,
            name="root",
            start_time=start_time,
            end_time=end_time,
            parent_id=None,
        )
        otel_span.set_attributes(
            {
                SpanAttributeKey.INPUTS: request,
                SpanAttributeKey.OUTPUTS: response,
            }
        )
        info = TraceInfo(
            trace_id=trace_id,
            trace_location=TraceLocation.from_experiment_id("exp-456"),
            request_time=start_time,
            state=state,
            execution_duration=duration,
            assessments=assessments,
        )
        return Trace(info=info, data=TraceData(spans=[Span(otel_span)]))

    trace_one = build_trace(
        100,
        1000000000,
        1000000150,
        "request1",
        "response1",
        "trace-1",
        TraceState.OK,
        150,
        [Expectation(name="quality", source=human_source, value=True)],
    )
    trace_two = build_trace(
        101,
        1000000100,
        1000000300,
        "request2",
        "response2",
        "trace-2",
        TraceState.ERROR,
        200,
        [],
    )
    return [trace_one, trace_two]
def test_search_traces_tool_invoke_success(
    mock_trace: Trace, mock_search_traces_list: list[Trace]
) -> None:
    with mock.patch("mlflow.search_traces", return_value=mock_search_traces_list) as search_patch:
        results = SearchTracesTool().invoke(
            mock_trace, filter_string='attributes.status = "OK"', max_results=10
        )
    search_patch.assert_called_once_with(
        locations=["exp-456"],
        filter_string='attributes.status = "OK"',
        order_by=["timestamp ASC"],
        max_results=10,
        return_type="list",
    )
    assert len(results) == 2
    first, second = results
    assert isinstance(first, JudgeToolTraceInfo)
    assert first.trace_id == "trace-1"
    assert first.request == "request1"
    assert first.response == "response1"
    assert first.state == TraceState.OK
    assert first.execution_duration == 150
    assert len(first.assessments) == 1
    assert second.trace_id == "trace-2"
    assert second.state == TraceState.ERROR
    assert second.execution_duration == 200
    assert len(second.assessments) == 0
def test_search_traces_tool_invoke_with_order_by(
    mock_trace: Trace, mock_search_traces_list: list[Trace]
) -> None:
    custom_order = ["timestamp DESC", "trace_id ASC"]
    with mock.patch("mlflow.search_traces", return_value=mock_search_traces_list) as search_patch:
        results = SearchTracesTool().invoke(mock_trace, order_by=custom_order, max_results=5)
    search_patch.assert_called_once_with(
        locations=["exp-456"],
        filter_string=None,
        order_by=custom_order,
        max_results=5,
        return_type="list",
    )
    assert len(results) == 2
def test_search_traces_tool_invoke_default_order_by(
    mock_trace: Trace, mock_search_traces_list: list[Trace]
) -> None:
    with mock.patch("mlflow.search_traces", return_value=mock_search_traces_list) as search_patch:
        results = SearchTracesTool().invoke(mock_trace)
    search_patch.assert_called_once()
    kwargs = search_patch.call_args[1]
    # Omitted arguments fall back to the documented defaults.
    assert kwargs["order_by"] == ["timestamp ASC"]
    assert kwargs["max_results"] == 20
    assert kwargs["return_type"] == "list"
    assert len(results) == 2
def test_search_traces_tool_invoke_empty_results(mock_trace: Trace) -> None:
    with mock.patch("mlflow.search_traces", return_value=[]):
        results = SearchTracesTool().invoke(mock_trace)
    assert results == []
    assert len(results) == 0
def test_search_traces_tool_invoke_search_fails(mock_trace: Trace) -> None:
    # Any backend failure is wrapped in an MlflowException.
    with mock.patch("mlflow.search_traces", side_effect=Exception("Search failed")):
        with pytest.raises(MlflowException, match="Failed to search traces"):
            SearchTracesTool().invoke(mock_trace)
def test_search_traces_tool_invoke_invalid_trace_json(mock_trace: Trace) -> None:
    # Traces without data raise when data.request is accessed, so every trace
    # in this list should be skipped during result conversion.
    broken_traces = [
        Trace(
            info=TraceInfo(
                trace_id=trace_id,
                trace_location=TraceLocation.from_experiment_id("exp-456"),
                request_time=request_time,
                state=TraceState.OK,
            ),
            data=None,
        )
        for trace_id, request_time in [("trace-1", 1000000000), ("trace-2", 1000000100)]
    ]
    with mock.patch("mlflow.search_traces", return_value=broken_traces):
        results = SearchTracesTool().invoke(mock_trace)
    assert len(results) == 0
def test_search_traces_tool_invoke_partial_failure(mock_trace: Trace) -> None:
    # Trace without data: conversion fails and the trace is skipped.
    broken_trace = Trace(
        info=TraceInfo(
            trace_id="trace-1",
            trace_location=TraceLocation.from_experiment_id("exp-456"),
            request_time=1000000000,
            state=TraceState.OK,
            execution_duration=150,
            assessments=[],
        ),
        data=None,
    )
    # Fully populated trace: conversion succeeds.
    root_span = create_mock_otel_span(
        trace_id=12345,
        span_id=102,
        name="root",
        start_time=1000000100,
        end_time=1000000300,
        parent_id=None,
    )
    root_span.set_attributes(
        {
            SpanAttributeKey.INPUTS: "request2",
            SpanAttributeKey.OUTPUTS: "response2",
        }
    )
    healthy_trace = Trace(
        info=TraceInfo(
            trace_id="trace-2",
            trace_location=TraceLocation.from_experiment_id("exp-456"),
            request_time=1000000100,
            state=TraceState.ERROR,
            execution_duration=200,
            assessments=[],
        ),
        data=TraceData(spans=[Span(root_span)]),
    )
    with mock.patch("mlflow.search_traces", return_value=[broken_trace, healthy_trace]):
        results = SearchTracesTool().invoke(mock_trace)
    # Only the convertible trace survives.
    assert len(results) == 1
    assert results[0].trace_id == "trace-2"
def test_search_traces_tool_invoke_no_filter(
    mock_trace: Trace, mock_search_traces_list: list[Trace]
) -> None:
    with mock.patch("mlflow.search_traces", return_value=mock_search_traces_list) as search_patch:
        results = SearchTracesTool().invoke(mock_trace, filter_string=None)
    assert search_patch.call_args[1]["filter_string"] is None
    assert len(results) == 2
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/test_judge_tool_search_traces.py",
"license": "Apache License 2.0",
"lines": 370,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/test_otel_loading.py | import uuid
from pathlib import Path
import pytest
from opentelemetry import trace as otel_trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource as OTelSDKResource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.trace import Status, StatusCode
from opentelemetry.util._once import Once
import mlflow
from mlflow.entities import SpanStatusCode
from mlflow.entities.assessment import AssessmentSource, Expectation, Feedback
from mlflow.entities.assessment_source import AssessmentSourceType
from mlflow.server import handlers
from mlflow.server.fastapi_app import app
from mlflow.server.handlers import initialize_backend_stores
from mlflow.tracing.constant import SpanAttributeKey
from mlflow.tracing.otel.translation.base import OtelSchemaTranslator
from mlflow.tracing.otel.translation.genai_semconv import GenAiTranslator
from mlflow.tracing.otel.translation.open_inference import OpenInferenceTranslator
from mlflow.tracing.otel.translation.traceloop import TraceloopTranslator
from mlflow.tracing.provider import _get_trace_exporter
from mlflow.tracing.utils import encode_trace_id
from mlflow.tracing.utils.otlp import MLFLOW_EXPERIMENT_ID_HEADER
from mlflow.tracking._tracking_service.utils import _use_tracking_uri
from mlflow.version import IS_TRACING_SDK_ONLY
from tests.helper_functions import get_safe_port
from tests.tracking.integration_test_utils import ServerThread
# Skip the entire module under the tracing-only SDK build, which does not
# bundle the MLflow server these tests spin up.
if IS_TRACING_SDK_ONLY:
    pytest.skip("OTel get_trace tests require full MLflow server", allow_module_level=True)
@pytest.fixture
def mlflow_server(tmp_path: Path, db_uri: str):
    """Start an in-process MLflow server backed by ``db_uri`` and yield its base URL."""
    artifact_uri = tmp_path.joinpath("artifacts").as_uri()
    # Force-reset backend stores before each test
    # NB: handlers caches the stores at module level, so they must be cleared
    # before re-initializing against this test's database/artifact location.
    handlers._tracking_store = None
    handlers._model_registry_store = None
    initialize_backend_stores(db_uri, default_artifact_root=artifact_uri)
    with ServerThread(app, get_safe_port()) as url:
        yield url
@pytest.fixture(autouse=True)
def tracking_uri_setup(mlflow_server):
    """Point the tracking URI at the per-test server for the duration of each test."""
    uri_ctx = _use_tracking_uri(mlflow_server)
    with uri_ctx:
        yield
@pytest.fixture(params=[True, False])
def is_async(request, monkeypatch):
    """Run each test with async trace logging both enabled and disabled.

    Returns the boolean param so tests can branch on it. Previously nothing was
    returned, so the fixture value was always ``None`` and every
    ``if is_async: _flush_async_logging()`` branch in this module was dead —
    the async logging path was never actually flushed or exercised.
    """
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING", "true" if request.param else "false")
    return request.param
def _flush_async_logging():
    """Drain and terminate the async trace-logging queue on the active exporter."""
    trace_exporter = _get_trace_exporter()
    assert hasattr(trace_exporter, "_async_queue"), "Async queue is not initialized"
    trace_exporter._async_queue.flush(terminate=True)
def create_tracer(mlflow_server: str, experiment_id: str, service_name: str = "test-service"):
    """Build an OTel tracer that exports spans via OTLP to the test MLflow server.

    Args:
        mlflow_server: Base URL of the running MLflow server.
        experiment_id: Experiment to attach exported traces to (sent as a header).
        service_name: Value for the ``service.name`` resource attribute.

    Returns:
        An OpenTelemetry tracer bound to a freshly installed global provider.
    """
    resource = OTelSDKResource.create({"service.name": service_name, "service.version": "1.0.0"})
    tracer_provider = TracerProvider(resource=resource)
    exporter = OTLPSpanExporter(
        endpoint=f"{mlflow_server}/v1/traces",
        headers={MLFLOW_EXPERIMENT_ID_HEADER: experiment_id},
        timeout=10,
    )
    # SimpleSpanProcessor exports synchronously, so spans reach the server as
    # soon as they end — no batching/flush needed in tests.
    span_processor = SimpleSpanProcessor(exporter)
    tracer_provider.add_span_processor(span_processor)
    # Reset the global tracer provider
    # NB: OTel refuses to override the global provider once set; clearing the
    # internal Once/provider lets each test install its own.
    otel_trace._TRACER_PROVIDER_SET_ONCE = Once()
    otel_trace._TRACER_PROVIDER = None
    otel_trace.set_tracer_provider(tracer_provider)
    return otel_trace.get_tracer(__name__)
def test_get_trace_for_otel_sent_span(mlflow_server: str, is_async):
    """A raw OTel span exported via OTLP is retrievable through mlflow.get_trace."""
    experiment = mlflow.set_experiment("otel-get-trace-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "test-service-get-trace")
    # Create a span with various attributes to test conversion
    with tracer.start_as_current_span("otel-test-span") as span:
        span.set_attribute("test.string", "string-value")
        span.set_attribute("test.number", 42)
        span.set_attribute("test.boolean", True)
        span.set_attribute("operation.type", "llm_request")
        # Capture the OTel trace ID
        otel_trace_id = span.get_span_context().trace_id
        assert span.get_span_context().is_valid
        assert otel_trace_id != 0
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    assert len(traces) > 0, "No traces found in the database"
    trace_id = traces[0].info.trace_id
    retrieved_trace = mlflow.get_trace(trace_id)
    assert retrieved_trace.info.trace_id == trace_id
    assert retrieved_trace.info.trace_location.mlflow_experiment.experiment_id == experiment_id
    assert len(retrieved_trace.data.spans) == 1
    span = retrieved_trace.data.spans[0]
    assert span.name == "otel-test-span"
    assert span.trace_id == trace_id
    # OTel spans default to UNSET status if not explicitly set
    assert span.status.status_code == SpanStatusCode.UNSET
    # Verify attributes were converted correctly
    assert span.attributes["test.string"] == "string-value"
    assert span.attributes["test.number"] == 42
    assert span.attributes["test.boolean"] is True
    assert span.attributes["operation.type"] == "llm_request"
    # Verify the trace ID matches the expected format
    expected_trace_id = f"tr-{encode_trace_id(otel_trace_id)}"
    assert trace_id == expected_trace_id
def test_get_trace_for_otel_nested_spans(mlflow_server: str, is_async):
    """Parent/child OTel spans keep their hierarchy after round-tripping through MLflow."""
    experiment = mlflow.set_experiment("otel-nested-spans-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "nested-test-service")
    # Create nested spans
    with tracer.start_as_current_span("parent-span") as parent_span:
        parent_span.set_attribute("span.level", "parent")
        with tracer.start_as_current_span("child-span") as child_span:
            child_span.set_attribute("span.level", "child")
            child_span.set_attribute("child.operation", "process_data")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    assert len(traces) > 0, "No traces found in the database"
    trace_id = traces[0].info.trace_id
    retrieved_trace = mlflow.get_trace(trace_id)
    assert len(retrieved_trace.data.spans) == 2
    spans_by_name = {span.name: span for span in retrieved_trace.data.spans}
    assert "parent-span" in spans_by_name
    assert "child-span" in spans_by_name
    parent_span = spans_by_name["parent-span"]
    child_span = spans_by_name["child-span"]
    assert parent_span.attributes["span.level"] == "parent"
    assert parent_span.parent_id is None  # Root span has no parent
    assert child_span.attributes["span.level"] == "child"
    assert child_span.attributes["child.operation"] == "process_data"
    assert child_span.parent_id == parent_span.span_id  # Child should reference parent
def test_get_trace_with_otel_span_events(mlflow_server: str, is_async):
    """Span events added via the OTel SDK survive the trip through MLflow storage."""
    experiment = mlflow.set_experiment("otel-events-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "events-test-service")
    # Create span with events using OTel SDK
    with tracer.start_as_current_span("span-with-events") as span:
        span.add_event("test_event", attributes={"event.type": "processing"})
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    trace_id = traces[0].info.trace_id
    retrieved_trace = mlflow.get_trace(trace_id)
    assert len(retrieved_trace.data.spans) == 1
    retrieved_span = retrieved_trace.data.spans[0]
    assert retrieved_span.name == "span-with-events"
    assert len(retrieved_span.events) == 1
    event = retrieved_span.events[0]
    assert event.name == "test_event"
    assert event.attributes["event.type"] == "processing"
def test_get_trace_nonexistent_otel_trace(mlflow_server: str):
    """mlflow.get_trace returns None (rather than raising) for an unknown trace ID."""
    # Build a syntactically valid but unknown trace ID in the "tr-<hex>" format.
    unknown_trace_id = f"tr-{uuid.uuid4().hex}"
    assert mlflow.get_trace(unknown_trace_id) is None
def test_get_trace_with_otel_span_status(mlflow_server: str, is_async):
    """An OTel ERROR status and its description are preserved on the MLflow span."""
    experiment = mlflow.set_experiment("otel-status-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "status-test-service")
    # Create span with error status using OTel SDK
    with tracer.start_as_current_span("error-span") as span:
        span.set_status(Status(StatusCode.ERROR, "Something went wrong"))
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    trace_id = traces[0].info.trace_id
    retrieved_trace = mlflow.get_trace(trace_id)
    assert len(retrieved_trace.data.spans) == 1
    retrieved_span = retrieved_trace.data.spans[0]
    assert retrieved_span.name == "error-span"
    assert retrieved_span.status.status_code == SpanStatusCode.ERROR
    assert "Something went wrong" in retrieved_span.status.description
def test_set_trace_tag_on_otel_trace(mlflow_server: str, is_async):
    """Tags can be set and read back on a trace that originated from raw OTel export."""
    experiment = mlflow.set_experiment("otel-tag-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "tag-test-service")
    with tracer.start_as_current_span("tagged-span") as span:
        span.set_attribute("test.attribute", "value")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    trace_id = traces[0].info.trace_id
    mlflow.set_trace_tag(trace_id, "environment", "test")
    mlflow.set_trace_tag(trace_id, "version", "1.0.0")
    retrieved_trace = mlflow.get_trace(trace_id)
    assert retrieved_trace.info.tags["environment"] == "test"
    assert retrieved_trace.info.tags["version"] == "1.0.0"
def test_log_expectation_on_otel_trace(mlflow_server: str, is_async):
    """Human expectations can be logged and retrieved on an OTel-exported trace."""
    experiment = mlflow.set_experiment("otel-expectation-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "expectation-test-service")
    # Create a span that represents a question-answer scenario
    with tracer.start_as_current_span("qa-span") as span:
        span.set_attribute("question", "What is MLflow?")
        span.set_attribute("answer", "MLflow is an open-source ML platform")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    trace_id = traces[0].info.trace_id
    expectation_source = AssessmentSource(
        source_type=AssessmentSourceType.HUMAN, source_id="test_user@example.com"
    )
    logged_assessment = mlflow.log_expectation(
        trace_id=trace_id,
        name="expected_answer",
        value="MLflow is an open-source machine learning platform",
        source=expectation_source,
        metadata={"confidence": "high", "reviewed_by": "expert"},
    )
    expectation = mlflow.get_assessment(
        trace_id=trace_id, assessment_id=logged_assessment.assessment_id
    )
    assert expectation.name == "expected_answer"
    assert expectation.value == "MLflow is an open-source machine learning platform"
    assert expectation.source.source_type == AssessmentSourceType.HUMAN
    assert expectation.metadata["confidence"] == "high"
def test_log_feedback_on_otel_trace(mlflow_server: str, is_async):
    """Both LLM-judge and human feedback can be logged on an OTel-exported trace."""
    experiment = mlflow.set_experiment("otel-feedback-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "feedback-test-service")
    # Create a span representing a model prediction
    with tracer.start_as_current_span("prediction-span") as span:
        span.set_attribute("model", "gpt-4")
        span.set_attribute("prediction", "The weather is sunny")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    assert len(traces) > 0, "No traces found in the database"
    trace_id = traces[0].info.trace_id
    llm_source = AssessmentSource(
        source_type=AssessmentSourceType.LLM_JUDGE, source_id="gpt-4o-mini"
    )
    logged_quality = mlflow.log_feedback(
        trace_id=trace_id,
        name="quality_score",
        value=8.5,
        source=llm_source,
        metadata={"scale": "1-10", "criterion": "accuracy"},
    )
    feedback = mlflow.get_assessment(trace_id=trace_id, assessment_id=logged_quality.assessment_id)
    assert feedback.name == "quality_score"
    assert feedback.value == 8.5
    assert feedback.source.source_type == AssessmentSourceType.LLM_JUDGE
    human_source = AssessmentSource(
        source_type=AssessmentSourceType.HUMAN, source_id="reviewer@example.com"
    )
    logged_approval = mlflow.log_feedback(
        trace_id=trace_id,
        name="approved",
        value=True,
        source=human_source,
        metadata={"review_date": "2024-01-15"},
    )
    feedback = mlflow.get_assessment(trace_id=trace_id, assessment_id=logged_approval.assessment_id)
    assert feedback.name == "approved"
    assert feedback.value is True
    assert feedback.source.source_type == AssessmentSourceType.HUMAN
def test_multiple_assessments_on_otel_trace(mlflow_server: str, is_async):
    """Tags, an expectation, and several feedback items coexist on one OTel trace."""
    experiment = mlflow.set_experiment("otel-multi-assessment-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "multi-assessment-test-service")
    # Create a complex trace with nested spans
    with tracer.start_as_current_span("conversation") as parent_span:
        parent_span.set_attribute("user_query", "Explain quantum computing")
        with tracer.start_as_current_span("retrieval") as retrieval_span:
            retrieval_span.set_attribute("documents_found", 5)
        with tracer.start_as_current_span("generation") as generation_span:
            generation_span.set_attribute("model", "gpt-4")
            generation_span.set_attribute("response", "Quantum computing uses quantum mechanics...")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    trace_id = traces[0].info.trace_id
    mlflow.set_trace_tag(trace_id, "topic", "quantum_computing")
    mlflow.set_trace_tag(trace_id, "complexity", "high")
    human_source = AssessmentSource(AssessmentSourceType.HUMAN, "expert@physics.edu")
    llm_source = AssessmentSource(AssessmentSourceType.LLM_JUDGE, "claude-3")
    expectation = Expectation(
        name="expected_quality",
        value="Should explain quantum superposition and entanglement",
        source=human_source,
    )
    mlflow.log_assessment(trace_id=trace_id, assessment=expectation)
    feedback_items = [
        Feedback(name="accuracy", value=9.0, source=llm_source, metadata={"max_score": "10"}),
        Feedback(name="clarity", value=8.5, source=llm_source, metadata={"max_score": "10"}),
        Feedback(
            name="helpfulness",
            value=True,
            source=human_source,
            metadata={"reviewer_expertise": "quantum_physics"},
        ),
        Feedback(
            name="contains_errors",
            value=False,
            source=human_source,
            metadata={"fact_checked": "True"},
        ),
    ]
    for feedback in feedback_items:
        mlflow.log_assessment(trace_id=trace_id, assessment=feedback)
    retrieved_trace = mlflow.get_trace(trace_id)
    assessments = retrieved_trace.info.assessments
    # 1 expectation + 4 feedback items, returned in logging order.
    assert len(assessments) == 5
    assert [a.name for a in assessments] == [
        "expected_quality",
        "accuracy",
        "clarity",
        "helpfulness",
        "contains_errors",
    ]
    assert retrieved_trace.info.tags["topic"] == "quantum_computing"
    assert retrieved_trace.info.tags["complexity"] == "high"
    assert len(retrieved_trace.data.spans) == 3
    span_names = {span.name for span in retrieved_trace.data.spans}
    assert span_names == {"conversation", "retrieval", "generation"}
    tagged_traces = mlflow.search_traces(
        locations=[experiment_id],
        filter_string='tags.topic = "quantum_computing"',
        return_type="list",
    )
    assert len(tagged_traces) == 1
    assert tagged_traces[0].info.trace_id == trace_id
def test_span_kind_translation(mlflow_server: str, is_async):
    """OpenInference/Traceloop span-kind attributes map onto MLflow span types."""
    experiment = mlflow.set_experiment("span-kind-translation-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "span-kind-translation-test-service")
    with tracer.start_as_current_span("llm-call") as span:
        span.set_attribute(OpenInferenceTranslator.SPAN_KIND_ATTRIBUTE_KEY, "LLM")
    with tracer.start_as_current_span("retriever-call") as span:
        span.set_attribute(OpenInferenceTranslator.SPAN_KIND_ATTRIBUTE_KEY, "RETRIEVER")
    with tracer.start_as_current_span("tool-call") as span:
        span.set_attribute(TraceloopTranslator.SPAN_KIND_ATTRIBUTE_KEY, "tool")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    # Each top-level span above starts its own trace.
    assert len(traces) == 3
    for trace_info in traces:
        retrieved_trace = mlflow.get_trace(trace_info.info.trace_id)
        for span in retrieved_trace.data.spans:
            if span.name == "llm-call":
                assert span.span_type == "LLM"
            elif span.name == "retriever-call":
                assert span.span_type == "RETRIEVER"
            elif span.name == "tool-call":
                assert span.span_type == "TOOL"
@pytest.mark.parametrize(
    "translator", [GenAiTranslator, OpenInferenceTranslator, TraceloopTranslator]
)
def test_span_inputs_outputs_translation(
    mlflow_server: str, is_async, translator: OtelSchemaTranslator
):
    """Each schema's input/output attributes map to MLflow span inputs/outputs."""
    experiment = mlflow.set_experiment("span-inputs-outputs-translation-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(
        mlflow_server, experiment_id, "span-inputs-outputs-translation-test-service"
    )
    with tracer.start_as_current_span("llm-call") as span:
        span.set_attribute(translator.INPUT_VALUE_KEYS[0], "Hello, world!")
        span.set_attribute(translator.OUTPUT_VALUE_KEYS[0], "Bye!")
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    assert len(traces) == 1
    retrieved_trace = mlflow.get_trace(traces[0].info.trace_id)
    assert retrieved_trace.data.spans[0].inputs == "Hello, world!"
    assert retrieved_trace.data.spans[0].outputs == "Bye!"
    # Previews are the JSON-serialized input/output values.
    assert retrieved_trace.info.request_preview == '"Hello, world!"'
    assert retrieved_trace.info.response_preview == '"Bye!"'
@pytest.mark.parametrize(
    "translator", [GenAiTranslator, OpenInferenceTranslator, TraceloopTranslator]
)
def test_span_token_usage_translation(
    mlflow_server: str, is_async, translator: OtelSchemaTranslator
):
    """Each schema's token-count attributes surface as MLflow trace token usage."""
    experiment = mlflow.set_experiment("span-token-usage-translation-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(
        mlflow_server, experiment_id, "span-token-usage-translation-test-service"
    )
    with tracer.start_as_current_span("llm-call") as span:
        span.set_attribute(translator.INPUT_TOKEN_KEY, 100)
        span.set_attribute(translator.OUTPUT_TOKEN_KEY, 50)
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    assert len(traces) > 0
    for trace_info in traces:
        assert trace_info.info.token_usage == {
            "input_tokens": 100,
            "output_tokens": 50,
            "total_tokens": 150,
        }
        retrieved_trace = mlflow.get_trace(trace_info.info.trace_id)
        assert (
            retrieved_trace.data.spans[0].attributes[SpanAttributeKey.CHAT_USAGE]
            == trace_info.info.token_usage
        )
@pytest.mark.parametrize(
    "translator", [GenAiTranslator, OpenInferenceTranslator, TraceloopTranslator]
)
def test_aggregated_token_usage_from_multiple_spans(
    mlflow_server: str, is_async, translator: OtelSchemaTranslator
):
    """Token counts from parent and child spans are summed at the trace level."""
    experiment = mlflow.set_experiment("aggregated-token-usage-test")
    experiment_id = experiment.experiment_id
    tracer = create_tracer(mlflow_server, experiment_id, "token-aggregation-service")
    with tracer.start_as_current_span("parent-llm-call") as parent:
        parent.set_attribute(translator.INPUT_TOKEN_KEY, 100)
        parent.set_attribute(translator.OUTPUT_TOKEN_KEY, 50)
        with tracer.start_as_current_span("child-llm-call-1") as child1:
            child1.set_attribute(translator.INPUT_TOKEN_KEY, 200)
            child1.set_attribute(translator.OUTPUT_TOKEN_KEY, 75)
        with tracer.start_as_current_span("child-llm-call-2") as child2:
            child2.set_attribute(translator.INPUT_TOKEN_KEY, 150)
            child2.set_attribute(translator.OUTPUT_TOKEN_KEY, 100)
    if is_async:
        _flush_async_logging()
    traces = mlflow.search_traces(
        locations=[experiment_id], include_spans=False, return_type="list"
    )
    trace_id = traces[0].info.trace_id
    retrieved_trace = mlflow.get_trace(trace_id)
    # 100+200+150 input and 50+75+100 output tokens across the three spans.
    assert retrieved_trace.info.token_usage is not None
    assert retrieved_trace.info.token_usage["input_tokens"] == 450
    assert retrieved_trace.info.token_usage["output_tokens"] == 225
    assert retrieved_trace.info.token_usage["total_tokens"] == 675
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/test_otel_loading.py",
"license": "Apache License 2.0",
"lines": 449,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/test_scorer_description.py | from unittest.mock import patch
import pytest
from mlflow.genai import scorer
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.instructions_judge import InstructionsJudge
from mlflow.genai.scorers import RelevanceToQuery
@pytest.fixture(autouse=True)
def mock_databricks_runtime():
    """Pretend we are running against Databricks so scorer validation passes."""
    patcher = patch("mlflow.genai.scorers.base.is_databricks_uri", return_value=True)
    with patcher:
        yield
def test_decorator_scorer_with_description():
    """@scorer(description=...) stores the description on the scorer object."""
    expected = "Checks if output length exceeds 100 characters"

    @scorer(description=expected)
    def length_check(outputs) -> bool:
        return len(outputs) > 100

    assert length_check.description == expected
def test_decorator_scorer_without_description():
    """A bare @scorer decorator leaves the description attribute as None."""

    @scorer
    def noop_scorer(outputs) -> bool:
        return True

    assert noop_scorer.description is None
def test_decorator_scorer_with_name_and_description():
    """name= and description= can be supplied together via the decorator."""
    desc = "Custom description for scorer"

    @scorer(name="custom_name", description=desc)
    def my_scorer(outputs) -> bool:
        return True

    assert my_scorer.name == "custom_name"
    assert my_scorer.description == desc
def test_builtin_scorer_with_description():
    """A built-in scorer accepts a user-supplied description override."""
    custom_desc = "Custom description for relevance scorer"
    assert RelevanceToQuery(description=custom_desc).description == custom_desc
def test_builtin_scorer_without_description():
    """Built-in scorers ship with a non-empty default description."""
    # Built-in scorers carry default descriptions for discoverability.
    default_desc = RelevanceToQuery().description
    assert default_desc is not None
    assert isinstance(default_desc, str)
    assert len(default_desc) > 0
@pytest.mark.parametrize(
    ("name", "description"),
    [
        ("test_judge", "Evaluates response quality"),
        ("another_judge", None),
        ("judge_with_desc", "This is a test description"),
    ],
)
def test_make_judge_with_description(name: str, description: str | None):
    """make_judge propagates both the name and the (possibly None) description."""
    judge_kwargs = dict(
        name=name,
        instructions="Evaluate if {{ outputs }} is good quality",
        model="openai:/gpt-4",
        description=description,
        feedback_value_type=str,
    )
    judge = make_judge(**judge_kwargs)
    assert judge.name == name
    assert judge.description == description
@pytest.mark.parametrize(
    "description",
    [
        "Direct InstructionsJudge with description",
        None,
    ],
)
def test_instructions_judge_description(description: str | None):
    """InstructionsJudge stores the description passed to its constructor."""
    constructed = InstructionsJudge(
        name="test_judge",
        instructions="Evaluate {{ outputs }}",
        model="openai:/gpt-4",
        description=description,
    )
    assert constructed.description == description
@pytest.mark.parametrize(
    "description",
    [
        "Test description for serialization",
        None,
    ],
)
def test_scorer_serialization(description: str | None):
    """model_dump() includes the description alongside the scorer name."""

    @scorer(description=description)
    def test_scorer(outputs) -> bool:
        return True

    dumped = test_scorer.model_dump()
    assert "description" in dumped
    assert dumped["description"] == description
    assert dumped["name"] == "test_scorer"
def test_scorer_deserialization_with_description():
    """Round-tripping through model_dump/model_validate preserves the description."""
    from mlflow.genai.scorers.base import Scorer

    desc = "Test description for deserialization"

    @scorer(description=desc)
    def test_scorer(outputs) -> bool:
        return True

    # Serialize and deserialize
    round_tripped = Scorer.model_validate(test_scorer.model_dump())
    assert round_tripped.description == desc
    assert round_tripped.name == "test_scorer"
def test_backward_compatibility_scorer_without_description():
    """Description defaults: None for custom scorers/judges, non-empty for built-ins."""

    # Decorator-based custom scorers still default to no description.
    @scorer
    def old_scorer(outputs) -> bool:
        return True

    assert old_scorer.description is None

    # Built-in scorers now carry a default description.
    builtin = RelevanceToQuery()
    assert builtin.description is not None
    assert isinstance(builtin.description, str)
    assert len(builtin.description) > 0

    # Custom judges likewise default to None.
    judge = InstructionsJudge(
        name="old_judge",
        instructions="Evaluate {{ outputs }}",
        model="openai:/gpt-4",
    )
    assert judge.description is None
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/test_scorer_description.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/pytorch/test_forecasting_model.py | import os
import numpy as np
import pytest
import torch
from lightning.pytorch import Trainer
from pytorch_forecasting import DeepAR, TimeSeriesDataSet
from pytorch_forecasting.data.examples import generate_ar_data
import mlflow
@pytest.fixture
def model_path(tmp_path):
    """Path under the per-test temp dir where the model is saved."""
    path = os.path.join(tmp_path, "model")
    return path
def _gen_forecasting_model_and_data(n_series, timesteps, max_prediction_length):
    """Train a small DeepAR model on synthetic AR data and return ``(model, data)``."""
    data = generate_ar_data(seasonality=10.0, timesteps=timesteps, n_series=n_series)
    max_encoder_length = 30
    dataset = TimeSeriesDataSet(
        data[lambda x: x.time_idx <= timesteps - max_prediction_length],
        time_idx="time_idx",
        target="value",
        group_ids=["series"],
        max_encoder_length=max_encoder_length,
        max_prediction_length=max_prediction_length,
        time_varying_unknown_reals=["value"],
    )
    model = DeepAR.from_dataset(
        dataset,
        learning_rate=1e-3,
        hidden_size=16,
        rnn_layers=2,
    )
    train_loader = dataset.to_dataloader(train=True, batch_size=32)
    trainer = Trainer(max_epochs=2, gradient_clip_val=0.1, accelerator="auto")
    trainer.fit(model, train_dataloaders=train_loader)
    return model, data
def test_forecasting_model_pyfunc_loader(model_path: str):
    """A saved pytorch-forecasting model predicts identically when reloaded via pyfunc."""
    n_series = 10
    max_prediction_length = 20
    deepar, data = _gen_forecasting_model_and_data(
        n_series=n_series,
        timesteps=100,
        max_prediction_length=max_prediction_length,
    )
    # Seed immediately before each predict call so the stochastic outputs match.
    torch.manual_seed(42)
    predicted = deepar.predict(data).numpy()
    assert predicted.shape == (n_series, max_prediction_length)
    mlflow.pytorch.save_model(deepar, model_path)
    pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
    torch.manual_seed(42)
    np.testing.assert_array_almost_equal(pyfunc_loaded.predict(data), predicted, decimal=4)
    with pytest.raises(
        TypeError,
        match="The pytorch forecasting model does not support numpy.ndarray",
    ):
        pyfunc_loaded.predict(np.array([1.0, 2.0]))
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/pytorch/test_forecasting_model.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/databricks.py | from mlflow.exceptions import MlflowException
from mlflow.utils.annotations import experimental
from mlflow.utils.uri import is_databricks_uri
@experimental(version="3.5.0")
def set_databricks_monitoring_sql_warehouse_id(
    sql_warehouse_id: str, experiment_id: str | None = None
) -> None:
    """
    Set the SQL warehouse ID used for Databricks production monitoring on traces logged to the
    given MLflow experiment. This only has an effect for experiments with UC schema as trace
    location.

    Args:
        sql_warehouse_id: The SQL warehouse ID to use for monitoring.
        experiment_id: The MLflow experiment ID. If not provided, the current active experiment
            will be used.

    Raises:
        MlflowException: If the tracking URI is not a Databricks URI, or no experiment can be
            resolved.
    """
    from mlflow.entities import ExperimentTag
    from mlflow.tracking import get_tracking_uri
    from mlflow.tracking._tracking_service.utils import _get_store
    from mlflow.tracking.fluent import _get_experiment_id

    # Guard: this API only makes sense against a Databricks tracking server.
    current_uri = get_tracking_uri()
    if not is_databricks_uri(current_uri):
        raise MlflowException(
            "This function is only supported when the tracking URI is set to 'databricks'. "
            f"Current tracking URI: {current_uri}"
        )

    target_experiment_id = experiment_id or _get_experiment_id()
    if not target_experiment_id:
        raise MlflowException(
            "No experiment ID provided and no active experiment found. "
            "Please provide an experiment_id or set an active experiment "
            "using mlflow.set_experiment()."
        )

    monitoring_tag = ExperimentTag("mlflow.monitoring.sqlWarehouseId", sql_warehouse_id)
    _get_store().set_experiment_tag(target_experiment_id, monitoring_tag)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/databricks.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/tracing/test_databricks.py | from unittest import mock
import pytest
from mlflow.exceptions import MlflowException
from mlflow.tracing.databricks import set_databricks_monitoring_sql_warehouse_id
def test_set_databricks_monitoring_sql_warehouse_id_requires_databricks_tracking_uri():
    """A non-Databricks tracking URI is rejected with an MlflowException."""
    with (
        mock.patch("mlflow.get_tracking_uri", return_value="file:///tmp"),
        pytest.raises(MlflowException, match="only supported when the tracking URI"),
    ):
        set_databricks_monitoring_sql_warehouse_id(
            sql_warehouse_id="warehouse123", experiment_id="exp456"
        )
def test_set_databricks_monitoring_sql_warehouse_id_with_explicit_experiment_id():
    """An explicitly passed experiment_id is tagged with the warehouse ID."""
    mock_store = mock.MagicMock()
    with (
        mock.patch("mlflow.tracking.get_tracking_uri", return_value="databricks"),
        mock.patch(
            "mlflow.tracking._tracking_service.utils._get_store",
            return_value=mock_store,
        ),
    ):
        set_databricks_monitoring_sql_warehouse_id(
            sql_warehouse_id="warehouse123", experiment_id="exp456"
        )
    mock_store.set_experiment_tag.assert_called_once()
    (tagged_experiment_id, tag), _ = mock_store.set_experiment_tag.call_args
    assert tagged_experiment_id == "exp456"
    assert tag.key == "mlflow.monitoring.sqlWarehouseId"
    assert tag.value == "warehouse123"
def test_set_databricks_monitoring_sql_warehouse_id_with_default_experiment_id():
    """When experiment_id is omitted, the active experiment is tagged instead."""
    mock_store = mock.MagicMock()
    with (
        mock.patch("mlflow.tracking.get_tracking_uri", return_value="databricks"),
        mock.patch(
            "mlflow.tracking._tracking_service.utils._get_store",
            return_value=mock_store,
        ),
        mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value="default_exp"),
    ):
        set_databricks_monitoring_sql_warehouse_id(sql_warehouse_id="warehouse789")
    mock_store.set_experiment_tag.assert_called_once()
    (tagged_experiment_id, tag), _ = mock_store.set_experiment_tag.call_args
    assert tagged_experiment_id == "default_exp"
    assert tag.key == "mlflow.monitoring.sqlWarehouseId"
    assert tag.value == "warehouse789"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/test_databricks.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/equivalence.py | from mlflow.genai.prompts.utils import format_prompt
# NB: User-facing name for the equivalence assessment.
EQUIVALENCE_FEEDBACK_NAME = "equivalence"
# Instruction half of the judge prompt; {{output}} and {{expected_output}} are
# template placeholders substituted by format_prompt in get_prompt below.
EQUIVALENCE_PROMPT_INSTRUCTIONS = """\
Compare the following actual output against the expected output. You must determine whether they \
are semantically equivalent or convey the same meaning, and if the output format matches the \
expected format (e.g., JSON structure, list format, sentence structure).
<actual_output>{{output}}</actual_output>
<expected_output>{{expected_output}}</expected_output>\
"""
# Output-format half: requires a bare two-field JSON object so the judge's
# verdict can be parsed programmatically.
EQUIVALENCE_PROMPT_OUTPUT = """
Please indicate whether the actual output is equivalent to the expected output using only the following json format. Do not use any markdown formatting or output additional lines.
{
"rationale": "Reason for the assessment. Explain whether the outputs are semantically equivalent and whether the format matches. Start each rationale with `Let's think step by step`",
"result": "yes|no"
}\
""" # noqa: E501
# Full prompt sent to the judge model.
EQUIVALENCE_PROMPT = EQUIVALENCE_PROMPT_INSTRUCTIONS + EQUIVALENCE_PROMPT_OUTPUT
def get_prompt(
    output: str,
    expected_output: str,
) -> str:
    """Build the equivalence-judge prompt for an actual/expected output pair.

    Args:
        output: The actual output to evaluate.
        expected_output: The expected output to compare against.

    Returns:
        The fully formatted prompt string.
    """
    prompt_kwargs = {"output": output, "expected_output": expected_output}
    return format_prompt(EQUIVALENCE_PROMPT, **prompt_kwargs)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/equivalence.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/optimize/optimize.py | import logging
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import nullcontext
from typing import TYPE_CHECKING, Any, Callable
import mlflow
from mlflow.entities import Trace
from mlflow.entities.evaluation_dataset import EvaluationDataset as EntityEvaluationDataset
from mlflow.entities.model_registry import PromptVersion
from mlflow.environment_variables import MLFLOW_GENAI_EVAL_MAX_WORKERS
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets import EvaluationDataset as ManagedEvaluationDataset
from mlflow.genai.evaluation.utils import (
_convert_eval_set_to_df,
)
from mlflow.genai.optimize.optimizers import BasePromptOptimizer
from mlflow.genai.optimize.types import (
AggregationFn,
EvaluationResultRecord,
PromptOptimizationResult,
)
from mlflow.genai.optimize.util import (
create_metric_from_scorers,
prompt_optimization_autolog,
validate_train_data,
)
from mlflow.genai.prompts import load_prompt, register_prompt
from mlflow.genai.scorers import Scorer
from mlflow.genai.utils.trace_utils import convert_predict_fn
from mlflow.models.evaluation.utils.trace import configure_autologging_for_evaluation
from mlflow.prompt.constants import PROMPT_TEXT_TAG_KEY
from mlflow.telemetry.events import PromptOptimizationEvent
from mlflow.telemetry.track import record_usage_event
from mlflow.utils import gorilla
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils.safety import _wrap_patch
if TYPE_CHECKING:
from mlflow.genai.evaluation.utils import EvaluationDatasetTypes
_logger = logging.getLogger(__name__)
@experimental(version="3.5.0")
@record_usage_event(PromptOptimizationEvent)
def optimize_prompts(
    *,
    predict_fn: Callable[..., Any],
    train_data: "EvaluationDatasetTypes",
    prompt_uris: list[str],
    optimizer: BasePromptOptimizer,
    scorers: list[Scorer] | None = None,
    aggregation: AggregationFn | None = None,
    enable_tracking: bool = True,
) -> PromptOptimizationResult:
    """
    Automatically optimize prompts using evaluation metrics and training data.
    This function uses the provided optimization algorithm to improve prompt
    quality based on your evaluation criteria and dataset.
    Args:
        predict_fn: a target function that uses the prompts to be optimized.
            The callable should receive inputs as keyword arguments and
            return the response. The function should use MLflow prompt registry
            and call `PromptVersion.format` during execution in order for this
            API to optimize the prompt. This function should return the
            same type as the outputs in the dataset.
        train_data: an evaluation dataset used for the optimization.
            It should include the inputs and outputs fields with dict values.
            The data must be one of the following formats:
            * An EvaluationDataset entity
            * Pandas DataFrame
            * Spark DataFrame
            * List of dictionaries
            The dataset must include the following columns:
            - inputs: A column containing single inputs in dict format.
                Each input should contain keys matching the variables in the prompt template.
            - outputs: A column containing an output for each input
                that the predict_fn should produce.
            If None, the optimization will be performed in zero-shot mode.
        prompt_uris: a list of prompt uris to be optimized.
            The prompt templates should be used by the predict_fn.
        optimizer: a prompt optimizer object that optimizes a set of prompts with
            the training dataset and scorers. For example,
            GepaPromptOptimizer(reflection_model="openai:/gpt-4o").
        scorers: List of scorers that evaluate the inputs, outputs and expectations.
            Use builtin scorers like Equivalence or Correctness,
            or define custom scorers with the @scorer decorator.
            If None, the optimization will be performed in zero-shot mode.
            `train_data` must be provided if `scorers` is provided.
        aggregation: A callable that computes the overall performance metric from individual
            scorer outputs. Takes a dict mapping scorer names to scores and returns a float
            value (greater is better). If None and all scorers return numerical values,
            uses sum of scores by default.
        enable_tracking: If True (default), automatically creates an MLflow run if no active
            run exists and logs the following information:
            - The optimization scores (initial, final, improvement)
            - Links to the optimized prompt versions
            - The optimizer name and parameters
            - Optimization progress
            If False, no MLflow run is created and no tracking occurs.
    Returns:
        The optimization result object that includes the optimized prompts
        as a list of prompt versions, evaluation scores, and the optimizer name.
    Examples:
        .. code-block:: python
            import mlflow
            import openai
            from mlflow.genai.optimize.optimizers import GepaPromptOptimizer
            from mlflow.genai.scorers import Correctness
            prompt = mlflow.genai.register_prompt(
                name="qa",
                template="Answer the following question: {{question}}",
            )
            def predict_fn(question: str) -> str:
                completion = openai.OpenAI().chat.completions.create(
                    model="gpt-4o-mini",
                    messages=[{"role": "user", "content": prompt.format(question=question)}],
                )
                return completion.choices[0].message.content
            dataset = [
                {"inputs": {"question": "What is the capital of France?"}, "outputs": "Paris"},
                {"inputs": {"question": "What is the capital of Germany?"}, "outputs": "Berlin"},
            ]
            result = mlflow.genai.optimize_prompts(
                predict_fn=predict_fn,
                train_data=dataset,
                prompt_uris=[prompt.uri],
                optimizer=GepaPromptOptimizer(reflection_model="openai:/gpt-4o"),
                scorers=[Correctness(model="openai:/gpt-4o")],
            )
            print(result.optimized_prompts[0].template)
    **Example: Using custom scorers with an objective function**
        .. code-block:: python
            import mlflow
            from mlflow.genai.optimize.optimizers import GepaPromptOptimizer
            from mlflow.genai.scorers import scorer
            # Define custom scorers
            @scorer(name="accuracy")
            def accuracy_scorer(outputs, expectations):
                return 1.0 if outputs.lower() == expectations.lower() else 0.0
            @scorer(name="brevity")
            def brevity_scorer(outputs):
                # Prefer shorter outputs (max 50 chars gets score of 1.0)
                return min(1.0, 50 / max(len(outputs), 1))
            # Define objective to combine scores
            def weighted_objective(scores):
                return 0.7 * scores["accuracy"] + 0.3 * scores["brevity"]
            result = mlflow.genai.optimize_prompts(
                predict_fn=predict_fn,
                train_data=dataset,
                prompt_uris=[prompt.uri],
                optimizer=GepaPromptOptimizer(reflection_model="openai:/gpt-4o"),
                scorers=[accuracy_scorer, brevity_scorer],
                aggregation=weighted_objective,
            )
    """
    # For EvaluationDataset types, convert to DataFrame first since they don't support len()
    if isinstance(train_data, (EntityEvaluationDataset, ManagedEvaluationDataset)):
        train_data = train_data.to_df()
    has_train_data = train_data is not None and len(train_data) > 0
    has_scorers = scorers is not None and len(scorers) > 0
    if has_scorers and not has_train_data:
        # Fixed message: previously read "... is None or empty or None."
        raise MlflowException.invalid_parameter_value(
            "`scorers` is provided but `train_data` is None or empty. "
            "`train_data` must be provided if `scorers` is provided."
        )
    if not has_train_data:
        # Zero-shot mode: no training data provided
        train_data_df = None
        converted_train_data = []
    else:
        # Few-shot mode: convert and validate training data
        train_data_df = _convert_eval_set_to_df(train_data)
        converted_train_data = train_data_df.to_dict("records")
        validate_train_data(train_data_df, scorers, predict_fn)
    # Wrap predict_fn so it accepts the eval harness calling convention; a sample
    # input is used to probe whether it takes kwargs or a single dict.
    sample_input = converted_train_data[0]["inputs"] if len(converted_train_data) > 0 else None
    predict_fn = convert_predict_fn(predict_fn=predict_fn, sample_input=sample_input)
    metric_fn = create_metric_from_scorers(scorers, aggregation) if has_scorers else None
    eval_fn = _build_eval_fn(predict_fn, metric_fn) if has_train_data else None
    target_prompts = [load_prompt(prompt_uri) for prompt_uri in prompt_uris]
    if not all(prompt.is_text_prompt for prompt in target_prompts):
        raise MlflowException("Only text prompts can be optimized")
    target_prompts_dict = {prompt.name: prompt.template for prompt in target_prompts}
    target_prompts_model_config = {prompt.name: prompt.model_config for prompt in target_prompts}
    with (
        prompt_optimization_autolog(
            optimizer_name=optimizer.__class__.__name__,
            num_prompts=len(target_prompts),
            num_training_samples=len(converted_train_data),
            train_data_df=train_data_df,
        )
        if enable_tracking
        else nullcontext({})
    ) as log_results:
        optimizer_output = optimizer.optimize(
            eval_fn, converted_train_data, target_prompts_dict, enable_tracking
        )
        # Register each optimized template as a new prompt version, carrying over
        # the model_config of the source prompt.
        optimized_prompts = [
            register_prompt(
                name=prompt_name,
                template=prompt,
                model_config=target_prompts_model_config.get(prompt_name),
            )
            for prompt_name, prompt in optimizer_output.optimized_prompts.items()
        ]
        log_results["optimizer_output"] = optimizer_output
        log_results["optimized_prompts"] = optimized_prompts
    return PromptOptimizationResult(
        optimized_prompts=optimized_prompts,
        optimizer_name=optimizer.__class__.__name__,
        initial_eval_score=optimizer_output.initial_eval_score,
        final_eval_score=optimizer_output.final_eval_score,
        initial_eval_score_per_scorer=optimizer_output.initial_eval_score_per_scorer,
        final_eval_score_per_scorer=optimizer_output.final_eval_score_per_scorer,
    )
def _build_eval_fn(
    predict_fn: Callable[..., Any],
    metric_fn: Callable[
        [dict[str, Any], dict[str, Any], dict[str, Any], Trace | None],
        tuple[float, dict[str, str], dict[str, float]],
    ]
    | None,
) -> Callable[[dict[str, str], list[dict[str, Any]]], list[EvaluationResultRecord]]:
    """
    Build an evaluation function that uses the candidate prompts to evaluate the predict_fn.
    Args:
        predict_fn: The function to evaluate
        metric_fn: Metric function created from scorers that takes (inputs, outputs, expectations).
            If None, the evaluation function will still run predict_fn and capture traces,
            but score will be None (useful for metaprompting without scorers).
    Returns:
        An evaluation function
    """
    from mlflow.pyfunc import Context, set_prediction_context
    def eval_fn(
        candidate_prompts: dict[str, str], dataset: list[dict[str, Any]]
    ) -> list[EvaluationResultRecord]:
        # Names of candidate prompts that were actually fetched via `.template`
        # during this evaluation pass; used for the unused-prompt warning below.
        used_prompts = set()
        # Replacement for PromptVersion.template: serve the candidate template when
        # one exists for this prompt name, otherwise fall back to the stored tag value.
        @property
        def _template_patch(self) -> str:
            template_name = self.name
            if template_name in candidate_prompts:
                used_prompts.add(template_name)
                return candidate_prompts[template_name]
            return self._tags.get(PROMPT_TEXT_TAG_KEY, "")
        # Patch the class attribute so any PromptVersion loaded inside predict_fn
        # transparently sees the candidate template. Reverted in the finally block.
        patch = _wrap_patch(PromptVersion, "template", _template_patch)
        def _run_single(record: dict[str, Any]):
            # Evaluate predict_fn on one dataset record and score the result.
            inputs = record["inputs"]
            # use expectations if provided, otherwise use outputs
            expectations = record.get("expectations") or {
                "expected_response": record.get("outputs")
            }
            eval_request_id = str(uuid.uuid4())
            # set prediction context to retrieve the trace by the request id,
            # and set is_evaluate to True to disable async trace logging
            with set_prediction_context(Context(request_id=eval_request_id, is_evaluate=True)):
                try:
                    program_outputs = predict_fn(inputs)
                except Exception as e:
                    # A failing predict_fn does not abort the evaluation; the error
                    # text becomes the output so the scorer can penalize it.
                    program_outputs = f"Failed to invoke the predict_fn with {inputs}: {e}"
            trace = mlflow.get_trace(eval_request_id, silent=True)
            if metric_fn is not None:
                score, rationales, individual_scores = metric_fn(
                    inputs=inputs, outputs=program_outputs, expectations=expectations, trace=trace
                )
            else:
                score = None
                rationales = {}
                individual_scores = {}
            return EvaluationResultRecord(
                inputs=inputs,
                outputs=program_outputs,
                expectations=expectations,
                score=score,
                trace=trace,
                rationales=rationales,
                individual_scores=individual_scores,
            )
        try:
            # Run all records concurrently; results preserve dataset order because
            # futures are collected in submission order.
            with (
                ThreadPoolExecutor(
                    max_workers=MLFLOW_GENAI_EVAL_MAX_WORKERS.get(),
                    thread_name_prefix="MLflowPromptOptimization",
                ) as executor,
                configure_autologging_for_evaluation(enable_tracing=True),
            ):
                futures = [executor.submit(_run_single, record) for record in dataset]
                results = [future.result() for future in futures]
            # Check for unused prompts and warn
            if unused_prompts := set(candidate_prompts.keys()) - used_prompts:
                _logger.warning(
                    "The following prompts were not used during evaluation: "
                    f"{sorted(unused_prompts)}. This may indicate that predict_fn is "
                    "not calling format() for these prompts, or the prompt names don't match. "
                    "Please verify that your predict_fn uses all prompts specified in prompt_uris."
                )
            return results
        finally:
            # Always restore the original PromptVersion.template property.
            gorilla.revert(patch)
    return eval_fn
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/optimize.py",
"license": "Apache License 2.0",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/optimize/optimizers/base.py | from abc import ABC, abstractmethod
from typing import Any, Callable
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
from mlflow.utils.annotations import experimental
# The evaluation function that takes candidate prompts as a dict
# (prompt template name -> prompt template) and a dataset as a list of dicts,
# and returns a list of EvaluationResultRecord.
_EvalFunc = Callable[[dict[str, str], list[dict[str, Any]]], list[EvaluationResultRecord]]
@experimental(version="3.5.0")
class BasePromptOptimizer(ABC):
    """Abstract base class for prompt optimization algorithms.
    Subclasses implement :meth:`optimize`, which receives an evaluation callback,
    training records, and the current prompt templates, and returns the improved
    templates wrapped in a ``PromptOptimizerOutput``.
    """

    @abstractmethod
    def optimize(
        self,
        eval_fn: _EvalFunc,
        train_data: list[dict[str, Any]],
        target_prompts: dict[str, str],
        enable_tracking: bool = True,
    ) -> PromptOptimizerOutput:
        """
        Optimize the target prompts using the given evaluation function,
        dataset and target prompt templates.
        Args:
            eval_fn: The evaluation function that takes candidate prompts as a dict
                (prompt template name -> prompt template) and a dataset as a list of dicts,
                and returns a list of EvaluationResultRecord. Note that eval_fn is not thread-safe.
            train_data: The dataset to use for optimization. Each record should
                include the inputs and outputs fields with dict values.
            target_prompts: The target prompt templates to use. The key is the prompt template
                name and the value is the prompt template.
            enable_tracking: If True (default), automatically log optimization progress.
        Returns:
            The outputs of the prompt optimizer that includes the optimized prompts
            as a dict (prompt template name -> prompt template).
        """
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/optimizers/base.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/genai/optimize/test_optimize.py | from typing import Any
import pandas as pd
import pytest
import mlflow
from mlflow.entities.model_registry import PromptModelConfig
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets import create_dataset
from mlflow.genai.optimize.optimize import optimize_prompts
from mlflow.genai.optimize.optimizers.base import BasePromptOptimizer
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
from mlflow.genai.prompts import register_prompt
from mlflow.genai.scorers import scorer
from mlflow.models.model import PromptVersion
from mlflow.utils.import_hooks import _post_import_hooks
class MockPromptOptimizer(BasePromptOptimizer):
    """Test double: prefixes every template and reports fixed 0.5 -> 0.9 scores."""

    def __init__(self, reflection_model="openai:/gpt-4o-mini"):
        self.model_name = reflection_model

    def optimize(
        self,
        eval_fn: Any,
        train_data: list[dict[str, Any]],
        target_prompts: dict[str, str],
        enable_tracking: bool = True,
    ) -> PromptOptimizerOutput:
        # "Optimize" each template by prepending a fixed instruction prefix.
        improved = {
            name: f"Be precise and accurate. {template}"
            for name, template in target_prompts.items()
        }
        # Exercise the evaluation callback when one is supplied.
        if eval_fn is not None:
            eval_fn(improved, train_data)
        return PromptOptimizerOutput(
            optimized_prompts=improved,
            initial_eval_score=0.5,
            final_eval_score=0.9,
        )
@pytest.fixture
def sample_translation_prompt() -> PromptVersion:
    """Register and return a fresh translation prompt version."""
    prompt = register_prompt(
        name="test_translation_prompt",
        template="Translate the following text to {{language}}: {{input_text}}",
    )
    return prompt
@pytest.fixture
def sample_summarization_prompt() -> PromptVersion:
    """Register and return a fresh summarization prompt version."""
    prompt = register_prompt(
        name="test_summarization_prompt",
        template="Summarize this text: {{text}}",
    )
    return prompt
@pytest.fixture
def sample_dataset() -> pd.DataFrame:
    """Three translation examples in the (inputs, outputs) eval-set layout."""
    records = [
        ({"input_text": "Hello", "language": "Spanish"}, "Hola"),
        ({"input_text": "World", "language": "French"}, "Monde"),
        ({"input_text": "Goodbye", "language": "Spanish"}, "Adiós"),
    ]
    return pd.DataFrame(
        {
            "inputs": [inp for inp, _ in records],
            "outputs": [out for _, out in records],
        }
    )
@pytest.fixture
def sample_summarization_dataset() -> list[dict[str, Any]]:
    """Two summarization examples as a list of record dicts."""
    texts = [
        "This is a long document that needs to be summarized into key points.",
        "Another document with important information for summarization.",
    ]
    summaries = ["Key points summary", "Important info summary"]
    return [
        {"inputs": {"text": text}, "outputs": summary}
        for text, summary in zip(texts, summaries)
    ]
def sample_predict_fn(input_text: str, language: str) -> str:
    """Fake predict_fn: loads the registered prompt, then returns a canned translation."""
    mlflow.genai.load_prompt("prompts:/test_translation_prompt/1")
    # Auto logging must be active while the evaluation harness invokes us.
    assert len(_post_import_hooks) > 0
    canned = {
        ("Hello", "Spanish"): "Hola",
        ("World", "French"): "Monde",
        ("Goodbye", "Spanish"): "Adiós",
    }
    key = (input_text, language)
    return canned[key] if key in canned else f"translated_{input_text}"
def sample_summarization_fn(text: str) -> str:
    """Return a canned summary built from the first 20 characters of ``text``."""
    head = text[:20]
    return "Summary of: " + head + "..."
@mlflow.genai.scorers.scorer(name="equivalence")
def equivalence(outputs, expectations):
    """Binary exact-match scorer against expectations['expected_response']."""
    expected = expectations["expected_response"]
    return 1.0 if outputs == expected else 0.0
def test_optimize_prompts_single_prompt(
    sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
    """A single prompt is optimized, registered as a new version, and scores are reported."""
    mock_optimizer = MockPromptOptimizer()
    result = optimize_prompts(
        predict_fn=sample_predict_fn,
        train_data=sample_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
        ],
        optimizer=mock_optimizer,
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 1
    optimized_prompt = result.optimized_prompts[0]
    assert optimized_prompt.name == sample_translation_prompt.name
    # Optimization registers a new version on top of the original.
    assert optimized_prompt.version == sample_translation_prompt.version + 1
    assert "Be precise and accurate." in optimized_prompt.template
    expected_template = "Translate the following text to {{language}}: {{input_text}}"
    assert expected_template in optimized_prompt.template
    assert result.initial_eval_score == 0.5
    assert result.final_eval_score == 0.9
def test_optimize_prompts_multiple_prompts(
    sample_translation_prompt: PromptVersion,
    sample_summarization_prompt: PromptVersion,
    sample_dataset: pd.DataFrame,
):
    """Multiple prompt URIs are optimized together and all get registered."""
    mock_optimizer = MockPromptOptimizer()
    result = optimize_prompts(
        predict_fn=sample_predict_fn,
        train_data=sample_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}",
            f"prompts:/{sample_summarization_prompt.name}/{sample_summarization_prompt.version}",
        ],
        optimizer=mock_optimizer,
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 2
    prompt_names = {prompt.name for prompt in result.optimized_prompts}
    assert sample_translation_prompt.name in prompt_names
    assert sample_summarization_prompt.name in prompt_names
    assert result.initial_eval_score == 0.5
    assert result.final_eval_score == 0.9
    for prompt in result.optimized_prompts:
        assert "Be precise and accurate." in prompt.template
def test_optimize_prompts_eval_function_behavior(
    sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
    """The eval_fn handed to the optimizer runs predict_fn with candidate prompts substituted."""
    class TestingOptimizer(BasePromptOptimizer):
        def __init__(self):
            self.model_name = "openai:/gpt-4o-mini"
            self.eval_fn_calls = []
        def optimize(self, eval_fn, dataset, target_prompts, enable_tracking=True):
            # Test that eval_fn works correctly
            test_prompts = {
                "test_translation_prompt": "Prompt Candidate: "
                "Translate {{input_text}} to {{language}}"
            }
            results = eval_fn(test_prompts, dataset)
            self.eval_fn_calls.append((test_prompts, results))
            # Verify results structure
            assert isinstance(results, list)
            assert len(results) == len(dataset)
            for i, result in enumerate(results):
                assert isinstance(result, EvaluationResultRecord)
                assert result.inputs == dataset[i]["inputs"]
                assert result.outputs == dataset[i]["outputs"]
                assert result.score == 1
                assert result.trace is not None
            return PromptOptimizerOutput(optimized_prompts=target_prompts)
    predict_called_count = 0
    def predict_fn(input_text, language):
        prompt = mlflow.genai.load_prompt("prompts:/test_translation_prompt/1").format(
            input_text=input_text, language=language
        )
        nonlocal predict_called_count
        # the first call to the predict_fn is the model check
        if predict_called_count > 0:
            # validate the prompt is replaced with the candidate prompt
            assert "Prompt Candidate" in prompt
        predict_called_count += 1
        return sample_predict_fn(input_text=input_text, language=language)
    testing_optimizer = TestingOptimizer()
    optimize_prompts(
        predict_fn=predict_fn,
        train_data=sample_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
        ],
        optimizer=testing_optimizer,
        scorers=[equivalence],
    )
    assert len(testing_optimizer.eval_fn_calls) == 1
    _, eval_results = testing_optimizer.eval_fn_calls[0]
    assert len(eval_results) == 3  # Number of records in sample_dataset
    assert predict_called_count == 4  # 3 records in sample_dataset + 1 for the prediction check
def test_optimize_prompts_with_list_dataset(
    sample_translation_prompt: PromptVersion, sample_summarization_dataset: list[dict[str, Any]]
):
    """train_data supplied as a plain list of dicts is accepted."""
    mock_optimizer = MockPromptOptimizer()
    def summarization_predict_fn(text):
        return f"Summary: {text[:10]}..."
    result = optimize_prompts(
        predict_fn=summarization_predict_fn,
        train_data=sample_summarization_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
        ],
        optimizer=mock_optimizer,
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 1
    assert result.initial_eval_score == 0.5
    assert result.final_eval_score == 0.9
def test_optimize_prompts_with_model_name(
    sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
    """An optimizer exposing a custom model_name still produces a result."""
    class TestOptimizer(BasePromptOptimizer):
        def __init__(self):
            self.model_name = "test/custom-model"
        def optimize(self, eval_fn, dataset, target_prompts, enable_tracking=True):
            return PromptOptimizerOutput(optimized_prompts=target_prompts)
    testing_optimizer = TestOptimizer()
    result = optimize_prompts(
        predict_fn=sample_predict_fn,
        train_data=sample_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
        ],
        optimizer=testing_optimizer,
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 1
def test_optimize_prompts_warns_on_unused_prompt(
    sample_translation_prompt: PromptVersion,
    sample_summarization_prompt: PromptVersion,
    sample_dataset: pd.DataFrame,
    capsys,
):
    """A warning is logged when a prompt in prompt_uris is never formatted by predict_fn."""
    mock_optimizer = MockPromptOptimizer()
    # Create predict_fn that only uses translation prompt, not summarization prompt
    def predict_fn_single_prompt(input_text, language):
        prompt = mlflow.genai.load_prompt("prompts:/test_translation_prompt/1")
        prompt.format(input_text=input_text, language=language)
        return sample_predict_fn(input_text=input_text, language=language)
    result = optimize_prompts(
        predict_fn=predict_fn_single_prompt,
        train_data=sample_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}",
            f"prompts:/{sample_summarization_prompt.name}/{sample_summarization_prompt.version}",
        ],
        optimizer=mock_optimizer,
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 2
    captured = capsys.readouterr()
    assert "prompts were not used during evaluation" in captured.err
    assert "test_summarization_prompt" in captured.err
def test_optimize_prompts_with_custom_scorers(
    sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
    """Custom @scorer functions are invoked and their scores flow through eval_fn."""
    # Create a custom scorer for case-insensitive matching
    @scorer(name="case_insensitive_match")
    def case_insensitive_match(outputs, expectations):
        # Extract expected_response if expectations is a dict
        if isinstance(expectations, dict) and "expected_response" in expectations:
            expected_value = expectations["expected_response"]
        else:
            expected_value = expectations
        return 1.0 if str(outputs).lower() == str(expected_value).lower() else 0.5
    class MetricTestOptimizer(BasePromptOptimizer):
        def __init__(self):
            self.model_name = "openai:/gpt-4o-mini"
            self.captured_scores = []
        def optimize(self, eval_fn, dataset, target_prompts, enable_tracking=True):
            # Run eval_fn and capture the scores
            results = eval_fn(target_prompts, dataset)
            self.captured_scores = [r.score for r in results]
            return PromptOptimizerOutput(optimized_prompts=target_prompts)
    testing_optimizer = MetricTestOptimizer()
    # Create dataset with outputs that will test custom scorer
    test_dataset = pd.DataFrame(
        {
            "inputs": [
                {"input_text": "Hello", "language": "Spanish"},
                {"input_text": "World", "language": "French"},
            ],
            "outputs": ["HOLA", "monde"],  # Different cases to test custom scorer
        }
    )
    def predict_fn(input_text, language):
        mlflow.genai.load_prompt("prompts:/test_translation_prompt/1")
        # Return lowercase outputs
        return {"Hello": "hola", "World": "monde"}.get(input_text, "unknown")
    result = optimize_prompts(
        predict_fn=predict_fn,
        train_data=test_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
        ],
        scorers=[case_insensitive_match],
        optimizer=testing_optimizer,
    )
    # Verify custom scorer was used
    # "hola" vs "HOLA" (case insensitive match) -> 1.0
    # "monde" vs "monde" (exact match) -> 1.0
    assert testing_optimizer.captured_scores == [1.0, 1.0]
    assert len(result.optimized_prompts) == 1
@pytest.mark.parametrize(
    ("train_data", "error_match"),
    [
        # Missing inputs validation (handled by _convert_eval_set_to_df)
        ([{"outputs": "Hola"}], "Either `inputs` or `trace` column is required"),
        # Empty inputs validation
        (
            [{"inputs": {}, "outputs": "Hola"}],
            "Record 0 is missing required 'inputs' field or it is empty",
        ),
    ],
)
def test_optimize_prompts_validation_errors(
    sample_translation_prompt: PromptVersion,
    train_data: list[dict[str, Any]],
    error_match: str,
):
    """Malformed train_data records raise MlflowException with a descriptive message."""
    with pytest.raises(MlflowException, match=error_match):
        optimize_prompts(
            predict_fn=sample_predict_fn,
            train_data=train_data,
            prompt_uris=[
                f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
            ],
            optimizer=MockPromptOptimizer(),
            scorers=[equivalence],
        )
def test_optimize_prompts_with_chat_prompt(
    sample_translation_prompt: PromptVersion, sample_dataset: pd.DataFrame
):
    """Chat (non-text) prompts are rejected with an explicit error."""
    chat_prompt = register_prompt(
        name="test_chat_prompt",
        template=[{"role": "user", "content": "{{input_text}}"}],
    )
    with pytest.raises(MlflowException, match="Only text prompts can be optimized"):
        optimize_prompts(
            predict_fn=sample_predict_fn,
            train_data=sample_dataset,
            prompt_uris=[f"prompts:/{chat_prompt.name}/{chat_prompt.version}"],
            optimizer=MockPromptOptimizer(),
            scorers=[equivalence],
        )
def test_optimize_prompts_with_managed_evaluation_dataset(
    sample_translation_prompt: PromptVersion,
    sample_dataset: pd.DataFrame,
):
    """A managed EvaluationDataset is converted to a DataFrame and accepted as train_data."""
    # Create a `ManagedEvaluationDataset` and populate it with records from sample_dataset
    managed_dataset = create_dataset(name="test_optimize_managed_dataset")
    managed_dataset.merge_records(sample_dataset)
    result = optimize_prompts(
        predict_fn=sample_predict_fn,
        train_data=managed_dataset,
        prompt_uris=[
            f"prompts:/{sample_translation_prompt.name}/{sample_translation_prompt.version}"
        ],
        optimizer=MockPromptOptimizer(),
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 1
    assert result.initial_eval_score == 0.5
    assert result.final_eval_score == 0.9
def test_optimize_prompts_preserves_model_config(sample_dataset: pd.DataFrame):
    """The optimized prompt version keeps the source prompt's model_config fields."""
    source_model_config = PromptModelConfig(
        provider="openai",
        model_name="gpt-4o",
        temperature=0.7,
        max_tokens=1000,
    )
    prompt_with_config = register_prompt(
        name="test_prompt_with_model_config",
        template="Translate the following text to {{language}}: {{input_text}}",
        model_config=source_model_config,
    )
    assert prompt_with_config.model_config is not None
    def predict_fn(input_text: str, language: str) -> str:
        mlflow.genai.load_prompt(f"prompts:/{prompt_with_config.name}/1")
        translations = {
            ("Hello", "Spanish"): "Hola",
            ("World", "French"): "Monde",
            ("Goodbye", "Spanish"): "Adiós",
        }
        return translations.get((input_text, language), f"translated_{input_text}")
    result = optimize_prompts(
        predict_fn=predict_fn,
        train_data=sample_dataset,
        prompt_uris=[f"prompts:/{prompt_with_config.name}/{prompt_with_config.version}"],
        optimizer=MockPromptOptimizer(),
        scorers=[equivalence],
    )
    assert len(result.optimized_prompts) == 1
    optimized_prompt = result.optimized_prompts[0]
    assert optimized_prompt.model_config["provider"] == "openai"
    assert optimized_prompt.model_config["model_name"] == "gpt-4o"
    assert optimized_prompt.model_config["temperature"] == 0.7
    assert optimized_prompt.model_config["max_tokens"] == 1000
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/optimize/test_optimize.py",
"license": "Apache License 2.0",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/utils/prompt.py | import json
from mlflow.entities.model_registry import PromptVersion
from mlflow.exceptions import MlflowException
from mlflow.tracing.constant import TraceTagKey
# TODO: Remove tag based linking once we migrate to LinkPromptsToTraces endpoint
def update_linked_prompts_tag(current_tag_value: str | None, prompt_versions: list[PromptVersion]):
"""
Utility method to update linked prompts tag value with a new prompt version.
Args:
current_tag_value: Current JSON string value of the linked prompts tag
prompt_versions: List of PromptVersion objects to add
Returns:
Updated JSON string with new entries added (avoiding duplicates)
Raises:
MlflowException: If current tag value has invalid JSON or format
"""
if current_tag_value is not None:
try:
parsed_prompts_tag_value = json.loads(current_tag_value)
if not isinstance(parsed_prompts_tag_value, list):
raise MlflowException(
f"Invalid format for '{TraceTagKey.LINKED_PROMPTS}' tag: {current_tag_value}"
)
except json.JSONDecodeError:
raise MlflowException(
f"Invalid JSON format for '{TraceTagKey.LINKED_PROMPTS}' tag: {current_tag_value}"
)
else:
parsed_prompts_tag_value = []
new_prompt_entries = [
{"name": prompt_version.name, "version": str(prompt_version.version)}
for prompt_version in prompt_versions
]
prompts_to_add = [p for p in new_prompt_entries if p not in parsed_prompts_tag_value]
if not prompts_to_add:
return current_tag_value
parsed_prompts_tag_value.extend(prompts_to_add)
return json.dumps(parsed_prompts_tag_value)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/utils/prompt.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/tracing/utils/test_prompt.py | import json
import pytest
from mlflow.entities.model_registry import PromptVersion
from mlflow.exceptions import MlflowException
from mlflow.tracing.utils.prompt import update_linked_prompts_tag
def test_update_linked_prompts_tag():
    """Entries accumulate across calls and duplicates are ignored."""
    pv1 = PromptVersion(name="test_prompt", version=1, template="Test template")
    updated_tag_value = update_linked_prompts_tag(None, [pv1])
    assert json.loads(updated_tag_value) == [{"name": "test_prompt", "version": "1"}]
    # Adding multiple prompts to the same trace
    pv2 = PromptVersion(name="test_prompt", version=2, template="Test template 2")
    pv3 = PromptVersion(name="test_prompt_3", version=1, template="Test template 3")
    updated_tag_value = update_linked_prompts_tag(updated_tag_value, [pv2, pv3])
    assert json.loads(updated_tag_value) == [
        {"name": "test_prompt", "version": "1"},
        {"name": "test_prompt", "version": "2"},
        {"name": "test_prompt_3", "version": "1"},
    ]
    # Registering the same prompt should not add it again
    updated_tag_value = update_linked_prompts_tag(updated_tag_value, [pv1])
    assert json.loads(updated_tag_value) == [
        {"name": "test_prompt", "version": "1"},
        {"name": "test_prompt", "version": "2"},
        {"name": "test_prompt_3", "version": "1"},
    ]
def test_update_linked_prompts_tag_invalid_current_tag():
    """Non-JSON and non-list tag values each raise a descriptive MlflowException."""
    prompt_version = PromptVersion(name="test_prompt", version=1, template="Test template")
    with pytest.raises(MlflowException, match="Invalid JSON format for 'mlflow.linkedPrompts' tag"):
        update_linked_prompts_tag("invalid json", [prompt_version])
    with pytest.raises(MlflowException, match="Invalid format for 'mlflow.linkedPrompts' tag"):
        update_linked_prompts_tag(json.dumps({"not": "a list"}), [prompt_version])
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/utils/test_prompt.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/nested_mock_patch.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class NestedMockPatch(Rule):
    """Lint rule: flags a mock.patch `with` block whose sole statement is another
    mock.patch `with` block, which should be merged into one `with` statement."""

    def _message(self) -> str:
        return (
            "Do not nest `unittest.mock.patch` context managers. "
            "Use multiple context managers in a single `with` statement instead: "
            "`with mock.patch(...), mock.patch(...): ...`"
        )
    @staticmethod
    def check(node: ast.With, resolver: Resolver) -> bool:
        """
        Returns True if the with statement uses mock.patch and contains only a single
        nested with statement that also uses mock.patch.
        """
        # Check if the outer with statement uses mock.patch
        outer_has_mock_patch = any(
            NestedMockPatch._is_mock_patch(item.context_expr, resolver) for item in node.items
        )
        if not outer_has_mock_patch:
            return False
        # Check if the body has exactly one statement and it's a with statement
        # (a body with any other statements is not a pure nesting and is allowed).
        if len(node.body) == 1 and isinstance(node.body[0], ast.With):
            # Check if the nested with statement also uses mock.patch
            inner_has_mock_patch = any(
                NestedMockPatch._is_mock_patch(item.context_expr, resolver)
                for item in node.body[0].items
            )
            if inner_has_mock_patch:
                return True
        return False
    @staticmethod
    def _is_mock_patch(node: ast.expr, resolver: Resolver) -> bool:
        """
        Returns True if the node is a call to mock.patch or any of its variants.
        """
        # Handle direct calls: mock.patch(...), mock.patch.object(...), etc.
        if isinstance(node, ast.Call):
            # Resolver maps the call target back to its fully-qualified import path.
            if res := resolver.resolve(node.func):
                match res:
                    # Matches unittest.mock.patch, unittest.mock.patch.object, etc.
                    case ["unittest", "mock", "patch", *_]:
                        return True
        return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/nested_mock_patch.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/tests/rules/test_nested_mock_patch.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.nested_mock_patch import NestedMockPatch
def test_nested_mock_patch_unittest_mock(index_path: Path) -> None:
    # Fully-qualified unittest.mock.patch nesting is flagged once, at the outer with.
    src = """
import unittest.mock
def test_foo():
    with unittest.mock.patch("foo.bar"):
        with unittest.mock.patch("foo.baz"):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert len(results) == 1
    assert isinstance(results[0].rule, NestedMockPatch)
    assert results[0].range == Range(Position(4, 4))
def test_nested_mock_patch_from_unittest_import_mock(index_path: Path) -> None:
    # The `from unittest import mock` spelling must be resolved the same way.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"):
        with mock.patch("foo.baz"):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert len(results) == 1
    assert isinstance(results[0].rule, NestedMockPatch)
    assert results[0].range == Range(Position(4, 4))
def test_nested_mock_patch_object(index_path: Path) -> None:
    # mock.patch.object is a patch variant and is covered by the rule.
    src = """
from unittest import mock
def test_foo():
    with mock.patch.object(SomeClass, "method"):
        with mock.patch.object(AnotherClass, "method"):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert len(results) == 1
    assert isinstance(results[0].rule, NestedMockPatch)
    assert results[0].range == Range(Position(4, 4))
def test_nested_mock_patch_dict(index_path: Path) -> None:
    # mock.patch.dict is a patch variant and is covered by the rule.
    src = """
from unittest import mock
def test_foo():
    with mock.patch.dict("os.environ", {"FOO": "bar"}):
        with mock.patch.dict("os.environ", {"BAZ": "qux"}):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert len(results) == 1
    assert isinstance(results[0].rule, NestedMockPatch)
    assert results[0].range == Range(Position(4, 4))
def test_nested_mock_patch_mixed(index_path: Path) -> None:
    # Mixing patch variants (plain patch outer, patch.object inner) is still flagged.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"):
        with mock.patch.object(SomeClass, "method"):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert len(results) == 1
    assert isinstance(results[0].rule, NestedMockPatch)
    assert results[0].range == Range(Position(4, 4))
def test_multiple_context_managers_is_ok(index_path: Path) -> None:
    # The recommended single-with form must not be flagged.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"), mock.patch("foo.baz"):
        ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_multiple_context_managers_with_object_is_ok(index_path: Path) -> None:
    # Single-with form with a patch.object variant must not be flagged either.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"), mock.patch.object(SomeClass, "method"):
        ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_nested_with_but_not_mock_patch_is_ok(index_path: Path) -> None:
    # Nested with statements that do not involve mock.patch are fine.
    src = """
def test_foo():
    with open("file.txt"):
        with open("file2.txt"):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_nested_with_only_one_mock_patch_is_ok(index_path: Path) -> None:
    # Only flags when BOTH the outer and the inner with use mock.patch.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"):
        with open("file.txt"):
            ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_non_nested_mock_patches_are_ok(index_path: Path) -> None:
    # Two sequential (sibling) mock.patch blocks are not nested and are fine.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"):
        pass
    with mock.patch("foo.baz"):
        pass
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_non_test_file_not_checked(index_path: Path) -> None:
    # The rule only applies to test files; a non-test filename is skipped.
    src = """
from unittest import mock
def foo():
    with mock.patch("foo.bar"):
        with mock.patch("foo.baz"):
            ...
"""
    results = lint_file(
        Path("nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_nested_with_code_after_is_ok(index_path: Path) -> None:
    # Extra statements after the inner with mean the outer body is not ONLY a
    # nested with, so flattening would change scoping — not flagged.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"):
        with mock.patch("foo.baz"):
            ...
        assert True
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    assert not results
def test_deeply_nested_mock_patch(index_path: Path) -> None:
    # Three levels of nesting yield one violation per outer/inner pair.
    src = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar"):
        with mock.patch("foo.baz"):
            with mock.patch("foo.qux"):
                ...
"""
    results = lint_file(
        Path("test_nested_mock_patch.py"), src, Config(select={NestedMockPatch.name}), index_path
    )
    # Should detect both levels of nesting
    assert len(results) == 2
    assert isinstance(results[0].rule, NestedMockPatch)
    assert isinstance(results[1].rule, NestedMockPatch)
    assert [v.range for v in results] == [Range(Position(4, 4)), Range(Position(5, 8))]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_nested_mock_patch.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/cli/scorers.py | import json
from typing import Literal
import click
from mlflow.environment_variables import MLFLOW_EXPERIMENT_ID
from mlflow.genai.judges import make_judge
from mlflow.genai.scorers import get_all_scorers
from mlflow.genai.scorers import list_scorers as list_scorers_api
from mlflow.mcp.decorator import mlflow_mcp
from mlflow.utils.string_utils import _create_table
@click.group("scorers")
def commands():
"""
Manage scorers, including LLM judges. To manage scorers associated with a tracking
server, set the MLFLOW_TRACKING_URI environment variable to the URL of the desired server.
"""
@commands.command("list")
@mlflow_mcp(tool_name="list_scorers")
@click.option(
"--experiment-id",
"-x",
envvar=MLFLOW_EXPERIMENT_ID.name,
type=click.STRING,
required=False,
help="Experiment ID for which to list scorers. Can be set via MLFLOW_EXPERIMENT_ID env var.",
)
@click.option(
"--builtin",
"-b",
is_flag=True,
default=False,
help="List built-in scorers instead of registered scorers for an experiment.",
)
@click.option(
"--output",
type=click.Choice(["table", "json"]),
default="table",
help="Output format: 'table' for formatted table (default) or 'json' for JSON format",
)
def list_scorers(
experiment_id: str | None, builtin: bool, output: Literal["table", "json"]
) -> None:
"""
List registered scorers for an experiment, or list all built-in scorers.
\b
Examples:
.. code-block:: bash
# List built-in scorers (table format)
mlflow scorers list --builtin
mlflow scorers list -b
# List built-in scorers (JSON format)
mlflow scorers list --builtin --output json
# List registered scorers in table format (default)
mlflow scorers list --experiment-id 123
# List registered scorers in JSON format
mlflow scorers list --experiment-id 123 --output json
# Using environment variable for experiment ID
export MLFLOW_EXPERIMENT_ID=123
mlflow scorers list
"""
# Validate mutual exclusivity
if builtin and experiment_id:
raise click.UsageError(
"Cannot specify both --builtin and --experiment-id. "
"Use --builtin to list built-in scorers or --experiment-id to list "
"registered scorers for an experiment."
)
if not builtin and not experiment_id:
raise click.UsageError(
"Must specify either --builtin or --experiment-id. "
"Use --builtin to list built-in scorers or --experiment-id to list "
"registered scorers for an experiment."
)
# Get scorers based on mode
scorers = get_all_scorers() if builtin else list_scorers_api(experiment_id=experiment_id)
# Format scorer data for output
scorer_data = [{"name": scorer.name, "description": scorer.description} for scorer in scorers]
if output == "json":
result = {"scorers": scorer_data}
click.echo(json.dumps(result, indent=2))
else:
# Table output format
table = [[s["name"], s["description"] or ""] for s in scorer_data]
click.echo(_create_table(table, headers=["Scorer Name", "Description"]))
@commands.command("register-llm-judge")
@mlflow_mcp(tool_name="register_llm_judge_scorer")
@click.option(
"--name",
"-n",
type=click.STRING,
required=True,
help="Name for the judge scorer",
)
@click.option(
"--instructions",
"-i",
type=click.STRING,
required=True,
help=(
"Instructions for evaluation. Must contain at least one template variable: "
"``{{ inputs }}``, ``{{ outputs }}``, ``{{ expectations }}``, or ``{{ trace }}``. "
"See the make_judge documentation for variable interpretations."
),
)
@click.option(
"--model",
"-m",
type=click.STRING,
required=False,
help=(
"Model identifier to use for evaluation (e.g., ``openai:/gpt-4``). "
"If not provided, uses the default model."
),
)
@click.option(
"--experiment-id",
"-x",
envvar=MLFLOW_EXPERIMENT_ID.name,
type=click.STRING,
required=True,
help="Experiment ID to register the judge in. Can be set via MLFLOW_EXPERIMENT_ID env var.",
)
@click.option(
"--description",
"-d",
type=click.STRING,
required=False,
help="Description of what the judge evaluates.",
)
def register_llm_judge(
name: str, instructions: str, model: str | None, experiment_id: str, description: str | None
) -> None:
"""
Register an LLM judge scorer in the specified experiment.
This command creates an LLM judge using natural language instructions and registers
it in an experiment for use in evaluation workflows. The instructions must contain at
least one template variable (``{{ inputs }}``, ``{{ outputs }}``, ``{{ expectations }}``,
or ``{{ trace }}``) to define what the judge will evaluate.
\b
Examples:
.. code-block:: bash
# Register a basic quality judge
mlflow scorers register-llm-judge -n quality_judge \\
-i "Evaluate if {{ outputs }} answers {{ inputs }}. Return yes or no." -x 123
# Register a judge with custom model
mlflow scorers register-llm-judge -n custom_judge \\
-i "Check whether {{ outputs }} is professional and formal. Rate pass, fail, or na" \\
-m "openai:/gpt-4" -x 123
# Register a judge with description
mlflow scorers register-llm-judge -n quality_judge \\
-i "Evaluate if {{ outputs }} answers {{ inputs }}. Return yes or no." \\
-d "Evaluates response quality and relevance" -x 123
# Using environment variable
export MLFLOW_EXPERIMENT_ID=123
mlflow scorers register-llm-judge -n my_judge \\
-i "Check whether {{ outputs }} contains PII"
"""
judge = make_judge(
name=name,
instructions=instructions,
model=model,
description=description,
feedback_value_type=str,
)
registered_judge = judge.register(experiment_id=experiment_id)
click.echo(
f"Successfully created and registered judge scorer '{registered_judge.name}' "
f"in experiment {experiment_id}"
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/cli/scorers.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/cli/test_scorers.py | import json
from typing import Any
from unittest.mock import patch
import pytest
from click.testing import CliRunner
import mlflow
from mlflow.cli.scorers import commands
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers import get_all_scorers, list_scorers, scorer
from mlflow.utils.string_utils import _create_table
@pytest.fixture
def mock_databricks_environment():
    # Pretend the tracking URI points at Databricks so scorer registration
    # follows the Databricks code path during these tests.
    with patch("mlflow.genai.scorers.base.is_databricks_uri", return_value=True):
        yield
@pytest.fixture
def runner():
    """CLI runner that lets exceptions propagate so tests can catch them."""
    cli_runner = CliRunner(catch_exceptions=False)
    return cli_runner
@pytest.fixture
def experiment():
    """Create a uniquely-named test experiment and delete it afterwards."""
    exp_name = f"test_scorers_cli_{mlflow.utils.time.get_current_time_millis()}"
    exp_id = mlflow.create_experiment(exp_name)
    yield exp_id
    mlflow.delete_experiment(exp_id)
@pytest.fixture
def correctness_scorer():
    """Create a correctness scorer."""
    # NOTE(review): the inner function name presumably feeds the scorer's
    # default identity via `@scorer`; tests override it with `name=` — confirm.
    @scorer
    def _correctness_scorer(outputs) -> bool:
        # Trivial heuristic: any non-empty output counts as "correct".
        return len(outputs) > 0
    return _correctness_scorer
@pytest.fixture
def safety_scorer():
    """Create a safety scorer."""
    @scorer
    def _safety_scorer(outputs) -> bool:
        # Trivial heuristic: any non-empty output counts as "safe".
        return len(outputs) > 0
    return _safety_scorer
@pytest.fixture
def relevance_scorer():
    """Create a relevance scorer."""
    @scorer
    def _relevance_scorer(outputs) -> bool:
        # Trivial heuristic: any non-empty output counts as "relevant".
        return len(outputs) > 0
    return _relevance_scorer
@pytest.fixture
def generic_scorer():
    """Create a generic test scorer."""
    @scorer
    def _generic_scorer(outputs) -> bool:
        # Always passes; used where only registration/listing matters.
        return True
    return _generic_scorer
def test_commands_group_exists():
    # The click group must be named "scorers" and expose help text.
    assert commands.name == "scorers"
    assert commands.help is not None
def test_list_command_params():
    # click groups key their subcommand registry by command name.
    list_cmd = commands.commands.get("list")
    assert list_cmd is not None
    assert {param.name for param in list_cmd.params} == {"experiment_id", "builtin", "output"}
def test_list_scorers_table_output(
    runner: CliRunner,
    experiment: str,
    correctness_scorer: Any,
    safety_scorer: Any,
    relevance_scorer: Any,
    mock_databricks_environment: Any,
):
    for fixture, registered_name in [
        (correctness_scorer, "Correctness"),
        (safety_scorer, "Safety"),
        (relevance_scorer, "RelevanceToQuery"),
    ]:
        fixture.register(experiment_id=experiment, name=registered_name)
    result = runner.invoke(commands, ["list", "--experiment-id", experiment])
    assert result.exit_code == 0
    # Scorers are returned in alphabetical order; click.echo() adds a trailing newline.
    expected_table = _create_table(
        [["Correctness", ""], ["RelevanceToQuery", ""], ["Safety", ""]],
        headers=["Scorer Name", "Description"],
    )
    assert result.output == expected_table + "\n"
def test_list_scorers_json_output(
    runner: CliRunner,
    experiment: str,
    correctness_scorer: Any,
    safety_scorer: Any,
    relevance_scorer: Any,
    mock_databricks_environment: Any,
):
    for fixture, registered_name in [
        (correctness_scorer, "Correctness"),
        (safety_scorer, "Safety"),
        (relevance_scorer, "RelevanceToQuery"),
    ]:
        fixture.register(experiment_id=experiment, name=registered_name)
    result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--output", "json"])
    assert result.exit_code == 0
    # JSON payload lists scorers alphabetically with null descriptions.
    payload = json.loads(result.output)
    assert payload["scorers"] == [
        {"name": "Correctness", "description": None},
        {"name": "RelevanceToQuery", "description": None},
        {"name": "Safety", "description": None},
    ]
@pytest.mark.parametrize(
    ("output_format", "expected_output"),
    [
        ("table", ""),
        ("json", {"scorers": []}),
    ],
)
def test_list_scorers_empty_experiment(
    runner: CliRunner, experiment: str, output_format: str, expected_output: Any
):
    args = ["list", "--experiment-id", experiment]
    if output_format == "json":
        args += ["--output", "json"]
    result = runner.invoke(commands, args)
    assert result.exit_code == 0
    if output_format == "json":
        assert json.loads(result.output) == expected_output
    else:
        # An experiment without scorers renders an essentially empty table.
        assert result.output.strip() == expected_output
def test_list_scorers_with_experiment_id_env_var(
    runner: CliRunner, experiment: str, correctness_scorer: Any, mock_databricks_environment: Any
):
    correctness_scorer.register(experiment_id=experiment, name="Correctness")
    # No --experiment-id flag: the value comes from MLFLOW_EXPERIMENT_ID.
    result = runner.invoke(commands, ["list"], env={"MLFLOW_EXPERIMENT_ID": experiment})
    assert result.exit_code == 0
    assert "Correctness" in result.output
def test_list_scorers_missing_experiment_id(runner: CliRunner):
    result = runner.invoke(commands, ["list"])
    assert result.exit_code != 0
    # Error text should mention the experiment id option in some spelling.
    lowered = result.output.lower()
    assert "experiment-id" in lowered or "experiment_id" in lowered
def test_list_scorers_invalid_output_format(runner: CliRunner, experiment: str):
    result = runner.invoke(commands, ["list", "--experiment-id", experiment, "--output", "invalid"])
    assert result.exit_code != 0
    # click.Choice rejects unknown values with a message naming the bad choice.
    lowered = result.output.lower()
    assert "invalid" in lowered or "choice" in lowered
def test_list_scorers_special_characters_in_names(
    runner: CliRunner, experiment: str, generic_scorer: Any, mock_databricks_environment: Any
):
    special_names = [
        "Scorer With Spaces",
        "Scorer.With.Dots",
        "Scorer-With-Dashes",
        "Scorer_With_Underscores",
    ]
    for special_name in special_names:
        generic_scorer.register(experiment_id=experiment, name=special_name)
    result = runner.invoke(commands, ["list", "--experiment-id", experiment])
    assert result.exit_code == 0
    for special_name in special_names:
        assert special_name in result.output
@pytest.mark.parametrize(
    "output_format",
    ["table", "json"],
)
def test_list_scorers_single_scorer(
    runner: CliRunner,
    experiment: str,
    generic_scorer: Any,
    output_format: str,
    mock_databricks_environment: Any,
):
    generic_scorer.register(experiment_id=experiment, name="OnlyScorer")
    args = ["list", "--experiment-id", experiment]
    if output_format == "json":
        args += ["--output", "json"]
    result = runner.invoke(commands, args)
    assert result.exit_code == 0
    if output_format == "json":
        assert json.loads(result.output) == {
            "scorers": [{"name": "OnlyScorer", "description": None}]
        }
    else:
        assert "OnlyScorer" in result.output
@pytest.mark.parametrize(
    "output_format",
    ["table", "json"],
)
def test_list_scorers_long_names(
    runner: CliRunner,
    experiment: str,
    generic_scorer: Any,
    output_format: str,
    mock_databricks_environment: Any,
):
    long_name = "VeryLongScorerNameThatShouldNotBeTruncatedEvenIfItIsReallyReallyLong"
    generic_scorer.register(experiment_id=experiment, name=long_name)
    args = ["list", "--experiment-id", experiment]
    if output_format == "json":
        args += ["--output", "json"]
    result = runner.invoke(commands, args)
    assert result.exit_code == 0
    if output_format == "json":
        assert json.loads(result.output) == {"scorers": [{"name": long_name, "description": None}]}
    else:
        # The name must survive table rendering untruncated.
        assert long_name in result.output
def test_list_scorers_with_descriptions(runner: CliRunner, experiment: str):
    from mlflow.genai.judges import make_judge

    judge_specs = [
        ("quality_judge", "Evaluate {{ outputs }}", "Evaluates response quality"),
        ("safety_judge", "Check {{ outputs }}", "Checks for safety issues"),
        ("no_desc_judge", "Evaluate {{ outputs }}", None),
    ]
    for judge_name, judge_instructions, judge_description in judge_specs:
        # Omit `description` entirely when the spec has none, matching a plain call.
        extra = {} if judge_description is None else {"description": judge_description}
        make_judge(
            name=judge_name,
            instructions=judge_instructions,
            feedback_value_type=str,
            **extra,
        ).register(experiment_id=experiment)
    result_json = runner.invoke(
        commands, ["list", "--experiment-id", experiment, "--output", "json"]
    )
    assert result_json.exit_code == 0
    payload = json.loads(result_json.output)
    assert len(payload["scorers"]) == 3
    descriptions = {s["name"]: s["description"] for s in payload["scorers"]}
    assert descriptions["no_desc_judge"] is None
    assert descriptions["quality_judge"] == "Evaluates response quality"
    assert descriptions["safety_judge"] == "Checks for safety issues"
    result_table = runner.invoke(commands, ["list", "--experiment-id", experiment])
    assert result_table.exit_code == 0
    assert "Evaluates response quality" in result_table.output
    assert "Checks for safety issues" in result_table.output
def test_create_judge_basic(runner: CliRunner, experiment: str):
    args = [
        "register-llm-judge",
        "--name",
        "test_judge",
        "--instructions",
        "Evaluate {{ outputs }}",
        "--experiment-id",
        experiment,
    ]
    result = runner.invoke(commands, args)
    assert result.exit_code == 0
    assert "Successfully created and registered judge scorer 'test_judge'" in result.output
    assert experiment in result.output
    # The judge must now appear among the experiment's registered scorers.
    registered_names = [s.name for s in list_scorers(experiment_id=experiment)]
    assert "test_judge" in registered_names
def test_create_judge_with_model(runner: CliRunner, experiment: str):
    result = runner.invoke(
        commands,
        [
            "register-llm-judge",
            "--name",
            "custom_model_judge",
            "--instructions",
            "Check {{ inputs }} and {{ outputs }}",
            "--model",
            "openai:/gpt-4",
            "--experiment-id",
            experiment,
        ],
    )
    assert result.exit_code == 0
    assert "Successfully created and registered" in result.output
    # Fetch the registered judge and confirm it carries the requested model.
    registered = {s.name: s for s in list_scorers(experiment_id=experiment)}
    assert "custom_model_judge" in registered
    assert registered["custom_model_judge"].model == "openai:/gpt-4"
def test_create_judge_short_options(runner: CliRunner, experiment: str):
    # Same flow as the long-option test, exercising -n/-i/-x aliases.
    result = runner.invoke(
        commands,
        [
            "register-llm-judge",
            "-n",
            "short_options_judge",
            "-i",
            "Evaluate {{ outputs }}",
            "-x",
            experiment,
        ],
    )
    assert result.exit_code == 0
    assert "Successfully created and registered" in result.output
    registered_names = [s.name for s in list_scorers(experiment_id=experiment)]
    assert "short_options_judge" in registered_names
def test_create_judge_with_env_var(runner: CliRunner, experiment: str):
    # Experiment id supplied via MLFLOW_EXPERIMENT_ID instead of a flag.
    result = runner.invoke(
        commands,
        [
            "register-llm-judge",
            "--name",
            "env_var_judge",
            "--instructions",
            "Check {{ outputs }}",
        ],
        env={"MLFLOW_EXPERIMENT_ID": experiment},
    )
    assert result.exit_code == 0
    assert "Successfully created and registered" in result.output
    registered_names = [s.name for s in list_scorers(experiment_id=experiment)]
    assert "env_var_judge" in registered_names
@pytest.mark.parametrize(
    ("args", "missing_param"),
    [
        (["--instructions", "test", "--experiment-id", "123"], "name"),
        (["--name", "test", "--experiment-id", "123"], "instructions"),
        (["--name", "test", "--instructions", "test"], "experiment-id"),
    ],
)
def test_create_judge_missing_required_params(
    runner: CliRunner, args: list[str], missing_param: str
):
    result = runner.invoke(commands, ["register-llm-judge", *args])
    assert result.exit_code != 0
    # Click typically shows "Missing option" for required parameters
    lowered = result.output.lower()
    assert "missing" in lowered or "required" in lowered
def test_create_judge_invalid_prompt(runner: CliRunner, experiment: str):
    # make_judge rejects instructions that contain no template variables; with
    # catch_exceptions=False the MlflowException surfaces to the test.
    with pytest.raises(MlflowException, match="[Tt]emplate.*variable"):
        runner.invoke(
            commands,
            [
                "register-llm-judge",
                "--name",
                "invalid_judge",
                "--instructions",
                "This has no template variables",
                "--experiment-id",
                experiment,
            ],
        )
def test_create_judge_special_characters_in_name(runner: CliRunner, experiment: str):
    # The fresh experiment starts with no registered scorers.
    assert not list_scorers(experiment_id=experiment)
    result = runner.invoke(
        commands,
        [
            "register-llm-judge",
            "--name",
            "judge-with_special.chars",
            "--instructions",
            "Evaluate {{ outputs }}",
            "--experiment-id",
            experiment,
        ],
    )
    assert result.exit_code == 0
    assert "Successfully created and registered" in result.output
    # Exactly one judge exists afterwards, with the punctuated name intact.
    registered = list_scorers(experiment_id=experiment)
    assert [s.name for s in registered] == ["judge-with_special.chars"]
def test_create_judge_duplicate_registration(runner: CliRunner, experiment: str):
    args = [
        "register-llm-judge",
        "--name",
        "duplicate_judge",
        "--instructions",
        "Evaluate {{ outputs }}",
        "--experiment-id",
        experiment,
    ]
    # First registration succeeds and yields exactly one judge.
    assert runner.invoke(commands, args).exit_code == 0
    registered = list_scorers(experiment_id=experiment)
    assert [s.name for s in registered] == ["duplicate_judge"]
    # Registering the same name again succeeds and replaces the old judge
    # rather than adding a second entry.
    assert runner.invoke(commands, args).exit_code == 0
    registered = list_scorers(experiment_id=experiment)
    assert [s.name for s in registered] == ["duplicate_judge"]
def test_create_judge_with_description(runner: CliRunner, experiment: str):
    description = "Evaluates response quality and relevance"
    result = runner.invoke(
        commands,
        [
            "register-llm-judge",
            "--name",
            "judge_with_desc",
            "--instructions",
            "Evaluate {{ outputs }}",
            "--description",
            description,
            "--experiment-id",
            experiment,
        ],
    )
    assert result.exit_code == 0
    assert "Successfully created and registered" in result.output
    # The stored judge must carry the description verbatim.
    registered = list_scorers(experiment_id=experiment)
    assert len(registered) == 1
    assert registered[0].name == "judge_with_desc"
    assert registered[0].description == description
def test_create_judge_with_description_short_flag(runner: CliRunner, experiment: str):
    description = "Checks for PII in outputs"
    # Same as the long-flag test but using the -d alias.
    result = runner.invoke(
        commands,
        [
            "register-llm-judge",
            "-n",
            "pii_judge",
            "-i",
            "Check {{ outputs }}",
            "-d",
            description,
            "-x",
            experiment,
        ],
    )
    assert result.exit_code == 0
    registered = {s.name: s for s in list_scorers(experiment_id=experiment)}
    assert registered["pii_judge"].description == description
@pytest.mark.parametrize("output_format", ["table", "json"])
def test_list_builtin_scorers_output_formats(runner, output_format):
args = ["list", "--builtin"]
if output_format == "json":
args.extend(["--output", "json"])
result = runner.invoke(commands, args)
assert result.exit_code == 0
if output_format == "json":
data = json.loads(result.output)
assert "scorers" in data
assert isinstance(data["scorers"], list)
assert len(data["scorers"]) > 0
# Verify each scorer has required fields
for scorer_item in data["scorers"]:
assert "name" in scorer_item
assert "description" in scorer_item
# Verify some builtin scorer names appear
scorer_names = [s["name"] for s in data["scorers"]]
assert "correctness" in scorer_names
assert "relevance_to_query" in scorer_names
assert "completeness" in scorer_names
else:
# Verify table headers
assert "Scorer Name" in result.output
assert "Description" in result.output
# Verify some builtin scorer names appear
assert "correctness" in result.output
assert "relevance_to_query" in result.output
assert "completeness" in result.output
def test_list_builtin_scorers_short_flag(runner):
    # -b is the short alias for --builtin.
    result = runner.invoke(commands, ["list", "-b"])
    assert result.exit_code == 0
    assert "Scorer Name" in result.output
def test_list_builtin_scorers_shows_all_available_scorers(runner):
    result = runner.invoke(commands, ["list", "--builtin", "--output", "json"])
    assert result.exit_code == 0
    # The CLI output must cover exactly the names get_all_scorers() reports.
    listed_names = {s["name"] for s in json.loads(result.output)["scorers"]}
    assert listed_names == {s.name for s in get_all_scorers()}
def test_list_scorers_mutually_exclusive_flags(runner, experiment):
    # --builtin and --experiment-id may not be combined.
    result = runner.invoke(commands, ["list", "--builtin", "--experiment-id", experiment])
    assert result.exit_code != 0
    assert "Cannot specify both --builtin and --experiment-id" in result.output
def test_list_scorers_requires_one_flag(runner):
    # At least one of --builtin / --experiment-id is mandatory.
    result = runner.invoke(commands, ["list"])
    assert result.exit_code != 0
    assert "Must specify either --builtin or --experiment-id" in result.output
def test_list_scorers_env_var_still_works(runner, experiment, monkeypatch):
    # MLFLOW_EXPERIMENT_ID satisfies the experiment-id requirement.
    monkeypatch.setenv("MLFLOW_EXPERIMENT_ID", experiment)
    result = runner.invoke(commands, ["list"])
    assert result.exit_code == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/cli/test_scorers.py",
"license": "Apache License 2.0",
"lines": 503,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/mock_patch_dict_environ.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class MockPatchDictEnviron(Rule):
    def _message(self) -> str:
        return (
            "Do not use `mock.patch.dict` to modify `os.environ` in tests; "
            "use pytest's monkeypatch fixture (monkeypatch.setenv / monkeypatch.delenv) instead."
        )

    @staticmethod
    def check(node: ast.Call, resolver: Resolver) -> bool:
        """
        Detect `mock.patch.dict` calls whose first positional argument targets
        os.environ, given either as the string literal "os.environ" or as the
        expression `os.environ`. Covers both context-manager and decorator use:
        - mock.patch.dict("os.environ", {...})
        - mock.patch.dict(os.environ, {...})
        - @mock.patch.dict("os.environ", {...})
        """
        if not isinstance(node, ast.Call):
            return False
        # The callee itself must resolve to unittest.mock.patch.dict.
        if resolver.resolve(node.func) != ["unittest", "mock", "patch", "dict"]:
            return False
        # A target dict must be given positionally.
        if not node.args:
            return False
        target = node.args[0]
        # String form: mock.patch.dict("os.environ", ...).
        if isinstance(target, ast.Constant):
            return target.value == "os.environ"
        # Expression form: mock.patch.dict(os.environ, ...).
        return resolver.resolve(target) == ["os", "environ"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/mock_patch_dict_environ.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_mock_patch_dict_environ.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.mock_patch_dict_environ import MockPatchDictEnviron
def test_mock_patch_dict_environ_with_string_literal(index_path: Path) -> None:
    # String-literal target "os.environ" is flagged.
    src = """
import os
from unittest import mock
# Bad - string literal
def test_func():
    with mock.patch.dict("os.environ", {"FOO": "True"}):
        pass
"""
    results = lint_file(
        Path("test_file.py"), src, Config(select={MockPatchDictEnviron.name}), index_path
    )
    assert len(results) == 1
    assert isinstance(results[0].rule, MockPatchDictEnviron)
    assert results[0].range == Range(Position(6, 9))
def test_mock_patch_dict_environ_with_expression(index_path: Path) -> None:
    """The expression form `mock.patch.dict(os.environ, ...)` is flagged."""
    code = """
import os
from unittest import mock
# Bad - os.environ as expression
def test_func():
    with mock.patch.dict(os.environ, {"FOO": "bar"}):
        pass
"""
    results = lint_file(
        Path("test_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 1
    assert all(isinstance(r.rule, MockPatchDictEnviron) for r in results)
    assert results[0].range == Range(Position(6, 9))
def test_mock_patch_dict_environ_as_decorator(index_path: Path) -> None:
    """Decorator usage `@mock.patch.dict("os.environ", ...)` is flagged."""
    code = """
import os
from unittest import mock
# Bad - as decorator
@mock.patch.dict("os.environ", {"FOO": "value"})
def test_func():
    pass
"""
    results = lint_file(
        Path("test_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 1
    assert all(isinstance(r.rule, MockPatchDictEnviron) for r in results)
    assert results[0].range == Range(Position(5, 1))
def test_mock_patch_dict_environ_with_clear(index_path: Path) -> None:
    """`mock.patch.dict(os.environ, {}, clear=True)` is still flagged."""
    code = """
import os
from unittest import mock
# Bad - with clear=True
def test_func():
    with mock.patch.dict(os.environ, {}, clear=True):
        pass
"""
    results = lint_file(
        Path("test_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 1
    assert all(isinstance(r.rule, MockPatchDictEnviron) for r in results)
    assert results[0].range == Range(Position(6, 9))
def test_mock_patch_dict_non_environ(index_path: Path) -> None:
    """Patching a dict other than os.environ is not flagged."""
    code = """
from unittest import mock
# Good - not os.environ
def test_func():
    with mock.patch.dict("some.other.dict", {"key": "value"}):
        pass
"""
    results = lint_file(
        Path("test_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 0
def test_mock_patch_dict_environ_non_test_file(index_path: Path) -> None:
    """The rule only applies to test files; other modules are ignored."""
    code = """
import os
from unittest import mock
# Good - not in test file
def normal_func():
    with mock.patch.dict("os.environ", {"FOO": "True"}):
        pass
"""
    results = lint_file(
        Path("normal_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 0
def test_mock_patch_dict_environ_with_mock_alias(index_path: Path) -> None:
    """An aliased import (`from unittest import mock as mock_lib`) is resolved and flagged."""
    code = """
import os
from unittest import mock as mock_lib
# Bad - with alias
def test_func():
    with mock_lib.patch.dict("os.environ", {"FOO": "bar"}):
        pass
"""
    results = lint_file(
        Path("test_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 1
    assert all(isinstance(r.rule, MockPatchDictEnviron) for r in results)
    assert results[0].range == Range(Position(6, 9))
def test_mock_patch_dict_environ_nested_function_not_caught(index_path: Path) -> None:
    """Usage inside a nested (non-test) function is not reported."""
    code = """
import os
from unittest import mock
def test_outer():
    def inner_function():
        with mock.patch.dict("os.environ", {"FOO": "True"}):
            pass
    inner_function()
"""
    results = lint_file(
        Path("test_file.py"),
        code,
        Config(select={MockPatchDictEnviron.name}),
        index_path,
    )
    assert len(results) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_mock_patch_dict_environ.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/mock_patch_as_decorator.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class MockPatchAsDecorator(Rule):
def _message(self) -> str:
return (
"Do not use `unittest.mock.patch` as a decorator. "
"Use it as a context manager to avoid patches being active longer than needed "
"and to make it clear which code depends on them."
)
@staticmethod
def check(decorator_list: list[ast.expr], resolver: Resolver) -> ast.expr | None:
"""
Returns the decorator node if it is a `@mock.patch` or `@patch` decorator.
"""
for deco in decorator_list:
if res := resolver.resolve(deco):
match res:
# Resolver returns ["unittest", "mock", "patch", ...]
# The *_ captures variants like "object", "dict", etc.
case ["unittest", "mock", "patch", *_]:
return deco
return None
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/mock_patch_as_decorator.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_mock_patch_as_decorator.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.mock_patch_as_decorator import MockPatchAsDecorator
def test_mock_patch_as_decorator_unittest_mock(index_path: Path) -> None:
    """Fully-qualified `@unittest.mock.patch` decorator is flagged."""
    code = """
import unittest.mock
@unittest.mock.patch("foo.bar")
def test_foo(mock_bar):
    ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("test_mock_patch.py"), code, cfg, index_path)
    assert len(found) == 1
    assert all(isinstance(v.rule, MockPatchAsDecorator) for v in found)
    assert found[0].range == Range(Position(3, 1))
def test_mock_patch_as_decorator_from_unittest_import_mock(index_path: Path) -> None:
    """`@mock.patch` via `from unittest import mock` is flagged."""
    code = """
from unittest import mock
@mock.patch("foo.bar")
def test_foo(mock_bar):
    ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("test_mock_patch.py"), code, cfg, index_path)
    assert len(found) == 1
    assert all(isinstance(v.rule, MockPatchAsDecorator) for v in found)
    assert found[0].range == Range(Position(3, 1))
def test_mock_patch_object_as_decorator(index_path: Path) -> None:
    """The `patch.object` variant used as a decorator is flagged."""
    code = """
from unittest import mock
@mock.patch.object(SomeClass, "method")
def test_foo(mock_method):
    ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("test_mock_patch.py"), code, cfg, index_path)
    assert len(found) == 1
    assert all(isinstance(v.rule, MockPatchAsDecorator) for v in found)
    assert found[0].range == Range(Position(3, 1))
def test_mock_patch_dict_as_decorator(index_path: Path) -> None:
    """The `patch.dict` variant used as a decorator is flagged."""
    code = """
from unittest import mock
@mock.patch.dict("os.environ", {"FOO": "bar"})
def test_foo():
    ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("test_mock_patch.py"), code, cfg, index_path)
    assert len(found) == 1
    assert all(isinstance(v.rule, MockPatchAsDecorator) for v in found)
    assert found[0].range == Range(Position(3, 1))
def test_mock_patch_as_context_manager_is_ok(index_path: Path) -> None:
    """Context-manager usage of mock.patch is the approved style and not flagged."""
    code = """
from unittest import mock
def test_foo():
    with mock.patch("foo.bar") as mock_bar:
        ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("test_mock_patch.py"), code, cfg, index_path)
    assert len(found) == 0
def test_non_test_file_not_checked(index_path: Path) -> None:
    """The rule only applies to test files; other modules are ignored."""
    code = """
from unittest import mock
@mock.patch("foo.bar")
def foo(mock_bar):
    ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("mock_patch.py"), code, cfg, index_path)
    assert len(found) == 0
def test_multiple_patch_decorators(index_path: Path) -> None:
    """Each stacked `@mock.patch` decorator produces its own violation."""
    code = """
from unittest import mock
@mock.patch("foo.bar")
@mock.patch("foo.baz")
def test_foo(mock_baz, mock_bar):
    ...
"""
    cfg = Config(select={MockPatchAsDecorator.name})
    found = lint_file(Path("test_mock_patch.py"), code, cfg, index_path)
    assert len(found) == 2
    assert all(isinstance(v.rule, MockPatchAsDecorator) for v in found)
    assert found[0].range == Range(Position(3, 1))
    assert found[1].range == Range(Position(4, 1))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_mock_patch_as_decorator.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/cli/eval.py | """
CLI commands for evaluating traces with scorers.
"""
import json
from typing import Literal
import click
import pandas as pd
import mlflow
from mlflow.cli.genai_eval_utils import (
extract_assessments_from_results,
format_table_output,
resolve_scorers,
)
from mlflow.entities import Trace
from mlflow.genai.evaluation import evaluate
from mlflow.tracking import MlflowClient
from mlflow.utils.string_utils import _create_table
def _gather_traces(trace_ids: str, experiment_id: str) -> list[Trace]:
    """
    Gather and validate traces from the tracking store.

    Args:
        trace_ids: Comma-separated list of trace IDs to gather
        experiment_id: Expected experiment ID for all traces

    Returns:
        List of Trace objects

    Raises:
        click.UsageError: If any trace is not found or belongs to wrong experiment
    """
    trace_id_list = [tid.strip() for tid in trace_ids.split(",")]
    client = MlflowClient()
    traces = []
    for trace_id in trace_id_list:
        try:
            trace = client.get_trace(trace_id, display=False)
        except Exception as e:
            # Chain the original exception (`from e`) so the root cause is
            # preserved in tracebacks instead of being swallowed (ruff B904).
            raise click.UsageError(f"Failed to get trace '{trace_id}': {e}") from e
        if trace is None:
            raise click.UsageError(f"Trace with ID '{trace_id}' not found")
        if trace.info.experiment_id != experiment_id:
            raise click.UsageError(
                f"Trace '{trace_id}' belongs to experiment '{trace.info.experiment_id}', "
                f"not the specified experiment '{experiment_id}'"
            )
        traces.append(trace)
    return traces
def evaluate_traces(
    experiment_id: str,
    trace_ids: str,
    scorers: str,
    output_format: Literal["table", "json"] = "table",
) -> None:
    """
    Evaluate traces with specified scorers and output results.

    Args:
        experiment_id: The experiment ID to use for evaluation
        trace_ids: Comma-separated list of trace IDs to evaluate
        scorers: Comma-separated list of scorer names
        output_format: Output format ('table' or 'json')

    Raises:
        click.UsageError: If trace gathering, scorer resolution, or evaluation fails.
    """
    mlflow.set_experiment(experiment_id=experiment_id)
    traces = _gather_traces(trace_ids, experiment_id)
    traces_df = pd.DataFrame([{"trace_id": t.info.trace_id, "trace": t} for t in traces])
    scorer_names = [name.strip() for name in scorers.split(",")]
    resolved_scorers = resolve_scorers(scorer_names, experiment_id)
    trace_count = len(traces)
    scorers_list = ", ".join(scorer_names)
    # Echo a progress line; use the singular phrasing for a single trace.
    if trace_count == 1:
        trace_id = traces[0].info.trace_id
        click.echo(f"Evaluating trace {trace_id} with scorers: {scorers_list}...")
    else:
        click.echo(f"Evaluating {trace_count} traces with scorers: {scorers_list}...")
    try:
        results = evaluate(data=traces_df, scorers=resolved_scorers)
        evaluation_run_id = results.run_id
    except Exception as e:
        # Chain the original exception (`from e`) so the underlying failure is
        # preserved in tracebacks instead of being swallowed (ruff B904).
        raise click.UsageError(f"Evaluation failed: {e}") from e
    results_df = results.result_df
    output_data = extract_assessments_from_results(results_df, evaluation_run_id)
    if output_format == "json":
        # Convert EvalResult objects to dicts for JSON serialization
        json_data = [
            {
                "trace_id": result.trace_id,
                "assessments": [
                    {
                        "name": assessment.name,
                        "result": assessment.result,
                        "rationale": assessment.rationale,
                        "error": assessment.error,
                    }
                    for assessment in result.assessments
                ],
            }
            for result in output_data
        ]
        # A single trace is emitted as a bare object rather than a 1-element array.
        if len(json_data) == 1:
            click.echo(json.dumps(json_data[0], indent=2))
        else:
            click.echo(json.dumps(json_data, indent=2))
    else:
        table_output = format_table_output(output_data)
        # Extract string values from Cell objects for table display
        table_data = [[cell.value for cell in row] for row in table_output.rows]
        # Add new line in the output before the final result.
        click.echo("")
        click.echo(_create_table(table_data, headers=table_output.headers))
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/cli/eval.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/cli/test_eval.py | import re
from unittest import mock
import click
import pandas as pd
import pytest
import mlflow
from mlflow.cli.eval import evaluate_traces
from mlflow.entities import Trace, TraceInfo
from mlflow.genai.scorers.base import scorer
def test_evaluate_traces_with_single_trace_table_output():
    """Single trace + table output: trace lookup and evaluate() wiring are verified."""
    experiment_id = mlflow.create_experiment("test_experiment")
    # Fake trace whose info matches the experiment under test, so the
    # experiment-ownership validation in evaluate_traces passes.
    mock_trace = mock.Mock(spec=Trace)
    mock_trace.info = mock.Mock(spec=TraceInfo)
    mock_trace.info.trace_id = "tr-test-123"
    mock_trace.info.experiment_id = experiment_id
    # Canned evaluation result shaped like the evaluate() return value
    # (run_id + result_df with one assessment row).
    mock_results = mock.Mock()
    mock_results.run_id = "run-eval-456"
    mock_results.result_df = pd.DataFrame(
        [
            {
                "trace_id": "tr-test-123",
                "assessments": [
                    {
                        "assessment_name": "RelevanceToQuery",
                        "feedback": {"value": "yes"},
                        "rationale": "The answer is relevant",
                        "metadata": {"mlflow.assessment.sourceRunId": "run-eval-456"},
                    }
                ],
            }
        ]
    )
    with (
        mock.patch(
            "mlflow.cli.eval.MlflowClient.get_trace", return_value=mock_trace
        ) as mock_get_trace,
        mock.patch("mlflow.cli.eval.evaluate", return_value=mock_results) as mock_evaluate,
    ):
        evaluate_traces(
            experiment_id=experiment_id,
            trace_ids="tr-test-123",
            scorers="RelevanceToQuery",
            output_format="table",
        )
    # The trace must be fetched exactly once, without display side effects.
    mock_get_trace.assert_called_once_with("tr-test-123", display=False)
    assert mock_evaluate.call_count == 1
    call_args = mock_evaluate.call_args
    # evaluate() must receive a one-row DataFrame pairing trace_id with the trace object.
    assert "data" in call_args.kwargs
    expected_df = pd.DataFrame([{"trace_id": "tr-test-123", "trace": mock_trace}])
    pd.testing.assert_frame_equal(call_args.kwargs["data"], expected_df)
    # The scorer name must have been resolved to the builtin RelevanceToQuery scorer.
    assert "scorers" in call_args.kwargs
    assert len(call_args.kwargs["scorers"]) == 1
    assert call_args.kwargs["scorers"][0].__class__.__name__ == "RelevanceToQuery"
def test_evaluate_traces_with_multiple_traces_json_output():
    """Multiple traces + JSON output: each trace is fetched and passed to evaluate()."""
    experiment = mlflow.create_experiment("test_experiment_multi")
    # Two fake traces, both belonging to the experiment under test.
    mock_trace1 = mock.Mock(spec=Trace)
    mock_trace1.info = mock.Mock(spec=TraceInfo)
    mock_trace1.info.trace_id = "tr-test-1"
    mock_trace1.info.experiment_id = experiment
    mock_trace2 = mock.Mock(spec=Trace)
    mock_trace2.info = mock.Mock(spec=TraceInfo)
    mock_trace2.info.trace_id = "tr-test-2"
    mock_trace2.info.experiment_id = experiment
    # Canned evaluation result with one assessment row per trace.
    mock_results = mock.Mock()
    mock_results.run_id = "run-eval-789"
    mock_results.result_df = pd.DataFrame(
        [
            {
                "trace_id": "tr-test-1",
                "assessments": [
                    {
                        "assessment_name": "Correctness",
                        "feedback": {"value": "correct"},
                        "rationale": "Content is correct",
                        "metadata": {"mlflow.assessment.sourceRunId": "run-eval-789"},
                    }
                ],
            },
            {
                "trace_id": "tr-test-2",
                "assessments": [
                    {
                        "assessment_name": "Correctness",
                        "feedback": {"value": "correct"},
                        "rationale": "Also correct",
                        "metadata": {"mlflow.assessment.sourceRunId": "run-eval-789"},
                    }
                ],
            },
        ]
    )
    with (
        # side_effect returns the traces in order, one per get_trace call.
        mock.patch(
            "mlflow.cli.eval.MlflowClient.get_trace",
            side_effect=[mock_trace1, mock_trace2],
        ) as mock_get_trace,
        mock.patch("mlflow.cli.eval.evaluate", return_value=mock_results) as mock_evaluate,
    ):
        evaluate_traces(
            experiment_id=experiment,
            trace_ids="tr-test-1,tr-test-2",
            scorers="Correctness",
            output_format="json",
        )
    # Both IDs from the comma-separated list must be fetched individually.
    assert mock_get_trace.call_count == 2
    mock_get_trace.assert_any_call("tr-test-1", display=False)
    mock_get_trace.assert_any_call("tr-test-2", display=False)
    assert mock_evaluate.call_count == 1
    call_args = mock_evaluate.call_args
    # evaluate() must receive one DataFrame row per trace, in input order.
    expected_df = pd.DataFrame(
        [
            {"trace_id": "tr-test-1", "trace": mock_trace1},
            {"trace_id": "tr-test-2", "trace": mock_trace2},
        ]
    )
    pd.testing.assert_frame_equal(call_args.kwargs["data"], expected_df)
def test_evaluate_traces_with_nonexistent_trace():
    """A missing trace (get_trace returns None) surfaces as a click.UsageError."""
    exp_id = mlflow.create_experiment("test_experiment_error")
    patch_target = "mlflow.cli.eval.MlflowClient.get_trace"
    with mock.patch(patch_target, return_value=None) as get_trace_mock:
        with pytest.raises(click.UsageError, match="Trace with ID 'tr-nonexistent' not found"):
            evaluate_traces(
                experiment_id=exp_id,
                trace_ids="tr-nonexistent",
                scorers="RelevanceToQuery",
                output_format="table",
            )
        get_trace_mock.assert_called_once_with("tr-nonexistent", display=False)
def test_evaluate_traces_with_trace_from_wrong_experiment():
    """A trace owned by a different experiment is rejected with a click.UsageError."""
    exp_a = mlflow.create_experiment("test_experiment_1")
    exp_b = mlflow.create_experiment("test_experiment_2")
    # Trace whose experiment_id points at exp_b, evaluated against exp_a.
    stray_trace = mock.Mock(spec=Trace)
    stray_trace.info = mock.Mock(spec=TraceInfo)
    stray_trace.info.trace_id = "tr-test-123"
    stray_trace.info.experiment_id = exp_b
    with mock.patch(
        "mlflow.cli.eval.MlflowClient.get_trace", return_value=stray_trace
    ) as get_trace_mock:
        with pytest.raises(click.UsageError, match="belongs to experiment"):
            evaluate_traces(
                experiment_id=exp_a,
                trace_ids="tr-test-123",
                scorers="RelevanceToQuery",
                output_format="table",
            )
        get_trace_mock.assert_called_once_with("tr-test-123", display=False)
def test_evaluate_traces_integration():
    """End-to-end: real traces are created, scored, and assessments are persisted."""
    experiment_id = mlflow.create_experiment("test_experiment_integration")
    mlflow.set_experiment(experiment_id=experiment_id)
    # Create a few real traces with inputs and outputs
    trace_ids = []
    for i in range(3):
        with mlflow.start_span(name=f"test_span_{i}") as span:
            span.set_inputs({"question": f"What is test {i}?"})
            span.set_outputs(f"This is answer {i}")
            trace_ids.append(span.trace_id)
    # Define a simple code-based scorer inline
    @scorer
    def simple_scorer(outputs):
        """Extract the digit from the output string and return it as the score"""
        if match := re.search(r"\d+", outputs):
            return float(match.group())
        return 0.0
    # Bypass name-based scorer resolution so the inline scorer is used directly.
    with mock.patch(
        "mlflow.cli.eval.resolve_scorers", return_value=[simple_scorer]
    ) as mock_resolve:
        evaluate_traces(
            experiment_id=experiment_id,
            trace_ids=",".join(trace_ids),
            scorers="simple_scorer",  # This will be intercepted by our mock
            output_format="table",
        )
    mock_resolve.assert_called_once()
    # Verify that the evaluation results are as expected
    traces = mlflow.search_traces(locations=[experiment_id], return_type="list")
    assert len(traces) == 3
    # Sort traces by their outputs to get consistent ordering
    traces = sorted(traces, key=lambda t: t.data.spans[0].outputs)
    for i, trace in enumerate(traces):
        assessments = trace.info.assessments
        assert len(assessments) > 0
        scorer_assessments = [a for a in assessments if a.name == "simple_scorer"]
        assert len(scorer_assessments) == 1
        assessment = scorer_assessments[0]
        # Each trace should have a score equal to its index (0, 1, 2)
        assert assessment.value == float(i)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/cli/test_eval.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/gateway/providers/test_traffic_route_provider.py | from typing import Any
import pytest
from mlflow.gateway.config import EndpointConfig
from mlflow.gateway.providers.base import TrafficRouteProvider
from tests.gateway.providers.test_openai import (
_run_test_chat,
_run_test_chat_stream,
_run_test_completions,
_run_test_completions_stream,
_run_test_embeddings,
chat_config,
chat_response,
chat_stream_response,
chat_stream_response_incomplete,
completions_config,
completions_response,
completions_stream_response,
completions_stream_response_incomplete,
embedding_config,
)
def _get_traffic_route_provider(endpoint_config: dict[str, Any]) -> TrafficRouteProvider:
    """
    Build a TrafficRouteProvider that sends all (100%) traffic to the single
    endpoint described by `endpoint_config`.
    """
    endpoint = EndpointConfig(**endpoint_config)
    return TrafficRouteProvider(
        configs=[endpoint],
        traffic_splits=[100],
        routing_strategy="TRAFFIC_SPLIT",
    )
@pytest.mark.asyncio
async def test_chat():
    # Chat requests through the traffic-split provider behave like direct OpenAI chat.
    provider = _get_traffic_route_provider(chat_config())
    await _run_test_chat(provider)
@pytest.mark.parametrize("resp", [chat_stream_response(), chat_stream_response_incomplete()])
@pytest.mark.asyncio
async def test_chat_stream(resp):
    # Streaming chat works for both complete and incomplete upstream responses.
    provider = _get_traffic_route_provider(chat_config())
    await _run_test_chat_stream(resp, provider)
@pytest.mark.parametrize("resp", [completions_response(), chat_response()])
@pytest.mark.asyncio
async def test_completions(resp):
    # Completions are handled for both completion- and chat-shaped upstream payloads.
    provider = _get_traffic_route_provider(completions_config())
    await _run_test_completions(resp, provider)
@pytest.mark.parametrize(
    "resp", [completions_stream_response(), completions_stream_response_incomplete()]
)
@pytest.mark.asyncio
async def test_completions_stream(resp):
    # Streaming completions work for both complete and incomplete upstream responses.
    provider = _get_traffic_route_provider(completions_config())
    await _run_test_completions_stream(resp, provider)
@pytest.mark.asyncio
async def test_embeddings():
    # Embedding requests are routed through the traffic-split provider unchanged.
    provider = _get_traffic_route_provider(embedding_config())
    await _run_test_embeddings(provider)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/gateway/providers/test_traffic_route_provider.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/jobs/_job_subproc_entry.py | """
This module is used for launching subprocess to execute the job function.
If the job has timeout setting, or the job has pip requirements dependencies,
or the job has extra environment variables setting,
the job is executed as a subprocess.
"""
import importlib
import json
import logging
import os
import threading
import traceback
from contextlib import nullcontext
import cloudpickle
from mlflow.environment_variables import MLFLOW_WORKSPACE
from mlflow.server.jobs.logging_utils import configure_logging_for_jobs
from mlflow.server.jobs.utils import JobResult, _exit_when_orphaned, _load_function
from mlflow.utils.workspace_context import WorkspaceContext
_logger = logging.getLogger(__name__)
# Configure Python logging to suppress noisy job logs
configure_logging_for_jobs()
if __name__ == "__main__":
    # ensure the subprocess is killed when parent process dies.
    threading.Thread(
        target=_exit_when_orphaned,
        name="exit_when_orphaned",
        daemon=True,
    ).start()
    # The parent process passes the job description via environment variables:
    # JSON-encoded kwargs, the dotted path of the job function, and file paths
    # for the result and the transient-error class list.
    params = json.loads(os.environ["_MLFLOW_SERVER_JOB_PARAMS"])
    function = _load_function(os.environ["_MLFLOW_SERVER_JOB_FUNCTION_FULLNAME"])
    result_dump_path = os.environ["_MLFLOW_SERVER_JOB_RESULT_DUMP_PATH"]
    # NOTE(review): "ClASSES" (lowercase L) looks like a typo, but the name must
    # match the variable set by the launching process — confirm both sides before renaming.
    transient_error_classes_path = os.environ["_MLFLOW_SERVER_JOB_TRANSIENT_ERROR_ClASSES_PATH"]
    # Optionally scope execution to an MLflow workspace; otherwise run without one.
    workspace = os.environ.get(MLFLOW_WORKSPACE.name)
    ctx = WorkspaceContext(workspace) if workspace else nullcontext()
    transient_error_classes = []
    try:
        # Preferred format: a cloudpickle payload containing the class list.
        with open(transient_error_classes_path, "rb") as f:
            transient_error_classes = cloudpickle.load(f)
        if transient_error_classes is None:
            transient_error_classes = []
    except Exception:
        # Fallback format: newline-separated fully-qualified class names,
        # imported one by one (used when the payload is not unpicklable here).
        with open(transient_error_classes_path) as f:
            content = f.read()
        for cls_str in content.split("\n"):
            if not cls_str:
                continue
            *module_parts, cls_name = cls_str.split(".")
            module = importlib.import_module(".".join(module_parts))
            transient_error_classes.append(getattr(module, cls_name))
    try:
        with ctx:
            value = function(**params)
        # Success: persist the JSON-encoded return value for the parent to read.
        job_result = JobResult(
            succeeded=True,
            result=json.dumps(value),
        )
        job_result.dump(result_dump_path)
    except Exception as e:
        # Failure: log the full traceback and persist an error result; the
        # transient-error classes let the parent decide whether to retry.
        _logger.error(
            f"Job function {os.environ['_MLFLOW_SERVER_JOB_FUNCTION_FULLNAME']} failed with "
            f"error:\n{traceback.format_exc()}"
        )
        JobResult.from_error(e, transient_error_classes).dump(result_dump_path)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/_job_subproc_entry.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/optimize/optimizers/gepa_optimizer.py | import json
import logging
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Any
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.optimizers.base import BasePromptOptimizer, _EvalFunc
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
from mlflow.utils.annotations import experimental
if TYPE_CHECKING:
import gepa
_logger = logging.getLogger(__name__)
# Artifact path and file name constants
PROMPT_CANDIDATES_DIR = "prompt_candidates"
EVAL_RESULTS_FILE = "eval_results.json"
SCORES_FILE = "scores.json"
@experimental(version="3.5.0")
class GepaPromptOptimizer(BasePromptOptimizer):
"""
A prompt adapter that uses GEPA (Genetic-Pareto) optimization algorithm
to optimize prompts.
GEPA uses iterative mutation, reflection, and Pareto-aware candidate selection
to improve text components like prompts. It leverages large language models to
reflect on system behavior and propose improvements.
Args:
reflection_model: Name of the model to use for reflection and optimization.
Format: "<provider>:/<model>"
(e.g., "openai:/gpt-4o", "anthropic:/claude-3-5-sonnet-20241022").
max_metric_calls: Maximum number of evaluation calls during optimization.
Higher values may lead to better results but increase optimization time.
Default: 100
display_progress_bar: Whether to show a progress bar during optimization.
Default: False
gepa_kwargs: Additional keyword arguments to pass directly to
gepa.optimize <https://github.com/gepa-ai/gepa/blob/main/src/gepa/api.py>.
Useful for accessing advanced GEPA features not directly exposed
through MLflow's GEPA interface.
Note: Parameters already handled by MLflow's GEPA class will be overridden by the direct
parameters and should not be passed through gepa_kwargs. List of predefined params:
- max_metric_calls
- display_progress_bar
- seed_candidate
- trainset
- adapter
- reflection_lm
- use_mlflow
Example:
.. code-block:: python
import mlflow
import openai
from mlflow.genai.optimize.optimizers import GepaPromptOptimizer
prompt = mlflow.genai.register_prompt(
name="qa",
template="Answer the following question: {{question}}",
)
def predict_fn(question: str) -> str:
completion = openai.OpenAI().chat.completions.create(
model="gpt-4o-mini",
messages=[{"role": "user", "content": prompt.format(question=question)}],
)
return completion.choices[0].message.content
dataset = [
{"inputs": {"question": "What is the capital of France?"}, "outputs": "Paris"},
{"inputs": {"question": "What is the capital of Germany?"}, "outputs": "Berlin"},
]
result = mlflow.genai.optimize_prompts(
predict_fn=predict_fn,
train_data=dataset,
prompt_uris=[prompt.uri],
optimizer=GepaPromptOptimizer(
reflection_model="openai:/gpt-4o",
display_progress_bar=True,
),
)
print(result.optimized_prompts[0].template)
"""
    def __init__(
        self,
        reflection_model: str,
        max_metric_calls: int = 100,
        display_progress_bar: bool = False,
        gepa_kwargs: dict[str, Any] | None = None,
    ):
        # Parameter semantics are documented in the class docstring.
        self.reflection_model = reflection_model
        self.max_metric_calls = max_metric_calls
        self.display_progress_bar = display_progress_bar
        # Normalize None to an empty dict so the later `self.gepa_kwargs | {...}`
        # merge in optimize() is always valid.
        self.gepa_kwargs = gepa_kwargs or {}
def optimize(
self,
eval_fn: _EvalFunc,
train_data: list[dict[str, Any]],
target_prompts: dict[str, str],
enable_tracking: bool = True,
) -> PromptOptimizerOutput:
"""
Optimize the target prompts using GEPA algorithm.
Args:
eval_fn: The evaluation function that takes candidate prompts as a dict
(prompt template name -> prompt template) and a dataset as a list of dicts,
and returns a list of EvaluationResultRecord.
train_data: The dataset to use for optimization. Each record should
include the inputs and outputs fields with dict values.
target_prompts: The target prompt templates to use. The key is the prompt template
name and the value is the prompt template.
enable_tracking: If True (default), automatically log optimization progress.
Returns:
The outputs of the prompt optimizer that includes the optimized prompts
as a dict (prompt template name -> prompt template).
"""
from mlflow.metrics.genai.model_utils import _parse_model_uri
if not train_data:
raise MlflowException.invalid_parameter_value(
"GEPA optimizer requires `train_data` to be provided."
)
try:
import gepa
except ImportError as e:
raise ImportError(
"GEPA >= 0.0.26 is required. Please install it with: `pip install 'gepa>=0.0.26'`"
) from e
provider, model = _parse_model_uri(self.reflection_model)
class MlflowGEPAAdapter(gepa.GEPAAdapter):
"""
MLflow optimization adapter for GEPA optimization
Args:
eval_function: Function that evaluates candidate prompts on a dataset.
prompts_dict: Dictionary mapping prompt names to their templates.
tracking_enabled: Whether to log traces/metrics/params/artifacts during
optimization.
full_dataset_size: Size of the full training dataset, used to distinguish
full validation passes from minibatch evaluations.
"""
def __init__(self, eval_function, prompts_dict, tracking_enabled, full_dataset_size):
self.eval_function = eval_function
self.prompts_dict = prompts_dict
self.prompt_names = list(prompts_dict.keys())
self.tracking_enabled = tracking_enabled
self.full_dataset_size = full_dataset_size
self.validation_iteration = 0
def evaluate(
self,
batch: list[dict[str, Any]],
candidate: dict[str, str],
capture_traces: bool = False,
) -> "gepa.EvaluationBatch":
"""
Evaluate a candidate prompt using the MLflow eval function.
Args:
batch: List of data instances to evaluate
candidate: Proposed text components (prompts)
capture_traces: Whether to capture execution traces
Returns:
EvaluationBatch with outputs, scores, and optional trajectories
"""
eval_results = self.eval_function(candidate, batch)
outputs = [result.outputs for result in eval_results]
scores = [result.score for result in eval_results]
trajectories = eval_results if capture_traces else None
objective_scores = [result.individual_scores for result in eval_results]
# Track validation candidates only during full dataset validation
# (not during minibatch evaluation in reflective mutation)
is_full_validation = not capture_traces and len(batch) == self.full_dataset_size
if is_full_validation and self.tracking_enabled:
self._log_validation_candidate(candidate, eval_results)
return gepa.EvaluationBatch(
outputs=outputs,
scores=scores,
trajectories=trajectories,
objective_scores=objective_scores if any(objective_scores) else None,
)
def _log_validation_candidate(
self,
candidate: dict[str, str],
eval_results: list[EvaluationResultRecord],
) -> None:
"""
Log validation candidate prompts and scores as MLflow artifacts.
Args:
candidate: The candidate prompts being validated
eval_results: Evaluation results containing scores
"""
if not self.tracking_enabled:
return
iteration = self.validation_iteration
self.validation_iteration += 1
# Compute aggregate score across all records
aggregate_score = (
sum(r.score for r in eval_results) / len(eval_results) if eval_results else 0.0
)
# Collect all scorer names
scorer_names = set()
for result in eval_results:
scorer_names |= result.individual_scores.keys()
# Build the evaluation results table and log to MLflow as a table artifact
eval_results_table = {
"inputs": [r.inputs for r in eval_results],
"output": [r.outputs for r in eval_results],
"expectation": [r.expectations for r in eval_results],
"aggregate_score": [r.score for r in eval_results],
}
for scorer_name in scorer_names:
eval_results_table[scorer_name] = [
r.individual_scores.get(scorer_name) for r in eval_results
]
iteration_dir = f"{PROMPT_CANDIDATES_DIR}/iteration_{iteration}"
mlflow.log_table(
data=eval_results_table,
artifact_file=f"{iteration_dir}/{EVAL_RESULTS_FILE}",
)
# Compute per-scorer average scores
per_scorer_scores = {}
for scorer_name in scorer_names:
scores = [
r.individual_scores[scorer_name]
for r in eval_results
if scorer_name in r.individual_scores
]
if scores:
per_scorer_scores[scorer_name] = sum(scores) / len(scores)
# Log per-scorer metrics for time progression visualization
mlflow.log_metrics(
{"eval_score": aggregate_score}
| {f"eval_score.{name}": score for name, score in per_scorer_scores.items()},
step=iteration,
)
# Log scores summary as JSON artifact
scores_data = {
"aggregate": aggregate_score,
"per_scorer": per_scorer_scores,
}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
scores_path = tmp_path / SCORES_FILE
with open(scores_path, "w") as f:
json.dump(scores_data, f, indent=2)
mlflow.log_artifact(scores_path, artifact_path=iteration_dir)
# Write each prompt as a separate text file
for prompt_name, prompt_text in candidate.items():
prompt_path = tmp_path / f"{prompt_name}.txt"
with open(prompt_path, "w") as f:
f.write(prompt_text)
mlflow.log_artifact(prompt_path, artifact_path=iteration_dir)
def make_reflective_dataset(
    self,
    candidate: dict[str, str],
    eval_batch: "gepa.EvaluationBatch[EvaluationResultRecord, Any]",
    components_to_update: list[str],
) -> dict[str, list[dict[str, Any]]]:
    """
    Build a reflective dataset for instruction refinement.

    Args:
        candidate: The evaluated candidate prompts, keyed by component name.
        eval_batch: Result of evaluate with capture_traces=True.
        components_to_update: Component names to update.

    Returns:
        Dict of reflective dataset per component: each component maps to one
        record per trajectory containing the component text, extracted trace
        spans, score, inputs/outputs, expectations, rationales, and index.
    """
    # The per-trajectory fields (spans, score, inputs, ...) do not depend on
    # the component, so build them once instead of once per component.
    shared_records: list[dict[str, Any]] = []
    for i, (trajectory, score) in enumerate(
        zip(eval_batch.trajectories, eval_batch.scores)
    ):
        trace = trajectory.trace
        # Flatten trace spans to plain dicts; no trace yields an empty list.
        spans = (
            [
                {
                    "name": span.name,
                    "inputs": span.inputs,
                    "outputs": span.outputs,
                }
                for span in trace.data.spans
            ]
            if trace
            else []
        )
        shared_records.append(
            {
                "trace": spans,
                "score": score,
                "inputs": trajectory.inputs,
                "outputs": trajectory.outputs,
                "expectations": trajectory.expectations,
                "rationales": trajectory.rationales,
                "index": i,
            }
        )
    reflective_datasets = {}
    for component_name in components_to_update:
        # Only the component name and its current text vary per component.
        reflective_datasets[component_name] = [
            {
                "component_name": component_name,
                "current_text": candidate.get(component_name, ""),
                **record,
            }
            for record in shared_records
        ]
    return reflective_datasets
adapter = MlflowGEPAAdapter(
eval_fn, target_prompts, enable_tracking, full_dataset_size=len(train_data)
)
kwargs = self.gepa_kwargs | {
"seed_candidate": target_prompts,
"trainset": train_data,
"adapter": adapter,
"reflection_lm": f"{provider}/{model}",
"max_metric_calls": self.max_metric_calls,
"display_progress_bar": self.display_progress_bar,
"use_mlflow": enable_tracking,
}
gepa_result = gepa.optimize(**kwargs)
optimized_prompts = gepa_result.best_candidate
(
initial_eval_score,
final_eval_score,
initial_eval_score_per_scorer,
final_eval_score_per_scorer,
) = self._extract_eval_scores(gepa_result)
return PromptOptimizerOutput(
optimized_prompts=optimized_prompts,
initial_eval_score=initial_eval_score,
final_eval_score=final_eval_score,
initial_eval_score_per_scorer=initial_eval_score_per_scorer,
final_eval_score_per_scorer=final_eval_score_per_scorer,
)
def _extract_eval_scores(
self, result: "gepa.GEPAResult"
) -> tuple[float | None, float | None, dict[str, float], dict[str, float]]:
"""
Extract initial and final evaluation scores from GEPA result.
Args:
result: GEPA optimization result
Returns:
Tuple of (initial_eval_score, final_eval_score,
initial_eval_score_per_scorer, final_eval_score_per_scorer).
Aggregated scores can be None if unavailable.
"""
final_eval_score = None
initial_eval_score = None
initial_eval_score_per_scorer: dict[str, float] = {}
final_eval_score_per_scorer: dict[str, float] = {}
scores = result.val_aggregate_scores
if scores and len(scores) > 0:
# The first score is the initial baseline score
initial_eval_score = scores[0]
# The highest score is the final optimized score
final_eval_score = max(scores)
# Extract per-scorer scores from val_aggregate_subscores
subscores = getattr(result, "val_aggregate_subscores", None)
if subscores and len(subscores) > 0:
# The first subscore dict is the initial baseline per-scorer scores
initial_eval_score_per_scorer = subscores[0] or {}
# Find the per-scorer scores corresponding to the best aggregate score
if scores and len(scores) > 0:
best_idx = scores.index(max(scores))
if best_idx < len(subscores) and subscores[best_idx]:
final_eval_score_per_scorer = subscores[best_idx]
return (
initial_eval_score,
final_eval_score,
initial_eval_score_per_scorer,
final_eval_score_per_scorer,
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/optimizers/gepa_optimizer.py",
"license": "Apache License 2.0",
"lines": 354,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/optimize/optimizers/test_gepa_optimizer.py | import json
import sys
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, Mock, patch
import pytest
import mlflow
from mlflow.genai.optimize.optimizers.gepa_optimizer import GepaPromptOptimizer
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
@pytest.fixture
def sample_train_data():
    """Four question/answer training records in the optimizer's dict format."""
    qa_pairs = [
        ("What is 2+2?", "4"),
        ("What is the capital of France?", "Paris"),
        ("What is 3*3?", "9"),
        ("What color is the sky?", "Blue"),
    ]
    return [{"inputs": {"question": q}, "outputs": a} for q, a in qa_pairs]
@pytest.fixture
def sample_target_prompts():
    """Two-component seed candidate used across the optimizer tests."""
    return dict(
        system_prompt="You are a helpful assistant.",
        instruction="Answer the following question: {{question}}",
    )
@pytest.fixture
def mock_eval_fn():
    """Eval function stub yielding one fixed-score record per dataset row."""

    def eval_fn(candidate_prompts: dict[str, str], dataset: list[dict[str, Any]]):
        records = []
        for row in dataset:
            records.append(
                EvaluationResultRecord(
                    inputs=row["inputs"],
                    outputs="outputs",
                    expectations=row["outputs"],
                    score=0.8,
                    trace={"info": "mock trace"},
                    rationales={"score": "mock rationale"},
                    individual_scores={"accuracy": 0.9, "relevance": 0.7},
                )
            )
        return records

    return eval_fn
def test_gepa_optimizer_initialization():
    # Defaults: 100 metric calls, no progress bar, no extra kwargs.
    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    assert opt.reflection_model == "openai:/gpt-4o"
    assert opt.gepa_kwargs == {}
    assert opt.max_metric_calls == 100
    assert opt.display_progress_bar is False
def test_gepa_optimizer_initialization_with_custom_params():
    # Explicit constructor arguments should be stored verbatim.
    opt = GepaPromptOptimizer(
        reflection_model="openai:/gpt-4o",
        max_metric_calls=100,
        display_progress_bar=True,
    )
    assert opt.display_progress_bar is True
    assert opt.max_metric_calls == 100
    assert opt.reflection_model == "openai:/gpt-4o"
    assert opt.gepa_kwargs == {}
def test_gepa_optimizer_initialization_with_gepa_kwargs():
    # Arbitrary passthrough kwargs are stored without affecting defaults.
    custom_kwargs = {"foo": "bar"}
    opt = GepaPromptOptimizer(
        reflection_model="openai:/gpt-4o",
        gepa_kwargs=custom_kwargs,
    )
    assert opt.gepa_kwargs == custom_kwargs
    assert opt.reflection_model == "openai:/gpt-4o"
    assert opt.max_metric_calls == 100
    assert opt.display_progress_bar is False
def test_gepa_optimizer_optimize(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    """Happy-path optimize(): result extraction and GEPA call arguments."""
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    optimize_result = Mock()
    optimize_result.best_candidate = {
        "system_prompt": "You are a highly skilled assistant.",
        "instruction": "Please answer this question carefully: {{question}}",
    }
    # First entry is the baseline score; the max is the optimized score.
    optimize_result.val_aggregate_scores = [0.5, 0.6, 0.8, 0.9]
    optimize_result.val_aggregate_subscores = [
        {"accuracy": 0.4, "relevance": 0.6},  # Initial (index 0)
        {"accuracy": 0.5, "relevance": 0.7},  # Index 1
        {"accuracy": 0.7, "relevance": 0.9},  # Index 2
        {"accuracy": 0.85, "relevance": 0.95},  # Final best (index 3, max score)
    ]
    gepa_mock.optimize.return_value = optimize_result
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(
        reflection_model="openai:/gpt-4o-mini", max_metric_calls=50, display_progress_bar=True
    )
    with patch.dict(sys.modules, module_patches):
        output = opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=sample_train_data,
            target_prompts=sample_target_prompts,
        )

    # Optimized prompts come straight from the GEPA result.
    assert isinstance(output, PromptOptimizerOutput)
    assert output.optimized_prompts == optimize_result.best_candidate
    assert "system_prompt" in output.optimized_prompts
    assert "instruction" in output.optimized_prompts
    # Aggregated and per-scorer scores are extracted from the result.
    assert output.initial_eval_score == 0.5
    assert output.final_eval_score == 0.9
    assert output.initial_eval_score_per_scorer == {"accuracy": 0.4, "relevance": 0.6}
    assert output.final_eval_score_per_scorer == {"accuracy": 0.85, "relevance": 0.95}

    # GEPA must be invoked exactly once with the expected configuration.
    gepa_mock.optimize.assert_called_once()
    call_kwargs = gepa_mock.optimize.call_args.kwargs
    assert call_kwargs["seed_candidate"] == sample_target_prompts
    assert call_kwargs["adapter"] is not None
    assert call_kwargs["max_metric_calls"] == 50
    assert call_kwargs["reflection_lm"] == "openai/gpt-4o-mini"
    assert call_kwargs["display_progress_bar"] is True
    assert len(call_kwargs["trainset"]) == 4
def test_gepa_optimizer_optimize_with_custom_reflection_model(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    gepa_mock.optimize.return_value = Mock(
        best_candidate=sample_target_prompts,
        val_aggregate_scores=[],
        val_aggregate_subscores=None,
    )
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(
        reflection_model="anthropic:/claude-3-5-sonnet-20241022",
    )
    with patch.dict(sys.modules, module_patches):
        opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=sample_train_data,
            target_prompts=sample_target_prompts,
        )

    # "provider:/model" is converted to GEPA's "provider/model" form.
    assert (
        gepa_mock.optimize.call_args.kwargs["reflection_lm"]
        == "anthropic/claude-3-5-sonnet-20241022"
    )
def test_gepa_optimizer_optimize_with_custom_gepa_params(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    gepa_mock.optimize.return_value = Mock(
        best_candidate=sample_target_prompts,
        val_aggregate_scores=[],
        val_aggregate_subscores=None,
    )
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(
        reflection_model="openai:/gpt-4o-mini", gepa_kwargs={"foo": "bar"}
    )
    with patch.dict(sys.modules, module_patches):
        opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=sample_train_data,
            target_prompts=sample_target_prompts,
        )

    # Extra gepa_kwargs must reach gepa.optimize unchanged.
    assert gepa_mock.optimize.call_args.kwargs["foo"] == "bar"
def test_gepa_optimizer_optimize_model_name_parsing(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    gepa_mock.optimize.return_value = Mock(
        best_candidate=sample_target_prompts,
        val_aggregate_scores=[],
        val_aggregate_subscores=None,
    )
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    with patch.dict(sys.modules, module_patches):
        opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=sample_train_data,
            target_prompts=sample_target_prompts,
        )

    # "openai:/gpt-4o" should be rewritten to "openai/gpt-4o".
    assert gepa_mock.optimize.call_args.kwargs["reflection_lm"] == "openai/gpt-4o"
def test_gepa_optimizer_import_error(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    # Mapping the module name to None makes `import gepa` raise ImportError.
    with patch.dict("sys.modules", {"gepa": None}):
        opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
        with pytest.raises(ImportError, match="GEPA >= 0.0.26 is required"):
            opt.optimize(
                eval_fn=mock_eval_fn,
                train_data=sample_train_data,
                target_prompts=sample_target_prompts,
            )
def test_gepa_optimizer_requires_train_data(
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    from mlflow.exceptions import MlflowException

    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    # An empty training set must be rejected up front.
    expected = "GEPA optimizer requires `train_data` to be provided"
    with pytest.raises(MlflowException, match=expected):
        opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=[],
            target_prompts=sample_target_prompts,
        )
def test_gepa_optimizer_single_record_dataset(
    sample_target_prompts: dict[str, str], mock_eval_fn: Any
):
    train_data = [
        {
            "inputs": {"question": "What is 2+2?"},
            "outputs": "4",
        }
    ]
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    gepa_mock.optimize.return_value = Mock(
        best_candidate=sample_target_prompts,
        val_aggregate_scores=[],
        val_aggregate_subscores=None,
    )
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    with patch.dict(sys.modules, module_patches):
        opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=train_data,
            target_prompts=sample_target_prompts,
        )

    # The single-record dataset is forwarded to GEPA unchanged.
    assert len(gepa_mock.optimize.call_args.kwargs["trainset"]) == 1
def test_gepa_optimizer_custom_adapter_evaluate(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    gepa_mock.optimize.return_value = Mock(
        best_candidate=sample_target_prompts,
        val_aggregate_scores=[],
        val_aggregate_subscores=None,
    )
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    with patch.dict(sys.modules, module_patches):
        output = opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=sample_train_data,
            target_prompts=sample_target_prompts,
        )

    # A custom adapter instance must be handed to GEPA.
    call_kwargs = gepa_mock.optimize.call_args.kwargs
    assert "adapter" in call_kwargs
    assert call_kwargs["adapter"] is not None
    assert output.optimized_prompts == sample_target_prompts
def test_make_reflective_dataset_with_traces(
    sample_target_prompts: dict[str, str], mock_eval_fn: Any
):
    """Exercise the inner GEPA adapter's make_reflective_dataset directly.

    Runs optimize() once purely to capture the adapter instance handed to
    GEPA, then calls make_reflective_dataset with two mocked trajectories
    (one with a two-span trace, one without a trace) and verifies the
    per-component record structure.
    """
    mock_gepa_module = MagicMock()
    mock_modules = {
        "gepa": mock_gepa_module,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    mock_gepa_module.EvaluationBatch = MagicMock()
    # ``object`` as the adapter base class keeps the inner subclass instantiable.
    mock_gepa_module.GEPAAdapter = object
    optimizer = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    with patch.dict(sys.modules, mock_modules):
        captured_adapter = None

        def mock_optimize_fn(**kwargs):
            # Capture the adapter so it can be exercised after optimize() returns.
            nonlocal captured_adapter
            captured_adapter = kwargs["adapter"]
            mock_result = Mock()
            mock_result.best_candidate = sample_target_prompts
            mock_result.val_aggregate_scores = []
            mock_result.val_aggregate_subscores = None
            return mock_result

        mock_gepa_module.optimize = mock_optimize_fn
        # Call optimize to create the inner adapter
        optimizer.optimize(
            eval_fn=mock_eval_fn,
            train_data=[{"inputs": {"question": "test"}, "outputs": "test"}],
            target_prompts=sample_target_prompts,
        )
        # Now test make_reflective_dataset with the captured adapter
        mock_trace = Mock()
        mock_span1 = Mock()
        mock_span1.name = "llm_call"
        mock_span1.inputs = {"prompt": "What is 2+2?"}
        mock_span1.outputs = {"response": "4"}
        mock_span2 = Mock()
        mock_span2.name = "retrieval"
        mock_span2.inputs = {"query": "math"}
        mock_span2.outputs = {"documents": ["doc1", "doc2"]}
        mock_trace.data.spans = [mock_span1, mock_span2]
        # Create mock trajectories
        mock_trajectory1 = Mock()
        mock_trajectory1.trace = mock_trace
        mock_trajectory1.inputs = {"question": "What is 2+2?"}
        mock_trajectory1.outputs = "4"
        mock_trajectory1.expectations = {"expected_response": "4"}
        mock_trajectory2 = Mock()
        # Second trajectory has no trace; its "trace" field must come back empty.
        mock_trajectory2.trace = None
        mock_trajectory2.inputs = {"question": "What is the capital of France?"}
        mock_trajectory2.outputs = "Paris"
        mock_trajectory2.expectations = {"expected_response": "Paris"}
        # Create mock evaluation batch
        mock_eval_batch = Mock()
        mock_eval_batch.trajectories = [mock_trajectory1, mock_trajectory2]
        mock_eval_batch.scores = [0.9, 0.7]
        # Test make_reflective_dataset
        candidate = {"system_prompt": "You are helpful"}
        components_to_update = ["system_prompt", "instruction"]
        result = captured_adapter.make_reflective_dataset(
            candidate, mock_eval_batch, components_to_update
        )
        # Verify result structure
        assert isinstance(result, dict)
        assert "system_prompt" in result
        assert "instruction" in result
        system_data = result["system_prompt"]
        assert len(system_data) == 2
        assert system_data[0]["component_name"] == "system_prompt"
        assert system_data[0]["current_text"] == "You are helpful"
        assert system_data[0]["score"] == 0.9
        assert system_data[0]["inputs"] == {"question": "What is 2+2?"}
        assert system_data[0]["outputs"] == "4"
        assert system_data[0]["expectations"] == {"expected_response": "4"}
        assert system_data[0]["index"] == 0
        # Verify trace spans
        assert len(system_data[0]["trace"]) == 2
        assert system_data[0]["trace"][0]["name"] == "llm_call"
        assert system_data[0]["trace"][0]["inputs"] == {"prompt": "What is 2+2?"}
        assert system_data[0]["trace"][0]["outputs"] == {"response": "4"}
        assert system_data[0]["trace"][1]["name"] == "retrieval"
        # Verify second record (no trace)
        assert system_data[1]["trace"] == []
        assert system_data[1]["score"] == 0.7
        assert system_data[1]["inputs"] == {"question": "What is the capital of France?"}
        assert system_data[1]["outputs"] == "Paris"
        assert system_data[1]["expectations"] == {"expected_response": "Paris"}
@pytest.mark.parametrize("enable_tracking", [True, False])
def test_gepa_optimizer_passes_use_mlflow(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
    enable_tracking: bool,
):
    gepa_mock = MagicMock()
    module_patches = {
        "gepa": gepa_mock,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    gepa_mock.optimize.return_value = Mock(
        best_candidate=sample_target_prompts,
        val_aggregate_scores=[],
        val_aggregate_subscores=None,
    )
    gepa_mock.EvaluationBatch = MagicMock()

    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    with patch.dict(sys.modules, module_patches):
        opt.optimize(
            eval_fn=mock_eval_fn,
            train_data=sample_train_data,
            target_prompts=sample_target_prompts,
            enable_tracking=enable_tracking,
        )

    # The tracking flag must be forwarded to GEPA as use_mlflow.
    call_kwargs = gepa_mock.optimize.call_args.kwargs
    assert "use_mlflow" in call_kwargs
    assert call_kwargs["use_mlflow"] == enable_tracking
def test_gepa_optimizer_logs_prompt_candidates(
    sample_train_data: list[dict[str, Any]],
    sample_target_prompts: dict[str, str],
    mock_eval_fn: Any,
):
    """Verify MLflow logging performed by the adapter's full-dataset evaluate.

    A minibatch evaluate() must log nothing; a full-dataset evaluate() must
    log the eval-results table, per-iteration metrics, scores.json, and one
    .txt artifact per prompt component.
    """
    mock_gepa_module = MagicMock()
    mock_modules = {
        "gepa": mock_gepa_module,
        "gepa.core": MagicMock(),
        "gepa.core.adapter": MagicMock(),
    }
    mock_gepa_module.EvaluationBatch = MagicMock()
    mock_gepa_module.GEPAAdapter = object
    optimizer = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    logged_artifacts = []
    logged_tables = []
    logged_metrics = []
    with patch.dict(sys.modules, mock_modules):
        captured_adapter = None

        def mock_optimize_fn(**kwargs):
            # Capture the adapter so evaluate() can be driven manually below.
            nonlocal captured_adapter
            captured_adapter = kwargs["adapter"]
            mock_result = Mock()
            mock_result.best_candidate = sample_target_prompts
            mock_result.val_aggregate_scores = [0.8]
            mock_result.val_aggregate_subscores = None
            return mock_result

        mock_gepa_module.optimize = mock_optimize_fn
        with mlflow.start_run():
            with (
                patch(
                    "mlflow.genai.optimize.optimizers.gepa_optimizer.mlflow.log_artifact"
                ) as mock_log_artifact,
                patch(
                    "mlflow.genai.optimize.optimizers.gepa_optimizer.mlflow.log_table"
                ) as mock_log_table,
                patch(
                    "mlflow.genai.optimize.optimizers.gepa_optimizer.mlflow.log_metrics"
                ) as mock_log_metrics,
            ):

                def capture_artifact(path, artifact_path=None):
                    # Read eagerly: the file lives in a TemporaryDirectory that
                    # is deleted as soon as the logging helper returns.
                    with open(path) as f:
                        logged_artifacts.append(
                            {"path": str(path), "artifact_path": artifact_path, "content": f.read()}
                        )

                def capture_table(data, artifact_file):
                    logged_tables.append({"data": data, "artifact_file": artifact_file})

                def capture_metrics(metrics, step=None):
                    logged_metrics.append({"metrics": metrics, "step": step})

                mock_log_artifact.side_effect = capture_artifact
                mock_log_table.side_effect = capture_table
                mock_log_metrics.side_effect = capture_metrics
                optimizer.optimize(
                    eval_fn=mock_eval_fn,
                    train_data=sample_train_data,
                    target_prompts=sample_target_prompts,
                    enable_tracking=True,
                )
                # First: minibatch evaluation (should NOT log any artifacts)
                minibatch = sample_train_data[:2]
                captured_adapter.evaluate(
                    minibatch, {"system_prompt": "Test"}, capture_traces=False
                )
                # Second: full dataset validation (should log artifacts)
                candidate = {"system_prompt": "Optimized prompt", "instruction": "New instruction"}
                captured_adapter.evaluate(sample_train_data, candidate, capture_traces=False)
    # Verify scores.json was logged
    scores_artifact = next((a for a in logged_artifacts if "scores.json" in a["path"]), None)
    assert scores_artifact is not None
    assert scores_artifact["artifact_path"] == "prompt_candidates/iteration_0"
    scores_content = json.loads(scores_artifact["content"])
    assert scores_content["aggregate"] == 0.8
    assert scores_content["per_scorer"] == {"accuracy": 0.9, "relevance": 0.7}
    # Verify prompt text files were logged
    prompt_artifacts = [a for a in logged_artifacts if a["path"].endswith(".txt")]
    assert len(prompt_artifacts) == 2  # system_prompt.txt and instruction.txt
    for a in prompt_artifacts:
        assert a["artifact_path"] == "prompt_candidates/iteration_0"
    prompt_contents = {Path(a["path"]).stem: a["content"] for a in prompt_artifacts}
    assert prompt_contents["system_prompt"] == "Optimized prompt"
    assert prompt_contents["instruction"] == "New instruction"
    # Verify eval results table was logged
    assert len(logged_tables) == 1
    table = logged_tables[0]
    assert table["artifact_file"] == "prompt_candidates/iteration_0/eval_results.json"
    data = table["data"]
    assert "inputs" in data
    assert "output" in data
    assert "expectation" in data
    assert "aggregate_score" in data
    assert "accuracy" in data
    assert "relevance" in data
    assert len(data["inputs"]) == len(sample_train_data)
    assert all(score == 0.9 for score in data["accuracy"])
    assert all(score == 0.7 for score in data["relevance"])
    # Verify metrics were logged with step for time progression
    assert len(logged_metrics) == 1
    metrics = logged_metrics[0]
    assert metrics["step"] == 0
    assert metrics["metrics"]["eval_score"] == 0.8
    assert metrics["metrics"]["eval_score.accuracy"] == 0.9
    assert metrics["metrics"]["eval_score.relevance"] == 0.7
@pytest.mark.parametrize(
    ("val_aggregate_scores", "val_aggregate_subscores", "expected"),
    [
        # No scores at all
        ([], None, (None, None, {}, {})),
        (None, None, (None, None, {}, {})),
        # Only aggregate scores, no subscores
        ([0.5, 0.7, 0.9], None, (0.5, 0.9, {}, {})),
        # Both aggregate and per-scorer scores
        (
            [0.5, 0.7, 0.9],
            [
                {"Correctness": 0.4, "Safety": 0.6},
                {"Correctness": 0.6, "Safety": 0.8},
                {"Correctness": 0.85, "Safety": 0.95},
            ],
            (0.5, 0.9, {"Correctness": 0.4, "Safety": 0.6}, {"Correctness": 0.85, "Safety": 0.95}),
        ),
        # Empty subscores dict at index 0
        (
            [0.5, 0.9],
            [{}, {"Correctness": 0.9}],
            (0.5, 0.9, {}, {"Correctness": 0.9}),
        ),
        # Best score not at last index
        (
            [0.5, 0.95, 0.8],
            [
                {"A": 0.4},
                {"A": 0.95},  # Best score at index 1
                {"A": 0.7},
            ],
            (0.5, 0.95, {"A": 0.4}, {"A": 0.95}),
        ),
    ],
)
def test_extract_eval_scores_per_scorer(val_aggregate_scores, val_aggregate_subscores, expected):
    gepa_result = Mock(
        val_aggregate_scores=val_aggregate_scores,
        val_aggregate_subscores=val_aggregate_subscores,
    )
    opt = GepaPromptOptimizer(reflection_model="openai:/gpt-4o")
    assert opt._extract_eval_scores(gepa_result) == expected
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/optimize/optimizers/test_gepa_optimizer.py",
"license": "Apache License 2.0",
"lines": 560,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/store/tracking/databricks_rest_store.py | import base64
import logging
from collections import defaultdict
from datetime import datetime
from typing import Any
from urllib.parse import quote, urlencode
from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ExportTraceServiceRequest
from pydantic import BaseModel
from mlflow.entities import Assessment, Span, Trace, TraceInfo, TraceLocation
from mlflow.entities.assessment import ExpectationValue, FeedbackValue
from mlflow.entities.trace_location import (
UCSchemaLocation as UCSchemaLocationEntity,
)
from mlflow.entities.trace_location import (
UnityCatalog as UnityCatalogEntity,
)
from mlflow.environment_variables import (
MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT,
MLFLOW_TRACING_SQL_WAREHOUSE_ID,
)
from mlflow.exceptions import MlflowException, MlflowNotImplementedException, RestException
from mlflow.protos.databricks_pb2 import (
ALREADY_EXISTS,
BAD_REQUEST,
ENDPOINT_NOT_FOUND,
INTERNAL_ERROR,
INVALID_PARAMETER_VALUE,
ErrorCode,
)
from mlflow.protos.databricks_tracing_pb2 import Assessment as ProtoAssessment
from mlflow.protos.databricks_tracing_pb2 import (
BatchGetTraces,
BatchLinkTraceToRun,
BatchUnlinkTraceFromRun,
CreateAssessment,
CreateLocation,
CreateTraceInfo,
CreateTraceUCStorageLocation,
DatabricksTrackingService,
DeleteAssessment,
DeleteTraceTag,
GetAssessment,
GetLocation,
GetTraceInfo,
LinkExperimentToUCTraceLocation,
LinkTraceLocation,
SearchTraces,
SetTraceTag,
UnLinkExperimentToUCTraceLocation,
UpdateAssessment,
)
from mlflow.protos.databricks_tracing_pb2 import TraceInfo as ProtoTraceInfo
from mlflow.protos.service_pb2 import GetOnlineTraceDetails, MlflowService, SearchUnifiedTraces
from mlflow.store.entities import PagedList
from mlflow.store.tracking import SEARCH_TRACES_DEFAULT_MAX_RESULTS
from mlflow.store.tracking.rest_store import RestStore
from mlflow.tracing.utils import parse_trace_id_v4
from mlflow.tracing.utils.otlp import OTLP_TRACES_PATH, resource_to_otel_proto
from mlflow.utils.databricks_tracing_utils import (
assessment_to_proto,
parse_uc_location,
trace_from_proto,
trace_location_to_proto,
uc_schema_location_from_proto,
uc_schema_location_to_proto,
uc_table_prefix_location_from_proto,
uc_table_prefix_location_to_proto,
)
from mlflow.utils.databricks_utils import get_databricks_workspace_client_config
from mlflow.utils.proto_json_utils import message_to_json
from mlflow.utils.rest_utils import (
_REST_API_PATH_PREFIX,
_V4_REST_API_PATH_PREFIX,
_V4_TRACE_REST_API_PATH_PREFIX,
extract_api_info_for_service,
get_single_assessment_endpoint_v4,
get_single_trace_endpoint_v4,
http_request,
verify_rest_response,
)
# HTTP header that carries a Unity Catalog table name.
DATABRICKS_UC_TABLE_HEADER = "X-Databricks-UC-Table-Name"
# Base path for the v5 trace-location endpoints (create/get/link locations).
_V5_TRACE_LOCATION_ENDPOINT = "/api/5.0/mlflow/tracing/locations"
_logger = logging.getLogger(__name__)
def _parse_iso_timestamp_ms(timestamp_str: str) -> int:
"""Convert ISO 8601 timestamp string to milliseconds since epoch."""
return int(datetime.fromisoformat(timestamp_str.replace("Z", "+00:00")).timestamp() * 1000)
class CompositeToken(BaseModel):
    """Composite pagination token combining a backend token with a client offset.

    Serialized format is ``"<base64(backend_token)>:<offset>"``. A bare string
    without the composite structure is treated as a plain backend token with
    offset 0.
    """

    # Opaque pagination token returned by the backend, if any.
    backend_token: str | None
    # Client-side offset into the page identified by ``backend_token``.
    offset: int = 0

    @classmethod
    def parse(cls, token_str: str | None) -> "CompositeToken":
        """Parse a token string into a CompositeToken.

        Falls back to treating the whole string as a plain backend token when
        it does not match the composite ``<base64>:<offset>`` format.
        """
        if not token_str:
            return cls(backend_token=None, offset=0)
        if ":" not in token_str:
            return cls(backend_token=token_str, offset=0)
        parts = token_str.rsplit(":", 1)
        if len(parts) != 2:
            return cls(backend_token=token_str, offset=0)
        encoded_token, offset_str = parts
        try:
            offset = int(offset_str)
            backend_token = (
                base64.b64decode(encoded_token).decode("utf-8") if encoded_token else None
            )
            return cls(backend_token=backend_token, offset=offset)
        except ValueError:
            # ValueError covers everything the try block can raise: int()
            # failures, binascii.Error from b64decode, and UnicodeDecodeError
            # from decode (both are ValueError subclasses). Previously this
            # caught bare Exception, which masked genuine bugs.
            return cls(backend_token=token_str, offset=0)

    def encode(self) -> str | None:
        """Encode this token to its string form; ``None`` when empty."""
        if not self.backend_token and self.offset == 0:
            return None
        if not self.backend_token:
            # Offset-only token: keep the separator so parse() round-trips it.
            return f":{self.offset}"
        if self.offset == 0:
            return self.backend_token
        encoded_token = base64.b64encode(self.backend_token.encode("utf-8")).decode("utf-8")
        return f"{encoded_token}:{self.offset}"
class DatabricksTracingRestStore(RestStore):
"""
Client for a databricks tracking server accessed via REST API calls.
This is only used for Databricks-specific tracing APIs, all other APIs including
runs, experiments, models etc. should be implemented in the RestStore.
Args
get_host_creds: Method to be invoked prior to every REST request to get the
:py:class:`mlflow.rest_utils.MlflowHostCreds` for the request. Note that this
is a function so that we can obtain fresh credentials in the case of expiry.
"""
_METHOD_TO_INFO = extract_api_info_for_service(
MlflowService, _REST_API_PATH_PREFIX
) | extract_api_info_for_service(DatabricksTrackingService, _V4_REST_API_PATH_PREFIX)
def __init__(self, get_host_creds):
    # All non-tracing behavior is inherited from RestStore unchanged.
    super().__init__(get_host_creds)
def _call_endpoint(
    self,
    api,
    json_body=None,
    endpoint=None,
    retry_timeout_seconds=None,
    response_proto=None,
):
    """Invoke a REST endpoint via RestStore, translating the unresolved
    SQL-warehouse error into an actionable MlflowException."""
    try:
        return super()._call_endpoint(
            api,
            json_body=json_body,
            endpoint=endpoint,
            retry_timeout_seconds=retry_timeout_seconds,
            response_proto=response_proto,
        )
    except RestException as e:
        missing_warehouse = (
            e.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
            and "Could not resolve a SQL warehouse ID" in e.message
        )
        if not missing_warehouse:
            raise
        # Re-raise with setup instructions for the missing warehouse ID.
        raise MlflowException(
            message=(
                "SQL warehouse ID is required for accessing traces in UC tables.\n"
                f"Please set the {MLFLOW_TRACING_SQL_WAREHOUSE_ID.name} environment "
                "variable to your SQL warehouse ID.\n"
                "```\nexport MLFLOW_TRACING_SQL_WAREHOUSE_ID=<your_sql_warehouse_id>\n```\n"
                "See https://docs.databricks.com/compute/sql-warehouse for how to "
                "set up a SQL warehouse and get its ID."
            ),
            error_code=BAD_REQUEST,
        ) from e
def get_trace_location(self, telemetry_profile_id: str) -> UnityCatalogEntity:
    """Fetch the UC trace location registered for a telemetry profile ID."""
    response = self._call_endpoint(
        GetLocation,
        "{}",
        endpoint=f"{_V5_TRACE_LOCATION_ENDPOINT}/{telemetry_profile_id}",
        response_proto=GetLocation.Response(),
    )
    if not response.HasField("uc_table_prefix"):
        raise MlflowException("GetLocation response did not include uc_table_prefix.")
    return uc_table_prefix_location_from_proto(response.uc_table_prefix)
def create_or_get_trace_location(
    self,
    location: UnityCatalogEntity,
    sql_warehouse_id: str | None = None,
) -> UnityCatalogEntity:
    """Create (or resolve) a UC trace storage location via the v5 endpoint."""
    # Fall back to the environment-configured warehouse when none is given.
    warehouse = sql_warehouse_id or MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()
    request_body = message_to_json(
        CreateLocation(
            uc_table_prefix=uc_table_prefix_location_to_proto(location),
            sql_warehouse_id=warehouse,
        )
    )
    response = self._call_endpoint(
        CreateLocation,
        request_body,
        endpoint=_V5_TRACE_LOCATION_ENDPOINT,
        response_proto=CreateLocation.Response(),
    )
    if not response.HasField("uc_table_prefix"):
        raise MlflowException("CreateLocation response did not include uc_table_prefix.")
    return uc_table_prefix_location_from_proto(response.uc_table_prefix)
def link_trace_location(
    self,
    experiment_id: str,
    location: UnityCatalogEntity,
) -> None:
    """Link an MLflow experiment to the given UC trace location."""
    payload = message_to_json(
        LinkTraceLocation(
            experiment_id=experiment_id,
            uc_table_prefix=uc_table_prefix_location_to_proto(location),
        )
    )
    self._call_endpoint(
        LinkTraceLocation,
        payload,
        endpoint=f"/api/5.0/mlflow/experiments/{experiment_id}/trace-location:link",
        response_proto=LinkTraceLocation.Response(),
    )
    _logger.debug(f"Linked experiment {experiment_id} to trace location: {location}")
def start_trace(self, trace_info: TraceInfo) -> TraceInfo:
    """
    Create a new trace using the V4 API format.

    Args:
        trace_info: The TraceInfo object to create in the backend. Currently, this
            only supports trace_location with uc_schema, or mlflow_experiment that's
            linked to a UC table.

    Returns:
        The returned TraceInfo object from the backend.
    """
    try:
        if trace_info._is_v4():
            return self._start_trace_v4(trace_info)
        # Temporarily we capture all exceptions and fallback to v3 if the trace location is not uc
        # TODO: remove this once the endpoint is fully rolled out
    except Exception as e:
        # Fallback requires an experiment-backed location; otherwise re-raise.
        if trace_info.trace_location.mlflow_experiment is None:
            _logger.debug("MLflow experiment is not set for trace, cannot fallback to V3 API.")
            raise
        _logger.debug(f"Falling back to V3 API due to {e!s}")
    # Reached when the trace ID is not v4-format, or when the v4 call failed
    # for an experiment-backed trace (fallback path).
    return super().start_trace(trace_info)
def _start_trace_v4(self, trace_info: TraceInfo) -> TraceInfo:
    """Create the trace via the V4 endpoint; requires a V4 (location-prefixed) trace ID."""
    location, otel_trace_id = parse_trace_id_v4(trace_info.trace_id)
    if location is None:
        raise MlflowException("Invalid trace ID format for v4 API.")
    endpoint = f"{_V4_REST_API_PATH_PREFIX}/mlflow/traces/{location}/{otel_trace_id}/info"
    response = self._call_endpoint(
        CreateTraceInfo,
        message_to_json(trace_info.to_proto()),
        endpoint=endpoint,
        retry_timeout_seconds=MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT.get(),
        response_proto=ProtoTraceInfo(),
    )
    return TraceInfo.from_proto(response)
def batch_get_traces(self, trace_ids: list[str], location: str | None = None) -> list[Trace]:
    """
    Get a batch of complete traces with spans for given trace ids.

    Args:
        trace_ids: List of trace IDs to fetch. V4-format IDs may embed their location.
        location: Location of the trace. For example, "catalog.schema" or
            "catalog.schema.table_prefix" for UC schema destinations. If omitted,
            the location embedded in the V4 trace IDs is used.

    Returns:
        List of Trace objects.

    Raises:
        MlflowException: If no location is provided and none can be derived from
            the trace IDs.
    """
    parsed = [parse_trace_id_v4(trace_id) for trace_id in trace_ids]
    if location is None:
        # Fall back to the location embedded in the V4 trace IDs. Previously a
        # missing location produced a malformed endpoint containing "None".
        location = next((loc for loc, _ in parsed if loc is not None), None)
    if location is None:
        raise MlflowException.invalid_parameter_value(
            "A location must be provided or embedded in the trace IDs to batch-get traces."
        )
    otel_trace_ids = [otel_id for _, otel_id in parsed]
    req_body = message_to_json(
        BatchGetTraces(
            location_id=location,
            trace_ids=otel_trace_ids,
            sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
        )
    )
    response_proto = self._call_endpoint(
        BatchGetTraces,
        req_body,
        endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location}/batchGet",
    )
    return [trace_from_proto(proto, location) for proto in response_proto.traces]
def get_trace_info(self, trace_id: str) -> TraceInfo:
    """
    Get the trace info matching the `trace_id`.

    V4 (location-prefixed) trace IDs are fetched via the V4 endpoint; anything
    else is delegated to the parent implementation.

    Args:
        trace_id: String id of the trace to fetch.

    Returns:
        The fetched ``mlflow.entities.TraceInfo`` object.
    """
    location, trace_id = parse_trace_id_v4(trace_id)
    if location is None:
        return super().get_trace_info(trace_id)
    request = GetTraceInfo(
        trace_id=trace_id,
        location=location,
        sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
    )
    endpoint = f"{get_single_trace_endpoint_v4(location, trace_id)}/info"
    response = self._call_endpoint(GetTraceInfo, message_to_json(request), endpoint=endpoint)
    return TraceInfo.from_proto(response.trace.trace_info)
def get_trace(self, trace_id: str, *, allow_partial: bool = False) -> Trace:
    """
    Get a trace with spans for given trace id.

    Not implemented for this store.

    Args:
        trace_id: String id of the trace to fetch.
        allow_partial: Whether to allow partial traces. If True, the trace will be returned
            even if it is not fully exported yet. If False, MLflow retries and returns
            the trace until all spans are exported or the retry timeout is reached. Default
            to False.

    Returns:
        The fetched Trace object, of type ``mlflow.entities.Trace``.

    Raises:
        MlflowNotImplementedException: Always.
    """
    raise MlflowNotImplementedException()
def set_trace_tag(self, trace_id: str, key: str, value: str):
    """
    Set a tag on the trace with the given trace_id.

    Args:
        trace_id: The ID of the trace.
        key: The string key of the tag.
        value: The string value of the tag.
    """
    location, trace_id = parse_trace_id_v4(trace_id)
    if location is None:
        # Not a V4 trace ID; use the legacy implementation.
        return super().set_trace_tag(trace_id, key, value)
    payload = message_to_json(SetTraceTag(key=key, value=value))
    self._call_endpoint(
        SetTraceTag,
        payload,
        endpoint=f"{get_single_trace_endpoint_v4(location, trace_id)}/tags",
    )
def delete_trace_tag(self, trace_id: str, key: str):
    """
    Delete a tag on the trace with the given trace_id.

    Args:
        trace_id: The ID of the trace.
        key: The string key of the tag.
    """
    location, trace_id = parse_trace_id_v4(trace_id)
    if location is None:
        return super().delete_trace_tag(trace_id, key)
    # The tag key becomes part of the URL path, so it must be percent-encoded.
    encoded_key = quote(key, safe="")
    endpoint = f"{get_single_trace_endpoint_v4(location, trace_id)}/tags/{encoded_key}"
    body = message_to_json(DeleteTraceTag(sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()))
    self._call_endpoint(DeleteTraceTag, body, endpoint=endpoint)
def search_traces(
    self,
    experiment_ids: list[str] | None = None,
    filter_string: str | None = None,
    max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
    order_by: list[str] | None = None,
    page_token: str | None = None,
    model_id: str | None = None,
    locations: list[str] | None = None,
) -> tuple[list[TraceInfo], str | None]:
    """Search for traces in the given locations via the V4 API, with V3 fallbacks.

    Args:
        experiment_ids: Deprecated; passing a value raises. Use `locations`.
        filter_string: Optional filter expression for traces.
        max_results: Maximum number of traces to return.
        order_by: Optional list of order-by clauses.
        page_token: Token for fetching the next page of results.
        model_id: If set, the search is routed to the V3 SearchUnifiedTraces API.
        locations: Experiment IDs ("<experiment_id>") and/or UC locations
            ("<catalog>.<schema>[.<table_prefix>]") to search.

    Returns:
        Tuple of (list of TraceInfo, next page token or None).
    """
    # This API is not client-facing, so we should always use `locations`.
    if experiment_ids is not None:
        raise MlflowException("`experiment_ids` is deprecated, use `locations` instead.")
    if not locations:
        raise MlflowException.invalid_parameter_value(
            "At least one location must be specified for searching traces."
        )
    # model_id is only supported by V3 API
    if model_id is not None:
        return self._search_unified_traces(
            model_id=model_id,
            locations=locations,
            filter_string=filter_string,
            max_results=max_results,
            order_by=order_by,
            page_token=page_token,
        )
    # Translate each string location into a proto TraceLocation; the number of
    # dot-separated parts determines the location type.
    contains_uc_locations = False
    trace_locations = []
    for location in locations:
        match location.split("."):
            case [experiment_id]:
                trace_locations.append(
                    trace_location_to_proto(TraceLocation.from_experiment_id(experiment_id))
                )
            case [catalog, schema]:
                trace_locations.append(
                    trace_location_to_proto(
                        TraceLocation.from_databricks_uc_schema(catalog, schema)
                    )
                )
                contains_uc_locations = True
            case [catalog, schema, table_prefix]:
                trace_locations.append(
                    trace_location_to_proto(
                        TraceLocation.from_databricks_uc_table_prefix(
                            catalog, schema, table_prefix
                        )
                    )
                )
                contains_uc_locations = True
            case _:
                raise MlflowException.invalid_parameter_value(
                    f"Invalid location type: {location}. Expected type: "
                    "`<catalog_name>.<schema_name>[.<table_prefix>]` or `<experiment_id>`."
                )
    request = SearchTraces(
        locations=trace_locations,
        filter=filter_string,
        max_results=max_results,
        order_by=order_by,
        page_token=page_token,
        sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
    )
    req_body = message_to_json(request)
    try:
        response_proto = self._call_endpoint(
            SearchTraces,
            req_body,
            endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/search",
        )
    except MlflowException as e:
        # There are 2 expected failure cases:
        # 1. Server does not support SearchTracesV4 API yet.
        # 2. Server supports V4 API but the experiment location is not supported yet.
        # For these known cases, MLflow fallback to V3 API.
        if e.error_code == ErrorCode.Name(ENDPOINT_NOT_FOUND):
            if contains_uc_locations:
                # V3 cannot search UC tables, so there is no fallback here.
                raise MlflowException.invalid_parameter_value(
                    "Searching traces in UC tables is not supported yet. Only experiment IDs "
                    "are supported for searching traces."
                )
            _logger.debug("SearchTracesV4 API is not available yet. Falling back to V3 API.")
        elif (
            e.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
            and "locations not yet supported" in e.message
        ):
            if contains_uc_locations:
                raise MlflowException.invalid_parameter_value(
                    "The `locations` parameter cannot contain both MLflow experiment and UC "
                    "schema in the same request. Please specify only one type of location "
                    "at a time."
                )
            _logger.debug("Experiment locations are not supported yet. Falling back to V3 API.")
        else:
            raise
        # Fallback path: re-run the search against the V3 implementation.
        return self._search_traces(
            locations=locations,
            filter_string=filter_string,
            max_results=max_results,
            order_by=order_by,
            page_token=page_token,
        )
    trace_infos = [TraceInfo.from_proto(t) for t in response_proto.trace_infos]
    return trace_infos, response_proto.next_page_token or None
def _search_unified_traces(
    self,
    model_id: str,
    locations: list[str],
    filter_string: str | None = None,
    max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
    order_by: list[str] | None = None,
    page_token: str | None = None,
) -> tuple[list[TraceInfo], str | None]:
    """Search traces by model ID via the SearchUnifiedTraces (V3) endpoint.

    A SQL warehouse ID is mandatory for this API and is read from the
    MLFLOW_TRACING_SQL_WAREHOUSE_ID environment variable.
    """
    sql_warehouse_id = MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()
    if sql_warehouse_id is None:
        raise MlflowException.invalid_parameter_value(
            "SQL warehouse ID is required for searching traces by model ID in UC tables, "
            f"set it with the `{MLFLOW_TRACING_SQL_WAREHOUSE_ID.name}` environment variable."
        )
    req_body = message_to_json(
        SearchUnifiedTraces(
            model_id=model_id,
            sql_warehouse_id=sql_warehouse_id,
            experiment_ids=locations,
            filter=filter_string,
            max_results=max_results,
            order_by=order_by,
            page_token=page_token,
        )
    )
    response = self._call_endpoint(SearchUnifiedTraces, req_body)
    # Convert TraceInfo (v2) objects to TraceInfoV3 objects for consistency
    infos = [TraceInfo.from_proto(t) for t in response.traces]
    return infos, response.next_page_token or None
def get_online_trace_details(
    self,
    trace_id: str,
    source_inference_table: str,
    source_databricks_request_id: str,
):
    """Fetch trace data for an online trace backed by an inference table.

    Args:
        trace_id: ID of the trace to fetch.
        source_inference_table: Inference table that stores the online trace.
        source_databricks_request_id: Databricks request ID of the source row.
    """
    request = GetOnlineTraceDetails(
        trace_id=trace_id,
        sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
        source_inference_table=source_inference_table,
        source_databricks_request_id=source_databricks_request_id,
    )
    response = self._call_endpoint(GetOnlineTraceDetails, message_to_json(request))
    return response.trace_data
def set_experiment_trace_location(
    self,
    location: UCSchemaLocationEntity,
    experiment_id: str,
    sql_warehouse_id: str | None = None,
) -> UCSchemaLocationEntity:
    """Create (or reuse) a UC trace storage location and link an experiment to it.

    Args:
        location: Target UC schema location for trace storage.
        experiment_id: Experiment to link to the storage location.
        sql_warehouse_id: Optional SQL warehouse ID; falls back to the
            MLFLOW_TRACING_SQL_WAREHOUSE_ID environment variable.

    Returns:
        The UC schema location returned by the backend, or the input location
        if it already existed.
    """
    req_body = message_to_json(
        CreateTraceUCStorageLocation(
            uc_schema=uc_schema_location_to_proto(location),
            sql_warehouse_id=sql_warehouse_id or MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
        )
    )
    try:
        response = self._call_endpoint(
            CreateTraceUCStorageLocation,
            req_body,
            endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/location",
        )
        location = uc_schema_location_from_proto(response.uc_schema)
    except MlflowException as e:
        if e.error_code == ErrorCode.Name(ALREADY_EXISTS):
            # Reuse the existing location and continue to the linking step.
            _logger.debug(f"Trace UC storage location already exists: {location}")
        else:
            raise
    else:
        # Only log creation when the location was actually created; previously
        # this was also logged after the ALREADY_EXISTS fallback above.
        _logger.debug(f"Created trace UC storage location: {location}")
    # link experiment to uc trace location
    req_body = message_to_json(
        LinkExperimentToUCTraceLocation(
            experiment_id=experiment_id,
            uc_schema=uc_schema_location_to_proto(location),
        )
    )
    self._call_endpoint(
        LinkExperimentToUCTraceLocation,
        req_body,
        endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/link-location",
    )
    _logger.debug(f"Linked experiment {experiment_id} to UC trace location: {location}")
    return location
def unset_experiment_trace_location(
    self, experiment_id: str, location: UCSchemaLocationEntity
) -> None:
    """Remove the link between an experiment and a UC trace storage location.

    Args:
        experiment_id: Experiment whose trace-location link is removed.
        location: UC schema location to unlink from the experiment.
    """
    body = message_to_json(
        UnLinkExperimentToUCTraceLocation(
            experiment_id=experiment_id,
            uc_schema=uc_schema_location_to_proto(location),
        )
    )
    self._call_endpoint(
        UnLinkExperimentToUCTraceLocation,
        body,
        endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/unlink-location",
    )
    _logger.debug(f"Unlinked experiment {experiment_id} from trace location: {location}")
def log_spans(self, location: str, spans: list[Span], tracking_uri=None) -> list[Span]:
    """Export spans to a Unity Catalog table via the OTLP traces endpoint.

    Args:
        location: Fully qualified UC table name that receives the spans.
        spans: Spans to export. All spans are sent under the resource of the
            first span.
        tracking_uri: Databricks tracking URI used to resolve workspace
            authentication; required.

    Returns:
        The spans that were logged (an empty list for empty input).

    Raises:
        MlflowException: If `tracking_uri` is missing or workspace credentials
            cannot be resolved.
    """
    _logger.debug(f"Logging {len(spans)} spans to {location}")
    if not spans:
        return []
    if tracking_uri is None:
        # NOTE: fixed the grammar of this message (was "to log spans to with
        # Databricks tracking server").
        raise MlflowException(
            "`tracking_uri` must be provided to log spans to the Databricks tracking server."
        )
    endpoint = f"/api/2.0/otel{OTLP_TRACES_PATH}"
    try:
        config = get_databricks_workspace_client_config(tracking_uri)
    except Exception as e:
        raise MlflowException(
            "Failed to log spans to UC table: could not identify Databricks workspace "
            "configuration"
        ) from e
    # Build an OTLP ExportTraceServiceRequest: one resource entry, one scope,
    # carrying every span.
    request = ExportTraceServiceRequest()
    resource_spans = request.resource_spans.add()
    resource = getattr(spans[0]._span, "resource", None)
    resource_spans.resource.CopyFrom(resource_to_otel_proto(resource))
    scope_spans = resource_spans.scope_spans.add()
    scope_spans.spans.extend(span.to_otel_proto() for span in spans)
    response = http_request(
        host_creds=self.get_host_creds(),
        endpoint=endpoint,
        method="POST",
        # OTLP/HTTP expects binary protobuf, not JSON.
        data=request.SerializeToString(),
        extra_headers={
            "Content-Type": "application/x-protobuf",
            DATABRICKS_UC_TABLE_HEADER: location,
            **config.authenticate(),
        },
    )
    verify_rest_response(response, endpoint)
    return spans
def create_assessment(self, assessment: Assessment) -> Assessment:
    """
    Create an assessment entity in the backend store.

    Args:
        assessment: The assessment to log (without an assessment_id).

    Returns:
        The created Assessment object.
    """
    location, trace_id = parse_trace_id_v4(assessment.trace_id)
    if location is None:
        return super().create_assessment(assessment)
    endpoint = self._append_sql_warehouse_id_param(
        f"{get_single_trace_endpoint_v4(location, trace_id)}/assessments",
    )
    response = self._call_endpoint(
        CreateAssessment,
        message_to_json(assessment_to_proto(assessment)),
        endpoint=endpoint,
        response_proto=ProtoAssessment(),
    )
    return Assessment.from_proto(response)
def update_assessment(
    self,
    trace_id: str,
    assessment_id: str,
    name: str | None = None,
    expectation: ExpectationValue | None = None,
    feedback: FeedbackValue | None = None,
    rationale: str | None = None,
    metadata: dict[str, str] | None = None,
) -> Assessment:
    """
    Update an existing assessment entity in the backend store.

    Args:
        trace_id: The ID of the trace.
        assessment_id: The ID of the assessment to update.
        name: The updated name of the assessment.
        expectation: The updated expectation value of the assessment.
        feedback: The updated feedback value of the assessment.
        rationale: The updated rationale of the feedback. Not applicable for expectations.
        metadata: Additional metadata for the assessment.

    Returns:
        The updated Assessment object.

    Raises:
        MlflowException: If both `expectation` and `feedback` are specified.
    """
    if expectation is not None and feedback is not None:
        raise MlflowException.invalid_parameter_value(
            "Exactly one of `expectation` or `feedback` should be specified."
        )
    location, parsed_trace_id = parse_trace_id_v4(trace_id)
    if location is not None:
        assessment = UpdateAssessment().assessment
        assessment.assessment_id = assessment_id
        catalog, schema, table_prefix = parse_uc_location(location)
        if table_prefix:
            trace_loc = TraceLocation.from_databricks_uc_table_prefix(
                catalog, schema, table_prefix
            )
        else:
            trace_loc = TraceLocation.from_databricks_uc_schema(catalog, schema)
        assessment.trace_location.CopyFrom(trace_location_to_proto(trace_loc))
        assessment.trace_id = parsed_trace_id
        # Field mask specifies which fields to update.
        mask = UpdateAssessment().update_mask
        if name is not None:
            assessment.assessment_name = name
            mask.paths.append("assessment_name")
        if expectation is not None:
            assessment.expectation.CopyFrom(expectation.to_proto())
            mask.paths.append("expectation")
        if feedback is not None:
            assessment.feedback.CopyFrom(feedback.to_proto())
            mask.paths.append("feedback")
        if rationale is not None:
            assessment.rationale = rationale
            mask.paths.append("rationale")
        if metadata is not None:
            assessment.metadata.update(metadata)
            mask.paths.append("metadata")
        endpoint = get_single_assessment_endpoint_v4(location, parsed_trace_id, assessment_id)
        endpoint = self._append_sql_warehouse_id_param(endpoint)
        if mask.paths:
            # `_append_sql_warehouse_id_param` only adds a "?" query string when a
            # SQL warehouse ID is configured, so choose the separator accordingly.
            # (Previously "&" was always used, producing an invalid URL when the
            # warehouse ID was unset.)
            separator = "&" if "?" in endpoint else "?"
            endpoint = f"{endpoint}{separator}update_mask={','.join(mask.paths)}"
        req_body = message_to_json(assessment)
        response_proto = self._call_endpoint(
            UpdateAssessment,
            req_body,
            endpoint=endpoint,
            response_proto=ProtoAssessment(),
        )
        return Assessment.from_proto(response_proto)
    else:
        return super().update_assessment(
            trace_id, assessment_id, name, expectation, feedback, rationale, metadata
        )
def get_assessment(self, trace_id: str, assessment_id: str) -> Assessment:
    """
    Get an assessment entity from the backend store.

    Args:
        trace_id: String ID of the trace the assessment belongs to.
        assessment_id: String ID of the assessment to fetch.
    """
    location, trace_id = parse_trace_id_v4(trace_id)
    if location is None:
        return super().get_assessment(trace_id, assessment_id)
    endpoint = self._append_sql_warehouse_id_param(
        get_single_assessment_endpoint_v4(location, trace_id, assessment_id)
    )
    response = self._call_endpoint(
        GetAssessment, endpoint=endpoint, response_proto=ProtoAssessment()
    )
    return Assessment.from_proto(response)
def delete_assessment(self, trace_id: str, assessment_id: str):
    """
    Delete an assessment associated with a trace.

    Args:
        trace_id: String ID of the trace.
        assessment_id: String ID of the assessment to delete.
    """
    location, trace_id = parse_trace_id_v4(trace_id)
    if location is None:
        return super().delete_assessment(trace_id, assessment_id)
    endpoint = self._append_sql_warehouse_id_param(
        get_single_assessment_endpoint_v4(location, trace_id, assessment_id)
    )
    self._call_endpoint(DeleteAssessment, endpoint=endpoint)
def _group_traces_by_location(self, trace_ids: list[str]) -> dict[str | None, list[str]]:
"""
Group trace IDs by location to separate V3 and V4 traces.
Args:
trace_ids: List of trace IDs (can be V3 or V4 format).
Returns:
Dict mapping location to list of trace IDs where:
- None key: List of V3 trace IDs (without location prefix)
- str keys: Location IDs (e.g., "catalog.schema" or
"catalog.schema.table_prefix") mapping to OTEL trace IDs
"""
traces_by_location: dict[str | None, list[str]] = defaultdict(list)
for trace_id in trace_ids:
location_id, trace_id = parse_trace_id_v4(trace_id)
traces_by_location[location_id].append(trace_id)
return traces_by_location
def _batch_link_traces_to_run(
self, location_id: str, otel_trace_ids: list[str], run_id: str
) -> None:
"""
Link multiple traces to a run by creating internal trace-to-run relationships.
Args:
location_id: The location ID (e.g., "catalog.schema" or
"catalog.schema.table_prefix") for the traces.
otel_trace_ids: List of OTEL trace IDs to link to the run.
run_id: ID of the run to link traces to.
"""
if not otel_trace_ids:
return
req_body = message_to_json(
BatchLinkTraceToRun(
location_id=location_id,
trace_ids=otel_trace_ids,
run_id=run_id,
)
)
endpoint = f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location_id}/link-to-run/batchCreate"
self._call_endpoint(BatchLinkTraceToRun, req_body, endpoint=endpoint)
def _batch_unlink_traces_from_run(
self, location_id: str, otel_trace_ids: list[str], run_id: str
) -> None:
"""
Unlink multiple traces from a run by removing the internal trace-to-run relationships.
Args:
location_id: The location ID (e.g., "catalog.schema" or
"catalog.schema.table_prefix") for the traces.
otel_trace_ids: List of OTEL trace IDs to unlink from the run.
run_id: ID of the run to unlink traces from.
"""
if not otel_trace_ids:
return
req_body = message_to_json(
BatchUnlinkTraceFromRun(
location_id=location_id,
trace_ids=otel_trace_ids,
run_id=run_id,
)
)
endpoint = f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location_id}/unlink-from-run/batchDelete"
self._call_endpoint(BatchUnlinkTraceFromRun, req_body, endpoint=endpoint)
def link_traces_to_run(self, trace_ids: list[str], run_id: str) -> None:
    """
    Link multiple traces to a run by creating trace-to-run relationships.

    V3 trace IDs are delegated to the parent implementation; V4 IDs are linked
    per location via the batch endpoint.

    Args:
        trace_ids: List of trace IDs to link to the run.
        run_id: ID of the run to link traces to.
    """
    if not trace_ids:
        return
    for location_id, ids in self._group_traces_by_location(trace_ids).items():
        if location_id is None:
            super().link_traces_to_run(ids, run_id)
        else:
            self._batch_link_traces_to_run(location_id, ids, run_id)
def unlink_traces_from_run(self, trace_ids: list[str], run_id: str) -> None:
    """
    Unlink multiple traces from a run by removing trace-to-run relationships.

    Only V4 (UC-location) trace IDs are supported; V3 trace IDs raise an error.

    Args:
        trace_ids: List of trace IDs to unlink from the run.
        run_id: ID of the run to unlink traces from.
    """
    if not trace_ids:
        return
    grouped = self._group_traces_by_location(trace_ids)
    v3_trace_ids = grouped.pop(None, [])
    if v3_trace_ids:
        raise MlflowException(
            "Unlinking traces from runs is only supported for traces with UC schema "
            f"locations. Unsupported trace IDs: {v3_trace_ids}"
        )
    for location_id, ids in grouped.items():
        self._batch_unlink_traces_from_run(location_id, ids, run_id)
def _validate_search_datasets_params(
self,
filter_string: str | None,
order_by: list[str] | None,
experiment_ids: list[str] | None,
):
"""Validate parameters for search_datasets and raise errors for unsupported ones."""
if filter_string:
raise MlflowException(
"filter_string parameter is not supported by Databricks managed-evals API",
error_code=INVALID_PARAMETER_VALUE,
)
if order_by:
raise MlflowException(
"order_by parameter is not supported by Databricks managed-evals API",
error_code=INVALID_PARAMETER_VALUE,
)
if experiment_ids and len(experiment_ids) > 1:
raise MlflowException(
"Databricks managed-evals API does not support searching multiple experiment IDs. "
"Please search for one experiment at a time.",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_datasets_from_response(self, response_json: dict[str, Any]) -> list[Any]:
    """Parse EvaluationDataset entities from managed-evals API response."""
    # Imported locally rather than at module scope.
    # NOTE(review): presumably to avoid a circular import — confirm.
    from mlflow.entities import EvaluationDataset

    datasets = []
    for dataset_dict in response_json.get("datasets", []):
        # Required fields: a missing key indicates an unexpected response shape
        # and is surfaced to the caller as an INTERNAL_ERROR.
        try:
            dataset_id = dataset_dict["dataset_id"]
            name = dataset_dict["name"]
            digest = dataset_dict["digest"]
            created_time_str = dataset_dict["create_time"]
            last_update_time_str = dataset_dict["last_update_time"]
        except KeyError as e:
            _logger.error(f"Unexpected response format from managed-evals API: {response_json}")
            raise MlflowException(
                f"Failed to parse dataset search response: missing required field {e}",
                error_code=INTERNAL_ERROR,
            ) from e
        # Timestamps arrive as ISO-8601 strings; the helper converts them
        # (presumably to epoch milliseconds, per its "_ms" suffix — confirm).
        try:
            created_time = _parse_iso_timestamp_ms(created_time_str)
            last_update_time = _parse_iso_timestamp_ms(last_update_time_str)
        except (ValueError, OSError) as e:
            _logger.error(f"Failed to parse timestamp from managed-evals API: {response_json}")
            raise MlflowException(
                f"Failed to parse dataset search response: invalid timestamp format: {e}",
                error_code=INTERNAL_ERROR,
            ) from e
        # tags/schema/profile are not present in this response, so they are left unset.
        dataset = EvaluationDataset(
            dataset_id=dataset_id,
            name=name,
            digest=digest,
            created_time=created_time,
            last_update_time=last_update_time,
            tags=None,
            schema=None,
            profile=None,
            created_by=dataset_dict.get("created_by"),
            last_updated_by=dataset_dict.get("last_updated_by"),
        )
        datasets.append(dataset)
    return datasets
def _fetch_datasets_page(
    self,
    experiment_ids: list[str] | None = None,
    page_size: int = 1000,
    page_token: str | None = None,
):
    """Fetch a single page of datasets from the backend."""
    params = {}
    if experiment_ids:
        # The managed-evals API filters on a single experiment ID.
        params["filter"] = f"experiment_id='{experiment_ids[0]}'"
    if page_size:
        params["page_size"] = str(page_size)
    if page_token:
        params["page_token"] = page_token
    endpoint = "/api/2.0/managed-evals/datasets"
    if params:
        endpoint = f"{endpoint}?{urlencode(params)}"
    try:
        response = http_request(
            host_creds=self.get_host_creds(),
            endpoint=endpoint,
            method="GET",
        )
        verify_rest_response(response, endpoint)
    except RestException as e:
        if e.error_code != ErrorCode.Name(ENDPOINT_NOT_FOUND):
            raise
        # The workspace does not expose the managed-evals API at all.
        raise MlflowException(
            message=(
                "Dataset search is not available in this Databricks workspace. "
                "This feature requires managed-evals API support. "
                "Please contact your workspace administrator."
            ),
            error_code=ENDPOINT_NOT_FOUND,
        ) from e
    payload = response.json()
    parsed = self._parse_datasets_from_response(payload)
    return PagedList(parsed, payload.get("next_page_token"))
def search_datasets(
    self,
    experiment_ids: list[str] | None = None,
    filter_string: str | None = None,
    max_results: int = 1000,
    order_by: list[str] | None = None,
    page_token: str | None = None,
):
    """
    Search for evaluation datasets in Databricks using managed-evals API.

    Args:
        experiment_ids: List of experiment IDs to filter by. Only supports a single
            experiment ID - raises error if multiple IDs are provided.
        filter_string: Not supported by managed-evals API (raises error)
        max_results: Maximum number of results to return
        order_by: Not supported by managed-evals API (raises error)
        page_token: Token for retrieving the next batch of results

    Returns:
        PagedList of EvaluationDataset entities
    """
    self._validate_search_datasets_params(filter_string, order_by, experiment_ids)
    # `page_token` may be a composite "backend_token:offset" produced by a previous
    # call (see the comment block below); decode it into its two parts.
    token = CompositeToken.parse(page_token)
    all_datasets = []
    current_backend_token = token.backend_token
    skip_count = token.offset
    last_used_token = None
    last_page_size = 0
    while len(all_datasets) < max_results:
        last_used_token = current_backend_token
        page = self._fetch_datasets_page(
            experiment_ids=experiment_ids,
            page_size=max_results,
            page_token=current_backend_token,
        )
        # Drop results already returned on the previous call (offset from the
        # composite token); this only applies to the first fetched page.
        page_results = list(page)[skip_count:]
        skip_count = 0
        last_page_size = len(page_results)
        all_datasets.extend(page_results)
        if not page.token:
            # Backend is exhausted; everything collected fits in this response.
            return PagedList(all_datasets, None)
        current_backend_token = page.token
    results_to_return = all_datasets[:max_results]
    # Composite tokens handle cases where the backend returns more results than requested
    # (overfetch). When this happens, we create a token with format "backend_token:offset"
    # to remember which backend page we're on and how many results to skip on the next call.
    #
    # Edge case: If datasets are created/deleted between pagination calls, the offset may
    # point to different datasets than originally intended, potentially causing results to
    # be skipped or repeated. This will be addressed by additional logic in the Databricks
    # backend to ensure stable pagination.
    if len(all_datasets) > max_results:
        results_from_last_page = max_results - (len(all_datasets) - last_page_size)
        next_token = CompositeToken(
            backend_token=last_used_token, offset=results_from_last_page
        ).encode()
    else:
        next_token = current_backend_token
    return PagedList(results_to_return, next_token)
def _append_sql_warehouse_id_param(self, endpoint: str) -> str:
    """Append the configured SQL warehouse ID as a query parameter, if one is set."""
    sql_warehouse_id = MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()
    if not sql_warehouse_id:
        return endpoint
    return f"{endpoint}?sql_warehouse_id={sql_warehouse_id}"
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/store/tracking/databricks_rest_store.py",
"license": "Apache License 2.0",
"lines": 957,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/tracing/enablement.py | """
Trace enablement functionality for MLflow to enable tracing to Databricks Storage.
"""
import logging
import mlflow
from mlflow.entities.trace_location import UCSchemaLocation
from mlflow.exceptions import MlflowException
from mlflow.utils.annotations import experimental
from mlflow.utils.uri import is_databricks_uri
from mlflow.version import IS_TRACING_SDK_ONLY
_logger = logging.getLogger(__name__)
@experimental(version="3.5.0")
def set_experiment_trace_location(
    location: UCSchemaLocation,
    experiment_id: str | None = None,
    sql_warehouse_id: str | None = None,
) -> UCSchemaLocation:
    """
    Configure the storage location for traces of an experiment.

    Unity Catalog tables for storing trace data will be created in the specified schema.
    When tracing is enabled, all traces for the specified experiment will be
    stored in the provided Unity Catalog schema.

    .. note::
        If the experiment is already linked to a storage location, this will raise an error.
        Use `mlflow.tracing.unset_experiment_trace_location` to remove the existing storage
        location first and then set a new one.

    Args:
        location: The storage location for experiment traces in Unity Catalog.
        experiment_id: The MLflow experiment ID to set the storage location for.
            If not specified, the current active experiment will be used.
        sql_warehouse_id: SQL warehouse ID for creating views and querying.
            If not specified, uses the value from MLFLOW_TRACING_SQL_WAREHOUSE_ID,
            fallback to the default SQL warehouse if the environment variable is not set.

    Returns:
        The UCSchemaLocation object representing the configured storage location, including
        the table names of the spans and logs tables.

    Example:

    .. code-block:: python

        import mlflow
        from mlflow.entities import UCSchemaLocation

        location = UCSchemaLocation(catalog_name="my_catalog", schema_name="my_schema")
        result = mlflow.tracing.set_experiment_trace_location(
            location=location,
            experiment_id="12345",
        )
        print(result.full_otel_spans_table_name)  # my_catalog.my_schema.otel_spans_table


        @mlflow.trace
        def add(x):
            return x + 1


        add(1)  # this writes the trace to the storage location set above
    """
    # Imported locally; the tracing client and tracking helpers are only needed here.
    from mlflow.tracing.client import TracingClient
    from mlflow.tracking import get_tracking_uri
    from mlflow.tracking.fluent import _get_experiment_id

    if not is_databricks_uri(get_tracking_uri()):
        raise MlflowException(
            "The `set_experiment_trace_location` API is only supported on Databricks."
        )
    experiment_id = experiment_id or _get_experiment_id()
    if experiment_id is None:
        raise MlflowException.invalid_parameter_value(
            "Experiment ID is required to set storage location, either pass it as an argument or "
            "use `mlflow.set_experiment` to set the current experiment."
        )
    # Check if the experiment exists. In Databricks notebook, this `get_experiment` call triggers
    # a side effect to create the experiment for the notebook if it doesn't exist. This side effect
    # is convenient for users.
    if experiment_id and not IS_TRACING_SDK_ONLY:
        try:
            mlflow.get_experiment(str(experiment_id))
        except Exception as e:
            raise MlflowException.invalid_parameter_value(
                f"Could not find experiment with ID {experiment_id}. Please make sure the "
                "experiment exists before setting the storage location."
            ) from e
    uc_schema_location = TracingClient()._set_experiment_trace_location(
        location=location,
        experiment_id=experiment_id,
        sql_warehouse_id=sql_warehouse_id,
    )
    _logger.info(
        f"Successfully configured storage location for experiment `{experiment_id}` to "
        f"Databricks storage at {uc_schema_location}"
    )
    return uc_schema_location
@experimental(version="3.5.0")
def unset_experiment_trace_location(
    location: UCSchemaLocation,
    experiment_id: str | None = None,
) -> None:
    """
    Unset the storage location for traces of an experiment.

    Removes the experiment storage location configuration, including the view
    and the experiment tag.

    Args:
        location: The storage location to unset.
        experiment_id: The MLflow experiment ID to unset the storage location for. If not
            provided, the current active experiment will be used.

    Example:

    .. code-block:: python

        import mlflow
        from mlflow.entities import UCSchemaLocation

        mlflow.tracing.unset_experiment_trace_location(
            location=UCSchemaLocation(catalog_name="my_catalog", schema_name="my_schema"),
            experiment_id="12345",
        )
    """
    from mlflow.tracing.client import TracingClient
    from mlflow.tracking import get_tracking_uri
    from mlflow.tracking.fluent import _get_experiment_id

    if not is_databricks_uri(get_tracking_uri()):
        raise MlflowException(
            "The `unset_experiment_trace_location` API is only supported on Databricks."
        )
    if not isinstance(location, UCSchemaLocation):
        raise MlflowException.invalid_parameter_value(
            "`location` must be an instance of `mlflow.entities.UCSchemaLocation`."
        )
    target_experiment_id = experiment_id or _get_experiment_id()
    if target_experiment_id is None:
        raise MlflowException.invalid_parameter_value(
            "Experiment ID is required to clear storage location, either pass it as an argument or "
            "use `mlflow.set_experiment` to set the current experiment."
        )
    TracingClient()._unset_experiment_trace_location(target_experiment_id, location)
    _logger.info(f"Successfully cleared storage location for experiment `{target_experiment_id}`")
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/enablement.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/tracing/export/uc_table.py | import logging
from typing import Sequence
from opentelemetry.sdk.trace import ReadableSpan
from mlflow.entities.span import Span
from mlflow.entities.trace_info import TraceInfo
from mlflow.environment_variables import MLFLOW_ENABLE_ASYNC_TRACE_LOGGING
from mlflow.tracing.export.mlflow_v3 import MlflowV3SpanExporter
from mlflow.tracing.export.span_batcher import SpanBatcher
from mlflow.tracing.utils import get_active_spans_table_name
_logger = logging.getLogger(__name__)
class DatabricksUCTableSpanExporter(MlflowV3SpanExporter):
    """
    An exporter implementation that logs the traces to Databricks Unity Catalog table.
    """

    def __init__(self, tracking_uri: str | None = None) -> None:
        super().__init__(tracking_uri)
        # Track if we've raised an error for span export to avoid raising it multiple times.
        self._has_raised_span_export_error = False
        # The parent only creates `_async_queue` when async logging is enabled,
        # so the span batcher is likewise created conditionally.
        if hasattr(self, "_async_queue"):
            self._span_batcher = SpanBatcher(
                async_task_queue=self._async_queue,
                log_spans_func=self._log_spans,
            )

    def _export_spans_incrementally(self, spans: Sequence[ReadableSpan]) -> None:
        """
        Export spans incrementally as they complete.

        Args:
            spans: Sequence of ReadableSpan objects to export.
        """
        location = get_active_spans_table_name()
        if not location:
            # this should not happen since this exporter is only used when a destination
            # is set to UCSchemaLocation
            _logger.debug("No active spans table name found. Skipping span export.")
            return
        # Wrapping with MLflow span interface for easier downstream handling
        spans = [Span(span) for span in spans]
        if self._should_log_async():
            for span in spans:
                self._span_batcher.add_span(location=location, span=span)
        else:
            self._log_spans(location, spans)

    def _log_spans(self, location: str, spans: list[Span]) -> None:
        """Log spans to the trace server; warn on the first failure, then demote
        subsequent failures to debug logs to avoid log spam."""
        try:
            self._client.log_spans(location, spans)
        except Exception as e:
            if self._has_raised_span_export_error:
                _logger.debug(f"Failed to log spans to the trace server: {e}", exc_info=True)
            else:
                _logger.warning(f"Failed to log spans to the trace server: {e}")
                self._has_raised_span_export_error = True

    def _should_enable_async_logging(self) -> bool:
        return MLFLOW_ENABLE_ASYNC_TRACE_LOGGING.get()

    # Override this to False since spans are logged to UC table instead of artifacts.
    def _should_log_spans_to_artifacts(self, trace_info: TraceInfo) -> bool:
        return False

    def flush(self) -> None:
        # `_span_batcher` / `_async_queue` only exist when async logging is
        # enabled (mirrors the hasattr guard in __init__). Previously this
        # raised AttributeError when flushing with async logging disabled.
        if hasattr(self, "_span_batcher"):
            self._span_batcher.shutdown()
        if hasattr(self, "_async_queue"):
            self._async_queue.flush(terminate=True)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/export/uc_table.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/tracing/processor/uc_table.py | import logging
from opentelemetry.sdk.trace import ReadableSpan as OTelReadableSpan
from opentelemetry.sdk.trace import Span as OTelSpan
from opentelemetry.sdk.trace.export import SpanExporter
from mlflow.entities.trace_info import TraceInfo
from mlflow.entities.trace_location import TraceLocation, UCSchemaLocation, UnityCatalog
from mlflow.entities.trace_state import TraceState
from mlflow.exceptions import MlflowException
from mlflow.tracing.constant import TRACE_SCHEMA_VERSION_KEY, SpanAttributeKey, TraceMetadataKey
from mlflow.tracing.processor.base_mlflow import BaseMlflowSpanProcessor
from mlflow.tracing.utils import (
_bypass_attribute_guard,
generate_trace_id_v4,
get_mlflow_span_for_otel_span,
)
_logger = logging.getLogger(__name__)
class DatabricksUCTableSpanProcessor(BaseMlflowSpanProcessor):
    """
    Defines custom hooks to be executed when a span is started or ended (before exporting).
    This processor is used for exporting traces to Databricks Unity Catalog table.
    """

    def __init__(self, span_exporter: SpanExporter):
        # metrics export is not supported for UC table yet
        super().__init__(span_exporter, export_metrics=False)

    def _start_trace(self, root_span: OTelSpan) -> TraceInfo:
        """
        Create a new TraceInfo object and register it with the trace manager.
        This method is called in the on_start method of the base class.

        Raises:
            MlflowException: If the configured destination is neither a
                `UnityCatalog` nor a `UCSchemaLocation`.
        """
        from mlflow.tracing.provider import _MLFLOW_TRACE_USER_DESTINATION

        # The user-configured destination determines both the trace location and
        # how the v4 trace ID is generated.
        destination = _MLFLOW_TRACE_USER_DESTINATION.get()
        if isinstance(destination, UnityCatalog):
            trace_location = TraceLocation.from_databricks_uc_table_prefix(
                destination.catalog_name, destination.schema_name, destination.table_prefix
            )
            # Propagate the private OTel spans table name so downstream export knows
            # which physical table to write to.
            trace_location.uc_table_prefix._otel_spans_table_name = (
                destination._otel_spans_table_name
            )
            trace_id = generate_trace_id_v4(root_span, destination.full_table_prefix)
        elif isinstance(destination, UCSchemaLocation):
            trace_location = TraceLocation.from_databricks_uc_schema(
                destination.catalog_name, destination.schema_name
            )
            trace_location.uc_schema._otel_spans_table_name = destination._otel_spans_table_name
            trace_id = generate_trace_id_v4(root_span, destination.schema_location)
        else:
            raise MlflowException(
                "Unity Catalog spans table name is not set for trace. It can not be exported to "
                "Databricks Unity Catalog table."
            )
        metadata = self._get_basic_trace_metadata()
        # Override the schema version to 4 for UC table
        metadata[TRACE_SCHEMA_VERSION_KEY] = "4"
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location,
            request_time=root_span.start_time // 1_000_000,  # nanosecond to millisecond
            execution_duration=None,
            state=TraceState.IN_PROGRESS,
            trace_metadata=metadata,
            tags=self._get_basic_trace_tags(root_span),
        )
        self._trace_manager.register_trace(root_span.context.trace_id, trace_info)
        return trace_info

    def on_end(self, span: OTelReadableSpan) -> None:
        # Only the root span (no parent) carries trace-level user/session attributes.
        if span._parent is None:
            self._set_user_session_span_attributes(span)
        super().on_end(span)

    def _set_user_session_span_attributes(self, root_span: OTelReadableSpan) -> None:
        """Copy user/session IDs from the trace metadata onto the root span's attributes."""
        mlflow_span = get_mlflow_span_for_otel_span(root_span)
        if mlflow_span is None:
            return
        with self._trace_manager.get_trace(mlflow_span.trace_id) as trace:
            if trace is None:
                return
            for meta_key, attr_key in (
                (TraceMetadataKey.TRACE_USER, SpanAttributeKey.USER_ID),
                (TraceMetadataKey.TRACE_SESSION, SpanAttributeKey.SESSION_ID),
            ):
                if value := trace.info.trace_metadata.get(meta_key):
                    # NOTE(review): _bypass_attribute_guard presumably allows setting
                    # attributes on an already-ended span — confirm against its impl.
                    with _bypass_attribute_guard(mlflow_span._span):
                        mlflow_span._span.set_attribute(attr_key, value)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/processor/uc_table.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/utils/databricks_tracing_utils.py | import logging
from google.protobuf.duration_pb2 import Duration
from google.protobuf.timestamp_pb2 import Timestamp
from mlflow.entities import Assessment, Span, Trace, TraceData, TraceInfo
from mlflow.entities.trace_info_v2 import _truncate_request_metadata, _truncate_tags
from mlflow.entities.trace_location import (
InferenceTableLocation,
MlflowExperimentLocation,
TraceLocation,
TraceLocationType,
UCSchemaLocation,
UnityCatalog,
)
from mlflow.protos import assessments_pb2
from mlflow.protos import databricks_tracing_pb2 as pb
from mlflow.tracing.utils import (
construct_trace_id_v4,
parse_trace_id_v4,
)
_logger = logging.getLogger(__name__)
def parse_uc_location(location: str) -> tuple[str, str, str | None]:
parts = location.split(".")
if len(parts) == 2:
return parts[0], parts[1], None
if len(parts) == 3:
return parts[0], parts[1], parts[2]
raise ValueError(
f"Invalid UC location: {location}. Expected format: <catalog>.<schema>[.<table_prefix>]."
)
def uc_location_to_str(catalog: str, schema: str, table_prefix: str | None = None) -> str:
return f"{catalog}.{schema}.{table_prefix}" if table_prefix else f"{catalog}.{schema}"
def uc_schema_location_to_proto(
    uc_schema_location: UCSchemaLocation,
) -> pb.UCSchemaLocation:
    """Convert a UCSchemaLocation entity into its protobuf representation."""
    kwargs = {
        "catalog_name": uc_schema_location.catalog_name,
        "schema_name": uc_schema_location.schema_name,
        "otel_spans_table_name": uc_schema_location._otel_spans_table_name,
        "otel_logs_table_name": uc_schema_location._otel_logs_table_name,
    }
    return pb.UCSchemaLocation(**kwargs)
def uc_schema_location_from_proto(
    proto: pb.UCSchemaLocation,
) -> UCSchemaLocation:
    """Build a UCSchemaLocation entity from its protobuf form."""
    result = UCSchemaLocation(
        catalog_name=proto.catalog_name,
        schema_name=proto.schema_name,
    )
    # Only copy the optional OTel table names when explicitly set on the proto.
    for field in ("otel_spans_table_name", "otel_logs_table_name"):
        if proto.HasField(field):
            setattr(result, f"_{field}", getattr(proto, field))
    return result
def uc_table_prefix_location_to_proto(
    location: UnityCatalog,
) -> pb.UcTablePrefixLocation:
    """Convert a UnityCatalog table-prefix location entity to its protobuf form."""
    proto = pb.UcTablePrefixLocation(
        catalog_name=location.catalog_name,
        schema_name=location.schema_name,
        table_prefix=location.table_prefix,
    )
    # Optional table names are only written to the proto when truthy.
    optional_fields = (
        ("spans_table_name", location._otel_spans_table_name),
        ("logs_table_name", location._otel_logs_table_name),
        ("annotations_table_name", location._annotations_table_name),
    )
    for field, value in optional_fields:
        if value:
            setattr(proto, field, value)
    return proto
def uc_table_prefix_location_from_proto(proto: pb.UcTablePrefixLocation) -> UnityCatalog:
    """Build a UnityCatalog table-prefix location entity from its protobuf form."""
    location = UnityCatalog(
        catalog_name=proto.catalog_name,
        schema_name=proto.schema_name,
        table_prefix=proto.table_prefix,
    )
    # Map optional proto fields onto the corresponding private entity attributes.
    field_to_attr = {
        "spans_table_name": "_otel_spans_table_name",
        "logs_table_name": "_otel_logs_table_name",
        "annotations_table_name": "_annotations_table_name",
    }
    for field, attr in field_to_attr.items():
        if proto.HasField(field):
            setattr(location, attr, getattr(proto, field))
    return location
def inference_table_location_to_proto(
    inference_table_location: InferenceTableLocation,
) -> pb.InferenceTableLocation:
    """Convert an InferenceTableLocation entity into its protobuf representation."""
    table_name = inference_table_location.full_table_name
    return pb.InferenceTableLocation(full_table_name=table_name)
def mlflow_experiment_location_to_proto(
    mlflow_experiment_location: MlflowExperimentLocation,
) -> pb.MlflowExperimentLocation:
    """Convert an MlflowExperimentLocation entity into its protobuf representation."""
    experiment_id = mlflow_experiment_location.experiment_id
    return pb.MlflowExperimentLocation(experiment_id=experiment_id)
def trace_location_to_proto(trace_location: TraceLocation) -> pb.TraceLocation:
    """Convert a TraceLocation entity into its protobuf representation.

    Raises:
        ValueError: If the location type has no protobuf mapping.
    """
    location_type = trace_location.type
    proto_types = pb.TraceLocation.TraceLocationType
    if location_type == TraceLocationType.UC_SCHEMA:
        return pb.TraceLocation(
            type=proto_types.UC_SCHEMA,
            uc_schema=uc_schema_location_to_proto(trace_location.uc_schema),
        )
    if location_type == TraceLocationType.UC_TABLE_PREFIX:
        return pb.TraceLocation(
            type=proto_types.UC_TABLE_PREFIX,
            uc_table_prefix=uc_table_prefix_location_to_proto(trace_location.uc_table_prefix),
        )
    if location_type == TraceLocationType.MLFLOW_EXPERIMENT:
        return pb.TraceLocation(
            type=proto_types.MLFLOW_EXPERIMENT,
            mlflow_experiment=mlflow_experiment_location_to_proto(
                trace_location.mlflow_experiment
            ),
        )
    if location_type == TraceLocationType.INFERENCE_TABLE:
        return pb.TraceLocation(
            type=proto_types.INFERENCE_TABLE,
            inference_table=inference_table_location_to_proto(trace_location.inference_table),
        )
    raise ValueError(f"Unsupported trace location type: {trace_location.type}")
def trace_location_type_from_proto(proto: pb.TraceLocation.TraceLocationType) -> TraceLocationType:
    """Map a protobuf trace-location-type enum value onto the entity enum."""
    type_name = pb.TraceLocation.TraceLocationType.Name(proto)
    return TraceLocationType(type_name)
def trace_location_from_proto(proto: pb.TraceLocation) -> TraceLocation:
    """Convert a protobuf TraceLocation into the corresponding entity."""
    type_ = trace_location_type_from_proto(proto.type)
    # Resolve the oneof once instead of re-querying it per branch.
    identifier = proto.WhichOneof("identifier")
    if identifier == "uc_schema":
        return TraceLocation(
            type=type_,
            uc_schema=uc_schema_location_from_proto(proto.uc_schema),
        )
    if identifier == "uc_table_prefix":
        return TraceLocation(
            type=type_,
            uc_table_prefix=uc_table_prefix_location_from_proto(proto.uc_table_prefix),
        )
    if identifier == "mlflow_experiment":
        return TraceLocation(
            type=type_,
            mlflow_experiment=MlflowExperimentLocation.from_proto(proto.mlflow_experiment),
        )
    if identifier == "inference_table":
        return TraceLocation(
            type=type_,
            inference_table=InferenceTableLocation.from_proto(proto.inference_table),
        )
    # No identifier set: fall back to an unspecified location.
    return TraceLocation(TraceLocationType.TRACE_LOCATION_TYPE_UNSPECIFIED)
def trace_info_to_v4_proto(trace_info: TraceInfo) -> pb.TraceInfo:
    """Convert a TraceInfo entity into the v4 protobuf representation."""
    request_time = Timestamp()
    request_time.FromMilliseconds(trace_info.request_time)
    if trace_info.execution_duration is None:
        execution_duration = None
    else:
        execution_duration = Duration()
        execution_duration.FromMilliseconds(trace_info.execution_duration)
    # For UC-backed locations the entity trace ID embeds the location; strip it so
    # the proto carries only the raw trace ID.
    location = trace_info.trace_location
    if location.uc_schema or location.uc_table_prefix:
        _, trace_id = parse_trace_id_v4(trace_info.trace_id)
    else:
        trace_id = trace_info.trace_id
    return pb.TraceInfo(
        trace_id=trace_id,
        client_request_id=trace_info.client_request_id,
        trace_location=trace_location_to_proto(location),
        request_preview=trace_info.request_preview,
        response_preview=trace_info.response_preview,
        request_time=request_time,
        execution_duration=execution_duration,
        state=pb.TraceInfo.State.Value(trace_info.state),
        trace_metadata=_truncate_request_metadata(trace_info.trace_metadata),
        tags=_truncate_tags(trace_info.tags),
        assessments=[assessment_to_proto(a) for a in trace_info.assessments],
    )
def trace_to_proto(trace: Trace) -> pb.Trace:
    """Convert a Trace entity (info + spans) into its protobuf representation."""
    span_protos = [span.to_otel_proto() for span in trace.data.spans]
    return pb.Trace(trace_info=trace.info.to_proto(), spans=span_protos)
def trace_from_proto(proto: pb.Trace, location_id: str) -> Trace:
    """Build a Trace entity from its protobuf form, tagging each span with location_id."""
    spans = [Span.from_otel_proto(s, location_id) for s in proto.spans]
    return Trace(info=TraceInfo.from_proto(proto.trace_info), data=TraceData(spans=spans))
def assessment_to_proto(assessment: Assessment) -> pb.Assessment:
    """Convert an Assessment entity into its v4 protobuf representation.

    If the assessment's trace ID is a v4 ID embedding a UC location, the location is
    split out into `trace_location` (table-prefix or schema variant) and the proto's
    `trace_id` carries only the raw trace ID.
    """
    assessment_proto = pb.Assessment()
    assessment_proto.assessment_name = assessment.name
    location, trace_id = parse_trace_id_v4(assessment.trace_id)
    if location:
        catalog, schema, table_prefix = parse_uc_location(location)
        # A three-part location maps to a table-prefix location; two parts map to a
        # plain UC schema location.
        if table_prefix:
            uc_table_prefix = pb.UcTablePrefixLocation(
                catalog_name=catalog,
                schema_name=schema,
                table_prefix=table_prefix,
            )
            assessment_proto.trace_location.CopyFrom(
                pb.TraceLocation(
                    type=pb.TraceLocation.TraceLocationType.UC_TABLE_PREFIX,
                    uc_table_prefix=uc_table_prefix,
                )
            )
        else:
            uc_schema = pb.UCSchemaLocation(catalog_name=catalog, schema_name=schema)
            assessment_proto.trace_location.CopyFrom(
                pb.TraceLocation(
                    type=pb.TraceLocation.TraceLocationType.UC_SCHEMA,
                    uc_schema=uc_schema,
                )
            )
    assessment_proto.trace_id = trace_id
    assessment_proto.source.CopyFrom(assessment.source.to_proto())
    # Convert time in milliseconds to protobuf Timestamp
    assessment_proto.create_time.FromMilliseconds(assessment.create_time_ms)
    assessment_proto.last_update_time.FromMilliseconds(assessment.last_update_time_ms)
    # Optional scalar fields are only written when present on the entity.
    if assessment.span_id is not None:
        assessment_proto.span_id = assessment.span_id
    if assessment.rationale is not None:
        assessment_proto.rationale = assessment.rationale
    if assessment.assessment_id is not None:
        assessment_proto.assessment_id = assessment.assessment_id
    # Expectation and feedback are mutually exclusive; expectation wins when both set.
    if assessment.expectation is not None:
        assessment_proto.expectation.CopyFrom(assessment.expectation.to_proto())
    elif assessment.feedback is not None:
        assessment_proto.feedback.CopyFrom(assessment.feedback.to_proto())
    if assessment.metadata:
        # Metadata values are stringified for the proto's string map.
        for key, value in assessment.metadata.items():
            assessment_proto.metadata[key] = str(value)
    if assessment.overrides:
        assessment_proto.overrides = assessment.overrides
    if assessment.valid is not None:
        assessment_proto.valid = assessment.valid
    return assessment_proto
def get_trace_id_from_assessment_proto(proto: pb.Assessment | assessments_pb2.Assessment) -> str:
    """Reconstruct the fully-qualified trace ID for an assessment proto.

    V4 assessment protos carry a `trace_location`; when it points at a UC schema or
    UC table prefix, the location string is folded back into a v4 trace ID.
    Otherwise the raw `trace_id` is returned unchanged.
    """
    # V3 assessment protos have no trace_location field at all.
    has_location = "trace_location" in proto.DESCRIPTOR.fields_by_name
    if has_location and proto.trace_location.HasField("uc_schema"):
        schema = proto.trace_location.uc_schema
        location = uc_location_to_str(schema.catalog_name, schema.schema_name)
        return construct_trace_id_v4(location, proto.trace_id)
    if has_location and proto.trace_location.HasField("uc_table_prefix"):
        prefix = proto.trace_location.uc_table_prefix
        location = uc_location_to_str(
            prefix.catalog_name,
            prefix.schema_name,
            prefix.table_prefix,
        )
        return construct_trace_id_v4(location, proto.trace_id)
    return proto.trace_id
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/utils/databricks_tracing_utils.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/store/tracking/test_databricks_rest_store.py | import base64
import json
import time
from unittest import mock
import pytest
from google.protobuf.json_format import MessageToDict
from opentelemetry.proto.trace.v1.trace_pb2 import Span as OTelProtoSpan
import mlflow
from mlflow.entities import Span
from mlflow.entities.assessment import (
AssessmentSource,
AssessmentSourceType,
Feedback,
FeedbackValue,
)
from mlflow.entities.trace import Trace
from mlflow.entities.trace_data import TraceData
from mlflow.entities.trace_info import TraceInfo
from mlflow.entities.trace_location import TraceLocation, UCSchemaLocation, UnityCatalog
from mlflow.entities.trace_state import TraceState
from mlflow.entities.trace_status import TraceStatus
from mlflow.environment_variables import (
MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT,
MLFLOW_TRACING_SQL_WAREHOUSE_ID,
)
from mlflow.exceptions import MlflowException, RestException
from mlflow.protos import databricks_pb2
from mlflow.protos.databricks_pb2 import ENDPOINT_NOT_FOUND
from mlflow.protos.databricks_tracing_pb2 import (
BatchGetTraces,
CreateLocation,
CreateTraceUCStorageLocation,
DeleteTraceTag,
GetLocation,
GetTraceInfo,
LinkExperimentToUCTraceLocation,
LinkTraceLocation,
SetTraceTag,
UnLinkExperimentToUCTraceLocation,
)
from mlflow.protos.databricks_tracing_pb2 import UCSchemaLocation as ProtoUCSchemaLocation
from mlflow.protos.databricks_tracing_pb2 import (
UcTablePrefixLocation as ProtoUcTablePrefixLocation,
)
from mlflow.protos.service_pb2 import DeleteTraceTag as DeleteTraceTagV3
from mlflow.protos.service_pb2 import GetTraceInfoV3, StartTraceV3
from mlflow.protos.service_pb2 import SetTraceTag as SetTraceTagV3
from mlflow.store.tracking.databricks_rest_store import CompositeToken, DatabricksTracingRestStore
from mlflow.store.tracking.rest_store import RestStore
from mlflow.tracing.constant import TRACE_ID_V4_PREFIX
from mlflow.utils.databricks_tracing_utils import assessment_to_proto, trace_to_proto
from mlflow.utils.proto_json_utils import message_to_json
from mlflow.utils.rest_utils import (
_V3_TRACE_REST_API_PATH_PREFIX,
_V4_TRACE_REST_API_PATH_PREFIX,
MlflowHostCreds,
)
@pytest.fixture
def sql_warehouse_id(monkeypatch):
    """Set a test SQL warehouse ID via the env var and return it."""
    wh_id = "test-warehouse"
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, wh_id)
    return wh_id
def create_mock_spans(diff_trace_id=False):
    """Build two mocked MLflow spans wrapping OTel proto spans.

    Both spans share trace ID "trace123" unless `diff_trace_id` is True, in which
    case the second span uses "trace456".
    """

    def _make_span(name, trace_id):
        otel_span = OTelProtoSpan()
        otel_span.name = name
        otel_span.trace_id = trace_id.encode("utf-8")
        mocked = mock.MagicMock(spec=Span)
        mocked.trace_id = trace_id
        mocked.to_otel_proto.return_value = otel_span
        mocked._span = mock.MagicMock()
        mocked._span.resource = None
        return mocked

    second_id = "trace456" if diff_trace_id else "trace123"
    return [_make_span("span1", "trace123"), _make_span("span2", second_id)]
def _to_v4_trace(trace: Trace) -> Trace:
    """Rewrite a trace in place to use a v4 UC-schema location and v4 trace ID."""
    location = TraceLocation.from_databricks_uc_schema("catalog", "schema")
    trace.info.trace_location = location
    schema_location = location.uc_schema.schema_location
    trace.info.trace_id = f"{TRACE_ID_V4_PREFIX}{schema_location}/{trace.info.trace_id}"
    return trace
def _args(host_creds, endpoint, method, json_body, version, retry_timeout_seconds=None):
res = {
"host_creds": host_creds,
"endpoint": f"/api/{version}/mlflow/{endpoint}",
"method": method,
}
if retry_timeout_seconds is not None:
res["retry_timeout_seconds"] = retry_timeout_seconds
if method == "GET":
res["params"] = json.loads(json_body) if json_body is not None else None
else:
res["json"] = json.loads(json_body) if json_body is not None else None
return res
def _verify_requests(
    http_request,
    host_creds,
    endpoint,
    method,
    json_body,
    version="4.0",
    retry_timeout_seconds=None,
):
    """
    Verify HTTP requests in tests.

    Args:
        http_request: The mocked HTTP request object
        host_creds: MlflowHostCreds object
        endpoint: The endpoint being called (e.g., "traces/123")
        method: The HTTP method (e.g., "GET", "POST")
        json_body: The request body as a JSON string
        version: The version of the API to use (e.g., "2.0", "3.0", "4.0")
        retry_timeout_seconds: The retry timeout seconds to use for the request
    """
    expected_kwargs = _args(
        host_creds, endpoint, method, json_body, version, retry_timeout_seconds
    )
    http_request.assert_any_call(**expected_kwargs)
def test_create_trace_v4_uc_location(monkeypatch):
    """Traces with a UC-schema location should be created via the V4 endpoint."""
    monkeypatch.setenv(MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT.name, "1")
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "test-warehouse")
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    trace_info = TraceInfo(
        trace_id="trace:/catalog.schema/123",
        trace_location=TraceLocation.from_databricks_uc_schema("catalog", "schema"),
        request_time=123,
        execution_duration=10,
        state=TraceState.OK,
        request_preview="",
        response_preview="",
        trace_metadata={},
    )
    # Mock successful v4 response
    response = mock.MagicMock()
    response.status_code = 200
    expected_trace_info = MessageToDict(trace_info.to_proto(), preserving_proto_field_name=True)
    # The returned trace_id in proto should be otel_trace_id
    expected_trace_info.update({"trace_id": "123"})
    response.text = json.dumps(expected_trace_info)
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        result = store.start_trace(trace_info)
        _verify_requests(
            mock_http,
            creds,
            "traces/catalog.schema/123/info",
            "POST",
            message_to_json(trace_info.to_proto()),
            version="4.0",
            retry_timeout_seconds=1,
        )
        # The store should re-qualify the raw OTel ID into the full v4 trace ID.
        assert result.trace_id == "trace:/catalog.schema/123"
def test_create_trace_experiment_location_fallback_to_v3(monkeypatch):
    """Traces with an experiment location should be created via the V3 StartTrace endpoint."""
    monkeypatch.setenv(MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT.name, "1")
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "test-warehouse")
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    trace_info = TraceInfo(
        trace_id="tr-456",
        trace_location=TraceLocation.from_experiment_id("456"),
        request_time=456,
        execution_duration=20,
        state=TraceState.OK,
        request_preview="preview",
        response_preview="response",
        trace_metadata={"key": "value"},
    )
    trace = Trace(info=trace_info, data=TraceData())
    v3_response = StartTraceV3.Response(trace=trace.to_proto())
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        mock_call_endpoint.side_effect = [v3_response]
        result = store.start_trace(trace_info)
        # Exactly one backend call, and it must target the V3 request proto.
        assert mock_call_endpoint.call_count == 1
        call_args = mock_call_endpoint.call_args_list[0]
        assert call_args[0][0] == StartTraceV3
        assert result.trace_id == "tr-456"
def test_get_trace_info(monkeypatch):
    """V4 trace IDs should resolve via the V4 GetTraceInfo endpoint with warehouse ID."""
    with mlflow.start_span(name="test_span_v4") as span:
        span.set_inputs({"input": "test_value"})
        span.set_outputs({"output": "result"})
    trace = mlflow.get_trace(span.trace_id)
    # Rewrite the locally-created v3 trace into a v4 UC-schema trace.
    trace = _to_v4_trace(trace)
    mock_response = GetTraceInfo.Response(trace=trace_to_proto(trace))
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://test"))
    location = "catalog.schema"
    v4_trace_id = f"{TRACE_ID_V4_PREFIX}{location}/{span.trace_id}"
    monkeypatch.setenv("MLFLOW_TRACING_SQL_WAREHOUSE_ID", "test-warehouse")
    with mock.patch.object(store, "_call_endpoint", return_value=mock_response) as mock_call:
        result = store.get_trace_info(v4_trace_id)
        mock_call.assert_called_once()
        call_args = mock_call.call_args
        assert call_args[0][0] == GetTraceInfo
        request_body = call_args[0][1]
        request_data = json.loads(request_body)
        # The request payload carries the raw OTel ID plus location/warehouse.
        assert request_data["trace_id"] == span.trace_id
        assert request_data["location"] == location
        assert request_data["sql_warehouse_id"] == "test-warehouse"
        endpoint = call_args[1]["endpoint"]
        assert f"/traces/{location}/{span.trace_id}/info" in endpoint
        assert isinstance(result, TraceInfo)
        assert result.trace_id == trace.info.trace_id
def test_get_trace_info_fallback_to_v3():
    """Non-v4 trace IDs should be fetched via the V3 GetTraceInfo endpoint."""
    with mlflow.start_span(name="test_span_v3") as span:
        span.set_inputs({"input": "test_value"})
    trace = mlflow.get_trace(span.trace_id)
    mock_v3_response = GetTraceInfoV3.Response(trace=trace.to_proto())
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://test"))
    with mock.patch.object(store, "_call_endpoint", return_value=mock_v3_response) as mock_call:
        result = store.get_trace_info(span.trace_id)
        mock_call.assert_called_once()
        call_args = mock_call.call_args
        assert call_args[0][0] == GetTraceInfoV3
        request_body = call_args[0][1]
        request_data = json.loads(request_body)
        assert request_data["trace_id"] == span.trace_id
        assert isinstance(result, TraceInfo)
        assert result.trace_id == span.trace_id
def test_get_trace_info_missing_warehouse_id():
    """A backend 'could not resolve SQL warehouse' error should surface as MlflowException."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://test"))
    with mock.patch.object(
        RestStore,
        "_call_endpoint",
        side_effect=RestException(
            json={
                "error_code": databricks_pb2.ErrorCode.Name(databricks_pb2.INVALID_PARAMETER_VALUE),
                "message": "Could not resolve a SQL warehouse ID. Please provide one.",
            }
        ),
    ):
        with pytest.raises(MlflowException, match="SQL warehouse ID is required for "):
            store.get_trace_info("trace:/catalog.schema/1234567890")
def test_set_trace_tag():
    """V4 trace IDs should set tags via a PATCH to the V4 tags endpoint."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    location = "catalog.schema"
    trace_id = "tr-1234"
    request = SetTraceTag(
        key="k",
        value="v",
    )
    response.text = "{}"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.set_trace_tag(
            trace_id=f"{TRACE_ID_V4_PREFIX}{location}/{trace_id}",
            key=request.key,
            value=request.value,
        )
        expected_json = {
            "key": request.key,
            "value": request.value,
        }
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint=f"/api/4.0/mlflow/traces/{location}/{trace_id}/tags",
            method="PATCH",
            json=expected_json,
        )
        # set_trace_tag returns nothing on success.
        assert res is None
def test_set_trace_tag_fallback():
    """A non-v4 trace ID should fall back to the V3 SetTraceTag endpoint.

    Fix: removed the unused `response` MagicMock — `_call_endpoint` is patched
    directly, so the HTTP-level response object was dead code.
    """
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    trace_id = "tr-1234"
    with mock.patch.object(
        store, "_call_endpoint", return_value=SetTraceTagV3.Response()
    ) as mock_call:
        result = store.set_trace_tag(
            trace_id=trace_id,
            key="k",
            value="v",
        )
        mock_call.assert_called_once()
        call_args = mock_call.call_args
        # Fallback must target the V3 request proto with the original key/value.
        assert call_args[0][0] == SetTraceTagV3
        request_body = call_args[0][1]
        request_data = json.loads(request_body)
        assert request_data["key"] == "k"
        assert request_data["value"] == "v"
        # set_trace_tag returns nothing on success.
        assert result is None
def test_delete_trace_tag(monkeypatch):
    """V4 trace IDs should delete tags via a DELETE to the V4 tags endpoint."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    location = "catalog.schema"
    trace_id = "tr-1234"
    sql_warehouse_id = "warehouse_456"
    request = DeleteTraceTag(
        trace_id=trace_id,
        location_id=location,
        key="k",
    )
    response.text = "{}"
    monkeypatch.setenv("MLFLOW_TRACING_SQL_WAREHOUSE_ID", sql_warehouse_id)
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.delete_trace_tag(
            trace_id=f"{TRACE_ID_V4_PREFIX}{location}/{trace_id}",
            key=request.key,
        )
        # The warehouse ID travels in the JSON body; the key is in the URL path.
        expected_json = {
            "sql_warehouse_id": sql_warehouse_id,
        }
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint=f"/api/4.0/mlflow/traces/{location}/{trace_id}/tags/{request.key}",
            method="DELETE",
            json=expected_json,
        )
        assert res is None
def test_delete_trace_tag_with_special_characters(monkeypatch):
    """Tag keys containing '/' must be URL-encoded in the V4 DELETE endpoint path."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    location = "catalog.schema"
    trace_id = "tr-1234"
    sql_warehouse_id = "warehouse_456"
    key_with_slash = "foo/bar"
    response.text = "{}"
    monkeypatch.setenv("MLFLOW_TRACING_SQL_WAREHOUSE_ID", sql_warehouse_id)
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.delete_trace_tag(
            trace_id=f"{TRACE_ID_V4_PREFIX}{location}/{trace_id}",
            key=key_with_slash,
        )
        expected_json = {
            "sql_warehouse_id": sql_warehouse_id,
        }
        # Verify that the key is URL-encoded in the endpoint (/ becomes %2F)
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint=f"/api/4.0/mlflow/traces/{location}/{trace_id}/tags/foo%2Fbar",
            method="DELETE",
            json=expected_json,
        )
        assert res is None
def test_delete_trace_tag_fallback():
    """A non-v4 trace ID should fall back to the V3 DeleteTraceTag endpoint.

    Fix: removed the unused `response` MagicMock — `_call_endpoint` is patched
    directly, so the HTTP-level response object was dead code.
    """
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    trace_id = "tr-1234"
    with mock.patch.object(
        store, "_call_endpoint", return_value=DeleteTraceTagV3.Response()
    ) as mock_call:
        result = store.delete_trace_tag(
            trace_id=trace_id,
            key="k",
        )
        mock_call.assert_called_once()
        call_args = mock_call.call_args
        # Fallback must target the V3 request proto with the original key.
        assert call_args[0][0] == DeleteTraceTagV3
        request_body = call_args[0][1]
        request_data = json.loads(request_body)
        assert request_data["key"] == "k"
        # delete_trace_tag returns nothing on success.
        assert result is None
@pytest.mark.parametrize("sql_warehouse_id", [None, "warehouse_override"])
def test_batch_get_traces(monkeypatch, sql_warehouse_id):
    """batch_get_traces should hit the V4 batchGet endpoint with raw OTel trace IDs."""
    # NOTE(review): the parametrized `sql_warehouse_id` values appear unused below —
    # the env var is always set to "test-warehouse"; confirm the parametrize is intentional.
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "test-warehouse")
    with mlflow.start_span(name="test_span_1") as span1:
        span1.set_inputs({"input": "test_value_1"})
        span1.set_outputs({"output": "result_1"})
    with mlflow.start_span(name="test_span_2") as span2:
        span2.set_inputs({"input": "test_value_2"})
        span2.set_outputs({"output": "result_2"})
    trace1 = mlflow.get_trace(span1.trace_id)
    trace2 = mlflow.get_trace(span2.trace_id)
    # trace obtained from OSS backend is still v3
    trace1 = _to_v4_trace(trace1)
    trace2 = _to_v4_trace(trace2)
    mock_response = BatchGetTraces.Response()
    mock_response.traces.extend([trace_to_proto(trace1), trace_to_proto(trace2)])
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://test"))
    location = "catalog.schema"
    trace_ids = [trace1.info.trace_id, trace2.info.trace_id]
    with (
        mock.patch.object(store, "_call_endpoint", return_value=mock_response) as mock_call,
    ):
        result = store.batch_get_traces(trace_ids, location)
        mock_call.assert_called_once()
        call_args = mock_call.call_args
        assert call_args[0][0] == BatchGetTraces
        request_body = call_args[0][1]
        request_data = json.loads(request_body)
        assert request_data["sql_warehouse_id"] == "test-warehouse"
        # trace_ids in the request payload should be original OTel format
        assert request_data["trace_ids"] == [span1.trace_id, span2.trace_id]
        endpoint = call_args[1]["endpoint"]
        assert endpoint == f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location}/batchGet"
        assert isinstance(result, list)
        assert len(result) == 2
        assert all(isinstance(trace, Trace) for trace in result)
        assert result[0].info.trace_id == trace1.info.trace_id
        assert result[1].info.trace_id == trace2.info.trace_id
def test_search_traces_uc_schema(monkeypatch):
    """Searching UC-schema locations should go through the V4 search endpoint."""
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "test-warehouse")
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "trace_infos": [
                {
                    # REST API uses raw otel id as trace_id
                    "trace_id": "1234",
                    "trace_location": {
                        "type": "UC_SCHEMA",
                        "uc_schema": {"catalog_name": "catalog", "schema_name": "schema"},
                    },
                    "request_time": "1970-01-01T00:00:00.123Z",
                    "execution_duration_ms": 456,
                    "state": "OK",
                    "trace_metadata": {"key": "value"},
                    "tags": {"k": "v"},
                }
            ],
            "next_page_token": "token",
        }
    )
    filter_string = "state = 'OK'"
    max_results = 50
    order_by = ["request_time ASC", "execution_duration_ms DESC"]
    locations = ["catalog.schema"]
    page_token = "12345abcde"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        trace_infos, token = store.search_traces(
            filter_string=filter_string,
            max_results=max_results,
            order_by=order_by,
            locations=locations,
            page_token=page_token,
        )
        # V4 endpoint should be called for UC schema locations
        assert mock_http.call_count == 1
        call_args = mock_http.call_args[1]
        assert call_args["endpoint"] == f"{_V4_TRACE_REST_API_PATH_PREFIX}/search"
        json_body = call_args["json"]
        assert "locations" in json_body
        assert len(json_body["locations"]) == 1
        assert json_body["locations"][0]["uc_schema"]["catalog_name"] == "catalog"
        assert json_body["locations"][0]["uc_schema"]["schema_name"] == "schema"
        assert json_body["filter"] == filter_string
        assert json_body["max_results"] == max_results
        assert json_body["order_by"] == order_by
        assert json_body["page_token"] == page_token
        assert json_body["sql_warehouse_id"] == "test-warehouse"
        # Returned trace infos are re-qualified into full v4 trace IDs.
        assert len(trace_infos) == 1
        assert isinstance(trace_infos[0], TraceInfo)
        assert trace_infos[0].trace_id == "trace:/catalog.schema/1234"
        assert trace_infos[0].trace_location.uc_schema.catalog_name == "catalog"
        assert trace_infos[0].trace_location.uc_schema.schema_name == "schema"
        assert trace_infos[0].request_time == 123
        assert trace_infos[0].state == TraceStatus.OK.to_state()
        assert trace_infos[0].tags == {"k": "v"}
        assert trace_infos[0].trace_metadata == {"key": "value"}
        assert token == "token"
@pytest.mark.parametrize(
    "exception",
    [
        # Workspace where SearchTracesV4 is not supported yet
        RestException(
            json={
                "error_code": databricks_pb2.ErrorCode.Name(databricks_pb2.ENDPOINT_NOT_FOUND),
                "message": "Not found",
            }
        ),
        # V4 endpoint does not support searching by experiment ID (yet)
        RestException(
            json={
                "error_code": databricks_pb2.ErrorCode.Name(databricks_pb2.INVALID_PARAMETER_VALUE),
                "message": "MLFLOW_EXPERIMENT locations not yet supported",
            }
        ),
    ],
)
def test_search_traces_experiment_id(exception):
    """Experiment-ID searches retry on the V3 endpoint when the V4 call fails."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    trace_payload = {
        "trace_id": "tr-1234",
        "trace_location": {
            "type": "MLFLOW_EXPERIMENT",
            "mlflow_experiment": {"experiment_id": "1"},
        },
        "request_time": "1970-01-01T00:00:00.123Z",
        "execution_duration_ms": 456,
        "state": "OK",
        "trace_metadata": {"key": "value"},
        "tags": {"k": "v"},
    }
    v3_response = mock.MagicMock()
    v3_response.status_code = 200
    v3_response.text = json.dumps({"traces": [trace_payload], "next_page_token": "token"})
    with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
        # The first (V4) request raises `exception`; the V3 retry succeeds.
        mock_http.side_effect = [exception, v3_response]
        trace_infos, token = store.search_traces(
            filter_string="state = 'OK'",
            page_token="12345abcde",
            locations=["1"],
        )
        # V4 is always attempted first, then the store falls back to V3.
        assert mock_http.call_count == 2
        expected_endpoints = [
            f"{_V4_TRACE_REST_API_PATH_PREFIX}/search",
            f"{_V3_TRACE_REST_API_PATH_PREFIX}/search",
        ]
        for call, expected_endpoint in zip(mock_http.call_args_list, expected_endpoints):
            call_kwargs = call[1]
            assert call_kwargs["endpoint"] == expected_endpoint
            body = call_kwargs["json"]
            assert "locations" in body
            assert len(body["locations"]) == 1
            assert body["locations"][0]["mlflow_experiment"]["experiment_id"] == "1"
            assert body["filter"] == "state = 'OK'"
            assert body["max_results"] == 100
        assert len(trace_infos) == 1
        info = trace_infos[0]
        assert isinstance(info, TraceInfo)
        assert info.trace_id == "tr-1234"
        assert info.experiment_id == "1"
        assert info.request_time == 123
        assert info.state == TraceStatus.OK.to_state()
        assert info.tags == {"k": "v"}
        assert info.trace_metadata == {"key": "value"}
        assert token == "token"
@pytest.mark.parametrize(
    "exception",
    [
        # Workspace where SearchTracesV4 is not supported yet
        RestException(
            json={
                "error_code": databricks_pb2.ErrorCode.Name(databricks_pb2.ENDPOINT_NOT_FOUND),
                "message": "Not found",
            }
        ),
        # V4 endpoint does not support searching by experiment ID (yet)
        RestException(
            json={
                "error_code": databricks_pb2.ErrorCode.Name(databricks_pb2.INVALID_PARAMETER_VALUE),
                "message": "MLFLOW_EXPERIMENT locations not yet supported",
            }
        ),
    ],
)
def test_search_traces_with_mixed_locations(exception):
    """Mixed experiment + UC schema locations surface the V4 error without a V3 retry."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    not_found_code = databricks_pb2.ErrorCode.Name(databricks_pb2.ENDPOINT_NOT_FOUND)
    if exception.error_code == not_found_code:
        expected_error_message = "Searching traces in UC tables is not supported yet."
    else:
        expected_error_message = (
            "The `locations` parameter cannot contain both MLflow experiment and UC schema "
        )
    with mock.patch("mlflow.utils.rest_utils.http_request", side_effect=exception) as mock_http:
        with pytest.raises(MlflowException, match=expected_error_message):
            store.search_traces(
                filter_string="state = 'OK'",
                locations=["1", "catalog.schema"],
            )
        # Only the V4 endpoint is hit; a UC schema location rules out the V3 fallback.
        mock_http.assert_called_once()
        call_kwargs = mock_http.call_args[1]
        assert call_kwargs["endpoint"] == f"{_V4_TRACE_REST_API_PATH_PREFIX}/search"
        body = call_kwargs["json"]
        assert "locations" in body
        assert len(body["locations"]) == 2
        assert body["locations"][0]["mlflow_experiment"]["experiment_id"] == "1"
        assert body["locations"][1]["uc_schema"]["catalog_name"] == "catalog"
        assert body["locations"][1]["uc_schema"]["schema_name"] == "schema"
def test_search_traces_does_not_fallback_when_uc_schemas_are_specified():
    """A UC schema location must surface the V4 failure rather than retry on V3."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))

    def fail_v4_only(*args, **kwargs):
        # Simulate a workspace where only the V4 search endpoint is missing.
        if kwargs.get("endpoint") == f"{_V4_TRACE_REST_API_PATH_PREFIX}/search":
            raise MlflowException("V4 endpoint not supported", error_code=ENDPOINT_NOT_FOUND)
        return mock.MagicMock()

    with mock.patch("mlflow.utils.rest_utils.http_request", side_effect=fail_v4_only):
        with pytest.raises(
            MlflowException,
            match="Searching traces in UC tables is not supported yet.",
        ):
            store.search_traces(locations=["catalog.schema"])
def test_search_traces_non_fallback_errors():
    """Errors other than the known fallback triggers propagate to the caller unchanged."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with mock.patch(
        "mlflow.utils.rest_utils.http_request", side_effect=MlflowException("Random error")
    ):
        with pytest.raises(MlflowException, match="Random error"):
            store.search_traces(locations=["catalog.schema"])
def test_search_traces_experiment_ids_deprecated():
    """The legacy `experiment_ids` argument is rejected with a deprecation error."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with pytest.raises(MlflowException, match="experiment_ids.*deprecated.*use.*locations"):
        store.search_traces(experiment_ids=["123"])
def test_search_traces_with_missing_location():
    """Omitting locations entirely — None or an empty list — is rejected."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    for kwargs in ({}, {"locations": []}):
        with pytest.raises(MlflowException, match="location.*must be specified"):
            store.search_traces(**kwargs)
def test_search_traces_with_invalid_location():
    """A location with more than three dotted components is rejected."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with pytest.raises(MlflowException, match="Invalid location type:"):
        store.search_traces(locations=["catalog.schema.prefix.extra"])
def test_get_trace_location_v5():
    """get_trace_location queries the v5 locations endpoint and decodes the prefix."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    proto_response = GetLocation.Response(
        uc_table_prefix=ProtoUcTablePrefixLocation(
            catalog_name="catalog",
            schema_name="schema",
            table_prefix="prefix",
        ),
    )
    with mock.patch.object(store, "_call_endpoint", return_value=proto_response) as mock_call:
        location = store.get_trace_location("loc-123")
        args, kwargs = mock_call.call_args
        assert args[0] == GetLocation
        # The location ID is embedded directly in the v5 URL.
        assert kwargs["endpoint"] == "/api/5.0/mlflow/tracing/locations/loc-123"
        assert location.catalog_name == "catalog"
        assert location.schema_name == "schema"
        assert location.table_prefix == "prefix"
def test_create_or_get_trace_location_v5_with_table_prefix(monkeypatch):
    """create_or_get_trace_location posts the warehouse ID plus the UC prefix payload."""
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "warehouse-1")
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    proto_response = CreateLocation.Response(
        uc_table_prefix=ProtoUcTablePrefixLocation(
            catalog_name="catalog",
            schema_name="schema",
            table_prefix="prefix",
        ),
    )
    with mock.patch.object(store, "_call_endpoint", return_value=proto_response) as mock_call:
        location = store.create_or_get_trace_location(
            UnityCatalog(catalog_name="catalog", schema_name="schema", table_prefix="prefix")
        )
        args, kwargs = mock_call.call_args
        assert args[0] == CreateLocation
        assert kwargs["endpoint"] == "/api/5.0/mlflow/tracing/locations"
        payload = json.loads(args[1])
        # The warehouse ID is picked up from the environment variable.
        assert payload["sql_warehouse_id"] == "warehouse-1"
        assert payload["uc_table_prefix"]["catalog_name"] == "catalog"
        assert payload["uc_table_prefix"]["schema_name"] == "schema"
        assert payload["uc_table_prefix"]["table_prefix"] == "prefix"
        assert location.catalog_name == "catalog"
        assert location.schema_name == "schema"
        assert location.table_prefix == "prefix"
def test_link_trace_location_v5_with_table_prefix():
    """Linking a UC table-prefix location posts to the v5 trace-location:link route."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with mock.patch.object(
        store, "_call_endpoint", return_value=LinkTraceLocation.Response()
    ) as mock_call:
        store.link_trace_location(
            experiment_id="exp-123",
            location=UnityCatalog(
                catalog_name="catalog", schema_name="schema", table_prefix="prefix"
            ),
        )
        args, kwargs = mock_call.call_args
        assert args[0] == LinkTraceLocation
        assert kwargs["endpoint"] == "/api/5.0/mlflow/experiments/exp-123/trace-location:link"
        payload = json.loads(args[1])
        assert payload["experiment_id"] == "exp-123"
        assert payload["uc_table_prefix"]["catalog_name"] == "catalog"
        assert payload["uc_table_prefix"]["schema_name"] == "schema"
        assert payload["uc_table_prefix"]["table_prefix"] == "prefix"
def test_search_unified_traces(monkeypatch):
    """Passing model_id routes the search to the unified-traces endpoint (V2 payloads)."""
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "test-warehouse")
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    resp = mock.MagicMock()
    resp.status_code = 200
    # The online path returns the legacy (V2-style) TraceInfo format.
    resp.text = json.dumps(
        {
            "traces": [
                {
                    "request_id": "tr-1234",
                    "experiment_id": "1234",
                    "timestamp_ms": 123,
                    "execution_time_ms": 456,
                    "status": "OK",
                    "tags": [
                        {"key": "k", "value": "v"},
                    ],
                    "request_metadata": [
                        {"key": "key", "value": "value"},
                    ],
                }
            ],
            "next_page_token": "token",
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        trace_infos, token = store.search_traces(
            locations=["1234"],
            filter_string="status = 'OK'",
            max_results=10,
            order_by=["timestamp_ms DESC"],
            page_token="12345abcde",
            model_id="model123",
        )
        # model_id should steer the request to the unified-traces endpoint.
        assert mock_http.call_args[1]["endpoint"] == "/api/2.0/mlflow/unified-traces"
        assert len(trace_infos) == 1
        info = trace_infos[0]
        assert isinstance(info, TraceInfo)
        assert info.trace_id == "tr-1234"
        assert info.experiment_id == "1234"
        assert info.request_time == 123
        # V2's "status" surfaces as V3's "state"
        assert info.state == TraceStatus.OK.to_state()
        assert info.tags == {"k": "v"}
        assert info.trace_metadata == {"key": "value"}
        assert token == "token"
def test_set_experiment_trace_location():
    """Setting a trace location creates the UC storage location, then links it."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    experiment_id = "123"
    sql_warehouse_id = "test-warehouse-id"
    # First call: CreateTraceUCStorageLocation response carrying the table names.
    create_response = mock.MagicMock()
    create_response.uc_schema = ProtoUCSchemaLocation(
        catalog_name="test_catalog",
        schema_name="test_schema",
        otel_spans_table_name="test_spans",
        otel_logs_table_name="test_logs",
    )
    # Second call: LinkExperimentToUCTraceLocation succeeds.
    link_response = mock.MagicMock()
    link_response.status_code = 200
    link_response.text = "{}"
    with mock.patch.object(
        store, "_call_endpoint", side_effect=[create_response, link_response]
    ) as mock_call:
        result = store.set_experiment_trace_location(
            location=UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema"),
            experiment_id=experiment_id,
            sql_warehouse_id=sql_warehouse_id,
        )
        assert mock_call.call_count == 2
        create_call, link_call = mock_call.call_args_list
        # CreateTraceUCStorageLocation request
        assert create_call[0][0] == CreateTraceUCStorageLocation
        create_body = json.loads(create_call[0][1])
        assert create_body["uc_schema"]["catalog_name"] == "test_catalog"
        assert create_body["uc_schema"]["schema_name"] == "test_schema"
        assert create_body["sql_warehouse_id"] == sql_warehouse_id
        assert create_call[1]["endpoint"] == f"{_V4_TRACE_REST_API_PATH_PREFIX}/location"
        # LinkExperimentToUCTraceLocation request
        assert link_call[0][0] == LinkExperimentToUCTraceLocation
        link_body = json.loads(link_call[0][1])
        assert link_body["experiment_id"] == experiment_id
        assert link_body["uc_schema"]["catalog_name"] == "test_catalog"
        assert link_body["uc_schema"]["schema_name"] == "test_schema"
        assert link_body["uc_schema"]["otel_spans_table_name"] == "test_spans"
        assert link_body["uc_schema"]["otel_logs_table_name"] == "test_logs"
        assert (
            link_call[1]["endpoint"]
            == f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/link-location"
        )
        assert isinstance(result, UCSchemaLocation)
        assert result.catalog_name == "test_catalog"
        assert result.schema_name == "test_schema"
        assert result.full_otel_spans_table_name == "test_catalog.test_schema.test_spans"
        assert result.full_otel_logs_table_name == "test_catalog.test_schema.test_logs"
def test_set_experiment_trace_location_with_existing_location():
    """ALREADY_EXISTS from location creation is tolerated; linking still proceeds."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    experiment_id = "123"
    sql_warehouse_id = "test-warehouse-id"
    link_response = mock.MagicMock()
    link_response.status_code = 200
    link_response.text = "{}"
    side_effects = [
        # Creation reports the location already exists ...
        MlflowException("Location already exists", error_code=databricks_pb2.ALREADY_EXISTS),
        # ... linking then succeeds anyway.
        link_response,
    ]
    with mock.patch.object(store, "_call_endpoint", side_effect=side_effects) as mock_call:
        result = store.set_experiment_trace_location(
            location=UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema"),
            experiment_id=experiment_id,
            sql_warehouse_id=sql_warehouse_id,
        )
        assert mock_call.call_count == 2
        create_call, link_call = mock_call.call_args_list
        # CreateTraceUCStorageLocation request
        assert create_call[0][0] == CreateTraceUCStorageLocation
        create_body = json.loads(create_call[0][1])
        assert create_body["uc_schema"]["catalog_name"] == "test_catalog"
        assert create_body["uc_schema"]["schema_name"] == "test_schema"
        assert create_body["sql_warehouse_id"] == sql_warehouse_id
        assert create_call[1]["endpoint"] == f"{_V4_TRACE_REST_API_PATH_PREFIX}/location"
        # LinkExperimentToUCTraceLocation request
        assert link_call[0][0] == LinkExperimentToUCTraceLocation
        link_body = json.loads(link_call[0][1])
        assert link_body["experiment_id"] == experiment_id
        assert link_body["uc_schema"]["catalog_name"] == "test_catalog"
        assert link_body["uc_schema"]["schema_name"] == "test_schema"
        assert (
            link_call[1]["endpoint"]
            == f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/link-location"
        )
        assert isinstance(result, UCSchemaLocation)
        assert result.catalog_name == "test_catalog"
        assert result.schema_name == "test_schema"
def test_unset_experiment_trace_location_with_uc_schema():
    """Unsetting a UC schema location issues a single V4 unlink-location call."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    experiment_id = "123"
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = "{}"
    with mock.patch.object(store, "_call_endpoint", return_value=resp) as mock_call:
        store.unset_experiment_trace_location(
            experiment_id=experiment_id,
            location=UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema"),
        )
        mock_call.assert_called_once()
        args, kwargs = mock_call.call_args
        assert args[0] == UnLinkExperimentToUCTraceLocation
        body = json.loads(args[1])
        assert body["experiment_id"] == experiment_id
        assert body["uc_schema"]["catalog_name"] == "test_catalog"
        assert body["uc_schema"]["schema_name"] == "test_schema"
        assert (
            kwargs["endpoint"] == f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/unlink-location"
        )
def test_log_spans_to_uc_table_empty_spans():
    """Logging an empty span list is a no-op that returns an empty list."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("http://localhost"))
    assert store.log_spans("catalog.schema.table", [], tracking_uri="databricks") == []
@pytest.mark.parametrize("diff_trace_id", [True, False])
def test_log_spans_to_uc_table_success(diff_trace_id):
    """Spans are POSTed as protobuf to the OTel endpoint with the UC table header."""
    workspace_config = mock.MagicMock()
    workspace_config.authenticate.return_value = {"Authorization": "Bearer token"}
    spans = create_mock_spans(diff_trace_id)
    http_response = mock.MagicMock()
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("http://localhost"))
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.verify_rest_response"
        ) as mock_verify,
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=http_response,
        ) as mock_http_request,
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.get_databricks_workspace_client_config",
            return_value=workspace_config,
        ) as mock_get_config,
    ):
        store.log_spans("catalog.schema.spans", spans, tracking_uri="databricks")
        # Workspace config is resolved once, and the response is verified.
        mock_get_config.assert_called_once_with("databricks")
        mock_http_request.assert_called_once()
        mock_verify.assert_called_once_with(http_response, "/api/2.0/otel/v1/traces")
        # Inspect the outgoing HTTP request.
        request_kwargs = mock_http_request.call_args[1]
        assert request_kwargs["method"] == "POST"
        assert request_kwargs["endpoint"] == "/api/2.0/otel/v1/traces"
        headers = request_kwargs["extra_headers"]
        assert "Content-Type" in headers
        assert headers["Content-Type"] == "application/x-protobuf"
        assert "X-Databricks-UC-Table-Name" in headers
        assert headers["X-Databricks-UC-Table-Name"] == "catalog.schema.spans"
def test_log_spans_to_uc_table_config_error():
    """A workspace-config failure is wrapped in an MlflowException."""
    spans = [mock.MagicMock(spec=Span, trace_id="trace123")]
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("http://localhost"))
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.get_databricks_workspace_client_config",
            side_effect=Exception("Config failed"),
        ),
        pytest.raises(MlflowException, match="Failed to log spans to UC table"),
    ):
        store.log_spans("catalog.schema.spans", spans, tracking_uri="databricks")
def test_create_assessment(sql_warehouse_id):
    """Creating an assessment POSTs the proto payload to the V4 assessments route."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps(
        {
            "assessment_id": "1234",
            "assessment_name": "assessment_name",
            "trace_identifier": {
                "uc_schema": {
                    "catalog_name": "catalog",
                    "schema_name": "schema",
                },
                "trace_id": "1234",
            },
            "source": {
                "source_type": "LLM_JUDGE",
                "source_id": "gpt-4o-mini",
            },
            "create_time": "2025-02-20T05:47:23Z",
            "last_update_time": "2025-02-20T05:47:23Z",
            "feedback": {"value": True},
            "rationale": "rationale",
            "metadata": {"model": "gpt-4o-mini"},
            "error": None,
            "span_id": None,
        }
    )
    now_ms = int(time.time() * 1000)
    feedback = Feedback(
        trace_id="trace:/catalog.schema/1234",
        name="assessment_name",
        value=True,
        source=AssessmentSource(
            source_type=AssessmentSourceType.LLM_JUDGE, source_id="gpt-4o-mini"
        ),
        create_time_ms=now_ms,
        last_update_time_ms=now_ms,
        rationale="rationale",
        metadata={"model": "gpt-4o-mini"},
        span_id=None,
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        result = store.create_assessment(assessment=feedback)
        _verify_requests(
            mock_http,
            creds,
            f"traces/catalog.schema/1234/assessments?sql_warehouse_id={sql_warehouse_id}",
            "POST",
            message_to_json(assessment_to_proto(feedback)),
            version="4.0",
        )
        assert isinstance(result, Feedback)
        assert result.assessment_id is not None
        assert result.value == feedback.value
def test_get_assessment(sql_warehouse_id):
    """Fetching an assessment GETs the V4 route and parses the feedback payload."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    trace_id = "trace:/catalog.schema/1234"
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps(
        {
            "assessment_id": "1234",
            "assessment_name": "assessment_name",
            "trace_id": trace_id,
            "source": {
                "source_type": "LLM_JUDGE",
                "source_id": "gpt-4o-mini",
            },
            "create_time": "2025-02-20T05:47:23Z",
            "last_update_time": "2025-02-20T05:47:23Z",
            "feedback": {"value": True},
            "rationale": "rationale",
            "metadata": {"model": "gpt-4o-mini"},
            "error": None,
            "span_id": None,
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        result = store.get_assessment(trace_id=trace_id, assessment_id="1234")
        _verify_requests(
            mock_http,
            creds,
            f"traces/catalog.schema/1234/assessments/1234?sql_warehouse_id={sql_warehouse_id}",
            "GET",
            json_body=None,
            version="4.0",
        )
        assert isinstance(result, Feedback)
        assert result.assessment_id == "1234"
        assert result.value is True
def test_update_assessment(sql_warehouse_id):
    """Updating feedback/rationale/metadata PATCHes the V4 route with an update mask."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps(
        {
            "assessment_id": "1234",
            "assessment_name": "updated_assessment_name",
            "trace_location": {
                "type": "UC_SCHEMA",
                "uc_schema": {
                    "catalog_name": "catalog",
                    "schema_name": "schema",
                },
            },
            "trace_id": "1234",
            "source": {
                "source_type": "LLM_JUDGE",
                "source_id": "gpt-4o-mini",
            },
            "create_time": "2025-02-20T05:47:23Z",
            "last_update_time": "2025-02-20T05:47:23Z",
            "feedback": {"value": False},
            "rationale": "updated_rationale",
            "metadata": {"model": "gpt-4o-mini"},
            "error": None,
            "span_id": None,
        }
    )
    # Expected PATCH body; key order matters since it is compared as serialized JSON.
    expected_request = {
        "assessment_id": "1234",
        "trace_location": {
            "type": "UC_SCHEMA",
            "uc_schema": {
                "catalog_name": "catalog",
                "schema_name": "schema",
                "otel_spans_table_name": "mlflow_experiment_trace_otel_spans",
                "otel_logs_table_name": "mlflow_experiment_trace_otel_logs",
            },
        },
        "trace_id": "1234",
        "feedback": {"value": False},
        "rationale": "updated_rationale",
        "metadata": {"model": "gpt-4o-mini"},
    }
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        result = store.update_assessment(
            trace_id="trace:/catalog.schema/1234",
            assessment_id="1234",
            feedback=FeedbackValue(value=False),
            rationale="updated_rationale",
            metadata={"model": "gpt-4o-mini"},
        )
        _verify_requests(
            mock_http,
            creds,
            f"traces/catalog.schema/1234/assessments/1234?sql_warehouse_id={sql_warehouse_id}&update_mask=feedback,rationale,metadata",
            "PATCH",
            json.dumps(expected_request),
            version="4.0",
        )
        assert isinstance(result, Feedback)
        assert result.assessment_id == "1234"
        assert result.value is False
        assert result.rationale == "updated_rationale"
def test_update_assessment_uc_table_prefix(sql_warehouse_id):
    """Assessment updates also work for traces stored under a UC table prefix."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps(
        {
            "assessment_id": "1234",
            "assessment_name": "updated_assessment_name",
            "trace_location": {
                "type": "UC_TABLE_PREFIX",
                "uc_table_prefix": {
                    "catalog_name": "catalog",
                    "schema_name": "schema",
                    "table_prefix": "prefix",
                },
            },
            "trace_id": "1234",
            "source": {
                "source_type": "LLM_JUDGE",
                "source_id": "gpt-4o-mini",
            },
            "create_time": "2025-02-20T05:47:23Z",
            "last_update_time": "2025-02-20T05:47:23Z",
            "feedback": {"value": False},
            "rationale": "updated_rationale",
            "metadata": {"model": "gpt-4o-mini"},
            "error": None,
            "span_id": None,
        }
    )
    # Expected PATCH body; key order matters since it is compared as serialized JSON.
    expected_request = {
        "assessment_id": "1234",
        "trace_location": {
            "type": "UC_TABLE_PREFIX",
            "uc_table_prefix": {
                "catalog_name": "catalog",
                "schema_name": "schema",
                "table_prefix": "prefix",
            },
        },
        "trace_id": "1234",
        "feedback": {"value": False},
        "rationale": "updated_rationale",
        "metadata": {"model": "gpt-4o-mini"},
    }
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        result = store.update_assessment(
            trace_id="trace:/catalog.schema.prefix/1234",
            assessment_id="1234",
            feedback=FeedbackValue(value=False),
            rationale="updated_rationale",
            metadata={"model": "gpt-4o-mini"},
        )
        _verify_requests(
            mock_http,
            creds,
            f"traces/catalog.schema.prefix/1234/assessments/1234?sql_warehouse_id={sql_warehouse_id}&update_mask=feedback,rationale,metadata",
            "PATCH",
            json.dumps(expected_request),
            version="4.0",
        )
        assert isinstance(result, Feedback)
        assert result.assessment_id == "1234"
        assert result.value is False
        assert result.rationale == "updated_rationale"
def test_search_traces_uc_table_prefix(monkeypatch):
    """Searching a UC table-prefix location uses V4 and rebuilds V4-style trace IDs."""
    monkeypatch.setenv(MLFLOW_TRACING_SQL_WAREHOUSE_ID.name, "test-warehouse")
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps(
        {
            "trace_infos": [
                {
                    "trace_id": "1234",
                    "trace_location": {
                        "type": "UC_TABLE_PREFIX",
                        "uc_table_prefix": {
                            "catalog_name": "catalog",
                            "schema_name": "schema",
                            "table_prefix": "prefix",
                        },
                    },
                    "request_time": "1970-01-01T00:00:00.123Z",
                    "execution_duration_ms": 456,
                    "state": "OK",
                    "trace_metadata": {"key": "value"},
                    "tags": {"k": "v"},
                }
            ],
            "next_page_token": "token",
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        trace_infos, token = store.search_traces(locations=["catalog.schema.prefix"])
        assert mock_http.call_count == 1
        call_kwargs = mock_http.call_args[1]
        assert call_kwargs["endpoint"] == f"{_V4_TRACE_REST_API_PATH_PREFIX}/search"
        body = call_kwargs["json"]
        assert "locations" in body
        assert len(body["locations"]) == 1
        prefix_location = body["locations"][0]["uc_table_prefix"]
        assert prefix_location["catalog_name"] == "catalog"
        assert prefix_location["schema_name"] == "schema"
        assert prefix_location["table_prefix"] == "prefix"
        assert body["sql_warehouse_id"] == "test-warehouse"
        assert len(trace_infos) == 1
        info = trace_infos[0]
        assert isinstance(info, TraceInfo)
        # The raw OTel ID is re-qualified with its location.
        assert info.trace_id == "trace:/catalog.schema.prefix/1234"
        assert info.trace_location.uc_table_prefix.catalog_name == "catalog"
        assert info.trace_location.uc_table_prefix.schema_name == "schema"
        assert info.trace_location.uc_table_prefix.table_prefix == "prefix"
        assert token == "token"
def test_delete_assessment(sql_warehouse_id):
    """Deleting an assessment issues a DELETE to the V4 assessments route."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps({})
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        store.delete_assessment(trace_id="trace:/catalog.schema/1234", assessment_id="1234")
        _verify_requests(
            mock_http,
            creds,
            f"traces/catalog.schema/1234/assessments/1234?sql_warehouse_id={sql_warehouse_id}",
            "DELETE",
            json_body=None,
            version="4.0",
        )
def test_link_traces_to_run_with_v4_trace_ids_uses_batch_v4_endpoint():
    """V4 trace IDs are stripped of their location and sent to the batch endpoint."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps({})
    location = "catalog.schema"
    run_id = "run_abc"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        store.link_traces_to_run(
            trace_ids=[
                f"{TRACE_ID_V4_PREFIX}{location}/trace123",
                f"{TRACE_ID_V4_PREFIX}{location}/trace456",
            ],
            run_id=run_id,
        )
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint=f"/api/4.0/mlflow/traces/{location}/link-to-run/batchCreate",
            method="POST",
            json={
                "location_id": location,
                "trace_ids": ["trace123", "trace456"],
                "run_id": run_id,
            },
        )
def test_link_traces_to_run_with_v3_trace_ids_uses_v3_endpoint():
    """Plain (V3) trace IDs go to the legacy 2.0 link-to-run endpoint unchanged."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps({})
    trace_ids = ["tr-123", "tr-456"]
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        store.link_traces_to_run(trace_ids=trace_ids, run_id="run_abc")
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint="/api/2.0/mlflow/traces/link-to-run",
            method="POST",
            json={
                "trace_ids": trace_ids,
                "run_id": "run_abc",
            },
        )
def test_link_traces_to_run_with_mixed_v3_v4_trace_ids_handles_both():
    """A mixed batch fans out into one V3 call and one V4 batch call."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps({})
    location = "catalog.schema"
    v3_trace_id = "tr-123"
    v4_trace_id = f"{TRACE_ID_V4_PREFIX}{location}/trace456"
    run_id = "run_abc"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        store.link_traces_to_run(trace_ids=[v3_trace_id, v4_trace_id], run_id=run_id)
        # One request per API version.
        assert mock_http.call_count == 2
        v3_call = next(
            call for call in mock_http.call_args_list if "2.0" in call.kwargs["endpoint"]
        )
        assert v3_call.kwargs["endpoint"] == "/api/2.0/mlflow/traces/link-to-run"
        assert v3_call.kwargs["json"]["trace_ids"] == [v3_trace_id]
        assert v3_call.kwargs["json"]["run_id"] == run_id
        v4_call = next(
            call for call in mock_http.call_args_list if "4.0" in call.kwargs["endpoint"]
        )
        assert (
            v4_call.kwargs["endpoint"]
            == f"/api/4.0/mlflow/traces/{location}/link-to-run/batchCreate"
        )
        # The V4 call carries only the location-relative trace ID.
        assert v4_call.kwargs["json"]["trace_ids"] == ["trace456"]
        assert v4_call.kwargs["json"]["run_id"] == run_id
def test_link_traces_to_run_with_different_locations_groups_by_location():
    """V4 trace IDs are grouped per location, one batch call per group."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps({})
    location1 = "catalog1.schema1"
    location2 = "catalog2.schema2"
    run_id = "run_abc"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        store.link_traces_to_run(
            trace_ids=[
                f"{TRACE_ID_V4_PREFIX}{location1}/trace123",
                f"{TRACE_ID_V4_PREFIX}{location2}/trace456",
                f"{TRACE_ID_V4_PREFIX}{location1}/trace789",
            ],
            run_id=run_id,
        )
        # One batch call per distinct location.
        assert mock_http.call_count == 2
        assert {call.kwargs["endpoint"] for call in mock_http.call_args_list} == {
            f"/api/4.0/mlflow/traces/{location1}/link-to-run/batchCreate",
            f"/api/4.0/mlflow/traces/{location2}/link-to-run/batchCreate",
        }
        # Each call carries exactly the IDs belonging to its location.
        for call in mock_http.call_args_list:
            endpoint = call.kwargs["endpoint"]
            payload = call.kwargs["json"]
            if location1 in endpoint:
                assert set(payload["trace_ids"]) == {"trace123", "trace789"}
            elif location2 in endpoint:
                assert payload["trace_ids"] == ["trace456"]
            assert payload["run_id"] == run_id
def test_link_traces_to_run_with_empty_list_does_nothing():
    """An empty trace-ID list must not trigger any HTTP traffic."""
    store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
        store.link_traces_to_run(trace_ids=[], run_id="run_abc")
        mock_http.assert_not_called()
def test_unlink_traces_from_run_with_v4_trace_ids_uses_batch_v4_endpoint():
    """V4 trace IDs are unlinked via a single DELETE to the batch endpoint."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    resp = mock.MagicMock()
    resp.status_code = 200
    resp.text = json.dumps({})
    location = "catalog.schema"
    run_id = "run_abc"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=resp) as mock_http:
        store.unlink_traces_from_run(
            trace_ids=[
                f"{TRACE_ID_V4_PREFIX}{location}/trace123",
                f"{TRACE_ID_V4_PREFIX}{location}/trace456",
            ],
            run_id=run_id,
        )
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint=f"/api/4.0/mlflow/traces/{location}/unlink-from-run/batchDelete",
            method="DELETE",
            json={
                "location_id": location,
                "trace_ids": ["trace123", "trace456"],
                "run_id": run_id,
            },
        )
def test_unlink_traces_from_run_with_v3_trace_ids_raises_error():
    """V3 trace IDs (no UC schema) are rejected when unlinking from a run."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with pytest.raises(
        MlflowException,
        match="Unlinking traces from runs is only supported for traces with UC schema",
    ):
        tracing_store.unlink_traces_from_run(trace_ids=["tr-123", "tr-456"], run_id="run_abc")
def test_unlink_traces_from_run_with_mixed_v3_v4_trace_ids_raises_error():
    """A batch mixing V3 and V4 trace IDs fails: V3 IDs cannot be unlinked."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    mixed_ids = [
        "tr-123",  # v3 format -> unsupported
        f"{TRACE_ID_V4_PREFIX}catalog.schema/trace456",  # v4 format
    ]
    with pytest.raises(
        MlflowException,
        match="Unlinking traces from runs is only supported for traces with UC schema",
    ):
        tracing_store.unlink_traces_from_run(trace_ids=mixed_ids, run_id="run_abc")
def test_unlink_traces_from_run_with_different_locations_groups_by_location():
    """Unlinking traces that span multiple UC locations issues one batch call per location."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps({})
    location1 = "catalog1.schema1"
    location2 = "catalog2.schema2"
    # Two traces in location1, one in location2 — interleaved on purpose.
    trace_ids = [
        f"{TRACE_ID_V4_PREFIX}{location1}/trace123",
        f"{TRACE_ID_V4_PREFIX}{location2}/trace456",
        f"{TRACE_ID_V4_PREFIX}{location1}/trace789",
    ]
    run_id = "run_abc"
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        store.unlink_traces_from_run(trace_ids=trace_ids, run_id=run_id)
        # Should make 2 separate batch calls, one for each location
        assert mock_http.call_count == 2
        # Verify calls were made for both locations
        calls = mock_http.call_args_list
        call_endpoints = {call.kwargs["endpoint"] for call in calls}
        expected_endpoints = {
            f"/api/4.0/mlflow/traces/{location1}/unlink-from-run/batchDelete",
            f"/api/4.0/mlflow/traces/{location2}/unlink-from-run/batchDelete",
        }
        assert call_endpoints == expected_endpoints
        # Verify the trace IDs were grouped correctly
        for call in calls:
            endpoint = call.kwargs["endpoint"]
            json_body = call.kwargs["json"]
            if location1 in endpoint:
                # Ordering within a location's batch is not asserted; compare as a set.
                assert set(json_body["trace_ids"]) == {"trace123", "trace789"}
            elif location2 in endpoint:
                assert json_body["trace_ids"] == ["trace456"]
            assert json_body["run_id"] == run_id
def test_unlink_traces_from_run_with_empty_list_does_nothing():
    """Unlinking an empty trace-ID list must not issue any HTTP request."""
    host_creds = MlflowHostCreds("https://hello")
    tracing_store = DatabricksTracingRestStore(lambda: host_creds)
    with mock.patch("mlflow.utils.rest_utils.http_request") as http_mock:
        tracing_store.unlink_traces_from_run(trace_ids=[], run_id="run_abc")
    http_mock.assert_not_called()
def test_search_datasets_basic():
    """A basic dataset search hits the managed-evals endpoint and maps the response fields."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    # Canned backend response: a single dataset with every optional field populated.
    response_data = {
        "datasets": [
            {
                "dataset_id": "dataset_1",
                "name": "test_dataset",
                "digest": "abc123",
                "create_time": "2025-11-28T21:30:53.195Z",
                "last_update_time": "2025-11-28T21:30:53.195Z",
                "created_by": "user@example.com",
                "last_updated_by": "user@example.com",
                "source": '{"table_name":"main.default.test"}',
                "source_type": "databricks-uc-table",
                "last_sync_time": "1970-01-01T00:00:00Z",
            }
        ],
        "next_page_token": None,
    }
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=mock.Mock(json=lambda: response_data),
        ) as mock_http,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        result = store.search_datasets(experiment_ids=["exp_1"], max_results=100)
        # Verify the mock was called correctly
        mock_http.assert_called_once()
        call_args = mock_http.call_args
        endpoint = call_args[1]["endpoint"]
        assert call_args[1]["method"] == "GET"
        assert "/api/2.0/managed-evals/datasets" in endpoint
        # URL encoding: = becomes %3D, ' becomes %27
        assert "experiment_id%3D%27exp_1%27" in endpoint or "experiment_id='exp_1'" in endpoint
        # Verify max_results is passed as page_size
        assert "page_size=100" in endpoint
        # Verify the results
        assert len(result) == 1
        assert result[0].dataset_id == "dataset_1"
        assert result[0].name == "test_dataset"
        assert result[0].digest == "abc123"
        assert result[0].created_by == "user@example.com"
        assert result[0].last_updated_by == "user@example.com"
        assert result.token is None
def test_search_datasets_multiple_experiment_ids():
    """The managed-evals API accepts only a single experiment ID per search."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    expected_error = (
        "Databricks managed-evals API does not support searching multiple experiment IDs"
    )
    with pytest.raises(MlflowException, match=expected_error):
        tracing_store.search_datasets(experiment_ids=["exp_1", "exp_2"], max_results=100)
def test_search_datasets_pagination():
    """A caller-supplied page token is forwarded as the `page_token` query parameter."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    empty_page = mock.MagicMock()
    empty_page.json.return_value = {"datasets": [], "next_page_token": None}
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=empty_page,
        ) as http_mock,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        tracing_store.search_datasets(
            experiment_ids=["exp_1"], max_results=50, page_token="prev_token"
        )
    assert "page_token=prev_token" in http_mock.call_args[1]["endpoint"]
def test_search_datasets_empty_results():
    """An empty `datasets` payload yields an empty paged list with no token."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=mock.Mock(json=lambda: {"datasets": []}),
        ) as http_mock,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        results = tracing_store.search_datasets(experiment_ids=["exp_1"])
    http_mock.assert_called_once()
    assert len(results) == 0
    assert results.token is None
@pytest.mark.parametrize(
    ("param_name", "param_value", "error_match"),
    [
        ("filter_string", "name LIKE 'test%'", "filter_string parameter is not supported"),
        ("order_by", ["created_time DESC"], "order_by parameter is not supported"),
    ],
)
def test_search_datasets_unsupported_parameters(param_name, param_value, error_match):
    """filter_string and order_by are rejected by the managed-evals dataset search."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    with pytest.raises(MlflowException, match=error_match):
        tracing_store.search_datasets(experiment_ids=["exp_1"], **{param_name: param_value})
def test_search_datasets_endpoint_not_found():
    """An ENDPOINT_NOT_FOUND RestException is surfaced as a workspace-availability error."""
    tracing_store = DatabricksTracingRestStore(lambda: MlflowHostCreds("https://hello"))
    not_found = RestException({"error_code": "ENDPOINT_NOT_FOUND", "message": "Not found"})
    with mock.patch(
        "mlflow.store.tracking.databricks_rest_store.http_request", side_effect=not_found
    ):
        with pytest.raises(MlflowException, match="not available in this Databricks workspace"):
            tracing_store.search_datasets(experiment_ids=["exp_1"])
def test_search_datasets_missing_required_field():
    """A backend dataset entry without a required field fails with a clear error."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response_data = {
        "datasets": [
            {
                "dataset_id": "dataset_1",
                "digest": "abc123",
                "create_time": "2025-11-28T21:30:53.195Z",
                "last_update_time": "2025-11-28T21:30:53.195Z",
                # missing 'name' field
            }
        ]
    }
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=mock.Mock(json=lambda: response_data),
        ) as mock_http,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        with pytest.raises(MlflowException, match="missing required field"):
            store.search_datasets(experiment_ids=["exp_1"])
        # The request must have gone out before parsing failed.
        mock_http.assert_called_once()
def test_search_datasets_invalid_timestamp():
    """An unparseable timestamp in a dataset entry raises an invalid-format error."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response_data = {
        "datasets": [
            {
                "dataset_id": "dataset_1",
                "name": "test_dataset",
                "digest": "abc123",
                "create_time": "invalid-timestamp",
                "last_update_time": "2025-11-28T21:30:53.195Z",
            }
        ]
    }
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=mock.Mock(json=lambda: response_data),
        ) as mock_http,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        with pytest.raises(MlflowException, match="invalid timestamp format"):
            store.search_datasets(experiment_ids=["exp_1"])
        # The request must have gone out before timestamp parsing failed.
        mock_http.assert_called_once()
@pytest.mark.parametrize(
    ("token_str", "expected_backend_token", "expected_offset"),
    [
        ("simple_token", "simple_token", 0),
        (
            f"{base64.b64encode(b'backend_token_123').decode('utf-8')}:5",
            "backend_token_123",
            5,
        ),
        (None, None, 0),
        (":10", None, 10),
    ],
)
def test_composite_token_parsing(token_str, expected_backend_token, expected_offset):
    """CompositeToken.parse splits a `<b64 backend token>:<offset>` string correctly."""
    parsed = CompositeToken.parse(token_str)
    assert (parsed.backend_token, parsed.offset) == (expected_backend_token, expected_offset)
def test_search_datasets_multi_page_aggregation():
    """search_datasets keeps fetching backend pages until max_results items are collected.

    Page 1 yields 2 datasets, page 2 is empty, page 3 yields 8 more; with
    max_results=5 the store must call the backend 3 times and return datasets 1-5.
    """
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    responses = [
        {
            "datasets": [
                {
                    "dataset_id": "dataset_1",
                    "name": "test_dataset_1",
                    "digest": "abc123",
                    "create_time": "2025-11-28T21:30:53.195Z",
                    "last_update_time": "2025-11-28T21:30:53.195Z",
                },
                {
                    "dataset_id": "dataset_2",
                    "name": "test_dataset_2",
                    "digest": "def456",
                    "create_time": "2025-11-28T21:30:53.195Z",
                    "last_update_time": "2025-11-28T21:30:53.195Z",
                },
            ],
            "next_page_token": "token1",
        },
        {"datasets": [], "next_page_token": "token2"},
        {
            "datasets": [
                {
                    "dataset_id": f"dataset_{i}",
                    "name": f"test_dataset_{i}",
                    "digest": f"hash{i}",
                    "create_time": "2025-11-28T21:30:53.195Z",
                    "last_update_time": "2025-11-28T21:30:53.195Z",
                }
                for i in range(3, 11)
            ],
            "next_page_token": "token3",
        },
    ]
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            # Bind each response via a default argument so the lambdas aren't late-bound.
            side_effect=[mock.Mock(json=lambda r=r: r) for r in responses],
        ) as mock_http,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        result = store.search_datasets(experiment_ids=["exp_1"], max_results=5)
        assert mock_http.call_count == 3
        assert {d.name for d in result} == {
            "test_dataset_1",
            "test_dataset_2",
            "test_dataset_3",
            "test_dataset_4",
            "test_dataset_5",
        }
def test_search_datasets_resume_from_composite_token():
    """Resuming with a composite token skips the first `offset` rows of the backend page.

    The backend returns 15 datasets; a composite token with offset=5 means the first
    5 were already delivered, so max_results=10 yields datasets 6-15.
    """
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response_data = {
        "datasets": [
            {
                "dataset_id": f"dataset_{i}",
                "name": f"test_dataset_{i}",
                "digest": f"hash{i}",
                "create_time": "2025-11-28T21:30:53.195Z",
                "last_update_time": "2025-11-28T21:30:53.195Z",
            }
            for i in range(1, 16)
        ],
        "next_page_token": "backend_token_B",
    }
    composite_token = CompositeToken(backend_token="backend_token_A", offset=5).encode()
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=mock.Mock(json=lambda: response_data),
        ),
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        result = store.search_datasets(
            experiment_ids=["exp_1"], max_results=10, page_token=composite_token
        )
        assert {d.name for d in result} == {f"test_dataset_{i}" for i in range(6, 16)}
def test_search_datasets_exact_match_no_offset():
    """When a page holds exactly max_results items, the next token carries offset=0."""
    creds = MlflowHostCreds("https://hello")
    store = DatabricksTracingRestStore(lambda: creds)
    response_data = {
        "datasets": [
            {
                "dataset_id": f"dataset_{i}",
                "name": f"test_dataset_{i}",
                "digest": f"hash{i}",
                "create_time": "2025-11-28T21:30:53.195Z",
                "last_update_time": "2025-11-28T21:30:53.195Z",
            }
            for i in range(1, 11)  # Exactly 10 datasets
        ],
        "next_page_token": "backend_token_next",
    }
    with (
        mock.patch(
            "mlflow.store.tracking.databricks_rest_store.http_request",
            return_value=mock.Mock(json=lambda: response_data),
        ) as mock_http,
        mock.patch("mlflow.store.tracking.databricks_rest_store.verify_rest_response"),
    ):
        result = store.search_datasets(experiment_ids=["exp_1"], max_results=10)
        # Should return exactly 10 datasets
        assert {d.name for d in result} == {f"test_dataset_{i}" for i in range(1, 11)}
        # Token is the backend token, parseable as composite token with offset=0
        parsed = CompositeToken.parse(result.token)
        assert parsed.backend_token == "backend_token_next"
        assert parsed.offset == 0  # No offset needed for exact match
        mock_http.assert_called_once()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/store/tracking/test_databricks_rest_store.py",
"license": "Apache License 2.0",
"lines": 1709,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/export/test_uc_table_exporter.py | import time
from concurrent.futures import ThreadPoolExecutor
from unittest import mock
import pytest
from mlflow.entities.span import Span
from mlflow.tracing.export.uc_table import DatabricksUCTableSpanExporter
from mlflow.tracing.trace_manager import InMemoryTraceManager
from mlflow.tracing.utils import generate_trace_id_v4
from tests.tracing.helper import (
create_mock_otel_span,
create_test_trace_info_with_uc_table,
)
@pytest.mark.parametrize("is_async", [True, False], ids=["async", "sync"])
def test_export_spans_to_uc_table(is_async, monkeypatch):
    """Exported spans are logged to the active UC spans table in both sync and async modes."""
    monkeypatch.setenv("MLFLOW_ENABLE_ASYNC_TRACE_LOGGING", str(is_async))
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "1")  # no batch
    trace_manager = InMemoryTraceManager.get_instance()
    mock_client = mock.MagicMock()
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock_client
    otel_span = create_mock_otel_span(trace_id=12345, span_id=1)
    trace_id = generate_trace_id_v4(otel_span, "catalog.schema")
    span = Span(otel_span)
    # Create trace info with UC table
    trace_info = create_test_trace_info_with_uc_table(trace_id, "catalog", "schema")
    trace_manager.register_trace(otel_span.context.trace_id, trace_info)
    trace_manager.register_span(span)
    # Export the span
    with mock.patch(
        "mlflow.tracing.export.uc_table.get_active_spans_table_name",
        return_value="catalog.schema.spans",
    ):
        exporter.export([otel_span])
        if is_async:
            # For async tests, we need to flush the specific exporter's queue
            exporter._async_queue.flush(terminate=True)
    # Verify UC table logging was called
    mock_client.log_spans.assert_called_once()
    args = mock_client.log_spans.call_args
    assert args[0][0] == "catalog.schema.spans"
    assert len(args[0][1]) == 1
    assert isinstance(args[0][1][0], Span)
    assert args[0][1][0].to_dict() == span.to_dict()
def test_log_trace_no_upload_data_for_uc_schema():
    """When the trace lives in a UC schema, _log_trace must skip the data upload."""
    client = mock.MagicMock()
    trace_info = mock.MagicMock()
    trace_info.trace_location.uc_schema = mock.MagicMock()  # UC-backed location
    client.start_trace.return_value = trace_info
    trace = mock.MagicMock()
    trace.info = mock.MagicMock()
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = client
    with mock.patch("mlflow.tracing.utils.add_size_stats_to_trace_metadata"):
        exporter._log_trace(trace, [])
    # start_trace is called, but no separate trace-data upload happens.
    client.start_trace.assert_called_once_with(trace.info)
    client._upload_trace_data.assert_not_called()
def test_log_trace_no_log_spans_if_no_uc_schema():
    """Without a UC schema location, spans must not be written to a UC table."""
    mock_client = mock.MagicMock()
    # Mock trace info without UC schema
    mock_trace_info = mock.MagicMock()
    mock_trace_info.trace_location.uc_schema = None
    mock_client.start_trace.return_value = mock_trace_info
    mock_trace = mock.MagicMock()
    mock_trace.info = mock.MagicMock()
    mock_trace.data = mock.MagicMock()
    mock_prompts = []
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock_client
    with mock.patch("mlflow.tracing.utils.add_size_stats_to_trace_metadata"):
        exporter._log_trace(mock_trace, mock_prompts)
    # Verify start_trace was called but spans were NOT logged to a UC table
    mock_client.start_trace.assert_called_once_with(mock_trace.info)
    mock_client.log_spans.assert_not_called()
def test_export_spans_batch_max_size(monkeypatch):
    """Spans are buffered until the batch reaches the configured max size (5), then flushed."""
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "5")
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS", "10000")
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock.MagicMock()
    with mock.patch(
        "mlflow.tracing.export.uc_table.get_active_spans_table_name",
        return_value="catalog.schema.spans",
    ):
        # Four spans: below the batch size, so nothing is logged yet.
        exporter._export_spans_incrementally(
            [
                create_mock_otel_span(trace_id=12345, span_id=1),
                create_mock_otel_span(trace_id=12345, span_id=2),
                create_mock_otel_span(trace_id=12345, span_id=3),
                create_mock_otel_span(trace_id=12345, span_id=4),
            ]
        )
        exporter._client.log_spans.assert_not_called()
        # The fifth span fills the batch and triggers the flush.
        exporter._export_spans_incrementally([create_mock_otel_span(trace_id=12345, span_id=5)])
        # NB: There can be a tiny delay once the batch becomes full and the worker thread
        # is interrupted by the threading event and activate the async queue. Flush has to
        # happen after the activation.
        time.sleep(1)
        exporter._async_queue.flush()
    exporter._client.log_spans.assert_called_once()
    location, spans = exporter._client.log_spans.call_args[0]
    assert location == "catalog.schema.spans"
    assert len(spans) == 5
    assert all(isinstance(span, Span) for span in spans)
def test_export_spans_batch_flush_on_interval(monkeypatch):
    """A partial batch is flushed once the configured interval (1s) elapses."""
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "10")
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS", "1000")
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock.MagicMock()
    span = create_mock_otel_span(trace_id=12345, span_id=1)
    with mock.patch(
        "mlflow.tracing.export.uc_table.get_active_spans_table_name",
        return_value="catalog.schema.spans",
    ):
        exporter._export_spans_incrementally([span])
        # Wait long enough for the batcher's interval timer to fire.
        time.sleep(1.5)
        exporter._client.log_spans.assert_called_once()
        table_name, exported = exporter._client.log_spans.call_args[0]
        assert table_name == "catalog.schema.spans"
        assert len(exported) == 1
def test_export_spans_batch_shutdown(monkeypatch):
    """flush() drains any partially-filled batch to the UC spans table."""
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "10")
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS", "1000")
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock.MagicMock()
    pending = [create_mock_otel_span(trace_id=12345, span_id=sid) for sid in (1, 2, 3)]
    with mock.patch(
        "mlflow.tracing.export.uc_table.get_active_spans_table_name",
        return_value="catalog.schema.spans",
    ):
        exporter._export_spans_incrementally(pending)
        exporter.flush()
    exporter._client.log_spans.assert_called_once()
    table_name, exported = exporter._client.log_spans.call_args[0]
    assert table_name == "catalog.schema.spans"
    assert len(exported) == 3
def test_export_spans_batch_thread_safety(monkeypatch):
    """Concurrent incremental exports from 5 threads batch correctly (2x10 + 1x5 spans)."""
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "10")
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS", "1000")
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock.MagicMock()

    def _generate_spans():
        exporter._export_spans_incrementally(
            [create_mock_otel_span(trace_id=12345, span_id=i) for i in range(5)]
        )

    with mock.patch(
        "mlflow.tracing.export.uc_table.get_active_spans_table_name",
        return_value="catalog.schema.spans",
    ):
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(_generate_spans) for _ in range(5)]
            for future in futures:
                future.result()
        exporter.flush()
    # 25 spans with batch size 10 -> two full batches of 10 plus a final batch of 5.
    assert exporter._client.log_spans.call_count == 3
    for i in range(3):
        location, spans = exporter._client.log_spans.call_args_list[i][0]
        assert location == "catalog.schema.spans"
        # BUG FIX: the original `assert len(spans) == 10 if i < 2 else 5` parsed as
        # `(len(spans) == 10) if i < 2 else 5`, so for i == 2 the assert evaluated the
        # bare (always-truthy) `5` and never checked the last batch's size.
        expected_size = 10 if i < 2 else 5
        assert len(spans) == expected_size, f"Batch {i} had {len(spans)} spans"
def test_export_spans_batch_split_spans_by_location(monkeypatch):
    """Spans destined for different UC tables are batched and logged separately."""
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "10")
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS", "1000")
    exporter = DatabricksUCTableSpanExporter()
    exporter._client = mock.MagicMock()

    def _export_to(table_name, span_ids):
        # Route the given span IDs to `table_name` by patching the active table lookup.
        with mock.patch(
            "mlflow.tracing.export.uc_table.get_active_spans_table_name",
            return_value=table_name,
        ):
            exporter._export_spans_incrementally(
                [create_mock_otel_span(trace_id=12345, span_id=sid) for sid in span_ids]
            )

    _export_to("catalog.schema.table_1", [1, 2])
    _export_to("catalog.schema.table_2", [3, 4, 5])
    exporter.flush()
    assert exporter._client.log_spans.call_count == 2
    first_call, second_call = exporter._client.log_spans.call_args_list
    loc1, spans1 = first_call[0]
    assert loc1 == "catalog.schema.table_1"
    assert len(spans1) == 2
    loc2, spans2 = second_call[0]
    assert loc2 == "catalog.schema.table_2"
    assert len(spans2) == 3
def test_at_exit_callback_registered_in_correct_order(monkeypatch):
    """Atexit hooks must be registered so SpanBatcher shuts down before the async queue.

    atexit runs callbacks in last-in-first-out order, so AsyncTraceExportQueue must be
    registered FIRST (and therefore executed last).
    See https://docs.python.org/3/library/atexit.html#atexit.register
    """
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE", "10")
    monkeypatch.setenv("MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS", "1000")
    with mock.patch("atexit.register") as atexit_mock:
        DatabricksUCTableSpanExporter()
    assert atexit_mock.call_count == 2
    registered_handlers = [c[0][0] for c in atexit_mock.call_args_list]
    assert len(registered_handlers) == 2
    owner_classes = [h.__self__.__class__.__name__ for h in registered_handlers]
    assert owner_classes == ["AsyncTraceExportQueue", "SpanBatcher"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/export/test_uc_table_exporter.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/processor/test_uc_table_processor.py | from unittest import mock
import pytest
import mlflow
import mlflow.tracking.context.default_context
from mlflow.entities.span import LiveSpan
from mlflow.entities.trace_location import TraceLocationType, UCSchemaLocation
from mlflow.entities.trace_state import TraceState
from mlflow.environment_variables import MLFLOW_TRACKING_USERNAME
from mlflow.exceptions import MlflowException
from mlflow.tracing.constant import TraceMetadataKey
from mlflow.tracing.processor.uc_table import DatabricksUCTableSpanProcessor
from mlflow.tracing.provider import _MLFLOW_TRACE_USER_DESTINATION
from mlflow.tracing.trace_manager import InMemoryTraceManager
from tests.tracing.helper import (
create_mock_otel_span,
create_test_trace_info,
)
@pytest.fixture
def active_uc_schema_destination():
    """Point the user trace destination at catalog1.schema1 for the test's duration."""
    uc_destination = UCSchemaLocation(catalog_name="catalog1", schema_name="schema1")
    uc_destination._otel_spans_table_name = "spans_table"
    _MLFLOW_TRACE_USER_DESTINATION.set(uc_destination)
    try:
        yield
    finally:
        # Always restore the global destination, even if the test fails.
        _MLFLOW_TRACE_USER_DESTINATION.reset()
def test_on_start_with_uc_table_name(monkeypatch, active_uc_schema_destination):
    """on_start for a root span registers an IN_PROGRESS trace located in the active UC schema."""
    monkeypatch.setattr(mlflow.tracking.context.default_context, "_get_source_name", lambda: "test")
    monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, "alice")
    # Root span should create a new trace on start
    trace_id = 12345
    span = create_mock_otel_span(trace_id=trace_id, span_id=1, parent_id=None, start_time=5_000_000)
    processor = DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock())
    processor.on_start(span)
    # Check that trace was created in trace manager
    trace_manager = InMemoryTraceManager.get_instance()
    traces = trace_manager._traces
    assert len(traces) == 1
    # Get the created trace
    created_trace = list(traces.values())[0]
    trace_info = created_trace.info
    # Verify trace location is UC_SCHEMA type (from the active_uc_schema_destination fixture)
    assert trace_info.trace_location.type == TraceLocationType.UC_SCHEMA
    uc_schema = trace_info.trace_location.uc_schema
    assert uc_schema.catalog_name == "catalog1"
    assert uc_schema.schema_name == "schema1"
    # Verify trace state and timing
    assert trace_info.state == TraceState.IN_PROGRESS
    assert trace_info.request_time == 5  # 5_000_000 nanoseconds -> 5 milliseconds
    assert trace_info.execution_duration is None
def test_on_start_without_uc_table_name(monkeypatch):
    """on_start raises (and registers no trace) when no UC spans table is configured."""
    monkeypatch.setattr(mlflow.tracking.context.default_context, "_get_source_name", lambda: "test")
    monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, "alice")
    root_span = create_mock_otel_span(
        trace_id=12345, span_id=1, parent_id=None, start_time=5_000_000
    )
    _MLFLOW_TRACE_USER_DESTINATION.reset()  # make sure no UC destination is active
    processor = DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock())
    with pytest.raises(MlflowException, match="Unity Catalog spans table name is not set"):
        processor.on_start(root_span)
    # No trace should have been registered.
    assert len(InMemoryTraceManager.get_instance()._traces) == 0
def test_constructor_disables_metrics_export():
    """The UC-table processor is constructed with metrics export turned off."""
    processor = DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock())
    assert not processor._export_metrics
def test_trace_id_generation_with_uc_schema(active_uc_schema_destination):
    """on_start derives the v4 trace ID from the span plus the active UC schema location."""
    root_span = create_mock_otel_span(
        trace_id=12345, span_id=1, parent_id=None, start_time=5_000_000
    )
    with mock.patch(
        "mlflow.tracing.processor.uc_table.generate_trace_id_v4",
        return_value="trace:/catalog1.schema1/12345",
    ) as generate_mock:
        DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock()).on_start(root_span)
    generate_mock.assert_called_once_with(root_span, "catalog1.schema1")
def test_on_end():
    """on_end forwards the finished OTel span to the configured exporter."""
    trace_manager = InMemoryTraceManager.get_instance()
    trace_manager.register_trace("trace_id", create_test_trace_info("request_id", 0))
    otel_span = create_mock_otel_span(
        name="foo",
        trace_id="trace_id",
        span_id=1,
        parent_id=None,
        start_time=5_000_000,
        end_time=9_000_000,
    )
    live_span = LiveSpan(otel_span, "request_id")
    live_span.set_status("OK")
    live_span.set_inputs({"input1": "test input"})
    live_span.set_outputs({"output": "test output"})
    exporter_mock = mock.MagicMock()
    DatabricksUCTableSpanProcessor(span_exporter=exporter_mock).on_end(otel_span)
    # The raw OTel span is exported as a one-element tuple.
    exporter_mock.export.assert_called_once_with((otel_span,))
def test_on_end_sets_user_session_span_attributes():
    """User/session trace metadata is copied onto the span as OTel attributes on end."""
    trace_manager = InMemoryTraceManager.get_instance()
    with mock.patch.object(trace_manager, "pop_trace", return_value=None):
        with mlflow.start_span("foo") as live_span:
            mlflow.update_current_trace(
                metadata={
                    TraceMetadataKey.TRACE_USER: "alice",
                    TraceMetadataKey.TRACE_SESSION: "sess-123",
                }
            )
        otel_span = live_span._span
        DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock()).on_end(otel_span)
        assert otel_span.attributes["user.id"] == "alice"
        assert otel_span.attributes["session.id"] == "sess-123"
def test_on_end_does_not_set_user_session_attributes_when_missing():
    """Without user/session trace metadata, on_end adds no user.id/session.id attributes."""
    trace_manager = InMemoryTraceManager.get_instance()
    with mock.patch.object(trace_manager, "pop_trace", return_value=None):
        with mlflow.start_span("foo") as live_span:
            pass
        otel_span = live_span._span
        DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock()).on_end(otel_span)
        assert "user.id" not in otel_span.attributes
        assert "session.id" not in otel_span.attributes
def test_trace_metadata_and_tags(active_uc_schema_destination):
    """Traces created by on_start carry non-None metadata and tags."""
    root_span = create_mock_otel_span(
        trace_id=12345, span_id=1, parent_id=None, start_time=5_000_000
    )
    DatabricksUCTableSpanProcessor(span_exporter=mock.MagicMock()).on_start(root_span)
    registered = list(InMemoryTraceManager.get_instance()._traces.values())[0]
    assert registered.info.trace_metadata is not None
    assert registered.info.tags is not None
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/processor/test_uc_table_processor.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/test_enablement.py | """
Tests for mlflow.tracing.enablement module
"""
from unittest import mock
import pytest
import mlflow
from mlflow.entities.trace_location import UCSchemaLocation
from mlflow.exceptions import MlflowException
from mlflow.tracing.enablement import (
set_experiment_trace_location,
unset_experiment_trace_location,
)
from tests.tracing.helper import skip_when_testing_trace_sdk
@pytest.fixture
def mock_databricks_tracking_uri():
    """Force mlflow.tracking.get_tracking_uri to report a Databricks URI for the test."""
    with mock.patch("mlflow.tracking.get_tracking_uri", return_value="databricks"):
        yield
@skip_when_testing_trace_sdk
def test_set_experiment_trace_location(mock_databricks_tracking_uri):
    """set_experiment_trace_location delegates to the TracingClient and returns its result."""
    experiment_id = mlflow.create_experiment("test_experiment")
    location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    sql_warehouse_id = "test-warehouse-id"
    with mock.patch("mlflow.tracing.client.TracingClient") as mock_client_class:
        mock_client = mock.MagicMock()
        mock_client_class.return_value = mock_client
        # The client returns a location enriched with the backing table names.
        expected_location = UCSchemaLocation(
            catalog_name="test_catalog",
            schema_name="test_schema",
        )
        expected_location._otel_logs_table_name = "logs_table"
        expected_location._otel_spans_table_name = "spans_table"
        mock_client._set_experiment_trace_location.return_value = expected_location
        result = set_experiment_trace_location(
            location=location,
            experiment_id=experiment_id,
            sql_warehouse_id=sql_warehouse_id,
        )
        mock_client._set_experiment_trace_location.assert_called_once_with(
            location=location,
            experiment_id=experiment_id,
            sql_warehouse_id=sql_warehouse_id,
        )
        assert result == expected_location
def test_set_experiment_trace_location_with_default_experiment(mock_databricks_tracking_uri):
    """Without an explicit experiment ID, the active experiment's ID is used."""
    location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    default_experiment_id = mlflow.set_experiment("test_experiment").experiment_id
    with (
        mock.patch("mlflow.tracing.client.TracingClient") as mock_client_class,
        mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value=default_experiment_id),
    ):
        mock_client = mock.MagicMock()
        mock_client_class.return_value = mock_client
        expected_location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
        mock_client._set_experiment_trace_location.return_value = expected_location
        result = set_experiment_trace_location(location=location)
        # The resolved default experiment ID is forwarded; sql_warehouse_id defaults to None.
        mock_client._set_experiment_trace_location.assert_called_once_with(
            location=location,
            experiment_id=default_experiment_id,
            sql_warehouse_id=None,
        )
        assert result == expected_location
def test_set_experiment_trace_location_no_experiment(mock_databricks_tracking_uri):
    """An explicit error is raised when no experiment ID can be resolved."""
    uc_location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    with mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value=None):
        with pytest.raises(MlflowException, match="Experiment ID is required"):
            set_experiment_trace_location(location=uc_location)
@skip_when_testing_trace_sdk
def test_set_experiment_trace_location_non_existent_experiment(mock_databricks_tracking_uri):
    """Passing an unknown experiment ID surfaces a not-found error."""
    uc_location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    with pytest.raises(MlflowException, match="Could not find experiment with ID"):
        set_experiment_trace_location(location=uc_location, experiment_id="12345")
def test_unset_experiment_trace_location(mock_databricks_tracking_uri):
    """unset_experiment_trace_location delegates to TracingClient with (experiment_id, location)."""
    exp_id = "123"
    uc_location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    with mock.patch("mlflow.tracing.client.TracingClient") as client_cls:
        client = mock.MagicMock()
        client_cls.return_value = client
        unset_experiment_trace_location(
            location=uc_location,
            experiment_id=exp_id,
        )
    client._unset_experiment_trace_location.assert_called_once_with(exp_id, uc_location)
def test_unset_experiment_trace_location_errors(mock_databricks_tracking_uri):
    # A plain string is not a valid location object.
    with pytest.raises(MlflowException, match="must be an instance of"):
        unset_experiment_trace_location(location="test_catalog.test_schema")
    # Missing experiment ID (no active experiment) is also rejected.
    with (
        mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value=None),
        pytest.raises(MlflowException, match="Experiment ID is required"),
    ):
        unset_experiment_trace_location(location=UCSchemaLocation("test_catalog", "test_schema"))
def test_unset_experiment_trace_location_with_default_experiment(mock_databricks_tracking_uri):
    # When no experiment_id is given, the active experiment's ID is used.
    experiment_id = "456"
    location = UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    with (
        mock.patch("mlflow.tracing.client.TracingClient") as client_cls,
        mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value=experiment_id),
    ):
        tracing_client = mock.MagicMock()
        client_cls.return_value = tracing_client
        unset_experiment_trace_location(location)
        tracing_client._unset_experiment_trace_location.assert_called_once_with(
            experiment_id, location
        )
def test_non_databricks_tracking_uri_errors():
    # Both APIs are Databricks-only and must fail fast elsewhere.
    location = UCSchemaLocation("test_catalog", "test_schema")
    with pytest.raises(
        MlflowException,
        match="The `set_experiment_trace_location` API is only supported on Databricks.",
    ):
        set_experiment_trace_location(location=location)
    with pytest.raises(
        MlflowException,
        match="The `unset_experiment_trace_location` API is only supported on Databricks.",
    ):
        unset_experiment_trace_location(location=location)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/test_enablement.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/utils/test_databricks_tracing_utils.py | import json
import pytest
from google.protobuf.timestamp_pb2 import Timestamp
import mlflow
from mlflow.entities import (
AssessmentSource,
Expectation,
Feedback,
Trace,
TraceData,
TraceInfo,
TraceState,
)
from mlflow.entities.trace_location import (
InferenceTableLocation,
MlflowExperimentLocation,
TraceLocation,
TraceLocationType,
UCSchemaLocation,
UnityCatalog,
)
from mlflow.protos import assessments_pb2
from mlflow.protos import databricks_tracing_pb2 as pb
from mlflow.protos.assessments_pb2 import AssessmentSource as ProtoAssessmentSource
from mlflow.tracing.constant import (
TRACE_ID_V4_PREFIX,
TRACE_SCHEMA_VERSION,
TRACE_SCHEMA_VERSION_KEY,
SpanAttributeKey,
)
from mlflow.tracing.utils import TraceMetadataKey, add_size_stats_to_trace_metadata
from mlflow.utils.databricks_tracing_utils import (
assessment_to_proto,
get_trace_id_from_assessment_proto,
inference_table_location_to_proto,
mlflow_experiment_location_to_proto,
parse_uc_location,
trace_from_proto,
trace_location_from_proto,
trace_location_to_proto,
trace_to_proto,
uc_location_to_str,
uc_schema_location_from_proto,
uc_schema_location_to_proto,
)
def test_trace_location_to_proto_uc_schema():
    # UC-schema locations map to the UC_SCHEMA proto variant.
    loc = TraceLocation.from_databricks_uc_schema(
        catalog_name="test_catalog", schema_name="test_schema"
    )
    msg = trace_location_to_proto(loc)
    assert msg.type == pb.TraceLocation.TraceLocationType.UC_SCHEMA
    assert msg.uc_schema.catalog_name == "test_catalog"
    assert msg.uc_schema.schema_name == "test_schema"
def test_parse_uc_location():
    # Two- and three-part dotted names parse; longer names are invalid.
    two_part = parse_uc_location("catalog.schema")
    assert two_part == ("catalog", "schema", None)
    three_part = parse_uc_location("catalog.schema.prefix")
    assert three_part == ("catalog", "schema", "prefix")
    with pytest.raises(ValueError, match="Invalid UC location"):
        parse_uc_location("a.b.c.d")
def test_uc_location_to_str():
    # Rendering is the inverse of parse_uc_location for valid inputs.
    rendered = uc_location_to_str("catalog", "schema")
    assert rendered == "catalog.schema"
    rendered_with_prefix = uc_location_to_str("catalog", "schema", "prefix")
    assert rendered_with_prefix == "catalog.schema.prefix"
def test_trace_location_to_proto_mlflow_experiment():
    # Experiment-backed locations map to the MLFLOW_EXPERIMENT proto variant.
    msg = trace_location_to_proto(TraceLocation.from_experiment_id(experiment_id="1234"))
    assert msg.type == pb.TraceLocation.TraceLocationType.MLFLOW_EXPERIMENT
    assert msg.mlflow_experiment.experiment_id == "1234"
def test_trace_location_to_proto_inference_table():
    # Inference-table locations keep the fully qualified table name.
    table = InferenceTableLocation(full_table_name="test_catalog.test_schema.test_table")
    loc = TraceLocation(type=TraceLocationType.INFERENCE_TABLE, inference_table=table)
    msg = trace_location_to_proto(loc)
    assert msg.type == pb.TraceLocation.TraceLocationType.INFERENCE_TABLE
    assert msg.inference_table.full_table_name == "test_catalog.test_schema.test_table"
def test_uc_schema_location_to_proto():
    # Basic catalog/schema fields survive conversion to proto.
    msg = uc_schema_location_to_proto(
        UCSchemaLocation(catalog_name="test_catalog", schema_name="test_schema")
    )
    assert msg.catalog_name == "test_catalog"
    assert msg.schema_name == "test_schema"
def test_uc_schema_location_from_proto():
    # Table names come back fully qualified with catalog and schema.
    msg = pb.UCSchemaLocation(
        catalog_name="test_catalog",
        schema_name="test_schema",
        otel_spans_table_name="test_spans",
        otel_logs_table_name="test_logs",
    )
    loc = uc_schema_location_from_proto(msg)
    assert loc.catalog_name == "test_catalog"
    assert loc.schema_name == "test_schema"
    assert loc.full_otel_spans_table_name == "test_catalog.test_schema.test_spans"
    assert loc.full_otel_logs_table_name == "test_catalog.test_schema.test_logs"
def test_inference_table_location_to_proto():
    loc = InferenceTableLocation(full_table_name="test_catalog.test_schema.test_table")
    msg = inference_table_location_to_proto(loc)
    assert msg.full_table_name == "test_catalog.test_schema.test_table"
def test_mlflow_experiment_location_to_proto():
    loc = MlflowExperimentLocation(experiment_id="1234")
    msg = mlflow_experiment_location_to_proto(loc)
    assert msg.experiment_id == "1234"
def test_schema_location_to_proto():
    # Private table-name overrides must be carried into the proto.
    loc = UCSchemaLocation(
        catalog_name="test_catalog",
        schema_name="test_schema",
    )
    loc._otel_spans_table_name = "test_spans"
    loc._otel_logs_table_name = "test_logs"
    msg = uc_schema_location_to_proto(loc)
    assert msg.catalog_name == "test_catalog"
    assert msg.schema_name == "test_schema"
    assert msg.otel_spans_table_name == "test_spans"
    assert msg.otel_logs_table_name == "test_logs"
def test_trace_location_from_proto_uc_schema():
    # Deserialization re-qualifies the OTel table names with catalog.schema.
    msg = pb.TraceLocation(
        type=pb.TraceLocation.TraceLocationType.UC_SCHEMA,
        uc_schema=pb.UCSchemaLocation(
            catalog_name="catalog",
            schema_name="schema",
            otel_spans_table_name="spans",
            otel_logs_table_name="logs",
        ),
    )
    uc = trace_location_from_proto(msg).uc_schema
    assert uc.catalog_name == "catalog"
    assert uc.schema_name == "schema"
    assert uc.full_otel_spans_table_name == "catalog.schema.spans"
    assert uc.full_otel_logs_table_name == "catalog.schema.logs"
def test_trace_location_from_proto_mlflow_experiment():
    msg = pb.TraceLocation(
        type=pb.TraceLocation.TraceLocationType.MLFLOW_EXPERIMENT,
        mlflow_experiment=mlflow_experiment_location_to_proto(
            MlflowExperimentLocation(experiment_id="1234")
        ),
    )
    loc = trace_location_from_proto(msg)
    assert loc.type == TraceLocationType.MLFLOW_EXPERIMENT
    assert loc.mlflow_experiment.experiment_id == "1234"
def test_trace_location_from_proto_inference_table():
    msg = pb.TraceLocation(
        type=pb.TraceLocation.TraceLocationType.INFERENCE_TABLE,
        inference_table=inference_table_location_to_proto(
            InferenceTableLocation(full_table_name="test_catalog.test_schema.test_table")
        ),
    )
    loc = trace_location_from_proto(msg)
    assert loc.type == TraceLocationType.INFERENCE_TABLE
    assert loc.inference_table.full_table_name == "test_catalog.test_schema.test_table"
def test_trace_info_to_v4_proto():
    # V4 trace IDs embed the UC location; the proto should carry only the
    # bare OTel trace ID while the location moves into trace_location.
    otel_trace_id = "2efb31387ff19263f92b2c0a61b0a8bc"
    trace_id = f"trace:/catalog.schema/{otel_trace_id}"
    trace_info = TraceInfo(
        trace_id=trace_id,
        trace_location=TraceLocation.from_databricks_uc_schema(
            catalog_name="catalog", schema_name="schema"
        ),
        request_time=0,
        state=TraceState.OK,
        request_preview="request",
        response_preview="response",
        client_request_id="client_request_id",
        tags={"key": "value"},
    )
    proto_trace_info = trace_info.to_proto()
    assert proto_trace_info.trace_id == otel_trace_id
    assert proto_trace_info.trace_location.uc_schema.catalog_name == "catalog"
    assert proto_trace_info.trace_location.uc_schema.schema_name == "schema"
    # 1 is the proto enum value for the OK state here.
    assert proto_trace_info.state == 1
    assert proto_trace_info.request_preview == "request"
    assert proto_trace_info.response_preview == "response"
    assert proto_trace_info.client_request_id == "client_request_id"
    assert proto_trace_info.tags == {"key": "value"}
    assert len(proto_trace_info.assessments) == 0
    # Round-trip back to the entity must be lossless.
    trace_info_from_proto = TraceInfo.from_proto(proto_trace_info)
    assert trace_info_from_proto == trace_info
def test_trace_to_proto_and_from_proto():
    # Build a real span so the proto carries genuine OTel identifiers.
    with mlflow.start_span() as span:
        otel_trace_id = span.trace_id.removeprefix("tr-")
        uc_schema = "catalog.schema"
        trace_id = f"trace:/{uc_schema}/{otel_trace_id}"
        span.set_attribute(SpanAttributeKey.REQUEST_ID, trace_id)
    # NOTE(review): indentation was reconstructed — span is assumed to be
    # converted after the context exits, once it is finalized; confirm.
    mlflow_span = span.to_immutable_span()
    assert mlflow_span.trace_id == trace_id
    trace = Trace(
        info=TraceInfo(
            trace_id=trace_id,
            trace_location=TraceLocation.from_databricks_uc_schema(
                catalog_name="catalog", schema_name="schema"
            ),
            request_time=0,
            state=TraceState.OK,
            request_preview="request",
            response_preview="response",
            client_request_id="client_request_id",
            tags={"key": "value"},
        ),
        data=TraceData(spans=[mlflow_span]),
    )
    proto_trace_v4 = trace_to_proto(trace)
    # The proto stores the bare OTel ID plus a structured UC location.
    assert proto_trace_v4.trace_info.trace_id == otel_trace_id
    assert proto_trace_v4.trace_info.trace_location.uc_schema.catalog_name == "catalog"
    assert proto_trace_v4.trace_info.trace_location.uc_schema.schema_name == "schema"
    assert len(proto_trace_v4.spans) == len(trace.data.spans)
    # Reconstructing with the location ID restores the v4 trace ID.
    reconstructed_trace = trace_from_proto(proto_trace_v4, location_id="catalog.schema")
    assert reconstructed_trace.info.trace_id == trace_id
    assert reconstructed_trace.info.trace_location.uc_schema.catalog_name == "catalog"
    assert reconstructed_trace.info.trace_location.uc_schema.schema_name == "schema"
    assert len(reconstructed_trace.data.spans) == len(trace.data.spans)
    # Span-level fields must survive the round trip too.
    original_span = trace.data.spans[0]
    reconstructed_span = reconstructed_trace.data.spans[0]
    assert reconstructed_span.name == original_span.name
    assert reconstructed_span.span_id == original_span.span_id
    assert reconstructed_span.trace_id == original_span.trace_id
    assert reconstructed_span.inputs == original_span.inputs
    assert reconstructed_span.outputs == original_span.outputs
    assert reconstructed_span.get_attribute("custom") == original_span.get_attribute("custom")
def test_trace_from_proto_with_location_preserves_v4_trace_id():
    with mlflow.start_span() as span:
        otel_trace_id = span.trace_id.removeprefix("tr-")
        uc_schema = "catalog.schema"
        trace_id_v4 = f"{TRACE_ID_V4_PREFIX}{uc_schema}/{otel_trace_id}"
        span.set_attribute(SpanAttributeKey.REQUEST_ID, trace_id_v4)
    # NOTE(review): indentation was reconstructed — span is assumed to be
    # converted after the context exits; confirm.
    mlflow_span = span.to_immutable_span()
    # Create trace with v4 trace ID
    trace = Trace(
        info=TraceInfo(
            trace_id=trace_id_v4,
            trace_location=TraceLocation.from_databricks_uc_schema(
                catalog_name="catalog", schema_name="schema"
            ),
            request_time=0,
            state=TraceState.OK,
        ),
        data=TraceData(spans=[mlflow_span]),
    )
    # Convert to proto
    proto_trace = trace_to_proto(trace)
    # Reconstruct with location parameter
    reconstructed_trace = trace_from_proto(proto_trace, location_id=uc_schema)
    # Verify that all spans have the correct v4 trace_id format
    for reconstructed_span in reconstructed_trace.data.spans:
        assert reconstructed_span.trace_id == trace_id_v4
        assert reconstructed_span.trace_id.startswith(TRACE_ID_V4_PREFIX)
        # Verify the REQUEST_ID attribute is also in v4 format
        request_id = reconstructed_span.get_attribute("mlflow.traceRequestId")
        assert request_id == trace_id_v4
def test_trace_info_from_proto_handles_uc_schema_location():
    # from_proto should accept a UC-schema location and keep metadata/tags.
    ts = Timestamp()
    ts.FromMilliseconds(1234567890)
    msg = pb.TraceInfo(
        trace_id="test_trace_id",
        trace_location=trace_location_to_proto(
            TraceLocation.from_databricks_uc_schema(catalog_name="catalog", schema_name="schema")
        ),
        request_preview="test request",
        response_preview="test response",
        request_time=ts,
        state=TraceState.OK.to_proto(),
        trace_metadata={
            TRACE_SCHEMA_VERSION_KEY: str(TRACE_SCHEMA_VERSION),
            "other_key": "other_value",
        },
        tags={"test_tag": "test_value"},
    )
    info = TraceInfo.from_proto(msg)
    assert info.trace_location.uc_schema.catalog_name == "catalog"
    assert info.trace_location.uc_schema.schema_name == "schema"
    assert info.trace_metadata[TRACE_SCHEMA_VERSION_KEY] == str(TRACE_SCHEMA_VERSION)
    assert info.trace_metadata["other_key"] == "other_value"
    assert info.tags == {"test_tag": "test_value"}
def test_add_size_stats_to_trace_metadata_for_v4_trace():
    # Size stats should be computed even when spans carry a v4 trace ID.
    with mlflow.start_span() as span:
        otel_trace_id = span.trace_id.removeprefix("tr-")
        uc_schema = "catalog.schema"
        trace_id = f"trace:/{uc_schema}/{otel_trace_id}"
        span.set_attribute(SpanAttributeKey.REQUEST_ID, trace_id)
    # NOTE(review): indentation was reconstructed — span is assumed to be
    # converted after the context exits; confirm.
    mlflow_span = span.to_immutable_span()
    trace = Trace(
        info=TraceInfo(
            trace_id="test_trace_id",
            trace_location=TraceLocation.from_databricks_uc_schema(
                catalog_name="catalog", schema_name="schema"
            ),
            request_time=0,
            state=TraceState.OK,
            request_preview="request",
            response_preview="response",
            client_request_id="client_request_id",
            tags={"key": "value"},
        ),
        data=TraceData(spans=[mlflow_span]),
    )
    add_size_stats_to_trace_metadata(trace)
    assert TraceMetadataKey.SIZE_STATS in trace.info.trace_metadata
def test_assessment_to_proto():
    # Test with Feedback assessment
    feedback = Feedback(
        name="correctness",
        value=0.95,
        source=AssessmentSource(source_type="LLM_JUDGE", source_id="gpt-4"),
        trace_id="trace:/catalog.schema/trace123",
        metadata={"model": "gpt-4", "temperature": "0.7"},
        span_id="span456",
        rationale="The response is accurate and complete",
        overrides="old_assessment_id",
        valid=False,
    )
    feedback.assessment_id = "assessment789"
    proto_v4 = assessment_to_proto(feedback)
    # Validate proto structure
    assert isinstance(proto_v4, pb.Assessment)
    assert proto_v4.assessment_name == "correctness"
    assert proto_v4.assessment_id == "assessment789"
    assert proto_v4.span_id == "span456"
    assert proto_v4.rationale == "The response is accurate and complete"
    assert proto_v4.overrides == "old_assessment_id"
    assert proto_v4.valid is False
    # Check TraceIdentifier: the UC location is split out of the v4 trace ID
    assert proto_v4.trace_id == "trace123"
    assert proto_v4.trace_location.uc_schema.catalog_name == "catalog"
    assert proto_v4.trace_location.uc_schema.schema_name == "schema"
    # Check source
    assert proto_v4.source.source_type == ProtoAssessmentSource.SourceType.Value("LLM_JUDGE")
    assert proto_v4.source.source_id == "gpt-4"
    # Check metadata
    assert proto_v4.metadata["model"] == "gpt-4"
    assert proto_v4.metadata["temperature"] == "0.7"
    # Check feedback value
    assert proto_v4.HasField("feedback")
    assert proto_v4.feedback.value.number_value == 0.95
    # Test with Expectation assessment
    expectation = Expectation(
        name="expected_answer",
        value={"answer": "Paris", "confidence": 0.99},
        source=AssessmentSource(source_type="HUMAN", source_id="user@example.com"),
        trace_id="trace:/main.default/trace789",
        metadata={"question": "What is the capital of France?"},
        span_id="span111",
    )
    expectation.assessment_id = "exp_assessment123"
    proto_v4_exp = assessment_to_proto(expectation)
    assert isinstance(proto_v4_exp, pb.Assessment)
    assert proto_v4_exp.assessment_name == "expected_answer"
    assert proto_v4_exp.assessment_id == "exp_assessment123"
    assert proto_v4_exp.span_id == "span111"
    # Check TraceIdentifier for expectation
    assert proto_v4_exp.trace_id == "trace789"
    assert proto_v4_exp.trace_location.uc_schema.catalog_name == "main"
    assert proto_v4_exp.trace_location.uc_schema.schema_name == "default"
    # Check expectation value: complex values are JSON-serialized
    assert proto_v4_exp.HasField("expectation")
    assert proto_v4_exp.expectation.HasField("serialized_value")
    assert json.loads(proto_v4_exp.expectation.serialized_value.value) == {
        "answer": "Paris",
        "confidence": 0.99,
    }
def test_get_trace_id_from_assessment_proto():
    # With a UC-schema location the trace ID is rebuilt in v4 form.
    v4_msg = pb.Assessment(
        trace_id="1234",
        trace_location=trace_location_to_proto(
            TraceLocation.from_databricks_uc_schema(catalog_name="catalog", schema_name="schema")
        ),
    )
    assert get_trace_id_from_assessment_proto(v4_msg) == "trace:/catalog.schema/1234"
    # Without a location the stored ID is returned untouched.
    legacy_msg = assessments_pb2.Assessment(
        trace_id="tr-123",
    )
    assert get_trace_id_from_assessment_proto(legacy_msg) == "tr-123"
def test_trace_location_uc_table_prefix_proto_round_trip():
    # UC table-prefix locations (including resolved table names) must
    # survive a to-proto / from-proto round trip.
    uc_loc = UnityCatalog(
        catalog_name="catalog",
        schema_name="schema",
        table_prefix="prefix",
    )
    uc_loc._otel_spans_table_name = "catalog.schema.prefix_otel_spans"
    uc_loc._otel_logs_table_name = "catalog.schema.prefix_otel_logs"
    uc_loc._annotations_table_name = "catalog.schema.prefix_otel_annotations"
    msg = trace_location_to_proto(
        TraceLocation(type=TraceLocationType.UC_TABLE_PREFIX, uc_table_prefix=uc_loc)
    )
    assert msg.type == pb.TraceLocation.TraceLocationType.UC_TABLE_PREFIX
    assert msg.uc_table_prefix.catalog_name == "catalog"
    assert msg.uc_table_prefix.schema_name == "schema"
    assert msg.uc_table_prefix.table_prefix == "prefix"
    assert msg.uc_table_prefix.spans_table_name == "catalog.schema.prefix_otel_spans"
    assert msg.uc_table_prefix.logs_table_name == "catalog.schema.prefix_otel_logs"
    assert msg.uc_table_prefix.annotations_table_name == "catalog.schema.prefix_otel_annotations"
    round_tripped = trace_location_from_proto(msg)
    assert round_tripped.type == TraceLocationType.UC_TABLE_PREFIX
    uc = round_tripped.uc_table_prefix
    assert uc.catalog_name == "catalog"
    assert uc.schema_name == "schema"
    assert uc.table_prefix == "prefix"
    assert uc.full_otel_spans_table_name == "catalog.schema.prefix_otel_spans"
    assert uc.full_otel_logs_table_name == "catalog.schema.prefix_otel_logs"
    assert uc.full_annotations_table_name == "catalog.schema.prefix_otel_annotations"
def test_trace_info_from_proto_handles_uc_table_prefix_location():
    ts = Timestamp()
    ts.FromMilliseconds(1234567890)
    msg = pb.TraceInfo(
        trace_id="test_trace_id",
        trace_location=trace_location_to_proto(
            TraceLocation.from_databricks_uc_table_prefix(
                catalog_name="catalog", schema_name="schema", table_prefix="prefix"
            )
        ),
        request_preview="test request",
        response_preview="test response",
        request_time=ts,
        state=TraceState.OK.to_proto(),
        trace_metadata={TRACE_SCHEMA_VERSION_KEY: str(TRACE_SCHEMA_VERSION)},
    )
    info = TraceInfo.from_proto(msg)
    # The v4 trace ID is rebuilt from the three-part UC location.
    assert info.trace_id == "trace:/catalog.schema.prefix/test_trace_id"
    assert info.trace_location.type == TraceLocationType.UC_TABLE_PREFIX
    assert info.trace_location.uc_table_prefix.catalog_name == "catalog"
    assert info.trace_location.uc_table_prefix.schema_name == "schema"
    assert info.trace_location.uc_table_prefix.table_prefix == "prefix"
def test_assessment_to_proto_uc_table_prefix():
    # A three-part location in the trace ID maps to the UC_TABLE_PREFIX variant.
    feedback = Feedback(
        name="correctness",
        value=0.95,
        source=AssessmentSource(source_type="LLM_JUDGE", source_id="gpt-4"),
        trace_id="trace:/catalog.schema.prefix/trace123",
    )
    msg = assessment_to_proto(feedback)
    assert msg.trace_id == "trace123"
    assert msg.trace_location.type == pb.TraceLocation.TraceLocationType.UC_TABLE_PREFIX
    assert msg.trace_location.uc_table_prefix.catalog_name == "catalog"
    assert msg.trace_location.uc_table_prefix.schema_name == "schema"
    assert msg.trace_location.uc_table_prefix.table_prefix == "prefix"
def test_get_trace_id_from_assessment_proto_uc_table_prefix():
    msg = pb.Assessment(
        trace_id="1234",
        trace_location=trace_location_to_proto(
            TraceLocation.from_databricks_uc_table_prefix(
                catalog_name="catalog", schema_name="schema", table_prefix="prefix"
            )
        ),
    )
    # The rebuilt v4 ID includes the table-prefix segment.
    assert get_trace_id_from_assessment_proto(msg) == "trace:/catalog.schema.prefix/1234"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/utils/test_databricks_tracing_utils.py",
"license": "Apache License 2.0",
"lines": 440,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/redundant_test_docstring.py | """Rule to detect redundant docstrings in test functions and classes.
This rule flags ALL single-line docstrings in test functions and classes.
Single-line docstrings in tests rarely provide meaningful context and are
typically redundant. Multi-line docstrings are always allowed as they
generally provide meaningful context.
"""
import ast
from clint.rules.base import Rule
class RedundantTestDocstring(Rule):
    """Flags single-line docstrings in test modules, classes, and functions.

    Single-line docstrings in tests rarely add information beyond the test
    name; multi-line docstrings are assumed to carry real context and are
    always allowed.
    """

    @staticmethod
    def _is_test_file(path_name: str) -> bool:
        # Only files named like pytest test modules are checked.
        return path_name.startswith("test_") or path_name.endswith("_test.py")

    @staticmethod
    def _single_line_docstring(body: list[ast.stmt]) -> ast.Constant | None:
        # Returns the docstring node iff `body` starts with a string-constant
        # expression whose raw value contains no newline (i.e. single-line).
        if (
            body
            and isinstance(body[0], ast.Expr)
            and isinstance(body[0].value, ast.Constant)
            and isinstance(body[0].value.value, str)
            and "\n" not in body[0].value.value
        ):
            return body[0].value
        return None

    @staticmethod
    def check(
        node: ast.FunctionDef | ast.AsyncFunctionDef | ast.ClassDef, path_name: str
    ) -> ast.Constant | None:
        """Return the offending docstring node for a test function/class, if any."""
        if not RedundantTestDocstring._is_test_file(path_name):
            return None
        # Only `Test*` classes and `test_*` functions are considered tests.
        if isinstance(node, ast.ClassDef):
            if not node.name.startswith("Test"):
                return None
        elif not node.name.startswith("test_"):
            return None
        return RedundantTestDocstring._single_line_docstring(node.body)

    @staticmethod
    def check_module(module: ast.Module, path_name: str) -> ast.Constant | None:
        """Return the offending module-level docstring node, if any."""
        if not RedundantTestDocstring._is_test_file(path_name):
            return None
        return RedundantTestDocstring._single_line_docstring(module.body)

    def _message(self) -> str:
        return (
            "Single-line docstrings in tests rarely provide meaningful context. "
            "Consider removing it."
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/redundant_test_docstring.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_redundant_test_docstring.py | from pathlib import Path
from clint.config import Config
from clint.linter import lint_file
from clint.rules.redundant_test_docstring import RedundantTestDocstring
def test_redundant_docstrings_are_flagged(index_path: Path) -> None:
    # Mixed fixture: one multi-line docstring (allowed), three single-line
    # docstrings (flagged), one function without a docstring.
    code = '''
def test_feature_a():
    """
    This test verifies that feature A works correctly.
    It has multiple lines of documentation.
    """
    assert True
def test_feature_behavior():
    """Test feature."""
    assert True
def test_c():
    """Test the complex interaction between modules."""
    assert True
def test_validation_logic():
    """Test validation."""
    assert True
def test_feature_d():
    assert True
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_something.py"), code, config, index_path)
    # All single-line docstrings should be flagged
    # (test_feature_behavior, test_c, and test_validation_logic)
    assert len(violations) == 3
    assert all(isinstance(v.rule, RedundantTestDocstring) for v in violations)
def test_docstring_word_overlap(index_path: Path) -> None:
    # Length/overlap of the docstring vs the test name is irrelevant:
    # every single-line docstring is flagged, every multi-line one allowed.
    code = '''
def test_very_long_function_name():
    """Short."""
    assert True
def test_short():
    """This is a much longer docstring than the function name."""
    assert True
def test_data_validation():
    """Test data validation"""
    assert True
def test_multi():
    """Line 1
    Line 2"""
    assert True
def test_foo_bar_baz():
    """Test qux."""
    assert True
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_length.py"), code, config, index_path)
    # All single-line docstrings should be flagged
    # (test_very_long_function_name, test_short, test_data_validation, test_foo_bar_baz)
    assert len(violations) == 4
def test_class_docstrings_follow_same_rules(index_path: Path) -> None:
    # Test* classes obey the same single-line vs multi-line rule.
    code = '''
class TestFeature:
    """
    Tests for the Feature module.
    Includes comprehensive test coverage.
    """
    def test_method(self):
        assert True
class TestFeatureImplementation:
    """Test feature."""
    pass
class TestShort:
    """This is a longer docstring than the class name TestShort."""
    pass
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_classes.py"), code, config, index_path)
    # Both classes with single-line docstrings should be flagged
    assert len(violations) == 2
def test_non_test_files_are_ignored(index_path: Path) -> None:
    # The rule only applies to test modules; regular modules are exempt.
    code = '''
def test_something():
    """Short."""
    assert True
class TestFeature:
    """Test."""
    pass
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("regular_module.py"), code, config, index_path)
    assert len(violations) == 0
def test_supports_test_suffix_files(index_path: Path) -> None:
    # Files named "*_test.py" count as test modules too.
    code = '''
def test_feature_implementation():
    """Test feature."""
    assert True
class TestClassImplementation:
    """Test class."""
    pass
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("module_test.py"), code, config, index_path)
    assert len(violations) == 2
def test_multiline_docstrings_are_always_allowed(index_path: Path) -> None:
    # Any docstring containing a newline is exempt, however compact.
    code = '''def test_with_multiline():
    """
    Multi-line.
    """
    assert True
def test_with_multiline_compact():
    """Line 1
    Line 2"""
    assert True
class TestWithMultilineDoc:
    """
    Multi
    Line
    """
    pass
class TestCompactMultiline:
    """Line1
    Line2"""
    pass
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_multiline.py"), code, config, index_path)
    assert len(violations) == 0
def test_error_message_content(index_path: Path) -> None:
    # Both function- and class-level violations carry the same message.
    code = '''def test_data_processing_validation():
    """Test data processing."""
    pass
class TestDataProcessingValidation:
    """Test data processing."""
    pass
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_messages.py"), code, config, index_path)
    assert len(violations) == 2
    func_violation = violations[0]
    assert "rarely provide meaningful context" in func_violation.rule.message
    assert "Consider removing it" in func_violation.rule.message
    class_violation = violations[1]
    assert "rarely provide meaningful context" in class_violation.rule.message
    assert "Consider removing it" in class_violation.rule.message
def test_module_single_line_docstrings_are_flagged(index_path: Path) -> None:
    # Single-line module docstrings in test files are violations too.
    code = '''"""This is a test module."""
def test_something():
    assert True
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_module.py"), code, config, index_path)
    assert len(violations) == 1
    assert isinstance(violations[0].rule, RedundantTestDocstring)
    assert "rarely provide meaningful context" in violations[0].rule.message
def test_module_multiline_docstrings_are_allowed(index_path: Path) -> None:
    # Multi-line module docstrings are always allowed.
    code = '''"""
This is a test module.
It has multiple lines.
"""
def test_something():
    assert True
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_module.py"), code, config, index_path)
    assert len(violations) == 0
def test_module_without_docstring_is_not_flagged(index_path: Path) -> None:
    # Absence of a module docstring is fine; the rule never requires one.
    code = """def test_something():
    assert True
"""
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("test_module.py"), code, config, index_path)
    assert len(violations) == 0
def test_non_test_module_docstrings_are_ignored(index_path: Path) -> None:
    # Module docstrings outside test files are out of scope.
    code = '''"""This is a regular module."""
def some_function():
    pass
'''
    config = Config(select={RedundantTestDocstring.name})
    violations = lint_file(Path("regular_module.py"), code, config, index_path)
    assert len(violations) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_redundant_test_docstring.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/fastapi_security.py | import logging
from http import HTTPStatus
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.types import ASGIApp
from mlflow.environment_variables import (
MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE,
MLFLOW_SERVER_X_FRAME_OPTIONS,
)
from mlflow.server.security_utils import (
CORS_BLOCKED_MSG,
HEALTH_ENDPOINTS,
INVALID_HOST_MSG,
LOCALHOST_ORIGIN_PATTERNS,
get_allowed_hosts_from_env,
get_allowed_origins_from_env,
get_default_allowed_hosts,
is_allowed_host_header,
is_api_endpoint,
should_block_cors_request,
)
from mlflow.tracing.constant import TRACE_RENDERER_ASSET_PATH
_logger = logging.getLogger(__name__)
class HostValidationMiddleware:
    """ASGI middleware that rejects requests whose Host header is not allowed.

    Matches the incoming Host header against the configured fnmatch-style
    patterns (DNS-rebinding protection). Health endpoints are always passed
    through so probes keep working regardless of configuration.
    """

    def __init__(self, app: ASGIApp, allowed_hosts: list[str]):
        self.app = app
        self.allowed_hosts = allowed_hosts

    async def __call__(self, scope, receive, send):
        # Only HTTP requests carry a Host header worth validating.
        if scope["type"] != "http":
            return await self.app(scope, receive, send)
        if scope["path"] in HEALTH_ENDPOINTS:
            return await self.app(scope, receive, send)
        headers = dict(scope.get("headers", []))
        host = headers.get(b"host", b"").decode("utf-8")
        if not is_allowed_host_header(self.allowed_hosts, host):
            _logger.warning(f"Rejected request with invalid Host header: {host}")
            # Short-circuit with a plain-text 403; the wrapped app never runs.
            # (Previously routed through a nested helper that mutated an
            # already-correct start message — equivalent, now direct.)
            await send(
                {
                    "type": "http.response.start",
                    "status": 403,
                    "headers": [(b"content-type", b"text/plain")],
                }
            )
            await send({"type": "http.response.body", "body": INVALID_HOST_MSG.encode()})
            return
        return await self.app(scope, receive, send)
class SecurityHeadersMiddleware:
    """ASGI middleware that adds security headers to every HTTP response."""

    def __init__(self, app: ASGIApp):
        self.app = app
        # Read once at startup; value like "DENY"/"SAMEORIGIN", or "NONE" to disable.
        self.x_frame_options = MLFLOW_SERVER_X_FRAME_OPTIONS.get()

    async def __call__(self, scope, receive, send):
        if scope["type"] != "http":
            return await self.app(scope, receive, send)

        async def send_wrapper(message):
            if message["type"] == "http.response.start":
                # Work on the header list directly: round-tripping through a
                # dict (the previous implementation) silently drops duplicate
                # header names such as multiple Set-Cookie entries. ASGI header
                # names are lowercase bytes, but lower() defensively.
                managed = {b"x-content-type-options", b"x-frame-options"}
                headers = [
                    (name, value)
                    for name, value in message.get("headers", [])
                    if name.lower() not in managed
                ]
                headers.append((b"x-content-type-options", b"nosniff"))
                # Skip X-Frame-Options for the notebook trace renderer so it can
                # be embedded in an iframe inside notebooks.
                path = scope.get("path", "")
                is_notebook_renderer = path.startswith(TRACE_RENDERER_ASSET_PATH)
                if (
                    self.x_frame_options
                    and self.x_frame_options.upper() != "NONE"
                    and not is_notebook_renderer
                ):
                    headers.append((b"x-frame-options", self.x_frame_options.upper().encode()))
                # CORS preflights on API endpoints should report 204 No Content.
                if (
                    scope["method"] == "OPTIONS"
                    and message.get("status") == 200
                    and is_api_endpoint(scope["path"])
                ):
                    message["status"] = HTTPStatus.NO_CONTENT
                message["headers"] = headers
            await send(message)

        await self.app(scope, receive, send_wrapper)
class CORSBlockingMiddleware:
    """ASGI middleware that rejects disallowed cross-origin API requests."""

    def __init__(self, app: ASGIApp, allowed_origins: list[str]):
        self.app = app
        self.allowed_origins = allowed_origins

    async def __call__(self, scope, receive, send):
        # Pass through anything that is not an HTTP request to an API endpoint.
        if scope["type"] != "http" or not is_api_endpoint(scope["path"]):
            return await self.app(scope, receive, send)
        origin = dict(scope["headers"]).get(b"origin", b"").decode("utf-8")
        if not should_block_cors_request(origin, scope["method"], self.allowed_origins):
            return await self.app(scope, receive, send)
        # Disallowed cross-origin state change: reply 403 and never run the app.
        _logger.warning(f"Blocked cross-origin request from {origin}")
        await send(
            {
                "type": "http.response.start",
                "status": HTTPStatus.FORBIDDEN,
                "headers": [[b"content-type", b"text/plain"]],
            }
        )
        await send(
            {
                "type": "http.response.body",
                "body": CORS_BLOCKED_MSG.encode(),
            }
        )
def get_allowed_hosts() -> list[str]:
    """Return allowed ``Host`` header values, preferring the env-var override."""
    configured = get_allowed_hosts_from_env()
    return configured if configured else get_default_allowed_hosts()
def get_allowed_origins() -> list[str]:
    """Return CORS origins configured via the environment, or an empty list."""
    configured = get_allowed_origins_from_env()
    return configured if configured else []
def init_fastapi_security(app: FastAPI) -> None:
    """
    Initialize security middleware for FastAPI application.

    This configures:
    - Host header validation (DNS rebinding protection) via HostValidationMiddleware
    - CORS protection via CORSMiddleware (plus CORSBlockingMiddleware)
    - Security headers via custom middleware

    Args:
        app: FastAPI application instance.
    """
    # Escape hatch: skip all security middleware when explicitly disabled.
    if MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE.get() == "true":
        return
    app.add_middleware(SecurityHeadersMiddleware)
    allowed_origins = get_allowed_origins()
    if allowed_origins and "*" in allowed_origins:
        # Wildcard mode: allow every origin (insecure; explicit opt-in only).
        app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
            expose_headers=["*"],
        )
    else:
        # Use CORSBlockingMiddleware for blocking CORS requests on the server side,
        # and CORSMiddleware for responding to OPTIONS requests.
        app.add_middleware(CORSBlockingMiddleware, allowed_origins=allowed_origins)
        app.add_middleware(
            CORSMiddleware,
            allow_origins=allowed_origins,
            # Localhost origins (any port) are always permitted via regex.
            allow_origin_regex="|".join(LOCALHOST_ORIGIN_PATTERNS),
            allow_credentials=True,
            allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"],
            allow_headers=["*"],
            expose_headers=["*"],
        )
    allowed_hosts = get_allowed_hosts()
    # A "*" entry means any Host header is acceptable; skip validation entirely.
    if allowed_hosts and "*" not in allowed_hosts:
        app.add_middleware(HostValidationMiddleware, allowed_hosts=allowed_hosts)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/fastapi_security.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/server/security.py | import logging
from http import HTTPStatus
from flask import Flask, Response, request
from flask_cors import CORS
from mlflow.environment_variables import (
MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE,
MLFLOW_SERVER_X_FRAME_OPTIONS,
)
from mlflow.server.security_utils import (
CORS_BLOCKED_MSG,
HEALTH_ENDPOINTS,
INVALID_HOST_MSG,
LOCALHOST_ORIGIN_PATTERNS,
get_allowed_hosts_from_env,
get_allowed_origins_from_env,
get_default_allowed_hosts,
is_allowed_host_header,
is_api_endpoint,
should_block_cors_request,
)
from mlflow.tracing.constant import TRACE_RENDERER_ASSET_PATH
_logger = logging.getLogger(__name__)
def get_allowed_hosts() -> list[str]:
    """Return allowed ``Host`` header values, preferring the env-var override."""
    configured = get_allowed_hosts_from_env()
    return configured if configured else get_default_allowed_hosts()
def get_allowed_origins() -> list[str]:
    """Return CORS origins configured via the environment, or an empty list."""
    configured = get_allowed_origins_from_env()
    return configured if configured else []
def init_security_middleware(app: Flask) -> None:
    """
    Initialize security middleware for Flask application.

    This configures:
    - Host header validation (DNS rebinding protection)
    - CORS protection via Flask-CORS
    - Security headers

    Args:
        app: Flask application instance.
    """
    # Escape hatch: skip all security middleware when explicitly disabled.
    if MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE.get() == "true":
        return
    allowed_origins = get_allowed_origins()
    allowed_hosts = get_allowed_hosts()
    x_frame_options = MLFLOW_SERVER_X_FRAME_OPTIONS.get()
    if allowed_origins and "*" in allowed_origins:
        # Wildcard mode: allow every origin (insecure; explicit opt-in only).
        CORS(app, resources={r"/*": {"origins": "*"}}, supports_credentials=True)
    else:
        # Configured origins plus localhost (any port) are allowed by Flask-CORS.
        cors_origins = (allowed_origins or []) + LOCALHOST_ORIGIN_PATTERNS
        CORS(
            app,
            resources={r"/*": {"origins": cors_origins}},
            supports_credentials=True,
            methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"],
        )
    if allowed_hosts and "*" not in allowed_hosts:
        @app.before_request
        def validate_host():
            # Reject requests whose Host header is not allow-listed (DNS
            # rebinding protection). Health endpoints are exempt so probes work.
            if request.path in HEALTH_ENDPOINTS:
                return None
            if not is_allowed_host_header(allowed_hosts, host := request.headers.get("Host")):
                _logger.warning(f"Rejected request with invalid Host header: {host}")
                return Response(
                    INVALID_HOST_MSG, status=HTTPStatus.FORBIDDEN, mimetype="text/plain"
                )
            return None
    if not (allowed_origins and "*" in allowed_origins):
        @app.before_request
        def block_cross_origin_state_changes():
            # Actively reject (not merely omit CORS headers for) cross-origin
            # state-changing requests to API endpoints.
            if not is_api_endpoint(request.path):
                return None
            origin = request.headers.get("Origin")
            if should_block_cors_request(origin, request.method, allowed_origins):
                _logger.warning(f"Blocked cross-origin request from {origin}")
                return Response(
                    CORS_BLOCKED_MSG, status=HTTPStatus.FORBIDDEN, mimetype="text/plain"
                )
            return None
    @app.after_request
    def add_security_headers(response: Response) -> Response:
        response.headers["X-Content-Type-Options"] = "nosniff"
        # Skip X-Frame-Options for notebook-trace-renderer to allow iframe embedding in Jupyter
        is_notebook_renderer = request.path.startswith(TRACE_RENDERER_ASSET_PATH)
        if x_frame_options and x_frame_options.upper() != "NONE" and not is_notebook_renderer:
            response.headers["X-Frame-Options"] = x_frame_options.upper()
        # Normalize successful CORS preflight responses to 204 No Content.
        if (
            request.method == "OPTIONS"
            and response.status_code == 200
            and is_api_endpoint(request.path)
        ):
            response.status_code = HTTPStatus.NO_CONTENT
            response.data = b""
        return response
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/security.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/server/security_utils.py | """
Shared security utilities for MLflow server middleware.
This module contains common functions used by both Flask and FastAPI
security implementations.
"""
import fnmatch
from urllib.parse import urlparse
from mlflow.environment_variables import (
MLFLOW_SERVER_ALLOWED_HOSTS,
MLFLOW_SERVER_CORS_ALLOWED_ORIGINS,
)
# Security response messages returned in 403 bodies
INVALID_HOST_MSG = "Invalid Host header - possible DNS rebinding attack detected"
CORS_BLOCKED_MSG = "Cross-origin request blocked"
# HTTP methods that modify state (subject to cross-origin blocking)
STATE_CHANGING_METHODS = ["POST", "PUT", "DELETE", "PATCH"]
# Paths exempt from host validation so health probes always succeed
HEALTH_ENDPOINTS = ["/health", "/version"]
# API path prefixes for MLflow endpoints
API_PATH_PREFIX = "/api/"
AJAX_API_PATH_PREFIX = "/ajax-api/"
# Test-only endpoints that should not have CORS blocking
TEST_ENDPOINTS = ["/test", "/api/test"]
# Localhost addresses (Host-header variants vs. origins accepted for CORS)
LOCALHOST_VARIANTS = ["localhost", "127.0.0.1", "[::1]", "0.0.0.0"]
CORS_LOCALHOST_HOSTS = ["localhost", "127.0.0.1", "[::1]", "::1"]
# Second-octet bounds (inclusive start, exclusive end) for 172.16.0.0/12
PRIVATE_172_RANGE_START = 16
PRIVATE_172_RANGE_END = 32
# Regex patterns for localhost origins (any port, http only)
LOCALHOST_ORIGIN_PATTERNS = [
    r"^http://localhost(:[0-9]+)?$",
    r"^http://127\.0\.0\.1(:[0-9]+)?$",
    r"^http://\[::1\](:[0-9]+)?$",
]
def get_localhost_addresses() -> list[str]:
    """Return localhost/loopback host names.

    NOTE: returns the shared ``LOCALHOST_VARIANTS`` list itself, not a copy;
    callers must not mutate it.
    """
    return LOCALHOST_VARIANTS
def get_private_ip_patterns() -> list[str]:
    """
    Build fnmatch-style wildcard patterns covering private address ranges.

    These are the standard RFC-defined private address ranges:
    - RFC 1918 (IPv4): 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16
      https://datatracker.ietf.org/doc/html/rfc1918
    - RFC 4193 (IPv6): fc00::/7
      https://datatracker.ietf.org/doc/html/rfc4193

    Additional references:
    - IANA IPv4 Special-Purpose Address Registry:
      https://www.iana.org/assignments/iana-ipv4-special-registry/
    - IANA IPv6 Special-Purpose Address Registry:
      https://www.iana.org/assignments/iana-ipv6-special-registry/
    """
    patterns = ["192.168.*", "10.*"]
    # 172.16.0.0/12 spans second-octet values [16, 32).
    patterns.extend(
        f"172.{octet}.*" for octet in range(PRIVATE_172_RANGE_START, PRIVATE_172_RANGE_END)
    )
    patterns.extend(["fc00:*", "fd00:*"])
    return patterns
def get_allowed_hosts_from_env() -> list[str] | None:
    """Parse ``MLFLOW_SERVER_ALLOWED_HOSTS`` as a comma-separated list, or None if unset."""
    raw_value = MLFLOW_SERVER_ALLOWED_HOSTS.get()
    if not raw_value:
        return None
    return [entry.strip() for entry in raw_value.split(",")]
def get_allowed_origins_from_env() -> list[str] | None:
    """Parse ``MLFLOW_SERVER_CORS_ALLOWED_ORIGINS`` as a comma-separated list, or None if unset."""
    raw_value = MLFLOW_SERVER_CORS_ALLOWED_ORIGINS.get()
    if not raw_value:
        return None
    return [entry.strip() for entry in raw_value.split(",")]
def is_localhost_origin(origin: str) -> bool:
    """Return True when *origin*'s hostname is a loopback/localhost address."""
    if not origin:
        return False
    try:
        hostname = urlparse(origin).hostname
    except Exception:
        # Malformed origins are treated as non-local rather than raising.
        return False
    return hostname in CORS_LOCALHOST_HOSTS
def should_block_cors_request(origin: str, method: str, allowed_origins: list[str] | None) -> bool:
    """Return True when a state-changing cross-origin request must be rejected."""
    # Requests without an Origin header and non-mutating methods are never blocked.
    if not origin or method not in STATE_CHANGING_METHODS:
        return False
    # Localhost origins are implicitly trusted.
    if is_localhost_origin(origin):
        return False
    # An explicit allow-list match (or a "*" wildcard entry) also passes.
    if allowed_origins and ("*" in allowed_origins or origin in allowed_origins):
        return False
    return True
def is_api_endpoint(path: str) -> bool:
    """Return True for MLflow API paths that get CORS/OPTIONS handling."""
    if path in TEST_ENDPOINTS:
        return False
    return path.startswith((API_PATH_PREFIX, AJAX_API_PATH_PREFIX))
def is_allowed_host_header(allowed_hosts: list[str], host: str) -> bool:
    """Return True when *host* matches one of *allowed_hosts*.

    Entries containing ``*`` are treated as fnmatch wildcard patterns; all
    other entries require an exact string match. A bare ``"*"`` entry allows
    every host, and an empty/missing host is always rejected.
    """
    if not host:
        return False
    if "*" in allowed_hosts:
        return True
    for allowed in allowed_hosts:
        if "*" in allowed:
            if fnmatch.fnmatch(host, allowed):
                return True
        elif host == allowed:
            return True
    return False
def get_default_allowed_hosts() -> list[str]:
    """Default Host allow-list: localhost names, their ":port" wildcards, and private IP ranges."""
    with_ports = []
    for address in get_localhost_addresses():
        if address.startswith("["):
            # Bracketed IPv6 literal: "[" opens a character class in fnmatch,
            # so escape it before appending the ":port" wildcard.
            with_ports.append(f"{address.replace('[', '[[]', 1)}:*")
        else:
            with_ports.append(f"{address}:*")
    return get_localhost_addresses() + with_ports + get_private_ip_patterns()
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/security_utils.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/server/test_security.py | import pytest
from fastapi import FastAPI
from flask import Flask
from starlette.testclient import TestClient
from werkzeug.test import Client
from mlflow.server import security
from mlflow.server.fastapi_security import init_fastapi_security
from mlflow.server.security_utils import is_allowed_host_header, is_api_endpoint
def test_default_allowed_hosts():
    """Defaults include localhost variants, their :port wildcards, and private IP ranges."""
    hosts = security.get_allowed_hosts()
    assert "localhost" in hosts
    assert "127.0.0.1" in hosts
    assert "[::1]" in hosts
    assert "localhost:*" in hosts
    assert "127.0.0.1:*" in hosts
    assert "[[]::1]:*" in hosts
    assert "192.168.*" in hosts
    assert "10.*" in hosts
def test_custom_allowed_hosts(monkeypatch: pytest.MonkeyPatch):
    """MLFLOW_SERVER_ALLOWED_HOSTS entries appear in the resolved allow-list."""
    monkeypatch.setenv("MLFLOW_SERVER_ALLOWED_HOSTS", "example.com,app.example.com")
    hosts = security.get_allowed_hosts()
    assert "example.com" in hosts
    assert "app.example.com" in hosts
@pytest.mark.parametrize(
    ("host_header", "expected_status", "expected_error"),
    [
        ("localhost", 200, None),
        ("127.0.0.1", 200, None),
        ("evil.attacker.com", 403, b"Invalid Host header"),
    ],
)
def test_dns_rebinding_protection(
    test_app, host_header, expected_status, expected_error, monkeypatch: pytest.MonkeyPatch
):
    """Requests with a non-allow-listed Host header are rejected with 403."""
    monkeypatch.setenv("MLFLOW_SERVER_ALLOWED_HOSTS", "localhost,127.0.0.1")
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.get("/test", headers={"Host": host_header})
    assert response.status_code == expected_status
    if expected_error:
        assert expected_error in response.data
@pytest.mark.parametrize(
    ("method", "origin", "expected_status", "expected_cors_header"),
    [
        ("POST", "http://localhost:3000", 200, "http://localhost:3000"),
        ("POST", "http://evil.com", 403, None),
        ("POST", None, 200, None),
        ("GET", "http://evil.com", 200, None),
    ],
)
def test_cors_protection(
    test_app, method, origin, expected_status, expected_cors_header, monkeypatch: pytest.MonkeyPatch
):
    """State-changing API requests from unlisted origins are blocked; GETs and origin-less requests pass."""
    monkeypatch.setenv(
        "MLFLOW_SERVER_CORS_ALLOWED_ORIGINS", "http://localhost:3000,https://app.example.com"
    )
    security.init_security_middleware(test_app)
    client = Client(test_app)
    headers = {"Origin": origin} if origin else {}
    response = getattr(client, method.lower())("/api/2.0/mlflow/experiments/list", headers=headers)
    assert response.status_code == expected_status
    if expected_cors_header:
        assert response.headers.get("Access-Control-Allow-Origin") == expected_cors_header
def test_insecure_cors_mode(test_app, monkeypatch: pytest.MonkeyPatch):
    """A "*" origins configuration disables blocking and reflects any origin."""
    monkeypatch.setenv("MLFLOW_SERVER_CORS_ALLOWED_ORIGINS", "*")
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.post(
        "/api/2.0/mlflow/experiments/list", headers={"Origin": "http://evil.com"}
    )
    assert response.status_code == 200
    assert response.headers.get("Access-Control-Allow-Origin") == "http://evil.com"
@pytest.mark.parametrize(
    ("origin", "expected_cors_header"),
    [
        ("http://localhost:3000", "http://localhost:3000"),
        ("http://evil.com", None),
    ],
)
def test_preflight_options_request(
    test_app, origin, expected_cors_header, monkeypatch: pytest.MonkeyPatch
):
    """CORS preflight requests get 204 and reflect only allow-listed origins."""
    monkeypatch.setenv("MLFLOW_SERVER_CORS_ALLOWED_ORIGINS", "http://localhost:3000")
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.options(
        "/api/2.0/mlflow/experiments/list",
        headers={
            "Origin": origin,
            "Access-Control-Request-Method": "POST",
            "Access-Control-Request-Headers": "Content-Type",
        },
    )
    assert response.status_code == 204
    if expected_cors_header:
        assert response.headers.get("Access-Control-Allow-Origin") == expected_cors_header
def test_security_headers(test_app):
    """Default responses carry nosniff and SAMEORIGIN security headers."""
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.get("/test")
    assert response.headers.get("X-Content-Type-Options") == "nosniff"
    assert response.headers.get("X-Frame-Options") == "SAMEORIGIN"
def test_disable_security_middleware(test_app, monkeypatch: pytest.MonkeyPatch):
    """The disable env var turns off headers and host validation entirely."""
    monkeypatch.setenv("MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE", "true")
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.get("/test")
    assert "X-Content-Type-Options" not in response.headers
    assert "X-Frame-Options" not in response.headers
    response = client.get("/test", headers={"Host": "evil.com"})
    assert response.status_code == 200
def test_x_frame_options_configuration(monkeypatch: pytest.MonkeyPatch):
    """MLFLOW_SERVER_X_FRAME_OPTIONS controls the header value; "NONE" omits it."""
    app = Flask(__name__)
    @app.route("/test")
    def test():
        return "OK"
    monkeypatch.setenv("MLFLOW_SERVER_X_FRAME_OPTIONS", "DENY")
    security.init_security_middleware(app)
    client = Client(app)
    response = client.get("/test")
    assert response.headers.get("X-Frame-Options") == "DENY"
    app2 = Flask(__name__)
    @app2.route("/test")
    def test2():
        return "OK"
    # Reset for the second app
    monkeypatch.setenv("MLFLOW_SERVER_X_FRAME_OPTIONS", "NONE")
    security.init_security_middleware(app2)
    client = Client(app2)
    response = client.get("/test")
    assert "X-Frame-Options" not in response.headers
def test_notebook_trace_renderer_skips_x_frame_options(monkeypatch: pytest.MonkeyPatch):
    """Trace-renderer asset paths omit X-Frame-Options so notebooks can iframe them."""
    from mlflow.tracing.constant import TRACE_RENDERER_ASSET_PATH
    app = Flask(__name__)
    @app.route(f"{TRACE_RENDERER_ASSET_PATH}/index.html")
    def notebook_renderer():
        return "<html>trace renderer</html>"
    @app.route(f"{TRACE_RENDERER_ASSET_PATH}/js/main.js")
    def notebook_renderer_js():
        return "console.log('trace renderer');"
    @app.route("/static-files/other-page.html")
    def other_page():
        return "<html>other page</html>"
    # Set X-Frame-Options to DENY to test that it's skipped for notebook renderer
    monkeypatch.setenv("MLFLOW_SERVER_X_FRAME_OPTIONS", "DENY")
    security.init_security_middleware(app)
    client = Client(app)
    response = client.get(f"{TRACE_RENDERER_ASSET_PATH}/index.html")
    assert response.status_code == 200
    assert "X-Frame-Options" not in response.headers
    response = client.get(f"{TRACE_RENDERER_ASSET_PATH}/js/main.js")
    assert response.status_code == 200
    assert "X-Frame-Options" not in response.headers
    response = client.get("/static-files/other-page.html")
    assert response.status_code == 200
    assert response.headers.get("X-Frame-Options") == "DENY"
def test_wildcard_hosts(test_app, monkeypatch: pytest.MonkeyPatch):
    """An allowed-hosts value of "*" accepts any Host header."""
    monkeypatch.setenv("MLFLOW_SERVER_ALLOWED_HOSTS", "*")
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.get("/test", headers={"Host": "any.domain.com"})
    assert response.status_code == 200
@pytest.mark.parametrize(
    ("endpoint", "host_header", "expected_status"),
    [
        ("/health", "evil.com", 200),
        ("/test", "evil.com", 403),
    ],
)
def test_endpoint_security_bypass(
    test_app, endpoint, host_header, expected_status, monkeypatch: pytest.MonkeyPatch
):
    """Health endpoints skip host validation; other paths do not."""
    monkeypatch.setenv("MLFLOW_SERVER_ALLOWED_HOSTS", "localhost")
    security.init_security_middleware(test_app)
    client = Client(test_app)
    response = client.get(endpoint, headers={"Host": host_header})
    assert response.status_code == expected_status
@pytest.mark.parametrize(
    ("hostname", "expected_valid"),
    [
        ("192.168.1.1", True),
        ("10.0.0.1", True),
        ("172.16.0.1", True),
        ("127.0.0.1", True),
        ("localhost", True),
        ("[::1]", True),
        ("192.168.1.1:8080", True),
        ("[::1]:8080", True),
        ("evil.com", False),
    ],
)
def test_host_validation(hostname, expected_valid):
    """Default allow-list matches loopback and private-range hosts only."""
    hosts = security.get_allowed_hosts()
    assert is_allowed_host_header(hosts, hostname) == expected_valid
@pytest.mark.parametrize(
    ("env_var", "env_value", "expected_result"),
    [
        (
            "MLFLOW_SERVER_CORS_ALLOWED_ORIGINS",
            "http://app1.com,http://app2.com",
            ["http://app1.com", "http://app2.com"],
        ),
        ("MLFLOW_SERVER_ALLOWED_HOSTS", "app1.com,app2.com:8080", ["app1.com", "app2.com:8080"]),
    ],
)
def test_environment_variable_configuration(
    env_var, env_value, expected_result, monkeypatch: pytest.MonkeyPatch
):
    """Comma-separated env values are split into the origins/hosts lists."""
    monkeypatch.setenv(env_var, env_value)
    if "ORIGINS" in env_var:
        result = security.get_allowed_origins()
        for expected in expected_result:
            assert expected in result
    else:
        result = security.get_allowed_hosts()
        for expected in expected_result:
            assert expected in result
@pytest.mark.parametrize(
    ("path", "expected"),
    [
        ("/api/2.0/mlflow/experiments/list", True),
        ("/ajax-api/2.0/mlflow/experiments/list", True),
        ("/ajax-api/3.0/mlflow/runs/search", True),
        ("/api/test", False),
        ("/test", False),
        ("/health", False),
        ("/static/index.html", False),
    ],
)
def test_is_api_endpoint(path, expected):
    """Only /api/ and /ajax-api/ paths (minus test endpoints) count as API endpoints."""
    assert is_api_endpoint(path) == expected
@pytest.mark.parametrize(
    ("origin", "expect_cors_header"),
    [
        ("http://localhost:3000", True),
        ("http://127.0.0.1:5000", True),
        ("http://[::1]:8080", True),
        ("http://evil.com", False),
    ],
)
def test_fastapi_cors_allows_localhost_origins(fastapi_client, origin, expect_cors_header):
    """FastAPI CORS reflects localhost-family origins on any port; others get nothing."""
    response = fastapi_client.get(
        "/api/2.0/mlflow/experiments/list", headers={"Host": "localhost", "Origin": origin}
    )
    if expect_cors_header:
        assert response.headers.get("access-control-allow-origin") == origin
    else:
        assert response.headers.get("access-control-allow-origin") is None
def test_fastapi_cors_allows_configured_origin(monkeypatch: pytest.MonkeyPatch):
    """FastAPI CORS reflects env-configured origins and ignores unlisted ones."""
    monkeypatch.setenv("MLFLOW_SERVER_CORS_ALLOWED_ORIGINS", "https://trusted.com")
    app = FastAPI()
    @app.api_route("/api/2.0/mlflow/experiments/list", methods=["GET", "POST", "OPTIONS"])
    async def api_endpoint():
        return {"ok": True}
    init_fastapi_security(app)
    client = TestClient(app, raise_server_exceptions=False)
    response = client.get(
        "/api/2.0/mlflow/experiments/list",
        headers={"Host": "localhost", "Origin": "https://trusted.com"},
    )
    assert response.headers.get("access-control-allow-origin") == "https://trusted.com"
    response = client.get(
        "/api/2.0/mlflow/experiments/list",
        headers={"Host": "localhost", "Origin": "http://evil.com"},
    )
    assert response.headers.get("access-control-allow-origin") is None
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/test_security.py",
"license": "Apache License 2.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/test_security_integration.py | import json
import pytest
from werkzeug.test import Client
@pytest.mark.parametrize(
    ("host", "origin", "expected_status", "should_block"),
    [
        ("evil.attacker.com:5000", "http://evil.attacker.com:5000", 403, True),
        ("localhost:5000", None, None, False),
    ],
)
def test_dns_rebinding_and_cors_protection(
    mlflow_app_client, host, origin, expected_status, should_block
):
    """End-to-end: bad Host/Origin combos are rejected; localhost passes."""
    headers = {"Host": host, "Content-Type": "application/json"}
    if origin:
        headers["Origin"] = origin
    response = mlflow_app_client.post(
        "/api/2.0/mlflow/experiments/search",
        headers=headers,
        data=json.dumps({"order_by": ["creation_time DESC", "name ASC"], "max_results": 50}),
    )
    if should_block:
        assert response.status_code == expected_status
        assert (
            b"Invalid Host header" in response.data
            or b"Cross-origin request blocked" in response.data
        )
    else:
        assert response.status_code != 403
@pytest.mark.parametrize(
    ("origin", "endpoint", "expected_blocked"),
    [
        ("http://malicious-site.com", "/api/2.0/mlflow/experiments/create", True),
        ("http://localhost:3000", "/api/2.0/mlflow/experiments/search", False),
    ],
)
def test_cors_for_state_changing_requests(mlflow_app_client, origin, endpoint, expected_blocked):
    """POSTs from unknown origins are blocked with 403; localhost origins pass."""
    response = mlflow_app_client.post(
        endpoint,
        headers={"Origin": origin, "Content-Type": "application/json"},
        data=json.dumps({"name": "test-experiment"} if "create" in endpoint else {}),
    )
    if expected_blocked:
        assert response.status_code == 403
        assert b"Cross-origin request blocked" in response.data
    else:
        assert response.status_code != 403
def test_cors_with_configured_origins(monkeypatch: pytest.MonkeyPatch):
    """Origins from MLFLOW_SERVER_CORS_ALLOWED_ORIGINS pass; others are blocked."""
    monkeypatch.setenv("MLFLOW_SERVER_CORS_ALLOWED_ORIGINS", "https://trusted-app.com")
    from flask import Flask
    from mlflow.server import handlers, security
    app = Flask(__name__)
    for http_path, handler, methods in handlers.get_endpoints():
        app.add_url_rule(http_path, handler.__name__, handler, methods=methods)
    security.init_security_middleware(app)
    client = Client(app)
    test_cases = [
        ("https://trusted-app.com", False),
        ("http://evil.com", True),
    ]
    for origin, should_block in test_cases:
        response = client.post(
            "/api/2.0/mlflow/experiments/search",
            headers={"Origin": origin, "Content-Type": "application/json"},
            data=json.dumps({}),
        )
        if should_block:
            assert response.status_code == 403
        else:
            assert response.status_code != 403
def test_security_headers_on_responses(mlflow_app_client):
    """Even the health endpoint carries the default security headers."""
    response = mlflow_app_client.get("/health")
    assert response.headers.get("X-Content-Type-Options") == "nosniff"
    assert response.headers.get("X-Frame-Options") == "SAMEORIGIN"
@pytest.mark.parametrize(
    ("origin", "expected_status", "should_have_cors"),
    [
        ("http://localhost:3000", 204, True),
        ("http://evil.com", None, False),
    ],
)
def test_preflight_options_requests(mlflow_app_client, origin, expected_status, should_have_cors):
    """Preflight from localhost gets 204 with CORS headers; evil origins get none."""
    response = mlflow_app_client.options(
        "/api/2.0/mlflow/experiments/search",
        headers={
            "Origin": origin,
            "Access-Control-Request-Method": "POST",
            "Access-Control-Request-Headers": "Content-Type",
        },
    )
    if expected_status:
        assert response.status_code == expected_status
    if should_have_cors:
        assert response.headers.get("Access-Control-Allow-Origin") == origin
        assert "POST" in response.headers.get("Access-Control-Allow-Methods", "")
    else:
        assert (
            "Access-Control-Allow-Origin" not in response.headers
            or response.headers.get("Access-Control-Allow-Origin") != origin
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/test_security_integration.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/cli/genai_eval_utils.py | """
Utility functions for trace evaluation output formatting.
"""
from dataclasses import dataclass
from typing import Any
import click
import pandas as pd
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers import Scorer, get_all_scorers, get_scorer
from mlflow.tracing.constant import AssessmentMetadataKey
# Sentinel string shown when an assessment has no value/rationale/error; also
# used as the placeholder assessment name when a trace has no assessments.
NA_VALUE = "N/A"
@dataclass
class Assessment:
    """
    Structured assessment data for a trace evaluation.

    Captures a single scorer's feedback on one trace: the value it produced,
    an optional rationale, and an error message when scoring failed.
    """

    name: str | None
    """The name of the assessment"""
    result: Any | None = None
    """The result value from the assessment"""
    rationale: str | None = None
    """The rationale text explaining the assessment"""
    error: str | None = None
    """Error message if the assessment failed"""
@dataclass
class Cell:
    """
    Structured cell data for table display with metadata.

    Wraps the rendered string shown in the table plus, for assessment
    columns, the underlying Assessment it was derived from.
    """

    value: str
    """The formatted display value for the cell"""
    assessment: Assessment | None = None
    """The assessment data for this cell, if it represents an assessment"""
@dataclass
class EvalResult:
    """
    Container for evaluation results for a single trace.

    This dataclass provides structured access to trace evaluation data,
    replacing dict-based access for better type safety.
    """

    trace_id: str
    """The trace ID"""
    assessments: list[Assessment]
    """List of Assessment objects for this trace"""
@dataclass
class TableOutput:
    """Container for formatted table data.

    headers: column titles ("trace_id" followed by sorted assessment names).
    rows: one list of Cell objects per trace, aligned with ``headers``.
    """

    headers: list[str]
    rows: list[list[Cell]]
def _format_assessment_cell(assessment: Assessment | None) -> Cell:
    """
    Format a single assessment cell for table display.

    Display precedence: error > value+rationale > value > rationale > N/A.

    Args:
        assessment: Assessment object with result, rationale, and error fields

    Returns:
        Cell object with formatted value and assessment metadata
    """
    if assessment is None:
        return Cell(value=NA_VALUE)
    has_result = assessment.result is not None
    if assessment.error:
        text = f"error: {assessment.error}"
    elif has_result and assessment.rationale:
        text = f"value: {assessment.result}, rationale: {assessment.rationale}"
    elif has_result:
        text = f"value: {assessment.result}"
    elif assessment.rationale:
        text = f"rationale: {assessment.rationale}"
    else:
        text = NA_VALUE
    return Cell(value=text, assessment=assessment)
def resolve_scorers(scorer_names: list[str], experiment_id: str) -> list[Scorer]:
    """
    Resolve scorer names to scorer objects.

    Checks built-in scorers first, then registered scorers.
    Supports both class names (e.g., "RelevanceToQuery") and snake_case
    scorer names (e.g., "relevance_to_query").

    Args:
        scorer_names: List of scorer names to resolve
        experiment_id: Experiment ID for looking up registered scorers

    Returns:
        List of resolved scorer objects

    Raises:
        click.UsageError: If a scorer is not found or no valid scorers specified
    """
    resolved_scorers = []
    builtin_scorers = get_all_scorers()
    # Build map with both class name and snake_case name for lookup
    builtin_scorer_map = {}
    for scorer in builtin_scorers:
        # Map by class name (e.g., "RelevanceToQuery")
        builtin_scorer_map[scorer.__class__.__name__] = scorer
        # Map by scorer.name (snake_case, e.g., "relevance_to_query")
        if scorer.name is not None:
            builtin_scorer_map[scorer.name] = scorer
    for scorer_name in scorer_names:
        if scorer_name in builtin_scorer_map:
            resolved_scorers.append(builtin_scorer_map[scorer_name])
        else:
            # Try to get it as a registered scorer
            try:
                registered_scorer = get_scorer(name=scorer_name, experiment_id=experiment_id)
                resolved_scorers.append(registered_scorer)
            except MlflowException as e:
                error_message = str(e)
                # NOTE(review): distinguishes "scorer missing" from other backend
                # failures by matching on the message text — brittle if the
                # backend wording ever changes.
                if "not found" in error_message.lower():
                    available_builtin = ", ".join(
                        sorted({scorer.__class__.__name__ for scorer in builtin_scorers})
                    )
                    raise click.UsageError(
                        f"Could not identify Scorer '{scorer_name}'. "
                        f"Only built-in or registered scorers can be resolved. "
                        f"Available built-in scorers: {available_builtin}. "
                        f"To use a custom scorer, register it first in experiment {experiment_id} "
                        f"using the register_scorer() API."
                    )
                else:
                    raise click.UsageError(
                        f"An error occurred when retrieving information for Scorer "
                        f"`{scorer_name}`: {error_message}"
                    )
    if not resolved_scorers:
        raise click.UsageError("No valid scorers specified")
    return resolved_scorers
def extract_assessments_from_results(
    results_df: pd.DataFrame, evaluation_run_id: str
) -> list[EvalResult]:
    """
    Extract assessments from evaluation results DataFrame.

    The evaluate() function returns results with a DataFrame that contains
    an 'assessments' column. Each row has a list of assessment dictionaries
    with metadata including AssessmentMetadataKey.SOURCE_RUN_ID that we use to
    filter assessments from this specific evaluation run.

    Args:
        results_df: DataFrame from evaluate() results containing assessments column
        evaluation_run_id: The MLflow run ID from the evaluation that generated the assessments

    Returns:
        List of EvalResult objects with trace_id and assessments
    """
    output_data = []
    for _, row in results_df.iterrows():
        trace_id = row.get("trace_id", "unknown")
        assessments_list = []
        for assessment_dict in row.get("assessments", []):
            # Only consider assessments from the evaluation run
            metadata = assessment_dict.get("metadata", {})
            source_run_id = metadata.get(AssessmentMetadataKey.SOURCE_RUN_ID)
            if source_run_id != evaluation_run_id:
                continue
            assessment_name = assessment_dict.get("assessment_name")
            assessment_result = None
            assessment_rationale = None
            assessment_error = None
            # The assessment value lives under a nested "feedback" dict when present.
            if (feedback := assessment_dict.get("feedback")) and isinstance(feedback, dict):
                assessment_result = feedback.get("value")
            if rationale := assessment_dict.get("rationale"):
                assessment_rationale = rationale
            if error := assessment_dict.get("error"):
                # Error may be a structured object; stringify it for display.
                assessment_error = str(error)
            assessments_list.append(
                Assessment(
                    name=assessment_name,
                    result=assessment_result,
                    rationale=assessment_rationale,
                    error=assessment_error,
                )
            )
        # If no assessments were found for this trace, add error markers
        if not assessments_list:
            assessments_list.append(
                Assessment(
                    name=NA_VALUE,
                    result=None,
                    rationale=None,
                    error="No assessments found on trace",
                )
            )
        output_data.append(EvalResult(trace_id=trace_id, assessments=assessments_list))
    return output_data
def format_table_output(output_data: list[EvalResult]) -> TableOutput:
    """
    Format evaluation results as table data.

    Args:
        output_data: List of EvalResult objects with assessments

    Returns:
        TableOutput dataclass containing headers and rows
    """
    # Collect the distinct assessment names across all traces; None names and
    # the N/A placeholder are excluded. Sorted so column order is deterministic.
    named = {
        assessment.name
        for trace_result in output_data
        for assessment in trace_result.assessments
        if assessment.name and assessment.name != NA_VALUE
    }
    column_names = sorted(named)
    rows = []
    for trace_result in output_data:
        # Index this trace's assessments by name for column lookup.
        by_name = {
            assessment.name: assessment
            for assessment in trace_result.assessments
            if assessment.name and assessment.name != NA_VALUE
        }
        cells = [Cell(value=trace_result.trace_id)]
        cells.extend(_format_assessment_cell(by_name.get(name)) for name in column_names)
        rows.append(cells)
    return TableOutput(headers=["trace_id"] + column_names, rows=rows)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/cli/genai_eval_utils.py",
"license": "Apache License 2.0",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/cli/test_genai_eval_utils.py | from unittest import mock
import click
import pandas as pd
import pytest
from mlflow.cli.genai_eval_utils import (
NA_VALUE,
Assessment,
EvalResult,
extract_assessments_from_results,
format_table_output,
resolve_scorers,
)
from mlflow.exceptions import MlflowException
from mlflow.tracing.constant import AssessmentMetadataKey
def test_format_single_trace_with_result_and_rationale():
    """A single assessment produces one column and a "value, rationale" cell."""
    output_data = [
        EvalResult(
            trace_id="tr-123",
            assessments=[
                Assessment(
                    name="RelevanceToQuery",
                    result="yes",
                    rationale="The answer is relevant",
                )
            ],
        )
    ]
    table_output = format_table_output(output_data)
    # Headers should use assessment names from output_data
    assert table_output.headers == ["trace_id", "RelevanceToQuery"]
    assert len(table_output.rows) == 1
    assert table_output.rows[0][0].value == "tr-123"
    assert "value: yes" in table_output.rows[0][1].value
    assert "rationale: The answer is relevant" in table_output.rows[0][1].value
def test_format_multiple_traces_multiple_scorers():
    """Two traces with two assessments each produce a three-column table."""

    def _eval_result(trace_id, relevance_result, relevance_rationale):
        # Each trace carries a RelevanceToQuery and a Safety assessment.
        return EvalResult(
            trace_id=trace_id,
            assessments=[
                Assessment(
                    name="RelevanceToQuery",
                    result=relevance_result,
                    rationale=relevance_rationale,
                ),
                Assessment(name="Safety", result="yes", rationale="Safe"),
            ],
        )

    table = format_table_output(
        [
            _eval_result("tr-123", "yes", "Relevant"),
            _eval_result("tr-456", "no", "Not relevant"),
        ]
    )

    # Assessment-name columns are sorted alphabetically after trace_id.
    assert table.headers == ["trace_id", "RelevanceToQuery", "Safety"]
    assert len(table.rows) == 2
    assert table.rows[0][0].value == "tr-123"
    assert table.rows[1][0].value == "tr-456"
    assert "value: yes" in table.rows[0][1].value
    assert "value: no" in table.rows[1][1].value
def test_format_long_rationale_not_truncated():
    """A 150-character rationale survives formatting without truncation."""
    rationale = "x" * 150
    table = format_table_output(
        [
            EvalResult(
                trace_id="tr-123",
                assessments=[
                    Assessment(name="RelevanceToQuery", result="yes", rationale=rationale)
                ],
            )
        ]
    )
    cell_text = table.rows[0][1].value
    assert rationale in cell_text
    assert len(cell_text) >= len(rationale)
def test_format_error_message_formatting():
    """An assessment error is rendered as 'error: <message>'."""
    failed = Assessment(
        name="RelevanceToQuery",
        result=None,
        rationale=None,
        error="OpenAI API error",
    )
    table = format_table_output([EvalResult(trace_id="tr-123", assessments=[failed])])
    assert table.rows[0][1].value == "error: OpenAI API error"
def test_format_na_for_missing_results():
    """An assessment with neither value nor rationale renders as N/A."""
    empty = Assessment(name="RelevanceToQuery", result=None, rationale=None)
    table = format_table_output([EvalResult(trace_id="tr-123", assessments=[empty])])
    assert table.rows[0][1].value == NA_VALUE
def test_format_result_only_without_rationale():
    """A value-only assessment renders just the 'value:' line."""
    value_only = Assessment(name="RelevanceToQuery", result="yes", rationale=None)
    table = format_table_output([EvalResult(trace_id="tr-123", assessments=[value_only])])
    assert table.rows[0][1].value == "value: yes"
def test_format_rationale_only_without_result():
    """A rationale-only assessment renders just the 'rationale:' line."""
    rationale_only = Assessment(
        name="RelevanceToQuery", result=None, rationale="Some reasoning"
    )
    table = format_table_output(
        [EvalResult(trace_id="tr-123", assessments=[rationale_only])]
    )
    assert table.rows[0][1].value == "rationale: Some reasoning"
def test_format_with_different_assessment_names():
    """Headers use assessment names from the results, not scorer class names."""
    results = [
        EvalResult(
            trace_id="tr-123",
            assessments=[
                # Names intentionally differ from the scorer class names
                # (e.g. "relevance_to_query" vs "RelevanceToQuery").
                Assessment(
                    name="relevance_to_query",
                    result="yes",
                    rationale="The answer is relevant",
                ),
                Assessment(
                    name="safety_check",
                    result="safe",
                    rationale="Content is safe",
                ),
            ],
        )
    ]
    table = format_table_output(results)
    # Actual assessment names appear in the headers, sorted.
    assert table.headers == ["trace_id", "relevance_to_query", "safety_check"]
    assert len(table.rows) == 1
    row = table.rows[0]
    assert row[0].value == "tr-123"
    assert "value: yes" in row[1].value
    assert "value: safe" in row[2].value
# Tests for resolve_scorers function
def test_resolve_builtin_scorer():
    """A PascalCase built-in scorer name resolves to the scorer class."""
    resolved = resolve_scorers(["Correctness"], "experiment_123")
    assert [type(s).__name__ for s in resolved] == ["Correctness"]
def test_resolve_builtin_scorer_snake_case():
    """A snake_case alias resolves to the same built-in scorer."""
    resolved = resolve_scorers(["correctness"], "experiment_123")
    assert [type(s).__name__ for s in resolved] == ["Correctness"]
def test_resolve_registered_scorer():
    """An unknown built-in name falls back to the registered-scorer lookup."""
    registered = mock.Mock()
    with (
        mock.patch(
            "mlflow.cli.genai_eval_utils.get_all_scorers", return_value=[]
        ) as get_all_mock,
        mock.patch(
            "mlflow.cli.genai_eval_utils.get_scorer", return_value=registered
        ) as get_scorer_mock,
    ):
        resolved = resolve_scorers(["CustomScorer"], "experiment_123")
    # The registered scorer is returned as-is.
    assert resolved == [registered]
    get_all_mock.assert_called_once()
    get_scorer_mock.assert_called_once_with(name="CustomScorer", experiment_id="experiment_123")
def test_resolve_mixed_scorers():
    """Built-in and registered scorers can be resolved in a single call."""
    # Built-in scorer stub whose class name matches the requested name.
    builtin = mock.Mock()
    builtin.__class__.__name__ = "Safety"
    builtin.name = None
    # Registered (custom) scorer stub.
    registered = mock.Mock()
    with (
        mock.patch(
            "mlflow.cli.genai_eval_utils.get_all_scorers", return_value=[builtin]
        ) as get_all_mock,
        mock.patch(
            "mlflow.cli.genai_eval_utils.get_scorer", return_value=registered
        ) as get_scorer_mock,
    ):
        resolved = resolve_scorers(["Safety", "CustomScorer"], "experiment_123")
    # Built-in match first, registered fallback second.
    assert resolved == [builtin, registered]
    get_all_mock.assert_called_once()
    get_scorer_mock.assert_called_once_with(name="CustomScorer", experiment_id="experiment_123")
def test_resolve_scorer_not_found_raises_error():
    """A name matching neither built-in nor registered scorers is a usage error."""
    with (
        mock.patch(
            "mlflow.cli.genai_eval_utils.get_all_scorers", return_value=[]
        ) as get_all_mock,
        mock.patch(
            "mlflow.cli.genai_eval_utils.get_scorer",
            side_effect=MlflowException("Not found"),
        ) as get_scorer_mock,
    ):
        with pytest.raises(click.UsageError, match="Could not identify Scorer 'UnknownScorer'"):
            resolve_scorers(["UnknownScorer"], "experiment_123")
    # Both lookup paths were attempted before failing.
    get_all_mock.assert_called_once()
    get_scorer_mock.assert_called_once_with(
        name="UnknownScorer", experiment_id="experiment_123"
    )
def test_resolve_empty_scorers_raises_error():
    """An empty scorer list is rejected with a usage error."""
    with pytest.raises(click.UsageError, match="No valid scorers"):
        resolve_scorers([], "experiment_123")
# Tests for extract_assessments_from_results function
def test_extract_with_matching_run_id():
    """Assessments tagged with the current run ID are extracted."""
    frame = pd.DataFrame(
        [
            {
                "trace_id": "tr-abc123",
                "assessments": [
                    {
                        "assessment_name": "RelevanceToQuery",
                        "feedback": {"value": "yes"},
                        "rationale": "The answer is relevant",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-123"},
                    }
                ],
            }
        ]
    )
    extracted = extract_assessments_from_results(frame, "run-123")
    assert extracted == [
        EvalResult(
            trace_id="tr-abc123",
            assessments=[
                Assessment(
                    name="RelevanceToQuery",
                    result="yes",
                    rationale="The answer is relevant",
                )
            ],
        )
    ]
def test_extract_with_different_assessment_name():
    """Assessment names are taken verbatim from the results payload."""
    frame = pd.DataFrame(
        [
            {
                "trace_id": "tr-abc123",
                "assessments": [
                    {
                        "assessment_name": "relevance_to_query",
                        "feedback": {"value": "yes"},
                        "rationale": "Relevant answer",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-123"},
                    }
                ],
            }
        ]
    )
    extracted = extract_assessments_from_results(frame, "run-123")
    assert extracted == [
        EvalResult(
            trace_id="tr-abc123",
            assessments=[
                Assessment(
                    name="relevance_to_query",
                    result="yes",
                    rationale="Relevant answer",
                )
            ],
        )
    ]
def test_extract_filter_out_assessments_with_different_run_id():
    """Assessments from other runs on the same trace are filtered out."""
    frame = pd.DataFrame(
        [
            {
                "trace_id": "tr-abc123",
                "assessments": [
                    {
                        "assessment_name": "RelevanceToQuery",
                        "feedback": {"value": "yes"},
                        "rationale": "Current evaluation",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-123"},
                    },
                    {
                        # Belongs to a different run; must be dropped.
                        "assessment_name": "Safety",
                        "feedback": {"value": "yes"},
                        "rationale": "Old evaluation",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-456"},
                    },
                ],
            }
        ]
    )
    extracted = extract_assessments_from_results(frame, "run-123")
    assert extracted == [
        EvalResult(
            trace_id="tr-abc123",
            assessments=[
                Assessment(
                    name="RelevanceToQuery",
                    result="yes",
                    rationale="Current evaluation",
                )
            ],
        )
    ]
def test_extract_no_assessments_for_run_id():
    """A trace whose assessments all belong to other runs yields an error entry."""
    frame = pd.DataFrame(
        [
            {
                "trace_id": "tr-abc123",
                "assessments": [
                    {
                        "assessment_name": "RelevanceToQuery",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-456"},
                    }
                ],
            }
        ]
    )
    extracted = extract_assessments_from_results(frame, "run-123")
    assert len(extracted) == 1
    # Exactly one placeholder assessment carrying only an error.
    (placeholder,) = extracted[0].assessments
    assert placeholder.result is None
    assert placeholder.rationale is None
    assert placeholder.error is not None
def test_extract_multiple_assessments_from_same_run():
    """All assessments tagged with the run ID are kept, in order."""
    frame = pd.DataFrame(
        [
            {
                "trace_id": "tr-abc123",
                "assessments": [
                    {
                        "assessment_name": "RelevanceToQuery",
                        "feedback": {"value": "yes"},
                        "rationale": "Relevant",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-123"},
                    },
                    {
                        "assessment_name": "Safety",
                        "feedback": {"value": "yes"},
                        "rationale": "Safe",
                        "metadata": {AssessmentMetadataKey.SOURCE_RUN_ID: "run-123"},
                    },
                ],
            }
        ]
    )
    extracted = extract_assessments_from_results(frame, "run-123")
    assert extracted == [
        EvalResult(
            trace_id="tr-abc123",
            assessments=[
                Assessment(name="RelevanceToQuery", result="yes", rationale="Relevant"),
                Assessment(name="Safety", result="yes", rationale="Safe"),
            ],
        )
    ]
def test_extract_no_assessments_on_trace_shows_error():
    """A trace with no assessments at all yields a single error placeholder."""
    frame = pd.DataFrame([{"trace_id": "tr-abc123", "assessments": []}])
    extracted = extract_assessments_from_results(frame, "run-123")
    assert len(extracted) == 1
    (placeholder,) = extracted[0].assessments
    assert placeholder.error == "No assessments found on trace"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/cli/test_genai_eval_utils.py",
"license": "Apache License 2.0",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/claude_code/test_autolog.py | import sys
from unittest.mock import MagicMock, patch
import pytest
from claude_agent_sdk.types import AssistantMessage, ResultMessage, TextBlock, UserMessage
import mlflow.anthropic
from mlflow.anthropic.autolog import patched_claude_sdk_init
def test_anthropic_autolog_without_claude_sdk():
    """Anthropic autologging still initializes when claude_agent_sdk is absent."""
    # Simulate an environment where the Claude SDK was never imported.
    sys.modules.pop("claude_agent_sdk", None)
    fake_modules = {
        "anthropic": MagicMock(__version__="0.35.0"),
        "anthropic.resources": MagicMock(Messages=MagicMock, AsyncMessages=MagicMock),
    }
    with patch.dict("sys.modules", fake_modules), patch("mlflow.anthropic.safe_patch"):
        mlflow.anthropic.autolog()
def _patch_sdk_init(mock_self, response_messages):
    """Apply patched_claude_sdk_init to *mock_self* with a fake receive_response.

    The fake async generator yields *response_messages* in order. Returns the
    mocked original ``__init__`` so callers can assert on how it was invoked.
    """
    original_init = MagicMock()

    async def fake_receive_response():
        for message in response_messages:
            yield message

    mock_self.receive_response = fake_receive_response
    patched_claude_sdk_init(original_init, mock_self)
    return original_init
def test_patched_claude_sdk_init_wraps_receive_response():
    """The patched init calls the original and replaces receive_response."""
    mock_self = MagicMock()

    async def fake_receive_response():
        yield "msg1"

    mock_self.receive_response = fake_receive_response
    original_init = MagicMock()
    patched_claude_sdk_init(original_init, mock_self)
    # Original __init__ runs first; receive_response is then wrapped.
    original_init.assert_called_once_with(mock_self, None)
    assert mock_self.receive_response is not fake_receive_response
@pytest.mark.asyncio
async def test_receive_response_builds_trace():
    """Draining the wrapped receive_response forwards all messages for tracing."""
    mock_self = MagicMock()
    final_result = ResultMessage(
        subtype="success",
        duration_ms=5000,
        duration_api_ms=4000,
        is_error=False,
        num_turns=1,
        session_id="test-session",
        usage={"input_tokens": 100, "output_tokens": 20},
    )
    _patch_sdk_init(
        mock_self,
        [
            UserMessage(content="Hello"),
            AssistantMessage(content=[TextBlock(text="Hi!")], model="claude-sonnet-4-20250514"),
            final_result,
        ],
    )
    with (
        patch("mlflow.utils.autologging_utils.autologging_is_disabled", return_value=False),
        patch(
            "mlflow.claude_code.tracing.process_sdk_messages", return_value=MagicMock()
        ) as mock_process,
    ):
        [msg async for msg in mock_self.receive_response()]
    mock_process.assert_called_once()
    forwarded = mock_process.call_args[0][0]
    assert len(forwarded) == 3
    forwarded_results = [m for m in forwarded if isinstance(m, ResultMessage)]
    assert len(forwarded_results) == 1
    assert forwarded_results[0].usage == {"input_tokens": 100, "output_tokens": 20}
@pytest.mark.asyncio
async def test_query_captures_async_generator_prompt():
    """User prompts supplied as async generators are captured for tracing."""
    mock_self = MagicMock()

    async def fake_query(prompt, *args, **kwargs):
        # Drain the generator exactly like the real SDK does.
        async for _ in prompt:
            pass

    mock_self.query = fake_query
    _patch_sdk_init(
        mock_self,
        [
            AssistantMessage(content=[TextBlock(text="Hi!")], model="claude-sonnet-4-20250514"),
            ResultMessage(
                subtype="success",
                duration_ms=1000,
                duration_api_ms=800,
                is_error=False,
                num_turns=1,
                session_id="s",
            ),
        ],
    )

    async def prompt_generator():
        yield {"type": "user", "message": {"role": "user", "content": "Hello from generator"}}

    with (
        patch("mlflow.utils.autologging_utils.autologging_is_disabled", return_value=False),
        patch(
            "mlflow.claude_code.tracing.process_sdk_messages", return_value=MagicMock()
        ) as mock_process,
    ):
        await mock_self.query(prompt_generator())
        [msg async for msg in mock_self.receive_response()]
    mock_process.assert_called_once()
    forwarded = mock_process.call_args[0][0]
    captured_users = [m for m in forwarded if isinstance(m, UserMessage)]
    assert len(captured_users) == 1
    assert captured_users[0].content == "Hello from generator"
@pytest.mark.asyncio
async def test_receive_response_skips_when_autologging_disabled():
    """No messages are processed when autologging is turned off."""
    mock_self = MagicMock()
    _patch_sdk_init(mock_self, ["msg1", "msg2"])
    with (
        patch("mlflow.utils.autologging_utils.autologging_is_disabled", return_value=True),
        patch("mlflow.claude_code.tracing.process_sdk_messages") as mock_process,
    ):
        [msg async for msg in mock_self.receive_response()]
    mock_process.assert_not_called()
@pytest.mark.asyncio
async def test_sdk_hook_handler_when_disabled():
    """The SDK stop hook skips processing (but still continues) when autologging is off."""
    from mlflow.claude_code.hooks import sdk_stop_hook_handler

    with (
        patch("mlflow.utils.autologging_utils.autologging_is_disabled", return_value=True),
        patch("mlflow.claude_code.hooks._process_stop_hook") as mock_process,
    ):
        outcome = await sdk_stop_hook_handler(
            input_data={"session_id": "test", "transcript_path": "/fake/path"},
            tool_use_id=None,
            context=None,
        )
    mock_process.assert_not_called()
    assert outcome == {"continue": True}
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/claude_code/test_autolog.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/jobs/test_utils.py | import os
import pytest
from mlflow.exceptions import MlflowException
from mlflow.server.jobs.utils import _load_function, _validate_function_parameters
pytestmark = pytest.mark.skipif(
os.name == "nt", reason="MLflow job execution is not supported on Windows"
)
def test_validate_function_parameters():
    """Required parameters must all be present; optional ones may be omitted."""

    def target(a, b, c=None):
        return a + b + (c or 0)

    # Complete parameter sets pass validation.
    _validate_function_parameters(target, {"a": 1, "b": 2})
    _validate_function_parameters(target, {"a": 1, "b": 2, "c": 3})
    # One required parameter missing.
    with pytest.raises(MlflowException, match=r"Missing required parameters.*\['b'\]"):
        _validate_function_parameters(target, {"a": 1})
    # All required parameters missing.
    with pytest.raises(MlflowException, match=r"Missing required parameters.*\['a', 'b'\]"):
        _validate_function_parameters(target, {})
def test_validate_function_parameters_with_varargs():
    """**kwargs absorbs extra parameters, but required ones are still enforced."""

    def target(a, **kwargs):
        return a

    _validate_function_parameters(target, {"a": 1, "extra": 2})
    with pytest.raises(MlflowException, match=r"Missing required parameters.*\['a'\]"):
        _validate_function_parameters(target, {"extra": 2})
def test_validate_function_parameters_with_positional_args():
    """*args does not relax validation of named required parameters."""

    def target(a, *args):
        return a

    _validate_function_parameters(target, {"a": 1})
    with pytest.raises(MlflowException, match=r"Missing required parameters.*\['a'\]"):
        _validate_function_parameters(target, {})
def test_job_status_conversion():
    """JobStatus round-trips between int/str forms and rejects unknown values."""
    from mlflow.entities._job_status import JobStatus

    assert JobStatus.from_int(1) == JobStatus.RUNNING
    assert JobStatus.from_str("RUNNING") == JobStatus.RUNNING
    assert JobStatus.RUNNING.to_int() == 1
    assert str(JobStatus.RUNNING) == "RUNNING"
    # Out-of-range integers are rejected on both sides of the valid range.
    for bad_value in (-1, 6):
        with pytest.raises(
            MlflowException,
            match=f"The value {bad_value} can't be converted to JobStatus enum value.",
        ):
            JobStatus.from_int(bad_value)
    with pytest.raises(
        MlflowException, match="The string 'ABC' can't be converted to JobStatus enum value."
    ):
        JobStatus.from_str("ABC")
def test_load_function_invalid_function_format():
    """A fullname without a module part is rejected."""
    with pytest.raises(MlflowException, match="Invalid function fullname format"):
        _load_function("invalid_format_no_module")
def test_load_function_module_not_found():
    """A fullname referencing a missing module is rejected."""
    with pytest.raises(MlflowException, match="Module not found"):
        _load_function("non_existent_module.some_function")
def test_load_function_function_not_found():
    """A fullname referencing a missing attribute of a real module is rejected."""
    with pytest.raises(MlflowException, match="Function not found in module"):
        _load_function("os.non_exist_function")
def test_compute_exclusive_lock_key():
    """The lock key depends on the job name and param values, not on param order."""
    from mlflow.server.jobs.utils import _compute_exclusive_lock_key

    base = _compute_exclusive_lock_key("job_name", {"a": 1, "b": 2})
    # Identical params -> identical key; insertion order is irrelevant.
    assert _compute_exclusive_lock_key("job_name", {"a": 1, "b": 2}) == base
    assert _compute_exclusive_lock_key("job_name", {"b": 2, "a": 1}) == base
    # Changing a value or the job name changes the key.
    assert _compute_exclusive_lock_key("job_name", {"a": 1, "b": 3}) != base
    other_job = _compute_exclusive_lock_key("other_job", {"a": 1, "b": 2})
    assert other_job != base
    # Filtered params (simulating an exclusive parameter list) behave the same:
    # equal subsets agree, differing values diverge.
    subset = _compute_exclusive_lock_key("job_name", {"a": 1})
    assert _compute_exclusive_lock_key("job_name", {"a": 1}) == subset
    assert _compute_exclusive_lock_key("job_name", {"a": 2}) != subset
    # Same filtered params produce the same key; different ones do not.
    filtered = {"a": 1, "b": 2}
    assert _compute_exclusive_lock_key("job_name", filtered) == base
    assert _compute_exclusive_lock_key("job_name", {"a": 1, "b": 3}) != base
    # Keys are namespaced as "<job_name>:<hash>".
    assert base.startswith("job_name:")
    assert other_job.startswith("other_job:")
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/test_utils.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:examples/pytorch/HPOExample/hpo_mnist.py | """
Hyperparameter Optimization Example with Pure PyTorch and MLflow
This example demonstrates:
- Using MLflow to track hyperparameter optimization trials
- Parent/child run structure for organizing HPO experiments
- Pure PyTorch training (no Lightning dependencies)
- Simple MNIST classification with configurable hyperparameters
Run with: python hpo_mnist.py --n-trials 5 --max-epochs 3
"""
import argparse
import optuna
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import mlflow
class SimpleNet(nn.Module):
    """Two-layer MLP for MNIST: 784 -> hidden -> 10, with dropout after ReLU."""

    def __init__(self, hidden_size, dropout_rate):
        super().__init__()
        self.fc1 = nn.Linear(784, hidden_size)
        self.dropout = nn.Dropout(dropout_rate)
        self.fc2 = nn.Linear(hidden_size, 10)

    def forward(self, x):
        # Flatten any image-shaped input into 784-dim vectors.
        flat = x.view(-1, 784)
        hidden = self.dropout(F.relu(self.fc1(flat)))
        # Log-probabilities, matching the NLL loss used in training.
        return F.log_softmax(self.fc2(hidden), dim=1)
def train_epoch(model, device, train_loader, optimizer):
model.train()
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def evaluate(model, device, test_loader):
    """Return (mean NLL loss, accuracy) of *model* over *test_loader*."""
    model.eval()
    loss_sum = 0.0
    num_correct = 0
    with torch.no_grad():
        for batch, labels in test_loader:
            batch, labels = batch.to(device), labels.to(device)
            output = model(batch)
            # Sum per-sample losses; averaged over the full dataset below.
            loss_sum += F.nll_loss(output, labels, reduction="sum").item()
            predictions = output.argmax(dim=1, keepdim=True)
            num_correct += predictions.eq(labels.view_as(predictions)).sum().item()
    dataset_size = len(test_loader.dataset)
    return loss_sum / dataset_size, num_correct / dataset_size
def objective(trial, args, train_loader, test_loader, device):
    """Optuna objective: train one candidate model and return its test accuracy.

    Each trial runs inside a nested MLflow run so all trials are grouped
    under the parent HPO run.
    """
    # Sample this trial's hyperparameters.
    params = {
        "lr": trial.suggest_float("lr", 1e-4, 1e-1, log=True),
        "hidden_size": trial.suggest_int("hidden_size", 64, 512, step=64),
        "dropout_rate": trial.suggest_float("dropout_rate", 0.1, 0.5),
        "batch_size": trial.suggest_categorical("batch_size", [32, 64, 128]),
    }

    # Rebuild the loaders so the sampled batch size takes effect.
    train_loader = DataLoader(train_loader.dataset, batch_size=params["batch_size"], shuffle=True)
    test_loader = DataLoader(test_loader.dataset, batch_size=params["batch_size"], shuffle=False)

    with mlflow.start_run(nested=True, run_name=f"trial_{trial.number}"):
        mlflow.log_params(params)

        model = SimpleNet(params["hidden_size"], params["dropout_rate"]).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=params["lr"])

        for epoch in range(args.max_epochs):
            train_epoch(model, device, train_loader, optimizer)
            test_loss, accuracy = evaluate(model, device, test_loader)
            mlflow.log_metrics({"test_loss": test_loss, "accuracy": accuracy}, step=epoch)

        # Optuna maximizes the final-epoch accuracy.
        return accuracy
def main():
    """Parse CLI args, load MNIST, and run the Optuna HPO loop under MLflow."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--n-trials", type=int, default=10, help="Number of HPO trials")
    parser.add_argument("--max-epochs", type=int, default=5, help="Epochs per trial")
    parser.add_argument("--batch-size", type=int, default=64, help="Initial batch size")
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Standard MNIST normalization constants.
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    train_loader = DataLoader(
        datasets.MNIST("./data", train=True, download=True, transform=transform),
        batch_size=args.batch_size,
        shuffle=True,
    )
    test_loader = DataLoader(
        datasets.MNIST("./data", train=False, transform=transform),
        batch_size=args.batch_size,
        shuffle=False,
    )

    # Parent run that groups all HPO trials.
    with mlflow.start_run(run_name="HPO_Parent"):
        mlflow.log_params({"n_trials": args.n_trials, "max_epochs": args.max_epochs})

        study = optuna.create_study(direction="maximize", study_name="mnist_hpo")
        study.optimize(
            lambda trial: objective(trial, args, train_loader, test_loader, device),
            n_trials=args.n_trials,
        )

        mlflow.log_metrics(
            {
                "best_accuracy": study.best_value,
                "best_trial": study.best_trial.number,
            }
        )
        # Prefix best params so they don't collide with per-trial param names.
        mlflow.log_params({f"best_{k}": v for k, v in study.best_params.items()})

        print(f"\nBest trial: {study.best_trial.number}")
        print(f"Best accuracy: {study.best_value:.4f}")
        print(f"Best params: {study.best_params}")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "mlflow/mlflow",
"file_path": "examples/pytorch/HPOExample/hpo_mnist.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/server/jobs/_huey_consumer.py | """
This module is used for launching Huey consumer
the command is like:
```
export _MLFLOW_HUEY_STORAGE_PATH={huey_store_dir}
export _MLFLOW_HUEY_INSTANCE_KEY={huey_instance_key}
huey_consumer.py mlflow.server.jobs.huey_consumer.huey_instance -w {max_workers}
```
It launches the Huey consumer that polls tasks from the huey storage file path
`{huey_store_dir}/{huey_instance_key}.mlflow-huey-store`
and schedules the job execution continuously.
"""
import os
import threading
from mlflow.server.constants import MLFLOW_HUEY_INSTANCE_KEY
from mlflow.server.jobs.logging_utils import configure_logging_for_jobs
from mlflow.server.jobs.utils import (
_exit_when_orphaned,
_get_or_init_huey_instance,
)
# Configure Python logging to suppress noisy job logs
configure_logging_for_jobs()
# ensure the subprocess is killed when parent process dies.
# The huey consumer's parent process is `_job_runner` process,
# if `_job_runner` process is died, it means the MLflow server exits.
threading.Thread(
target=_exit_when_orphaned,
name="exit_when_orphaned",
daemon=True,
).start()
huey_instance = _get_or_init_huey_instance(os.environ[MLFLOW_HUEY_INSTANCE_KEY]).instance
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/_huey_consumer.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/server/jobs/_job_runner.py | """
This module is used for launching the job runner process.
The job runner will:
* enqueue all unfinished huey tasks when MLflow server is down last time.
* Watch the `_MLFLOW_HUEY_STORAGE_PATH` path,
if new files (named like `XXX.mlflow-huey-store`) are created,
it means a new Huey queue is created, then the job runner
launches an individual Huey consumer process for each Huey queue.
See module `mlflow/server/jobs/_huey_consumer.py` for details of Huey consumer.
* Initialize periodic tasks on a dedicated Huey instance.
"""
import logging
import os
import time
from mlflow.server import HUEY_STORAGE_PATH_ENV_VAR
from mlflow.server.jobs.utils import (
_enqueue_unfinished_jobs,
_job_name_to_fn_fullname_map,
_launch_huey_consumer,
_launch_periodic_tasks_consumer,
_start_watcher_to_kill_job_runner_if_mlflow_server_dies,
)
if __name__ == "__main__":
    logger = logging.getLogger("mlflow.server.jobs._job_runner")

    # Record startup time (epoch millis) so only jobs left unfinished before
    # this point are re-enqueued below.
    server_up_time = int(time.time() * 1000)

    _start_watcher_to_kill_job_runner_if_mlflow_server_dies()

    huey_store_path = os.environ[HUEY_STORAGE_PATH_ENV_VAR]

    # Launch one Huey consumer per known job queue; a failure for one job
    # type must not prevent the remaining consumers from starting.
    for job_name in _job_name_to_fn_fullname_map:
        try:
            _launch_huey_consumer(job_name)
        except Exception as e:
            # Fix: use the module logger created above (was `logging.warning`,
            # which bypassed the named logger) and lazy %-formatting.
            logger.warning(
                "Launch Huey consumer for %s jobs failed, root cause: %r", job_name, e
            )

    # Launch dedicated consumer for periodic tasks
    # (periodic tasks are registered when the consumer starts up)
    _launch_periodic_tasks_consumer()

    time.sleep(10)  # wait for huey consumer launching
    _enqueue_unfinished_jobs(server_up_time)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/_job_runner.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/webhooks/test_delivery.py | from pathlib import Path
from unittest.mock import patch
import pytest
from mlflow.entities.webhook import Webhook, WebhookAction, WebhookEntity, WebhookEvent
from mlflow.store.model_registry.file_store import FileStore
from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore
from mlflow.webhooks.delivery import deliver_webhook
from mlflow.webhooks.delivery import test_webhook as send_test_webhook
@pytest.fixture
def file_store(tmp_path: Path) -> FileStore:
    """File-based model registry store rooted at a temp directory."""
    return FileStore(str(tmp_path))
@pytest.fixture
def sql_store(tmp_path: Path) -> SqlAlchemyStore:
    """SQLAlchemy-backed registry store using a throwaway SQLite database file."""
    database_uri = f"sqlite:///{tmp_path / 'test.db'}"
    return SqlAlchemyStore(database_uri)
@pytest.fixture
def webhook_event() -> WebhookEvent:
    """Registered-model-created event used across the delivery tests."""
    return WebhookEvent(WebhookEntity.REGISTERED_MODEL, WebhookAction.CREATED)
@pytest.fixture
def webhook_payload() -> dict[str, str]:
    """Minimal payload describing the model the event refers to."""
    return {"name": "test_model", "description": "Test model"}
def test_deliver_webhook_exits_early_for_file_store(
    file_store: FileStore, webhook_event: WebhookEvent, webhook_payload: dict[str, str]
) -> None:
    """FileStore does not support webhooks, so delivery is skipped entirely."""
    with patch("mlflow.webhooks.delivery._deliver_webhook_impl") as mock_impl:
        deliver_webhook(event=webhook_event, payload=webhook_payload, store=file_store)
    mock_impl.assert_not_called()
def test_deliver_webhook_calls_impl_for_sql_store(
    sql_store: SqlAlchemyStore, webhook_event: WebhookEvent, webhook_payload: dict[str, str]
) -> None:
    """SqlAlchemyStore delegates to the delivery implementation with all args."""
    with patch("mlflow.webhooks.delivery._deliver_webhook_impl") as mock_impl:
        deliver_webhook(event=webhook_event, payload=webhook_payload, store=sql_store)
    mock_impl.assert_called_once_with(
        event=webhook_event,
        payload=webhook_payload,
        store=sql_store,
    )
def test_deliver_webhook_handles_exception_for_sql_store(
    sql_store: SqlAlchemyStore, webhook_event: WebhookEvent, webhook_payload: dict[str, str]
) -> None:
    """A failing delivery is logged rather than propagated to the caller."""
    with (
        patch("mlflow.webhooks.delivery._deliver_webhook_impl", side_effect=Exception("Test")),
        patch("mlflow.webhooks.delivery._logger") as mock_logger,
    ):
        # Must not raise even though the impl blows up.
        deliver_webhook(event=webhook_event, payload=webhook_payload, store=sql_store)
    mock_logger.error.assert_called_once()
    assert "Failed to deliver webhook for event" in str(mock_logger.error.call_args)
def test_deliver_webhook_no_exception_for_file_store(
    file_store: FileStore, webhook_event: WebhookEvent, webhook_payload: dict[str, str]
) -> None:
    """FileStore short-circuits before the impl, so nothing is raised or logged."""
    with (
        patch(
            "mlflow.webhooks.delivery._deliver_webhook_impl", side_effect=Exception("Test")
        ) as mock_impl,
        patch("mlflow.webhooks.delivery._logger") as mock_logger,
    ):
        deliver_webhook(event=webhook_event, payload=webhook_payload, store=file_store)
    # The impl never runs, so its exception never surfaces or gets logged.
    mock_impl.assert_not_called()
    mock_logger.error.assert_not_called()
def test_test_webhook_rejects_private_ip():
    """Test-firing a webhook at a non-public address fails validation."""
    event = WebhookEvent(WebhookEntity.MODEL_VERSION, WebhookAction.CREATED)
    hook = Webhook(
        webhook_id="wh-1",
        name="test",
        url="https://localhost/hook",
        events=[event],
        creation_timestamp=0,
        last_updated_timestamp=0,
    )
    outcome = send_test_webhook(hook)
    assert outcome.success is False
    assert "must not resolve to a non-public" in outcome.error_message
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/webhooks/test_delivery.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/labeling/databricks_utils.py | """
Databricks utilities for MLflow GenAI labeling functionality.
"""
# User-facing message raised when the optional `databricks-agents` dependency
# is not installed.
_ERROR_MSG = (
    "The `databricks-agents` package is required to use labeling functionality. "
    "Please install it with `pip install databricks-agents`."
)
def get_databricks_review_app(experiment_id: str | None = None):
    """Return the Databricks review app for the given experiment.

    Args:
        experiment_id: Experiment to look up; ``None`` uses the default.

    Raises:
        ImportError: If the optional ``databricks-agents`` package is missing.
    """
    try:
        from databricks.agents import review_app
    except ImportError as e:
        # Surface an actionable install hint instead of a bare import error.
        raise ImportError(_ERROR_MSG) from e

    return review_app.get_review_app(experiment_id)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/labeling/databricks_utils.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/labeling/stores.py | """
Labeling store functionality for MLflow GenAI.
This module provides store implementations to manage labeling sessions and schemas
"""
import warnings
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Callable
from mlflow.entities import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.label_schemas.label_schemas import LabelSchema
from mlflow.genai.labeling.databricks_utils import get_databricks_review_app
from mlflow.genai.labeling.labeling import LabelingSession
from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST
from mlflow.tracking._tracking_service import utils as tracking_utils
from mlflow.utils.plugins import get_entry_points
from mlflow.utils.uri import get_uri_scheme
if TYPE_CHECKING:
from databricks.agents.review_app.labeling import LabelingSession as _DatabricksLabelingSession
class UnsupportedLabelingStoreURIException(MlflowException):
    """Raised when a labeling store is requested for a URI with no registered backend."""
    def __init__(self, unsupported_uri: str, supported_uri_schemes: list[str]) -> None:
        error_message = (
            f"Labeling functionality is unavailable; got unsupported URI"
            f" '{unsupported_uri}' for labeling data storage. Supported URI schemes are:"
            f" {supported_uri_schemes}."
        )
        super().__init__(error_message)
        # Expose the known schemes so callers can inspect what is supported.
        self.supported_uri_schemes = supported_uri_schemes
class AbstractLabelingStore(metaclass=ABCMeta):
    """
    Abstract class defining the interface for labeling store implementations.
    This class defines the API interface for labeling operations that can be implemented
    by different backend stores (e.g., MLflow tracking store, Databricks API).
    """
    def __init__(self, tracking_uri: str | None = None) -> None:
        """
        Initialize the labeling store.
        Args:
            tracking_uri: The tracking URI for the store.
        """
        # Intentionally a no-op default; subclasses override if they need the URI.
    @abstractmethod
    def get_labeling_session(self, run_id: str) -> LabelingSession:
        """
        Get a labeling session by MLflow run ID.
        Args:
            run_id: The MLflow run ID of the labeling session.
        Returns:
            LabelingSession: The labeling session.
        Raises:
            mlflow.MlflowException: If labeling session is not found.
        """
    @abstractmethod
    def get_labeling_sessions(self, experiment_id: str | None = None) -> list[LabelingSession]:
        """
        Get all labeling sessions for an experiment.
        Args:
            experiment_id: The experiment ID. If None, uses the currently active experiment.
        Returns:
            list[LabelingSession]: List of labeling sessions.
        """
    @abstractmethod
    def create_labeling_session(
        self,
        name: str,
        *,
        assigned_users: list[str] | None = None,
        agent: str | None = None,
        label_schemas: list[str] | None = None,
        enable_multi_turn_chat: bool = False,
        custom_inputs: dict[str, Any] | None = None,
        experiment_id: str | None = None,
    ) -> LabelingSession:
        """
        Create a new labeling session.
        Args:
            name: The name of the labeling session.
            assigned_users: The users that will be assigned to label items in the session.
            agent: The agent to be used to generate responses for the items in the session.
            label_schemas: The label schemas to be used in the session.
            enable_multi_turn_chat: Whether to enable multi-turn chat labeling for the session.
            custom_inputs: Optional. Custom inputs to be used in the session.
            experiment_id: The experiment ID. If None, uses the currently active experiment.
        Returns:
            LabelingSession: The created labeling session.
        """
    @abstractmethod
    def delete_labeling_session(self, labeling_session: LabelingSession) -> None:
        """
        Delete a labeling session.
        Args:
            labeling_session: The labeling session to delete.
        """
    @abstractmethod
    def get_label_schema(self, name: str) -> LabelSchema:
        """
        Get a label schema by name.
        Args:
            name: The name of the label schema.
        Returns:
            LabelSchema: The label schema.
        Raises:
            mlflow.MlflowException: If label schema is not found.
        """
    @abstractmethod
    def create_label_schema(
        self,
        name: str,
        *,
        type: str,
        title: str,
        input: Any,
        instruction: str | None = None,
        enable_comment: bool = False,
        overwrite: bool = False,
    ) -> LabelSchema:
        """
        Create a new label schema.
        Args:
            name: The name of the label schema. Must be unique across the review app.
            type: The type of the label schema. Either "feedback" or "expectation".
            title: The title of the label schema shown to stakeholders.
            input: The input type of the label schema.
            instruction: Optional. The instruction shown to stakeholders.
            enable_comment: Optional. Whether to enable comments for the label schema.
            overwrite: Optional. Whether to overwrite the existing label schema with the same name.
        Returns:
            LabelSchema: The created label schema.
        """
    @abstractmethod
    def delete_label_schema(self, name: str) -> None:
        """
        Delete a label schema.
        Args:
            name: The name of the label schema to delete.
        """
    @abstractmethod
    def add_dataset_to_session(
        self,
        labeling_session: LabelingSession,
        dataset_name: str,
        record_ids: list[str] | None = None,
    ) -> LabelingSession:
        """
        Add a dataset to a labeling session.
        Args:
            labeling_session: The labeling session to add the dataset to.
            dataset_name: The name of the dataset.
            record_ids: Optional. The individual record ids to be added to the session.
        Returns:
            LabelingSession: The updated labeling session.
        """
    @abstractmethod
    def add_traces_to_session(
        self,
        labeling_session: LabelingSession,
        traces: list[Trace],
    ) -> LabelingSession:
        """
        Add traces to a labeling session.
        Args:
            labeling_session: The labeling session to add traces to.
            traces: List of Trace objects to add.
        Returns:
            LabelingSession: The updated labeling session.
        """
    @abstractmethod
    def sync_session_expectations(self, labeling_session: LabelingSession, dataset: str) -> None:
        """
        Sync traces and expectations from a labeling session to a dataset.
        Args:
            labeling_session: The labeling session to sync.
            dataset: The name of the dataset to sync traces and expectations to.
        """
    @abstractmethod
    def set_session_assigned_users(
        self, labeling_session: LabelingSession, assigned_users: list[str]
    ) -> LabelingSession:
        """
        Set the assigned users for a labeling session.
        Args:
            labeling_session: The labeling session to update.
            assigned_users: The list of users to assign to the session.
        Returns:
            LabelingSession: The updated labeling session.
        """
class LabelingStoreRegistry:
    """
    Scheme-based registry for labeling store implementations.
    This class allows the registration of a function or class to provide an
    implementation for a given scheme of `store_uri` through the `register`
    methods. Implementations declared though the entrypoints
    `mlflow.labeling_store` group can be automatically registered through the
    `register_entrypoints` method.
    When instantiating a store through the `get_store` method, the scheme of
    the store URI provided (or inferred from environment) will be used to
    select which implementation to instantiate, which will be called with same
    arguments passed to the `get_store` method.
    """
    def __init__(self) -> None:
        # Maps URI scheme -> builder callable returning a store instance.
        self._registry: dict[str, Callable[..., AbstractLabelingStore]] = {}
        self.group_name = "mlflow.labeling_store"
    def register(self, scheme: str, store_builder: Callable[..., AbstractLabelingStore]) -> None:
        """Register (or overwrite) the store builder used for a URI scheme."""
        self._registry[scheme] = store_builder
    def register_entrypoints(self) -> None:
        """Register labeling stores provided by other packages"""
        for entrypoint in get_entry_points(self.group_name):
            try:
                self.register(entrypoint.name, entrypoint.load())
            except (AttributeError, ImportError) as exc:
                # Best-effort: a broken plugin should not break registry setup.
                warnings.warn(
                    "Failure attempting to register labeling store for scheme "
                    f'"{entrypoint.name}": {exc}',
                    stacklevel=2,
                )
    def get_store_builder(self, store_uri: str) -> Callable[..., AbstractLabelingStore]:
        """Get a store from the registry based on the scheme of store_uri
        Args:
            store_uri: The store URI. If None, it will be inferred from the environment. This
                URI is used to select which labeling store implementation to instantiate
                and is passed to the constructor of the implementation.
        Returns:
            A function that returns an instance of
            ``mlflow.genai.labeling.stores.AbstractLabelingStore`` that fulfills the store
            URI requirements.
        Raises:
            UnsupportedLabelingStoreURIException: If no builder is registered for the scheme.
        """
        # "databricks" is accepted verbatim; any other URI is parsed for its scheme.
        scheme = store_uri if store_uri == "databricks" else get_uri_scheme(store_uri)
        try:
            store_builder = self._registry[scheme]
        except KeyError:
            # Suppress the KeyError context: the domain-specific exception tells
            # callers everything they need.
            raise UnsupportedLabelingStoreURIException(
                unsupported_uri=store_uri, supported_uri_schemes=list(self._registry.keys())
            ) from None
        return store_builder
    def get_store(self, tracking_uri: str | None = None) -> AbstractLabelingStore:
        """Build a labeling store for ``tracking_uri``.
        Args:
            tracking_uri: The tracking URI; resolved from the environment when None.
        Returns:
            An ``AbstractLabelingStore`` built by the registered builder for the URI's scheme.
        """
        resolved_store_uri = tracking_utils._resolve_tracking_uri(tracking_uri)
        builder = self.get_store_builder(resolved_store_uri)
        return builder(tracking_uri=resolved_store_uri)
class DatabricksLabelingStore(AbstractLabelingStore):
    """
    Databricks store that provides labeling functionality through the Databricks API.
    This store delegates all labeling operations to the Databricks agents API.
    """
    def _get_backend_session(
        self, labeling_session: LabelingSession
    ) -> "_DatabricksLabelingSession":
        """
        Get the backend session for a labeling session.
        Note: We have to list all sessions and match by ID because the Databricks
        agents API doesn't provide a direct get/fetch API for individual labeling sessions.
        Raises:
            mlflow.MlflowException: If the session no longer exists in the backend.
        """
        app = get_databricks_review_app(labeling_session.experiment_id)
        backend_sessions = app.get_labeling_sessions()
        backend_session = next(
            (
                session
                for session in backend_sessions
                if session.labeling_session_id == labeling_session.labeling_session_id
            ),
            None,
        )
        if backend_session is None:
            raise MlflowException(
                f"Labeling session {labeling_session.labeling_session_id} not found",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )
        return backend_session
    def _databricks_session_to_labeling_session(
        self, databricks_session: "_DatabricksLabelingSession"
    ) -> LabelingSession:
        """Create a LabelingSession from a Databricks backend session object."""
        return LabelingSession(
            name=databricks_session.name,
            assigned_users=databricks_session.assigned_users,
            agent=databricks_session.agent,
            label_schemas=databricks_session.label_schemas,
            labeling_session_id=databricks_session.labeling_session_id,
            mlflow_run_id=databricks_session.mlflow_run_id,
            review_app_id=databricks_session.review_app_id,
            experiment_id=databricks_session.experiment_id,
            url=databricks_session.url,
            enable_multi_turn_chat=databricks_session.enable_multi_turn_chat,
            custom_inputs=databricks_session.custom_inputs,
        )
    def get_labeling_session(self, run_id: str) -> LabelingSession:
        """Get a labeling session by MLflow run ID.
        Args:
            run_id: The MLflow run ID of the labeling session.
        Returns:
            LabelingSession: The matching labeling session.
        Raises:
            mlflow.MlflowException: If no session with ``run_id`` exists.
        """
        labeling_sessions = self.get_labeling_sessions()
        labeling_session = next(
            (
                labeling_session
                for labeling_session in labeling_sessions
                if labeling_session.mlflow_run_id == run_id
            ),
            None,
        )
        if labeling_session is None:
            # Carry RESOURCE_DOES_NOT_EXIST for consistency with _get_backend_session.
            raise MlflowException(
                f"Labeling session with run_id `{run_id}` not found",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )
        return labeling_session
    def get_labeling_sessions(self, experiment_id: str | None = None) -> list[LabelingSession]:
        """Get all labeling sessions for an experiment."""
        app = get_databricks_review_app(experiment_id)
        sessions = app.get_labeling_sessions()
        return [self._databricks_session_to_labeling_session(session) for session in sessions]
    def create_labeling_session(
        self,
        name: str,
        *,
        assigned_users: list[str] | None = None,
        agent: str | None = None,
        label_schemas: list[str] | None = None,
        enable_multi_turn_chat: bool = False,
        custom_inputs: dict[str, Any] | None = None,
        experiment_id: str | None = None,
    ) -> LabelingSession:
        """Create a new labeling session."""
        app = get_databricks_review_app(experiment_id)
        backend_session = app.create_labeling_session(
            name=name,
            assigned_users=assigned_users or [],
            agent=agent,
            label_schemas=label_schemas or [],
            enable_multi_turn_chat=enable_multi_turn_chat,
            custom_inputs=custom_inputs,
        )
        return self._databricks_session_to_labeling_session(backend_session)
    def delete_labeling_session(self, labeling_session: LabelingSession) -> None:
        """Delete a labeling session."""
        backend_session = self._get_backend_session(labeling_session)
        app = get_databricks_review_app(labeling_session.experiment_id)
        app.delete_labeling_session(backend_session)
    def get_label_schema(self, name: str) -> LabelSchema:
        """Get a label schema by name.
        Args:
            name: The name of the label schema.
        Returns:
            LabelSchema: The label schema.
        Raises:
            mlflow.MlflowException: If no schema named ``name`` exists.
        """
        app = get_databricks_review_app()
        label_schema = next(
            (label_schema for label_schema in app.label_schemas if label_schema.name == name),
            None,
        )
        if label_schema is None:
            # Carry RESOURCE_DOES_NOT_EXIST for consistency with the other lookups.
            raise MlflowException(
                f"Label schema with name `{name}` not found",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )
        return LabelSchema._from_databricks_label_schema(label_schema)
    def create_label_schema(
        self,
        name: str,
        *,
        type: str,
        title: str,
        input: Any,
        instruction: str | None = None,
        enable_comment: bool = False,
        overwrite: bool = False,
    ) -> LabelSchema:
        """Create a new label schema."""
        app = get_databricks_review_app()
        return app.create_label_schema(
            name=name,
            type=type,
            title=title,
            input=input._to_databricks_input(),
            instruction=instruction,
            enable_comment=enable_comment,
            overwrite=overwrite,
        )
    def delete_label_schema(self, name: str) -> None:
        """Delete a label schema."""
        app = get_databricks_review_app()
        app.delete_label_schema(name)
    def add_dataset_to_session(
        self,
        labeling_session: LabelingSession,
        dataset_name: str,
        record_ids: list[str] | None = None,
    ) -> LabelingSession:
        """Add a dataset to a labeling session."""
        backend_session = self._get_backend_session(labeling_session)
        updated_session = backend_session.add_dataset(dataset_name, record_ids)
        return self._databricks_session_to_labeling_session(updated_session)
    def add_traces_to_session(
        self,
        labeling_session: LabelingSession,
        traces: list[Trace],
    ) -> LabelingSession:
        """Add traces to a labeling session."""
        backend_session = self._get_backend_session(labeling_session)
        updated_session = backend_session.add_traces(traces)
        return self._databricks_session_to_labeling_session(updated_session)
    def sync_session_expectations(self, labeling_session: LabelingSession, dataset: str) -> None:
        """Sync traces and expectations from a labeling session to a dataset."""
        backend_session = self._get_backend_session(labeling_session)
        backend_session.sync_expectations(dataset)
    def set_session_assigned_users(
        self, labeling_session: LabelingSession, assigned_users: list[str]
    ) -> LabelingSession:
        """Set the assigned users for a labeling session."""
        backend_session = self._get_backend_session(labeling_session)
        updated_session = backend_session.set_assigned_users(assigned_users)
        return self._databricks_session_to_labeling_session(updated_session)
# Create the global labeling store registry instance
_labeling_store_registry = LabelingStoreRegistry()
def _register_labeling_stores() -> None:
    """Register the default labeling store implementations"""
    # Register Databricks store
    _labeling_store_registry.register("databricks", DatabricksLabelingStore)
    # Register entrypoints for custom implementations. These run last, so a
    # plugin registering the same scheme overwrites the built-in entry.
    _labeling_store_registry.register_entrypoints()
# Register the default stores
_register_labeling_stores()
def _get_labeling_store(tracking_uri: str | None = None) -> AbstractLabelingStore:
    """Get a labeling store from the registry"""
    return _labeling_store_registry.get_store(tracking_uri)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/labeling/stores.py",
"license": "Apache License 2.0",
"lines": 409,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/tools/get_span_performance_and_timing_report.py | """
Get span timing report tool for MLflow traces.
This tool generates a timing report showing span latencies, execution order,
and concurrency patterns for performance analysis.
"""
from collections import defaultdict
from dataclasses import dataclass
from mlflow.entities.span import Span
from mlflow.entities.trace import Trace
from mlflow.entities.trace_info import TraceInfo
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.types.llm import FunctionToolDefinition, ToolDefinition, ToolParamsSchema
from mlflow.utils.annotations import experimental
@dataclass
class SpanTimingData:
    """Timing data for a single span."""
    # Identifier of the span this timing entry describes.
    span_id: str
    # Span name as recorded in the trace.
    name: str
    # Span type label, or "UNKNOWN" when the span carries none.
    span_type: str
    # Wall-clock duration from span start to end, in seconds.
    total_duration_s: float
    # Duration excluding time covered by child spans, in seconds.
    self_duration_s: float
    # Portion of the total duration attributed to child spans, in seconds.
    child_duration_s: float
    # Sequential display number ("s1", "s2", ...) assigned during tree traversal.
    span_number: str
    # Display number of the immediate parent, or None for root spans.
    parent_number: str | None
    # Display numbers of all ancestors, ordered from root to parent.
    ancestors: list[str]
    # Depth in the span tree (0 for root spans).
    depth: int
@dataclass
class ConcurrentPair:
    """Information about concurrent span execution."""
    # Display numbers of the two spans; left as empty strings where pairs are
    # constructed in _find_concurrent_operations.
    span1_num: str
    span2_num: str
    # Truncated display names of the two overlapping spans.
    span1_name: str
    span2_name: str
    # Length of the time overlap between the two spans, in seconds.
    overlap_s: float
@experimental(version="3.5.0")
class GetSpanPerformanceAndTimingReportTool(JudgeTool):
    """
    A tool that generates a span timing report for a trace.
    The report includes span timing hierarchy, summary statistics,
    longest-running spans, and concurrent operations detection.
    """
    # Display names longer than this are truncated with a trailing ellipsis.
    MAX_NAME_LENGTH = 30
    # Sibling overlaps shorter than this many seconds are not reported as concurrent.
    MIN_OVERLAP_THRESHOLD_S = 0.01
    # Number of spans listed in the "top spans by self duration" section.
    TOP_SPANS_COUNT = 10
    # Hard cap on reported concurrent pairs to keep the report bounded.
    MAX_CONCURRENT_PAIRS = 20
    @property
    def name(self) -> str:
        """Return the name of this tool.
        Returns:
            The tool name constant for the span timing report tool.
        """
        # The identifier comes from the shared ToolNames constants.
        return ToolNames.GET_SPAN_PERFORMANCE_AND_TIMING_REPORT
    def get_definition(self) -> ToolDefinition:
        """Get the tool definition for LiteLLM/OpenAI function calling.
        Returns:
            ToolDefinition object containing the tool specification.
        """
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.GET_SPAN_PERFORMANCE_AND_TIMING_REPORT,
                description=(
                    "Generate a comprehensive span timing report for the trace, showing "
                    "latencies, execution order, hierarchy, duration statistics, longest "
                    "spans, and concurrent operations. Useful for analyzing system "
                    "performance and identifying bottlenecks."
                ),
                # No input parameters: the tool operates on the trace passed to invoke().
                parameters=ToolParamsSchema(
                    type="object",
                    properties={},
                    required=[],
                ),
            ),
            type="function",
        )
def invoke(self, trace: Trace) -> str:
"""Generate span timing report for the trace.
Args:
trace: The MLflow trace object to analyze.
Returns:
Formatted timing report as a string.
"""
if not trace or not trace.data or not trace.data.spans:
return "No spans found in trace"
spans = trace.data.spans
trace_info = trace.info
timing_data = self._calculate_timing_data(spans)
concurrent_pairs = self._find_concurrent_operations(spans)
type_summary = self._calculate_type_summary(spans)
return self._format_report(
trace_info=trace_info,
timing_data=timing_data,
concurrent_pairs=concurrent_pairs,
type_summary=type_summary,
)
def _calculate_timing_data(self, spans: list[Span]) -> dict[str, SpanTimingData]:
"""Calculate timing data for all spans.
Args:
spans: List of spans from the trace.
Returns:
Dictionary mapping span IDs to their timing data.
"""
children_by_parent = defaultdict(list)
for span in spans:
children_by_parent[span.parent_id].append(span)
for parent_spans in children_by_parent.values():
parent_spans.sort(key=lambda s: s.start_time_ns)
self_durations = self._calculate_self_durations(spans, children_by_parent)
timing_data = {}
span_counter = [0]
def process_span_tree(
span_id: str | None, ancestors: list[str] | None = None, depth: int = 0
) -> None:
"""Recursively traverse and process the span tree.
Args:
span_id: ID of the current span being processed.
ancestors: List of ancestor span numbers for hierarchy tracking.
depth: Current depth in the span tree.
"""
ancestors = ancestors or []
for span in children_by_parent.get(span_id, []):
span_counter[0] += 1
span_num = f"s{span_counter[0]}"
total_dur_s = (span.end_time_ns - span.start_time_ns) / 1_000_000_000
self_dur_s = self_durations[span.span_id]
child_dur_s = total_dur_s - self_dur_s
parent_num = (
timing_data.get(
span.parent_id,
SpanTimingData(
span_id="",
name="",
span_type="",
total_duration_s=0,
self_duration_s=0,
child_duration_s=0,
span_number="",
parent_number=None,
ancestors=[],
depth=0,
),
).span_number
or None
)
timing_data[span.span_id] = SpanTimingData(
span_id=span.span_id,
name=span.name,
span_type=span.span_type or "UNKNOWN",
total_duration_s=total_dur_s,
self_duration_s=self_dur_s,
child_duration_s=child_dur_s,
span_number=span_num,
parent_number=parent_num,
ancestors=ancestors.copy(),
depth=depth,
)
process_span_tree(span.span_id, ancestors + [span_num], depth + 1)
process_span_tree(None)
return timing_data
def _calculate_self_durations(
self, spans: list[Span], children_by_parent: dict[str | None, list[Span]]
) -> dict[str, float]:
"""Calculate self duration for each span (total minus children).
Args:
spans: List of all spans in the trace.
children_by_parent: Dictionary mapping parent IDs to their child spans.
Returns:
Dictionary mapping span IDs to their self durations in seconds.
"""
self_durations = {}
for span in spans:
total_dur_ns = span.end_time_ns - span.start_time_ns
children = children_by_parent.get(span.span_id, [])
if not children:
self_durations[span.span_id] = total_dur_ns / 1_000_000_000
continue
intervals = [(child.start_time_ns, child.end_time_ns) for child in children]
merged_intervals = self._merge_intervals(intervals)
children_dur_ns = sum(end - start for start, end in merged_intervals)
self_durations[span.span_id] = (total_dur_ns - children_dur_ns) / 1_000_000_000
return self_durations
@staticmethod
def _merge_intervals(intervals: list[tuple[int, int]]) -> list[tuple[int, int]]:
"""Merge overlapping time intervals.
Args:
intervals: List of (start, end) time intervals in nanoseconds.
Returns:
List of merged non-overlapping intervals.
"""
if not intervals:
return []
intervals.sort()
merged = [intervals[0]]
for start, end in intervals[1:]:
if start <= merged[-1][1]:
merged[-1] = (merged[-1][0], max(merged[-1][1], end))
else:
merged.append((start, end))
return merged
def _find_concurrent_operations(self, spans: list[Span]) -> list[ConcurrentPair]:
"""Find spans that execute concurrently.
Args:
spans: List of all spans to analyze for concurrency.
Returns:
List of concurrent span pairs with overlap information.
"""
concurrent_pairs = []
for i, span1 in enumerate(spans):
for span2 in spans[i + 1 :]:
if span1.parent_id != span2.parent_id:
continue
overlap_start = max(span1.start_time_ns, span2.start_time_ns)
overlap_end = min(span1.end_time_ns, span2.end_time_ns)
if overlap_start >= overlap_end:
continue
overlap_s = (overlap_end - overlap_start) / 1_000_000_000
if overlap_s > self.MIN_OVERLAP_THRESHOLD_S:
concurrent_pairs.append(
ConcurrentPair(
span1_num="",
span2_num="",
span1_name=self._truncate_name(span1.name),
span2_name=self._truncate_name(span2.name),
overlap_s=overlap_s,
)
)
if len(concurrent_pairs) >= self.MAX_CONCURRENT_PAIRS:
return concurrent_pairs
return concurrent_pairs
def _calculate_type_summary(self, spans: list[Span]) -> dict[str, tuple[int, float]]:
"""Calculate summary statistics by span type.
Args:
spans: List of spans to summarize.
Returns:
Dictionary mapping span types to (count, total_duration) tuples.
"""
type_stats = defaultdict(lambda: [0, 0.0])
for span in spans:
span_type = span.span_type or "UNKNOWN"
duration_s = (span.end_time_ns - span.start_time_ns) / 1_000_000_000
type_stats[span_type][0] += 1
type_stats[span_type][1] += duration_s
return {k: tuple(v) for k, v in type_stats.items()}
def _truncate_name(self, name: str) -> str:
"""Truncate long names for display.
Args:
name: The span name to potentially truncate.
Returns:
Truncated name if it exceeds MAX_NAME_LENGTH, otherwise original name.
"""
if len(name) <= self.MAX_NAME_LENGTH:
return name
return name[: self.MAX_NAME_LENGTH - 3] + "..."
def _format_report(
self,
trace_info: TraceInfo,
timing_data: dict[str, SpanTimingData],
concurrent_pairs: list[ConcurrentPair],
type_summary: dict[str, tuple[int, float]],
) -> str:
"""Format the complete timing report.
Args:
trace_info: Trace metadata information.
timing_data: Calculated timing data for all spans.
concurrent_pairs: List of concurrent span pairs.
type_summary: Summary statistics by span type.
Returns:
Formatted report as a string.
"""
lines = []
self._add_header(lines, trace_info, len(timing_data))
self._add_column_definitions(lines)
self._add_span_table(lines, timing_data)
self._add_type_summary(lines, type_summary)
self._add_top_spans(lines, timing_data)
self._add_concurrent_operations(lines, concurrent_pairs, timing_data)
return "\n".join(lines)
    def _add_header(self, lines: list[str], trace_info: TraceInfo, span_count: int) -> None:
        """Add report header.
        Args:
            lines: List to append header lines to.
            trace_info: Trace metadata for header information.
            span_count: Total number of spans in the trace.
        """
        # NOTE(review): execution_duration is divided by 1000 to print seconds, so it
        # is presumably stored in milliseconds — confirm against TraceInfo's contract.
        lines.extend(
            [
                f"SPAN TIMING REPORT FOR TRACE: {trace_info.trace_id}",
                f"Total Duration: {trace_info.execution_duration / 1000:.2f}s",
                f"Total Spans: {span_count}",
                "",
            ]
        )
    def _add_column_definitions(self, lines: list[str]) -> None:
        """Add column definitions section.
        Args:
            lines: List to append column definition lines to.
        """
        # Static legend text emitted verbatim into the report output.
        lines.extend(
            [
                "COLUMN DEFINITIONS:",
                "  self_dur: Time spent in this span excluding its children (actual work)",
                "  total_dur: Total time from span start to end (includes waiting for children)",
                "  child_dur: Time spent waiting for child spans to complete",
                "  parent: The immediate parent span number",
                "  ancestors: Complete chain from root to parent",
                "",
            ]
        )
    def _add_span_table(self, lines: list[str], timing_data: dict[str, SpanTimingData]) -> None:
        """Add the main span timing table.
        Args:
            lines: List to append table lines to.
            timing_data: Timing data for all spans to display.
        """
        lines.extend(
            [
                "SPAN TABLE:",
                "-" * 200,
                f"{'span_num':<8} {'span_id':<20} {'name':<30} "
                f"{'type':<12} {'self_dur':>9} {'total_dur':>10} {'child_dur':>10} "
                f"{'parent':<8} {'ancestors':<60}",
                "-" * 200,
            ]
        )
        # Order rows by the numeric part of the "sN" span number so the table
        # follows traversal order; entries lacking a number sort first.
        sorted_data = sorted(
            timing_data.values(), key=lambda x: int(x.span_number[1:]) if x.span_number else 0
        )
        for data in sorted_data:
            # Defensive skip for entries without an assigned span number.
            if not data.span_number:
                continue
            name = self._truncate_name(data.name)
            parent = data.parent_number or "-"
            ancestors_str = "→".join(data.ancestors) if data.ancestors else "root"
            lines.append(
                f"{data.span_number:<8} {data.span_id:<20} {name:<30} "
                f"{data.span_type:<12} {data.self_duration_s:>9.3f} "
                f"{data.total_duration_s:>10.3f} {data.child_duration_s:>10.3f} "
                f"{parent:<8} {ancestors_str:<60}"
            )
    def _add_type_summary(
        self, lines: list[str], type_summary: dict[str, tuple[int, float]]
    ) -> None:
        """Add summary by span type.
        Args:
            lines: List to append summary lines to.
            type_summary: Summary statistics organized by span type.
        """
        lines.extend(
            [
                "",
                "SUMMARY BY TYPE:",
                "-" * 80,
                f"{'type':<20} {'count':>8} {'total_dur':>12} {'avg_dur':>12}",
                "-" * 80,
            ]
        )
        # Types are listed alphabetically; averages are derived per type here.
        for span_type in sorted(type_summary.keys()):
            count, total_dur = type_summary[span_type]
            avg_dur = total_dur / count
            lines.append(f"{span_type:<20} {count:>8} {total_dur:>12.3f}s {avg_dur:>12.3f}s")
    def _add_top_spans(self, lines: list[str], timing_data: dict[str, SpanTimingData]) -> None:
        """Add top spans by self duration.
        Args:
            lines: List to append top spans section to.
            timing_data: Timing data for all spans to rank.
        """
        lines.extend(
            [
                "",
                "TOP 10 SPANS BY SELF DURATION (actual work, not including children):",
                "-" * 110,
                f"{'rank':<6} {'span_num':<10} {'span_id':<20} {'name':<30} "
                f"{'type':<12} {'self_dur':>12}",
                "-" * 110,
            ]
        )
        # Rank by self duration (descending), capped at TOP_SPANS_COUNT entries.
        sorted_spans = sorted(timing_data.values(), key=lambda x: x.self_duration_s, reverse=True)[
            : self.TOP_SPANS_COUNT
        ]
        for i, data in enumerate(sorted_spans):
            name = self._truncate_name(data.name)
            lines.append(
                f"{i + 1:<6} {data.span_number:<10} {data.span_id:<20} {name:<30} "
                f"{data.span_type:<12} {data.self_duration_s:>12.3f}s"
            )
    def _add_concurrent_operations(
        self,
        lines: list[str],
        concurrent_pairs: list[ConcurrentPair],
        timing_data: dict[str, SpanTimingData],
    ) -> None:
        """Add concurrent operations section.
        Args:
            lines: List to append concurrent operations section to.
            concurrent_pairs: List of detected concurrent span pairs.
            timing_data: Timing data (currently unused but kept for consistency).
        """
        lines.extend(
            [
                "",
                "CONCURRENT OPERATIONS:",
                "-" * 100,
            ]
        )
        if not concurrent_pairs:
            lines.append("No significant concurrent operations detected.")
            return
        lines.extend(
            [
                f"{'span1':<10} {'span2':<10} {'name1':<30} {'name2':<30} {'overlap':>10}",
                "-" * 100,
            ]
        )
        # NOTE(review): span1_num/span2_num are constructed as empty strings in
        # _find_concurrent_operations, so these columns currently print blank.
        lines.extend(
            f"{pair.span1_num:<10} {pair.span2_num:<10} "
            f"{pair.span1_name:<30} {pair.span2_name:<30} "
            f"{pair.overlap_s:>10.3f}s"
            for pair in concurrent_pairs
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/get_span_performance_and_timing_report.py",
"license": "Apache License 2.0",
"lines": 419,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/judges/test_judge_tool_get_span_performance_and_timing_report.py | from mlflow.entities.span import Span
from mlflow.entities.trace import Trace
from mlflow.entities.trace_data import TraceData
from mlflow.entities.trace_info import TraceInfo
from mlflow.entities.trace_location import TraceLocation
from mlflow.entities.trace_state import TraceState
from mlflow.genai.judges.tools.get_span_performance_and_timing_report import (
GetSpanPerformanceAndTimingReportTool,
)
from mlflow.types.llm import ToolDefinition
from tests.tracing.helper import create_mock_otel_span
def test_get_span_timing_report_tool_name():
    """The tool should expose its registered dispatch name."""
    report_tool = GetSpanPerformanceAndTimingReportTool()
    assert report_tool.name == "get_span_performance_and_timing_report"
def test_get_span_timing_report_tool_get_definition():
    """The definition should describe a function tool taking no required parameters."""
    tool_def = GetSpanPerformanceAndTimingReportTool().get_definition()
    assert isinstance(tool_def, ToolDefinition)
    assert tool_def.type == "function"
    assert tool_def.function.name == "get_span_performance_and_timing_report"
    assert "Generate a comprehensive span timing report" in tool_def.function.description
    assert tool_def.function.parameters.type == "object"
    assert tool_def.function.parameters.required == []
def test_get_span_timing_report_tool_invoke_success():
    """A two-span trace should yield a full report containing every section."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root-span",
        start_time=1000000000000,
        end_time=1000001000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    child_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="child-span",
        start_time=1000000200000,
        end_time=1000000800000,
        parent_id=100,
    )
    child_span = Span(child_otel_span)
    trace_data = TraceData(spans=[root_span, child_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=1000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert isinstance(result, str)
    assert "SPAN TIMING REPORT FOR TRACE: trace-123" in result
    assert "Total Duration: 1.00s" in result
    assert "Total Spans: 2" in result
    assert "SPAN TABLE:" in result
    assert "SUMMARY BY TYPE:" in result
    assert "TOP 10 SPANS BY SELF DURATION" in result
    assert "CONCURRENT OPERATIONS:" in result
    assert "root-span" in result
    assert "child-span" in result
def test_get_span_timing_report_tool_invoke_no_spans():
    """A trace containing zero spans should produce the stub message."""
    empty_trace = Trace(
        info=TraceInfo(
            trace_id="trace-123",
            trace_location=TraceLocation.from_experiment_id("0"),
            request_time=1234567890,
            state=TraceState.OK,
            execution_duration=0,
        ),
        data=TraceData(spans=[]),
    )
    report = GetSpanPerformanceAndTimingReportTool().invoke(empty_trace)
    assert report == "No spans found in trace"
def test_get_span_timing_report_tool_invoke_none_trace():
    """Passing None instead of a trace should return the stub message."""
    assert GetSpanPerformanceAndTimingReportTool().invoke(None) == "No spans found in trace"
def test_get_span_timing_report_tool_invoke_complex_hierarchy():
    """A three-level span tree should be reported with sequential span numbers."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root-span",
        start_time=1000000000000,
        end_time=1000002000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    child1_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="child1-span",
        start_time=1000000200000,
        end_time=1000001000000,
        parent_id=100,
    )
    child1_span = Span(child1_otel_span)
    child2_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=102,
        name="child2-span",
        start_time=1000001200000,
        end_time=1000001800000,
        parent_id=100,
    )
    child2_span = Span(child2_otel_span)
    grandchild_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=103,
        name="grandchild-span",
        start_time=1000000400000,
        end_time=1000000600000,
        parent_id=101,
    )
    grandchild_span = Span(grandchild_otel_span)
    trace_data = TraceData(spans=[root_span, child1_span, child2_span, grandchild_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=2000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert isinstance(result, str)
    assert "Total Spans: 4" in result
    assert "root-span" in result
    assert "child1-span" in result
    assert "child2-span" in result
    assert "grandchild-span" in result
    assert "s1" in result
    assert "s2" in result
def test_get_span_timing_report_tool_invoke_concurrent_operations():
    """Overlapping sibling spans should show up in the CONCURRENT OPERATIONS section."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root-span",
        start_time=1000000000000,
        end_time=1000002000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    # The two children overlap in time, so they run concurrently.
    child1_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="concurrent-child1",
        start_time=1000000200000,
        end_time=1000001200000,
        parent_id=100,
    )
    child1_span = Span(child1_otel_span)
    child2_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=102,
        name="concurrent-child2",
        start_time=1000000600000,
        end_time=1000001800000,
        parent_id=100,
    )
    child2_span = Span(child2_otel_span)
    trace_data = TraceData(spans=[root_span, child1_span, child2_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=2000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert isinstance(result, str)
    assert "CONCURRENT OPERATIONS:" in result
    assert "concurrent-child1" in result
    assert "concurrent-child2" in result
def test_get_span_timing_report_tool_invoke_span_types():
    """Spans should be aggregated by their span_type attribute in the summary."""
    tool = GetSpanPerformanceAndTimingReportTool()
    llm_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="llm-call",
        start_time=1000000000000,
        end_time=1000001000000,
        parent_id=None,
    )
    llm_otel_span.set_attribute("span_type", "LLM")
    llm_span = Span(llm_otel_span)
    retrieval_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="retrieval-call",
        start_time=1000001200000,
        end_time=1000001800000,
        parent_id=None,
    )
    retrieval_otel_span.set_attribute("span_type", "RETRIEVAL")
    retrieval_span = Span(retrieval_otel_span)
    trace_data = TraceData(spans=[llm_span, retrieval_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=1800,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert isinstance(result, str)
    assert "SUMMARY BY TYPE:" in result
    # Walk the summary section and collect the type column values.
    lines = result.split("\n")
    summary_section = False
    found_types = set()
    for line in lines:
        if "SUMMARY BY TYPE:" in line:
            summary_section = True
            continue
        if summary_section and line.strip() and not line.startswith("-"):
            parts = line.split()
            if len(parts) > 0:
                span_type = parts[0]
                if span_type not in ["type", ""]:
                    found_types.add(span_type)
    assert len(found_types) > 0
def test_get_span_timing_report_tool_invoke_top_spans_ranking():
    """The slowest span should be ranked first in the top-spans section."""
    tool = GetSpanPerformanceAndTimingReportTool()
    quick_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="quick-span",
        start_time=1000000000000,
        end_time=1000000100000,
        parent_id=None,
    )
    quick_span = Span(quick_otel_span)
    slow_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="slow-span",
        start_time=1000000200000,
        end_time=1000001200000,
        parent_id=None,
    )
    slow_span = Span(slow_otel_span)
    trace_data = TraceData(spans=[quick_span, slow_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=1300,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert isinstance(result, str)
    assert "TOP 10 SPANS BY SELF DURATION" in result
    assert "quick-span" in result
    assert "slow-span" in result
    # The slow span must appear with rank 1 in the ranking section.
    lines = result.split("\n")
    top_section = False
    for line in lines:
        if "TOP 10 SPANS BY SELF DURATION" in line:
            top_section = True
            continue
        if top_section and "slow-span" in line:
            assert line.strip().startswith("1")
            break
def test_get_span_timing_report_tool_invoke_long_span_names():
    """Span names longer than the limit should be truncated with an ellipsis."""
    tool = GetSpanPerformanceAndTimingReportTool()
    long_name_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="this_is_a_very_long_span_name_that_should_be_truncated_in_the_report",
        start_time=1000000000000,
        end_time=1000001000000,
        parent_id=None,
    )
    long_name_span = Span(long_name_otel_span)
    trace_data = TraceData(spans=[long_name_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=1000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert isinstance(result, str)
    # 27 characters + "..." = 30-char display name
    assert "this_is_a_very_long_span_na..." in result
def test_get_span_timing_report_tool_self_duration_calculation():
    """Self duration should exclude the time covered by (overlapping) children."""
    tool = GetSpanPerformanceAndTimingReportTool()
    parent_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="parent",
        start_time=1000000000000000000,
        end_time=1000001000000000000,
        parent_id=None,
    )
    parent_span = Span(parent_otel_span)
    # child1 (0.1s-0.3s) and child2 (0.25s-0.45s) overlap each other.
    child1_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="child1",
        start_time=1000000100000000000,
        end_time=1000000300000000000,
        parent_id=100,
    )
    child1_span = Span(child1_otel_span)
    child2_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=102,
        name="child2",
        start_time=1000000250000000000,
        end_time=1000000450000000000,
        parent_id=100,
    )
    child2_span = Span(child2_otel_span)
    trace_data = TraceData(spans=[parent_span, child1_span, child2_span])
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=1000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    lines = result.split("\n")
    for line in lines:
        if "parent" in line and "s1" in line:
            parts = line.split()
            for i, part in enumerate(parts):
                if "." in part and i > 3:
                    self_dur = float(parts[4])
                    # Children cover 0.1s-0.45s of the 1s parent -> ~650ms self time.
                    assert 640 < self_dur < 660
                    break
            break
def test_get_span_timing_report_tool_empty_trace_data():
    """A trace with neither info nor data should yield the no-spans message."""
    tool = GetSpanPerformanceAndTimingReportTool()
    trace = Trace(info=None, data=None)
    result = tool.invoke(trace)
    assert result == "No spans found in trace"
def test_get_span_timing_report_tool_concurrent_pairs_limit():
    """With many overlapping spans, the report must cap the listed concurrent pairs."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root",
        start_time=1000000000000,
        end_time=1000010000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    # 30 heavily-overlapping children -> far more than MAX_CONCURRENT_PAIRS pairs.
    concurrent_spans = []
    for i in range(30):
        otel_span = create_mock_otel_span(
            trace_id=12345,
            span_id=101 + i,
            name=f"concurrent-{i}",
            start_time=1000001000000 + i * 100000,
            end_time=1000008000000 + i * 100000,
            parent_id=100,
        )
        concurrent_spans.append(Span(otel_span))
    trace_data = TraceData(spans=[root_span] + concurrent_spans)
    trace_info = TraceInfo(
        trace_id="trace-123",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=10000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    # Each listed pair mentions two names; allow slack for other report sections.
    concurrent_count = result.count("concurrent-")
    assert concurrent_count <= 2 * tool.MAX_CONCURRENT_PAIRS + 10
def test_truncate_name_method_directly():
    """_truncate_name keeps names <= 30 chars intact and ellipsizes longer ones."""
    tool = GetSpanPerformanceAndTimingReportTool()
    test_cases = [
        ("short", "short"),
        ("a" * 30, "a" * 30),  # exactly at the limit: unchanged
        ("a" * 31, "a" * 27 + "..."),  # one over: truncated to 27 + "..."
        ("a" * 50, "a" * 27 + "..."),
        ("", ""),
        ("123456789012345678901234567890", "123456789012345678901234567890"),
        ("1234567890123456789012345678901", "123456789012345678901234567..."),
    ]
    for input_name, expected in test_cases:
        result = tool._truncate_name(input_name)
        assert result == expected
        assert len(result) <= 30
def test_truncation_in_multiple_report_sections():
    """Only names over 30 chars should be truncated anywhere in the report."""
    tool = GetSpanPerformanceAndTimingReportTool()
    spans = []
    test_names = [
        ("short", False),
        ("a" * 30, False),  # exactly at limit: not truncated
        ("a" * 31, True),
        ("this_is_a_very_long_span_name_that_should_definitely_be_truncated", True),
    ]
    for i, (name, should_truncate) in enumerate(test_names):
        otel_span = create_mock_otel_span(
            trace_id=12345,
            span_id=100 + i,
            name=name,
            start_time=1000000000000 + i * 1000000000,
            end_time=1000000000000 + (i + 1) * 1000000000,
            parent_id=None,
        )
        spans.append(Span(otel_span))
    trace_data = TraceData(spans=spans)
    trace_info = TraceInfo(
        trace_id="test-truncation",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=len(test_names) * 1000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    for name, should_truncate in test_names:
        if should_truncate:
            truncated = name[:27] + "..." if len(name) > 30 else name
            assert truncated in result
            if len(name) > 30:
                assert name not in result
        else:
            assert name in result
def test_truncation_edge_cases():
    """_truncate_name should handle unicode, punctuation, spaces, and mixed text."""
    tool = GetSpanPerformanceAndTimingReportTool()
    # Unicode: truncation operates on characters, not bytes.
    unicode_name = "测试" + "很" * 30
    result = tool._truncate_name(unicode_name)
    assert len(result) <= 30
    if len(unicode_name) > 30:
        assert result.endswith("...")
    # Punctuation-only name (50 chars).
    special = "!@#$%^&*()" * 5
    result = tool._truncate_name(special)
    assert result == special[:27] + "..."
    assert len(result) == 30
    # Name containing spaces (50 chars).
    spaces = "word " * 10
    result = tool._truncate_name(spaces)
    if len(spaces) > 30:
        assert result == spaces[:27] + "..."
        assert len(result) == 30
    # Mixed alphanumeric/underscore name (45 chars).
    mixed = "test_123_" * 5
    result = tool._truncate_name(mixed)
    assert result == mixed[:27] + "..."
    assert len(result) == 30
def test_truncation_preserves_table_formatting():
    """Long names must be truncated so the span table columns stay aligned."""
    tool = GetSpanPerformanceAndTimingReportTool()
    spans = []
    for i in range(5):
        # Names of increasing length, all well beyond the 30-char limit.
        name = f"span_{i}_" + "x" * (40 + i * 10)
        otel_span = create_mock_otel_span(
            trace_id=12345,
            span_id=100 + i,
            name=name,
            start_time=1000000000000 + i * 1000000000,
            end_time=1000000000000 + (i + 1) * 1000000000,
            parent_id=None,
        )
        spans.append(Span(otel_span))
    trace_data = TraceData(spans=spans)
    trace_info = TraceInfo(
        trace_id="test-format",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=5000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    # Collect the data rows of the span table.
    lines = result.split("\n")
    table_lines = []
    in_table = False
    for line in lines:
        if "SPAN TABLE:" in line:
            in_table = True
        elif in_table and line.startswith("-" * 200):
            in_table = False
        elif in_table and line.strip() and not line.startswith("-"):
            table_lines.append(line)
    for line in table_lines[1:]:
        parts = line.split()
        if len(parts) >= 3:
            assert "..." in line or all(len(p) <= 30 for p in parts[2:3])
def test_max_name_length_constant_usage():
    """_truncate_name must honor the MAX_NAME_LENGTH class constant."""
    tool = GetSpanPerformanceAndTimingReportTool()
    assert hasattr(tool, "MAX_NAME_LENGTH")
    assert tool.MAX_NAME_LENGTH == 30
    name = "a" * (tool.MAX_NAME_LENGTH + 1)
    truncated = tool._truncate_name(name)
    assert len(truncated) == tool.MAX_NAME_LENGTH
    assert truncated == "a" * (tool.MAX_NAME_LENGTH - 3) + "..."
def test_truncation_in_concurrent_spans_section():
    """Long names in the concurrent-operations section should also be truncated."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root",
        start_time=1000000000000000000,
        end_time=1000010000000000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    long_name1 = "very_long_concurrent_span_name_number_one_that_exceeds_limit"
    long_name2 = "another_extremely_long_concurrent_span_name_number_two_exceeds"
    span1_otel = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name=long_name1,
        start_time=1000001000000000000,
        end_time=1000005000000000000,
        parent_id=100,
    )
    span1 = Span(span1_otel)
    span2_otel = create_mock_otel_span(
        trace_id=12345,
        span_id=102,
        name=long_name2,
        start_time=1000002000000000000,
        end_time=1000006000000000000,
        parent_id=100,
    )
    span2 = Span(span2_otel)
    trace_data = TraceData(spans=[root_span, span1, span2])
    trace_info = TraceInfo(
        trace_id="test-concurrent",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=10000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    if "CONCURRENT OPERATIONS:" in result and "No significant concurrent" not in result:
        truncated1 = long_name1[:27] + "..."
        truncated2 = long_name2[:27] + "..."
        assert truncated1 in result or truncated2 in result
        # Full (untruncated) names should never appear anywhere in the report.
        assert long_name1 not in result
        assert long_name2 not in result
def test_min_overlap_threshold_enforcement():
    """Overlaps below MIN_OVERLAP_THRESHOLD_S must be excluded from the report."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root",
        start_time=1000000000000000000,
        end_time=1000010000000000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    # span1/span2 overlap by only 9ms (< 10ms threshold) -> should be filtered.
    span1_otel = create_mock_otel_span(
        trace_id=12345,
        span_id=101,
        name="span1",
        start_time=1000001000000000000,
        end_time=1000002000000000000,
        parent_id=100,
    )
    span1 = Span(span1_otel)
    span2_otel = create_mock_otel_span(
        trace_id=12345,
        span_id=102,
        name="span2",
        start_time=1000001999991000000,
        end_time=1000003000000000000,
        parent_id=100,
    )
    span2 = Span(span2_otel)
    # span3/span4 overlap by 20ms (> 10ms threshold) -> should be reported.
    span3_otel = create_mock_otel_span(
        trace_id=12345,
        span_id=103,
        name="span3",
        start_time=1000004000000000000,
        end_time=1000005000000000000,
        parent_id=100,
    )
    span3 = Span(span3_otel)
    span4_otel = create_mock_otel_span(
        trace_id=12345,
        span_id=104,
        name="span4",
        start_time=1000004980000000000,
        end_time=1000006000000000000,
        parent_id=100,
    )
    span4 = Span(span4_otel)
    trace_data = TraceData(spans=[root_span, span1, span2, span3, span4])
    trace_info = TraceInfo(
        trace_id="test-overlap-threshold",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=10000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert tool.MIN_OVERLAP_THRESHOLD_S == 0.01
    if "CONCURRENT OPERATIONS:" in result:
        lines = result.split("\n")
        concurrent_section = False
        found_span3_span4 = False
        found_span1_span2 = False
        for line in lines:
            if "CONCURRENT OPERATIONS:" in line:
                concurrent_section = True
                continue
            if concurrent_section:
                # Skip separators and the header row of the pairs table.
                if line.startswith("-") or "span1" in line and "span2" in line and "name1" in line:
                    continue
                if "span3" in line and "span4" in line and "." in line:
                    found_span3_span4 = True
                if "span1" in line and "span2" in line and "." in line:
                    found_span1_span2 = True
        assert found_span3_span4
        assert not found_span1_span2
def test_top_spans_count_limit():
    """With 15 spans, the top-spans section must list exactly TOP_SPANS_COUNT rows."""
    tool = GetSpanPerformanceAndTimingReportTool()
    spans = []
    for i in range(15):
        otel_span = create_mock_otel_span(
            trace_id=12345,
            span_id=100 + i,
            name=f"span_{i:02d}",
            start_time=1000000000000000000 + i * 100000000000,
            end_time=1000000000000000000 + (i + 1) * 100000000000,
            parent_id=None,
        )
        spans.append(Span(otel_span))
    trace_data = TraceData(spans=spans)
    trace_info = TraceInfo(
        trace_id="test-top-spans-limit",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=15000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert tool.TOP_SPANS_COUNT == 10
    # Count rank rows (lines whose first token is a digit) in the top section.
    lines = result.split("\n")
    top_section = False
    span_count = 0
    for line in lines:
        if "TOP 10 SPANS BY SELF DURATION" in line:
            top_section = True
            continue
        if top_section and line.startswith("-"):
            continue
        if top_section and line.strip() and not line.startswith("-"):
            parts = line.strip().split()
            if parts and parts[0].isdigit():
                span_count += 1
        if top_section and line.strip() == "":
            break
    assert span_count == 10
def test_max_concurrent_pairs_exact_limit():
    """The concurrent-pairs listing must never exceed MAX_CONCURRENT_PAIRS rows."""
    tool = GetSpanPerformanceAndTimingReportTool()
    root_otel_span = create_mock_otel_span(
        trace_id=12345,
        span_id=100,
        name="root",
        start_time=1000000000000000000,
        end_time=1000100000000000000,
        parent_id=None,
    )
    root_span = Span(root_otel_span)
    spans = [root_span]
    # 25 overlapping children -> C(25, 2) = 300 candidate pairs.
    for i in range(25):
        otel_span = create_mock_otel_span(
            trace_id=12345,
            span_id=101 + i,
            name=f"concurrent_{i:02d}",
            start_time=1000010000000000000 + i * 1000000000,
            end_time=1000050000000000000 + i * 1000000000,
            parent_id=100,
        )
        spans.append(Span(otel_span))
    trace_data = TraceData(spans=spans)
    trace_info = TraceInfo(
        trace_id="test-max-concurrent",
        trace_location=TraceLocation.from_experiment_id("0"),
        request_time=1234567890,
        state=TraceState.OK,
        execution_duration=100000,
    )
    trace = Trace(info=trace_info, data=trace_data)
    result = tool.invoke(trace)
    assert tool.MAX_CONCURRENT_PAIRS == 20
    lines = result.split("\n")
    concurrent_section = False
    pair_count = 0
    for line in lines:
        if "CONCURRENT OPERATIONS:" in line:
            concurrent_section = True
            continue
        if concurrent_section and "No significant concurrent" in line:
            break
        if concurrent_section and line.strip() and not line.startswith("-"):
            if "concurrent_" in line and "s" in line:
                pair_count += 1
        if concurrent_section and line.strip() == "":
            break
    assert pair_count <= tool.MAX_CONCURRENT_PAIRS
def test_all_constants_exist_and_have_expected_values():
    """Sanity-check the tool's tuning constants: presence, values, types, ranges."""
    tool = GetSpanPerformanceAndTimingReportTool()
    assert hasattr(tool, "MAX_NAME_LENGTH")
    assert hasattr(tool, "MIN_OVERLAP_THRESHOLD_S")
    assert hasattr(tool, "TOP_SPANS_COUNT")
    assert hasattr(tool, "MAX_CONCURRENT_PAIRS")
    assert tool.MAX_NAME_LENGTH == 30
    assert tool.MIN_OVERLAP_THRESHOLD_S == 0.01
    assert tool.TOP_SPANS_COUNT == 10
    assert tool.MAX_CONCURRENT_PAIRS == 20
    assert isinstance(tool.MAX_NAME_LENGTH, int)
    assert isinstance(tool.MIN_OVERLAP_THRESHOLD_S, float)
    assert isinstance(tool.TOP_SPANS_COUNT, int)
    assert isinstance(tool.MAX_CONCURRENT_PAIRS, int)
    # Guard rails so future tuning stays within sensible bounds.
    assert 10 <= tool.MAX_NAME_LENGTH <= 100
    assert 0.001 <= tool.MIN_OVERLAP_THRESHOLD_S <= 1.0
    assert 5 <= tool.TOP_SPANS_COUNT <= 50
    assert 10 <= tool.MAX_CONCURRENT_PAIRS <= 100
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/test_judge_tool_get_span_performance_and_timing_report.py",
"license": "Apache License 2.0",
"lines": 728,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/langchain/test_responses_agent_langchain.py | import json
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from mlflow.types.responses import ResponsesAgentStreamEvent, output_to_responses_items_stream
def test_output_to_responses_items_stream_langchain():
    """
    Tests langchain message stream to responses items stream conversion.

    Accounts for:
    - AIMessage w/ and w/o tool calls
    - ToolMessage
    - Filtering out HumanMessage from the stream
    - Message
    """
    messages = [
        AIMessage(
            content="test text0",
            additional_kwargs={},
            response_metadata={},
            name="query_result",
            id="e0eafab0-f008-49d4-ac0d-f17a70096fe1",
        ),
        AIMessage(
            content="Transferring back to supervisor",
            additional_kwargs={},
            response_metadata={"__is_handoff_back": True},
            name="revenue-genie",
            id="5e88662b-29e7-4659-a521-f8175e7642ee",
            tool_calls=[
                {
                    "name": "transfer_back_to_supervisor",
                    "args": {},
                    "id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                    "type": "tool_call",
                }
            ],
        ),
        ToolMessage(
            content="Successfully transferred back to supervisor",
            name="transfer_back_to_supervisor",
            id="6fd471d8-57d4-46ec-a21a-9bb20dfda4d3",
            tool_call_id="543a6b6b-dc73-463c-9b6e-5d5a941b7669",
        ),
        # HumanMessage must be filtered out of the output stream.
        HumanMessage(
            content="Which companies do I have revenue data for",
            additional_kwargs={},
            response_metadata={},
            id="43d0cf0a-d687-4302-8562-4f2e09603473",
        ),
        AIMessage(
            content="I'll help you",
            additional_kwargs={
                "tool_calls": [
                    {
                        "id": "toolu_bdrk_01FtmRmzFm89zDtwYu3xdFkh",
                        "function": {
                            "arguments": "{}",
                            "name": "transfer_to_revenue-genie",
                        },
                        "type": "function",
                    }
                ]
            },
            response_metadata={
                "usage": {
                    "prompt_tokens": 551,
                    "completion_tokens": 73,
                    "total_tokens": 624,
                },
                "prompt_tokens": 551,
                "completion_tokens": 73,
                "total_tokens": 624,
                "model": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
                "model_name": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
                "finish_reason": "tool_calls",
            },
            name="supervisor",
            id="run--e112332b-ed6d-4e10-b17c-adf637fb67eb-0",
            tool_calls=[
                {
                    "name": "transfer_to_revenue-genie",
                    "args": {},
                    "id": "toolu_bdrk_01FtmRmzFm89zDtwYu3xdFkh",
                    "type": "tool_call",
                }
            ],
        ),
        ToolMessage(
            content="Successfully transferred to revenue-genie",
            name="transfer_to_revenue-genie",
            id="92acdb97-babc-4239-979e-b16880e7f58f",
            tool_call_id="toolu_bdrk_01FtmRmzFm89zDtwYu3xdFkh",
        ),
        AIMessage(
            content="test text1",
            additional_kwargs={},
            response_metadata={},
            name="query_result",
            id="e0eafab0-f008-49d4-ac0d-f17a70096fe1",
        ),
        AIMessage(
            content="Transferring back to supervisor",
            additional_kwargs={},
            response_metadata={"__is_handoff_back": True},
            name="revenue-genie",
            id="5e88662b-29e7-4659-a521-f8175e7642ee",
            tool_calls=[
                {
                    "name": "transfer_back_to_supervisor",
                    "args": {},
                    "id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                    "type": "tool_call",
                }
            ],
        ),
        ToolMessage(
            content="Successfully transferred back to supervisor",
            name="transfer_back_to_supervisor",
            id="6fd471d8-57d4-46ec-a21a-9bb20dfda4d3",
            tool_call_id="543a6b6b-dc73-463c-9b6e-5d5a941b7669",
        ),
        AIMessage(
            content="test text2",
            additional_kwargs={},
            response_metadata={
                "usage": {
                    "prompt_tokens": 813,
                    "completion_tokens": 108,
                    "total_tokens": 921,
                },
                "prompt_tokens": 813,
                "completion_tokens": 108,
                "total_tokens": 921,
                "model": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
                "model_name": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
                "finish_reason": "stop",
            },
            name="supervisor",
            id="run--2622edf9-37b6-4e25-9e97-6351d145b198-0",
        ),
    ]
    expected = [
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "e0eafab0-f008-49d4-ac0d-f17a70096fe1",
                "content": [{"text": "test text0", "type": "output_text", "annotations": []}],
                "role": "assistant",
                "type": "message",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "5e88662b-29e7-4659-a521-f8175e7642ee",
                "content": [
                    {
                        "text": "Transferring back to supervisor",
                        "type": "output_text",
                        "annotations": [],
                    }
                ],
                "role": "assistant",
                "type": "message",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call",
                "id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                "call_id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                "name": "transfer_back_to_supervisor",
                "arguments": "{}",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call_output",
                "call_id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                "output": "Successfully transferred back to supervisor",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "run--e112332b-ed6d-4e10-b17c-adf637fb67eb-0",
                "content": [{"text": "I'll help you", "type": "output_text", "annotations": []}],
                "role": "assistant",
                "type": "message",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call",
                "id": "toolu_bdrk_01FtmRmzFm89zDtwYu3xdFkh",
                "call_id": "toolu_bdrk_01FtmRmzFm89zDtwYu3xdFkh",
                "name": "transfer_to_revenue-genie",
                "arguments": "{}",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call_output",
                "call_id": "toolu_bdrk_01FtmRmzFm89zDtwYu3xdFkh",
                "output": "Successfully transferred to revenue-genie",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "e0eafab0-f008-49d4-ac0d-f17a70096fe1",
                "content": [{"text": "test text1", "type": "output_text", "annotations": []}],
                "role": "assistant",
                "type": "message",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "5e88662b-29e7-4659-a521-f8175e7642ee",
                "content": [
                    {
                        "text": "Transferring back to supervisor",
                        "type": "output_text",
                        "annotations": [],
                    }
                ],
                "role": "assistant",
                "type": "message",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call",
                "id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                "call_id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                "name": "transfer_back_to_supervisor",
                "arguments": "{}",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call_output",
                "call_id": "543a6b6b-dc73-463c-9b6e-5d5a941b7669",
                "output": "Successfully transferred back to supervisor",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "run--2622edf9-37b6-4e25-9e97-6351d145b198-0",
                "content": [{"text": "test text2", "type": "output_text", "annotations": []}],
                "role": "assistant",
                "type": "message",
            },
        ),
    ]
    result = list(output_to_responses_items_stream(messages))
    assert result == expected
def test_output_to_responses_items_stream_langchain_multiple_tool_calls_unique_ids():
    """Each tool call on one AIMessage must yield a function_call item with its own ID."""
    messages = [
        AIMessage(
            content="I'll look up both the current and historical stock prices for Apple.",
            id="lc_run--019c2b6f-4d8d-70e3-b620-b80545b4cb30",
            tool_calls=[
                {
                    "name": "get_current_stock_price",
                    "args": {"ticker": "AAPL"},
                    "id": "toolu_bdrk_01X3zqC3kknbchSJB6XYmtHQ",
                    "type": "tool_call",
                },
                {
                    "name": "get_historical_stock_prices",
                    "args": {
                        "ticker": "AAPL",
                        "start_date": "2023-01-01",
                        "end_date": "2024-01-01",
                    },
                    "id": "toolu_bdrk_01Dte8KnRx9Tk7pMz9h83okn",
                    "type": "tool_call",
                },
            ],
        ),
        ToolMessage(
            content='{"ticker": "AAPL", "price": 276.49}',
            name="get_current_stock_price",
            id="tool-output-1",
            tool_call_id="toolu_bdrk_01X3zqC3kknbchSJB6XYmtHQ",
        ),
        # A failed tool call still produces a function_call_output item.
        ToolMessage(
            content="Error executing tool",
            name="get_historical_stock_prices",
            id="tool-output-2",
            tool_call_id="toolu_bdrk_01Dte8KnRx9Tk7pMz9h83okn",
        ),
    ]
    expected = [
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "id": "lc_run--019c2b6f-4d8d-70e3-b620-b80545b4cb30",
                "content": [
                    {
                        "text": "I'll look up both the current and historical "
                        "stock prices for Apple.",
                        "type": "output_text",
                        "annotations": [],
                    }
                ],
                "role": "assistant",
                "type": "message",
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call",
                "id": "toolu_bdrk_01X3zqC3kknbchSJB6XYmtHQ",
                "call_id": "toolu_bdrk_01X3zqC3kknbchSJB6XYmtHQ",
                "name": "get_current_stock_price",
                "arguments": '{"ticker": "AAPL"}',
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call",
                "id": "toolu_bdrk_01Dte8KnRx9Tk7pMz9h83okn",
                "call_id": "toolu_bdrk_01Dte8KnRx9Tk7pMz9h83okn",
                "name": "get_historical_stock_prices",
                "arguments": '{"ticker": "AAPL", "start_date": "2023-01-01", '
                '"end_date": "2024-01-01"}',
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call_output",
                "call_id": "toolu_bdrk_01X3zqC3kknbchSJB6XYmtHQ",
                "output": '{"ticker": "AAPL", "price": 276.49}',
            },
        ),
        ResponsesAgentStreamEvent(
            type="response.output_item.done",
            custom_outputs=None,
            item={
                "type": "function_call_output",
                "call_id": "toolu_bdrk_01Dte8KnRx9Tk7pMz9h83okn",
                "output": "Error executing tool",
            },
        ),
    ]
    result = list(output_to_responses_items_stream(messages))
    assert result == expected
def test_output_to_responses_items_stream_langchain_non_string_tool_content():
    """Non-string tool content (e.g. MCP content blocks) should be JSON-serialized."""
    mcp_content_blocks = [{"type": "text", "text": "result from mcp tool"}]
    messages = [
        AIMessage(
            content="Calling tools",
            id="ai-1",
            tool_calls=[
                {"name": "mcp_tool", "args": {}, "id": "call-1", "type": "tool_call"},
            ],
        ),
        ToolMessage(
            content=mcp_content_blocks,
            name="mcp_tool",
            id="tool-1",
            tool_call_id="call-1",
        ),
    ]
    result = list(output_to_responses_items_stream(messages))
    tool_outputs = [r for r in result if r.item.get("type") == "function_call_output"]
    assert len(tool_outputs) == 1
    assert tool_outputs[0].item["output"] == json.dumps(mcp_content_blocks)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/langchain/test_responses_agent_langchain.py",
"license": "Apache License 2.0",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/job_api.py | """
Internal job APIs for UI invocation
"""
import json
from typing import Any
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from mlflow.entities._job import Job as JobEntity
from mlflow.entities._job_status import JobStatus
from mlflow.exceptions import MlflowException
# Router for internal job endpoints invoked by the MLflow UI (not a public API).
job_api_router = APIRouter(prefix="/ajax-api/3.0/jobs", tags=["Job"])
class Job(BaseModel):
    """
    Pydantic model for job query response.
    """

    job_id: str
    creation_time: int
    job_name: str
    params: dict[str, Any]
    timeout: float | None
    status: JobStatus
    result: Any
    retry_count: int
    last_update_time: int

    @classmethod
    def from_job_entity(cls, job: JobEntity) -> "Job":
        """Build a response model from the internal job entity."""
        return cls(
            job_id=job.job_id,
            creation_time=job.creation_time,
            job_name=job.job_name,
            # The entity stores params as a JSON string; decode it for the API.
            params=json.loads(job.params),
            timeout=job.timeout,
            status=job.status,
            result=job.parsed_result,
            retry_count=job.retry_count,
            last_update_time=job.last_update_time,
        )
@job_api_router.get("/{job_id}", response_model=Job)
def get_job(job_id: str) -> Job:
    """Fetch a single job by ID, translating MlflowException into an HTTP error."""
    # Imported lazily to avoid import cycles at module load time.
    from mlflow.server.jobs import get_job

    try:
        job = get_job(job_id)
        return Job.from_job_entity(job)
    except MlflowException as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(
            status_code=e.get_http_status_code(),
            detail=e.message,
        ) from e
class SubmitJobPayload(BaseModel):
    """Request body for submitting a job by registered job name."""

    job_name: str
    params: dict[str, Any]
    timeout: float | None = None
@job_api_router.post("/", response_model=Job)
def submit_job(payload: SubmitJobPayload) -> Job:
    """Resolve the registered job function by name and enqueue it."""
    # Imported lazily to avoid import cycles at module load time.
    from mlflow.server.jobs import submit_job
    from mlflow.server.jobs.utils import _load_function, get_job_fn_fullname

    job_name = payload.job_name
    try:
        function_fullname = get_job_fn_fullname(job_name)
        function = _load_function(function_fullname)
        job = submit_job(function, payload.params, payload.timeout)
        return Job.from_job_entity(job)
    except MlflowException as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(
            status_code=e.get_http_status_code(),
            detail=e.message,
        ) from e
@job_api_router.patch("/cancel/{job_id}", response_model=Job)
def cancel_job(job_id: str) -> Job:
    """Cancel a job by ID, translating MlflowException into an HTTP error."""
    # Imported lazily to avoid import cycles at module load time.
    from mlflow.server.jobs import cancel_job

    try:
        job = cancel_job(job_id)
        return Job.from_job_entity(job)
    except MlflowException as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(
            status_code=e.get_http_status_code(),
            detail=e.message,
        ) from e
class SearchJobPayload(BaseModel):
    """Request body for searching jobs; all filters are optional."""

    job_name: str | None = None
    params: dict[str, Any] | None = None
    statuses: list[JobStatus] | None = None
class SearchJobsResponse(BaseModel):
    """
    Pydantic model for job searching response.
    """

    jobs: list[Job]
@job_api_router.post("/search", response_model=SearchJobsResponse)
def search_jobs(payload: SearchJobPayload) -> SearchJobsResponse:
    """List jobs from the job store, filtered by name/status/params."""
    # Imported lazily to avoid import cycles at module load time.
    from mlflow.server.handlers import _get_job_store

    try:
        store = _get_job_store()
        job_results = [
            Job.from_job_entity(job)
            for job in store.list_jobs(
                job_name=payload.job_name,
                statuses=payload.statuses,
                params=payload.params,
            )
        ]
        return SearchJobsResponse(jobs=job_results)
    except MlflowException as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(
            status_code=e.get_http_status_code(),
            detail=e.message,
        ) from e
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/job_api.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/src/clint/rules/isinstance_union_syntax.py | import ast
from clint.rules.base import Rule
class IsinstanceUnionSyntax(Rule):
    def _message(self) -> str:
        return (
            "Use `isinstance(obj, (X, Y))` instead of `isinstance(obj, X | Y)`. "
            "The union syntax with `|` is slower than using a tuple of types."
        )

    @staticmethod
    def check(node: ast.Call) -> bool:
        """
        Return True for ``isinstance`` calls whose second argument uses the
        ``X | Y`` union syntax.

        Flagged:
            - isinstance(obj, str | int)
            - isinstance(obj, int | str | float)
            - isinstance(value, (dict | list))

        Not flagged:
            - isinstance(obj, (str, int))
            - isinstance(obj, str)
            - other_func(obj, str | int)
        """
        func = node.func
        # Only plain `isinstance(...)` name calls are of interest.
        if not isinstance(func, ast.Name) or func.id != "isinstance":
            return False
        # A valid isinstance call has exactly two positional arguments;
        # anything else (including malformed calls) is ignored.
        if len(node.args) != 2:
            return False
        return IsinstanceUnionSyntax._has_union_syntax(node.args[1])

    @staticmethod
    def _has_union_syntax(node: ast.expr) -> bool:
        """
        Return True if ``node`` contains a BitOr union, including nested
        cases like ``(A | B) | C`` and unions inside tuple elements.
        """
        if isinstance(node, ast.BinOp) and isinstance(node.op, ast.BitOr):
            return True
        if isinstance(node, ast.Tuple):
            return any(IsinstanceUnionSyntax._has_union_syntax(elt) for elt in node.elts)
        return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/isinstance_union_syntax.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_isinstance_union_syntax.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules import IsinstanceUnionSyntax
def test_isinstance_union_syntax(index_path: Path) -> None:
    """Lint a snippet mixing good and bad isinstance forms; only unions flag."""
    code = """
# Bad - basic union syntax
isinstance(obj, str | int)
isinstance(value, int | str | float)

# Bad - parenthesized union in tuple
isinstance(x, ((str | int),))

# Good - tuple syntax (recommended)
isinstance(obj, (str, int))
isinstance(value, (int, str, float))

# Good - single type
isinstance(obj, str)
isinstance(obj, int)

# Good - Union type annotation (different syntax)
isinstance(obj, Union[str, int])

# Good - other functions with union syntax
other_func(obj, str | int)
some_call(x | y)

# Good - invalid isinstance calls, not our concern
isinstance()
isinstance(obj)
"""
    config = Config(select={IsinstanceUnionSyntax.name})
    violations = lint_file(Path("test.py"), code, config, index_path)
    for violation in violations:
        assert isinstance(violation.rule, IsinstanceUnionSyntax)
    expected_ranges = [Range(Position(line, 0)) for line in (2, 3, 6)]
    assert [violation.range for violation in violations] == expected_ranges
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_isinstance_union_syntax.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/entities/_job.py | import json
from typing import Any
from mlflow.entities._job_status import JobStatus
from mlflow.entities._mlflow_object import _MlflowObject
from mlflow.utils.workspace_utils import resolve_entity_workspace_name
class Job(_MlflowObject):
    """
    MLflow entity representing a Job.
    """

    def __init__(
        self,
        job_id: str,
        creation_time: int,
        job_name: str,
        params: str,
        timeout: float | None,
        status: JobStatus,
        result: str | None,
        retry_count: int,
        last_update_time: int,
        workspace: str | None = None,
    ):
        super().__init__()
        # Identity and timing metadata.
        self._job_id = job_id
        self._job_name = job_name
        self._creation_time = creation_time
        self._last_update_time = last_update_time
        # Execution configuration and outcome.
        self._params = params
        self._timeout = timeout
        self._status = status
        self._result = result
        self._retry_count = retry_count
        # Workspace name is normalized through the shared resolver helper.
        self._workspace = resolve_entity_workspace_name(workspace)

    @property
    def job_id(self) -> str:
        """Unique identifier of this job."""
        return self._job_id

    @property
    def creation_time(self) -> int:
        """Creation time of the job, in milliseconds since the UNIX epoch."""
        return self._creation_time

    @property
    def job_name(self) -> str:
        """Static job name that uniquely identifies the decorated job function."""
        return self._job_name

    @property
    def params(self) -> str:
        """
        Job parameters serialized as a JSON string.

        For example, ``{"a": 3, "b": 4}`` represents param ``a`` with value 3
        and param ``b`` with value 4.
        """
        return self._params

    @property
    def timeout(self) -> float | None:
        """Job execution timeout in seconds, if one was set."""
        return self._timeout

    @property
    def status(self) -> JobStatus:
        """Current :py:class:`mlflow.entities._job_status.JobStatus` of the job."""
        return self._status

    @property
    def result(self) -> str | None:
        """Raw job result or error message string, if any."""
        return self._result

    @property
    def parsed_result(self) -> Any:
        """
        JSON-decoded result for SUCCEEDED jobs; otherwise the raw ``result``
        value (the error string for FAILED jobs, ``None`` when unset).
        """
        if self.status != JobStatus.SUCCEEDED:
            return self.result
        return json.loads(self.result)

    @property
    def retry_count(self) -> int:
        """Number of times this job has been retried."""
        return self._retry_count

    @property
    def last_update_time(self) -> int:
        """Last update time of the job, in milliseconds since the UNIX epoch."""
        return self._last_update_time

    @property
    def workspace(self) -> str | None:
        """Workspace associated with this job."""
        return self._workspace

    def __repr__(self) -> str:
        return (
            f"<Job(job_id={self.job_id}, job_name={self.job_name}, "
            f"workspace={self.workspace})>"
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/_job.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/entities/_job_status.py | from enum import Enum
from mlflow.exceptions import MlflowException
from mlflow.protos.jobs_pb2 import JobStatus as ProtoJobStatus
class JobStatus(str, Enum):
    """Enum for status of a Job."""

    PENDING = "PENDING"
    RUNNING = "RUNNING"
    SUCCEEDED = "SUCCEEDED"
    FAILED = "FAILED"
    TIMEOUT = "TIMEOUT"
    CANCELED = "CANCELED"

    @classmethod
    def from_int(cls, status_int: int) -> "JobStatus":
        """Convert integer status (declaration-order index) to JobStatus enum."""
        for index, member in enumerate(cls):
            if index == status_int:
                return member
        raise MlflowException.invalid_parameter_value(
            f"The value {status_int} can't be converted to JobStatus enum value."
        )

    @classmethod
    def from_str(cls, status_str: str) -> "JobStatus":
        """Convert string status (member name) to JobStatus enum."""
        try:
            return cls[status_str]
        except KeyError:
            raise MlflowException.invalid_parameter_value(
                f"The string '{status_str}' can't be converted to JobStatus enum value."
            )

    def to_int(self) -> int:
        """Convert JobStatus enum to its declaration-order integer index."""
        return list(JobStatus).index(self)

    def to_proto(self) -> int:
        """Convert JobStatus enum to proto JobStatus enum value."""
        proto_by_status = {
            JobStatus.PENDING: ProtoJobStatus.JOB_STATUS_PENDING,
            JobStatus.RUNNING: ProtoJobStatus.JOB_STATUS_IN_PROGRESS,
            JobStatus.SUCCEEDED: ProtoJobStatus.JOB_STATUS_COMPLETED,
            JobStatus.FAILED: ProtoJobStatus.JOB_STATUS_FAILED,
            JobStatus.TIMEOUT: ProtoJobStatus.JOB_STATUS_FAILED,  # No TIMEOUT in proto
            JobStatus.CANCELED: ProtoJobStatus.JOB_STATUS_CANCELED,
        }
        return proto_by_status.get(self, ProtoJobStatus.JOB_STATUS_UNSPECIFIED)

    def __str__(self):
        return self.name

    @staticmethod
    def is_finalized(status: "JobStatus") -> bool:
        """
        Determines whether or not a JobStatus is a finalized status.

        A finalized status indicates that no further status updates will occur.
        """
        return status in (
            JobStatus.SUCCEEDED,
            JobStatus.FAILED,
            JobStatus.TIMEOUT,
            JobStatus.CANCELED,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/_job_status.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/store/jobs/abstract_store.py | from abc import ABC, abstractmethod
from typing import Any, Iterator
from mlflow.entities._job import Job
from mlflow.entities._job_status import JobStatus
from mlflow.utils.annotations import developer_stable
@developer_stable
class AbstractJobStore(ABC):
    """
    Abstract class that defines API interfaces for storing Job metadata.
    """

    @property
    def supports_workspaces(self) -> bool:
        """Return whether workspaces are supported by this job store."""
        return False

    @abstractmethod
    def create_job(self, job_name: str, params: str, timeout: float | None = None) -> Job:
        """
        Create a new job with the specified function and parameters.

        Args:
            job_name: The static job name that identifies the decorated job function
            params: The job parameters that are serialized as a JSON string
            timeout: The job execution timeout in seconds

        Returns:
            Job entity instance
        """

    @abstractmethod
    def start_job(self, job_id: str) -> None:
        """
        Start a job by setting its status to RUNNING.

        Args:
            job_id: The ID of the job to start
        """

    @abstractmethod
    def reset_job(self, job_id: str) -> None:
        """
        Reset a job by setting its status to PENDING.

        Args:
            job_id: The ID of the job to re-enqueue.
        """

    @abstractmethod
    def finish_job(self, job_id: str, result: str) -> None:
        """
        Finish a job by setting its status to SUCCEEDED and setting the result.

        Args:
            job_id: The ID of the job to finish
            result: The job result as a string
        """

    @abstractmethod
    def mark_job_timed_out(self, job_id: str) -> None:
        """
        Set a job status to TIMEOUT.

        Args:
            job_id: The ID of the job
        """

    @abstractmethod
    def fail_job(self, job_id: str, error: str) -> None:
        """
        Fail a job by setting its status to FAILED and setting the error message.

        Args:
            job_id: The ID of the job to fail
            error: The error message as a string
        """

    @abstractmethod
    def retry_or_fail_job(self, job_id: str, error: str) -> int | None:
        """
        If the job retry_count is less than maximum allowed retry count,
        increment the retry_count and reset the job to PENDING status,
        otherwise set the job to FAILED status and fill the job's error field.

        Args:
            job_id: The ID of the job to fail
            error: The error message as a string

        Returns:
            If the job is allowed to retry, returns the retry count,
            otherwise returns None.
        """

    @abstractmethod
    def list_jobs(
        self,
        job_name: str | None = None,
        statuses: list[JobStatus] | None = None,
        begin_timestamp: int | None = None,
        end_timestamp: int | None = None,
        params: dict[str, Any] | None = None,
    ) -> Iterator[Job]:
        """
        List jobs based on the provided filters.

        Args:
            job_name: Filter by job name (exact match)
            statuses: Filter by a list of job statuses (PENDING, RUNNING, SUCCEEDED,
                FAILED, TIMEOUT, CANCELED)
            begin_timestamp: Filter jobs created after this timestamp (inclusive)
            end_timestamp: Filter jobs created before this timestamp (inclusive)
            params: Filter jobs by matching job params dict with the provided params dict
                e.g., if `params` is ``{'a': 3, 'b': 4}``, it can match the following job params:
                ``{'a': 3, 'b': 4}``, ``{'a': 3, 'b': 4, 'c': 5}``, but it does not match the
                following job params: ``{'a': 3, 'b': 6}``, ``{'a': 3, 'c': 5}``.

        Returns:
            Iterator of Job entities that match the filters, ordered by creation time (oldest first)
        """

    @abstractmethod
    def get_job(self, job_id: str) -> Job:
        """
        Get a job by its ID.

        Args:
            job_id: The ID of the job to retrieve

        Returns:
            Job entity

        Raises:
            MlflowException: If job with the given ID is not found
        """

    @abstractmethod
    def cancel_job(self, job_id: str) -> Job:
        """
        Cancel a job by its ID.

        Args:
            job_id: The ID of the job to cancel

        Returns:
            Job entity

        Raises:
            MlflowException: If job with the given ID is not found
        """

    @abstractmethod
    def delete_jobs(self, older_than: int = 0, job_ids: list[str] | None = None) -> list[str]:
        """
        Delete finalized jobs based on the provided filters. Used by ``mlflow gc``.

        Only jobs with finalized status (SUCCEEDED, FAILED, TIMEOUT, CANCELED) are
        eligible for deletion.

        Behavior:
            - No filters: Deletes all finalized jobs.
            - Only ``older_than``: Deletes finalized jobs older than the threshold.
            - Only ``job_ids``: Deletes only the specified finalized jobs.
            - Both filters: Deletes finalized jobs matching both conditions.

        Args:
            older_than: Time threshold in milliseconds. Jobs with creation_time
                older than (current_time - older_than) are eligible for deletion.
                A value of 0 disables this filter.
            job_ids: List of specific job IDs to delete. If None, all finalized jobs
                (subject to older_than filter) are eligible for deletion.

        Returns:
            List of job IDs that were deleted.
        """
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/store/jobs/abstract_store.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/store/jobs/sqlalchemy_store.py | import json
import threading
import uuid
from typing import Any, Iterator
import sqlalchemy
from mlflow.entities._job import Job
from mlflow.entities._job_status import JobStatus
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST
from mlflow.store.db.utils import (
_get_managed_session_maker,
_safe_initialize_tables,
create_sqlalchemy_engine_with_retry,
)
from mlflow.store.jobs.abstract_store import AbstractJobStore
from mlflow.store.tracking.dbmodels.models import SqlJob
from mlflow.utils.time import get_current_time_millis
from mlflow.utils.uri import extract_db_type_from_uri
from mlflow.utils.workspace_utils import DEFAULT_WORKSPACE_NAME
# Ensure all ORM mappers are configured before the store issues any queries.
sqlalchemy.orm.configure_mappers()

# Page size used by ``SqlAlchemyJobStore.list_jobs`` when paginating results.
_LIST_JOB_PAGE_SIZE = 100
class SqlAlchemyJobStore(AbstractJobStore):
    """
    SQLAlchemy compliant backend store for storing Job metadata.

    This store interacts with SQL store using SQLAlchemy abstractions defined
    for MLflow Job entities.
    """

    # Class-level cache for SQLAlchemy engines to prevent connection pool leaks
    # when multiple store instances are created with the same database URI.
    _engine_map: dict[str, sqlalchemy.engine.Engine] = {}
    _engine_map_lock = threading.Lock()

    @classmethod
    def _get_or_create_engine(cls, db_uri: str) -> sqlalchemy.engine.Engine:
        """Get a cached engine or create a new one for the given database URI."""
        # Unlocked fast path for the common case; the locked re-check prevents
        # two threads from creating duplicate engines for the same URI.
        if db_uri not in cls._engine_map:
            with cls._engine_map_lock:
                if db_uri not in cls._engine_map:
                    cls._engine_map[db_uri] = create_sqlalchemy_engine_with_retry(db_uri)
        return cls._engine_map[db_uri]

    def __init__(self, db_uri):
        """
        Create a database backed store.

        Args:
            db_uri: The SQLAlchemy database URI string to connect to the database.
        """
        super().__init__()
        self.db_uri = db_uri
        self.db_type = extract_db_type_from_uri(db_uri)
        self.engine = self._get_or_create_engine(db_uri)
        _safe_initialize_tables(self.engine)
        SessionMaker = sqlalchemy.orm.sessionmaker(bind=self.engine)
        self.ManagedSessionMaker = _get_managed_session_maker(SessionMaker, self.db_type)

    def _get_active_workspace(self) -> str:
        """
        Get the active workspace name.

        In single-tenant mode, always returns DEFAULT_WORKSPACE_NAME.
        Workspace-aware subclasses override this to enforce isolation.
        """
        return DEFAULT_WORKSPACE_NAME

    def _get_query(self, session, model):
        """
        Return a query for ``model``.

        Workspace-aware subclasses override this to enforce scoping.
        """
        return session.query(model)

    def _with_workspace_field(self, instance):
        """
        Allow subclasses to populate model fields (e.g., workspace metadata) on ORM instances.
        """
        if hasattr(instance, "workspace") and getattr(instance, "workspace", None) is None:
            instance.workspace = DEFAULT_WORKSPACE_NAME
        return instance

    def create_job(self, job_name: str, params: str, timeout: float | None = None) -> Job:
        """
        Create a new job with the specified function and parameters.

        Args:
            job_name: The static job name that identifies the decorated job function
            params: The job parameters that are serialized as a JSON string
            timeout: The job execution timeout in seconds

        Returns:
            Job entity instance
        """
        with self.ManagedSessionMaker() as session:
            job_id = str(uuid.uuid4())
            creation_time = get_current_time_millis()
            job = self._with_workspace_field(
                SqlJob(
                    id=job_id,
                    creation_time=creation_time,
                    job_name=job_name,
                    params=params,
                    timeout=timeout,
                    status=JobStatus.PENDING.to_int(),
                    result=None,
                    last_update_time=creation_time,
                )
            )
            session.add(job)
            session.flush()
            return job.to_mlflow_entity()

    def _update_job(self, job_id: str, new_status: JobStatus, result: str | None = None) -> Job:
        """
        Transition a job to ``new_status`` (optionally setting ``result``).

        Args:
            job_id: The ID of the job to update
            new_status: The status to transition the job to
            result: Optional result/error payload to store on the job

        Returns:
            Updated Job entity

        Raises:
            MlflowException: If the job does not exist or is already finalized.
        """
        with self.ManagedSessionMaker() as session:
            job = self._get_sql_job(session, job_id)
            # BUGFIX: ``SqlJob.status`` is stored as an int. It must be
            # converted to the enum before the finalized check; comparing the
            # raw int against the str-based JobStatus members is always False,
            # which previously allowed finalized jobs to be updated.
            current_status = JobStatus.from_int(job.status)
            if JobStatus.is_finalized(current_status):
                raise MlflowException(
                    f"The Job {job_id} is already finalized with status: {current_status}, "
                    "it can't be updated."
                )
            job.status = new_status.to_int()
            if result is not None:
                job.result = result
            job.last_update_time = get_current_time_millis()
            return job.to_mlflow_entity()

    def start_job(self, job_id: str) -> None:
        """
        Start a job by setting its status to RUNNING.
        Only succeeds if the job is currently in PENDING state.

        Args:
            job_id: The ID of the job to start

        Raises:
            MlflowException: If job is not in PENDING state or doesn't exist
        """
        with self.ManagedSessionMaker() as session:
            # Atomic update: only transition from PENDING to RUNNING
            rows_updated = (
                self._get_query(session, SqlJob)
                .filter(SqlJob.id == job_id, SqlJob.status == JobStatus.PENDING.to_int())
                .update(
                    {
                        SqlJob.status: JobStatus.RUNNING.to_int(),
                        SqlJob.last_update_time: get_current_time_millis(),
                    }
                )
            )
            if rows_updated == 0:
                # Distinguish "job missing" from "job in the wrong state".
                job = self._get_query(session, SqlJob).filter(SqlJob.id == job_id).one_or_none()
                if job is None:
                    raise MlflowException(
                        f"Job with ID {job_id} not found", error_code=RESOURCE_DOES_NOT_EXIST
                    )
                raise MlflowException(
                    f"Job {job_id} is in {JobStatus.from_int(job.status)} state, "
                    "cannot start (must be PENDING)"
                )

    def reset_job(self, job_id: str) -> None:
        """
        Reset a job by setting its status to PENDING.

        Args:
            job_id: The ID of the job to re-enqueue.
        """
        self._update_job(job_id, JobStatus.PENDING)

    def finish_job(self, job_id: str, result: str) -> None:
        """
        Finish a job by setting its status to SUCCEEDED and setting the result.

        Args:
            job_id: The ID of the job to finish
            result: The job result as a string
        """
        self._update_job(job_id, JobStatus.SUCCEEDED, result)

    def fail_job(self, job_id: str, error: str) -> None:
        """
        Fail a job by setting its status to FAILED and setting the error message.

        Args:
            job_id: The ID of the job to fail
            error: The error message as a string
        """
        self._update_job(job_id, JobStatus.FAILED, error)

    def mark_job_timed_out(self, job_id: str) -> None:
        """
        Set a job status to TIMEOUT.

        Args:
            job_id: The ID of the job
        """
        self._update_job(job_id, JobStatus.TIMEOUT)

    def retry_or_fail_job(self, job_id: str, error: str) -> int | None:
        """
        If the job retry_count is less than maximum allowed retry count,
        increment the retry_count and reset the job to PENDING status,
        otherwise set the job to FAILED status and fill the job's error field.

        Args:
            job_id: The ID of the job to fail
            error: The error message as a string

        Returns:
            If the job is allowed to retry, returns the retry count,
            otherwise returns None.
        """
        from mlflow.environment_variables import MLFLOW_SERVER_JOB_TRANSIENT_ERROR_MAX_RETRIES

        max_retries = MLFLOW_SERVER_JOB_TRANSIENT_ERROR_MAX_RETRIES.get()
        with self.ManagedSessionMaker() as session:
            job = self._get_sql_job(session, job_id)
            if job.retry_count >= max_retries:
                job.status = JobStatus.FAILED.to_int()
                job.result = error
                # Keep the timestamp consistent with every other transition.
                job.last_update_time = get_current_time_millis()
                return None
            job.retry_count += 1
            job.status = JobStatus.PENDING.to_int()
            job.last_update_time = get_current_time_millis()
            return job.retry_count

    def list_jobs(
        self,
        job_name: str | None = None,
        statuses: list[JobStatus] | None = None,
        begin_timestamp: int | None = None,
        end_timestamp: int | None = None,
        params: dict[str, Any] | None = None,
    ) -> Iterator[Job]:
        """
        List jobs based on the provided filters.

        Args:
            job_name: Filter by job name (exact match)
            statuses: Filter by a list of job statuses (PENDING, RUNNING, SUCCEEDED,
                FAILED, TIMEOUT, CANCELED)
            begin_timestamp: Filter jobs created after this timestamp (inclusive)
            end_timestamp: Filter jobs created before this timestamp (inclusive)
            params: Filter jobs by matching job params dict with the provided params dict.
                e.g., if `params` is ``{'a': 3, 'b': 4}``, it can match the following job params:
                ``{'a': 3, 'b': 4}``, ``{'a': 3, 'b': 4, 'c': 5}``, but it does not match the
                following job params: ``{'a': 3, 'b': 6}``, ``{'a': 3, 'c': 5}``.

        Returns:
            Iterator of Job entities that match the filters, ordered by creation time (oldest first)
        """
        offset = 0

        def filter_by_params(job_params: dict[str, Any]) -> bool:
            # A job matches when every filter key is present in the job's
            # params with an equal value (``params`` is a sub-dict).
            return all(key in job_params and job_params[key] == params[key] for key in params)

        while True:
            with self.ManagedSessionMaker() as session:
                # Select all columns needed for Job entity
                query = self._get_query(session, SqlJob)

                # Apply filters
                if job_name is not None:
                    query = query.filter(SqlJob.job_name == job_name)
                if statuses:
                    query = query.filter(
                        SqlJob.status.in_([status.to_int() for status in statuses])
                    )
                if begin_timestamp is not None:
                    query = query.filter(SqlJob.creation_time >= begin_timestamp)
                if end_timestamp is not None:
                    query = query.filter(SqlJob.creation_time <= end_timestamp)

                # Order by creation time (oldest first) and apply pagination
                jobs = (
                    query.order_by(SqlJob.creation_time)
                    .offset(offset)
                    .limit(_LIST_JOB_PAGE_SIZE)
                    .all()
                )

                # If no jobs returned, we've reached the end
                if not jobs:
                    break

                # Yield each job, applying the params sub-dict filter in Python
                # because params are stored as an opaque JSON string.
                if params:
                    for job in jobs:
                        if filter_by_params(json.loads(job.params)):
                            yield job.to_mlflow_entity()
                else:
                    for job in jobs:
                        yield job.to_mlflow_entity()

                # If we got fewer jobs than page_size, we've reached the end
                if len(jobs) < _LIST_JOB_PAGE_SIZE:
                    break

                # Move to next page
                offset += _LIST_JOB_PAGE_SIZE

    def _get_sql_job(self, session, job_id) -> SqlJob:
        """Fetch the SqlJob row for ``job_id`` or raise RESOURCE_DOES_NOT_EXIST."""
        job = self._get_query(session, SqlJob).filter(SqlJob.id == job_id).one_or_none()
        if job is None:
            raise MlflowException(
                f"Job with ID {job_id} not found", error_code=RESOURCE_DOES_NOT_EXIST
            )
        return job

    def get_job(self, job_id: str) -> Job:
        """
        Get a job by its ID.

        Args:
            job_id: The ID of the job to retrieve

        Returns:
            Job entity

        Raises:
            MlflowException: If job with the given ID is not found
        """
        with self.ManagedSessionMaker() as session:
            job = self._get_sql_job(session, job_id)
            return job.to_mlflow_entity()

    def delete_jobs(self, older_than: int = 0, job_ids: list[str] | None = None) -> list[str]:
        """
        Delete finalized jobs based on the provided filters. Used by ``mlflow gc``.

        Only jobs with finalized status (SUCCEEDED, FAILED, TIMEOUT, CANCELED) are
        eligible for deletion.

        Behavior:
            - No filters: Deletes all finalized jobs.
            - Only ``older_than``: Deletes finalized jobs older than the threshold.
            - Only ``job_ids``: Deletes only the specified finalized jobs.
            - Both filters: Deletes finalized jobs matching both conditions.

        Args:
            older_than: Time threshold in milliseconds. Jobs with creation_time
                older than (current_time - older_than) are eligible for deletion.
                A value of 0 disables this filter.
            job_ids: List of specific job IDs to delete. If None, all finalized jobs
                (subject to older_than filter) are eligible for deletion.

        Returns:
            List of job IDs that were deleted.
        """
        current_time = get_current_time_millis()
        time_threshold = current_time - older_than
        finalized_statuses = [
            JobStatus.SUCCEEDED.to_int(),
            JobStatus.FAILED.to_int(),
            JobStatus.TIMEOUT.to_int(),
            JobStatus.CANCELED.to_int(),
        ]
        with self.ManagedSessionMaker() as session:
            query = self._get_query(session, SqlJob).filter(SqlJob.status.in_(finalized_statuses))
            if job_ids:
                query = query.filter(SqlJob.id.in_(job_ids))
            if older_than > 0:
                query = query.filter(SqlJob.creation_time < time_threshold)
            # Collect IDs first so the caller gets the exact set deleted;
            # synchronize_session=False is safe because the session is
            # discarded at the end of the managed block.
            ids_to_delete = [job.id for job in query.all()]
            if ids_to_delete:
                self._get_query(session, SqlJob).filter(SqlJob.id.in_(ids_to_delete)).delete(
                    synchronize_session=False
                )
            return ids_to_delete

    def cancel_job(self, job_id: str) -> Job:
        """
        Cancel a job by its ID.

        Args:
            job_id: The ID of the job to cancel

        Returns:
            Job entity

        Raises:
            MlflowException: If job with the given ID is not found
        """
        return self._update_job(job_id, JobStatus.CANCELED)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/store/jobs/sqlalchemy_store.py",
"license": "Apache License 2.0",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/check_init_py.py | """
Pre-commit hook to check for missing `__init__.py` files in mlflow and tests directories.
This script ensures that all directories under the mlflow package and tests directory that contain
Python files also have an `__init__.py` file. This prevents `setuptools` from excluding these
directories during package build and ensures test modules are properly structured.
Usage:
uv run dev/check_init_py.py
Requirements:
- If `mlflow/foo/bar.py` exists, `mlflow/foo/__init__.py` must exist.
- If `tests/foo/test_bar.py` exists, `tests/foo/__init__.py` must exist.
- Only test files (starting with `test_`) in the tests directory are checked.
- All parent directories of Python files are checked recursively for `__init__.py`.
- Ignore directories that do not contain any Python files (e.g., `mlflow/server/js`).
"""
import subprocess
import sys
from pathlib import Path
def get_tracked_python_files() -> list[Path]:
    """
    Return git-tracked Python files under ``mlflow/`` and ``tests/``.

    Files under ``tests/`` are kept only if they are test modules
    (name starts with ``test_``). Exits with status 1 if git fails.
    """
    cmd = ["git", "ls-files", "mlflow/**/*.py", "tests/**/*.py"]
    try:
        output = subprocess.check_output(cmd, text=True)
    except subprocess.CalledProcessError as e:
        print(f"Error running git ls-files: {e}", file=sys.stderr)
        sys.exit(1)
    tracked = [Path(line) for line in output.splitlines() if line]
    return [
        path
        for path in tracked
        if not path.is_relative_to("tests") or path.name.startswith("test_")
    ]
def main() -> int:
    """
    Verify every directory holding tracked Python files has an ``__init__.py``.

    Returns 0 when all directories are covered, 1 (after printing the
    offenders) otherwise.
    """
    files = get_tracked_python_files()
    if not files:
        return 0
    parent_dirs = {parent for f in files for parent in f.parents if parent != Path(".")}
    missing = sorted(d for d in parent_dirs if not (d / "__init__.py").exists())
    if not missing:
        return 0
    print("Error: The following directories contain Python files but lack __init__.py:")
    for directory in missing:
        print(f"  {directory.as_posix()}/")
    print("Please add __init__.py files to the directories listed above.")
    return 1
# Script entry point: exit status 1 signals missing __init__.py to pre-commit.
if __name__ == "__main__":
    sys.exit(main())
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/check_init_py.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/dev/test_check_init_py.py | import subprocess
import sys
from pathlib import Path
import pytest
def get_check_init_py_script() -> Path:
    """Absolute path to ``dev/check_init_py.py`` at the repository root."""
    repo_root = Path(__file__).resolve().parents[2]
    return repo_root / "dev" / "check_init_py.py"
@pytest.fixture
def temp_git_repo(tmp_path: Path) -> Path:
    """Initialize an empty git repo (with a test identity) in ``tmp_path``."""
    for git_args in (
        ["init"],
        ["config", "user.email", "test@example.com"],
        ["config", "user.name", "Test User"],
    ):
        subprocess.check_call(["git", *git_args], cwd=tmp_path)
    return tmp_path
def test_exits_with_0_when_all_directories_have_init_py(temp_git_repo: Path) -> None:
    """A fully __init__.py-covered package tree passes the check silently."""
    (temp_git_repo / "mlflow" / "test_package").mkdir(parents=True)
    for rel_path in (
        "mlflow/__init__.py",
        "mlflow/test_package/__init__.py",
        "mlflow/test_package/test_module.py",
    ):
        (temp_git_repo / rel_path).touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 0
    assert proc.stdout == ""
def test_exits_with_1_when_directories_missing_init_py(temp_git_repo: Path) -> None:
    """A tracked test module without __init__.py in its parents fails the check."""
    package_dir = temp_git_repo / "mlflow" / "test_package"
    package_dir.mkdir(parents=True)
    (package_dir / "test_module.py").touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 1
    error_header = "Error: The following directories contain Python files but lack __init__.py:"
    assert error_header in proc.stdout
    assert "mlflow" in proc.stdout
    assert "mlflow/test_package" in proc.stdout
def test_exits_with_0_when_no_python_files_exist(temp_git_repo: Path) -> None:
    """Directories without any Python files (e.g. JS assets) are ignored."""
    js_dir = temp_git_repo / "mlflow" / "server" / "js"
    js_dir.mkdir(parents=True)
    (js_dir / "main.js").touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 0
    assert proc.stdout == ""
def test_identifies_only_directories_missing_init_py(temp_git_repo: Path) -> None:
    """Only directories actually lacking __init__.py are reported."""
    mlflow_dir = temp_git_repo / "mlflow"
    covered_pkg = mlflow_dir / "package1"
    uncovered_pkg = mlflow_dir / "package2"
    covered_pkg.mkdir(parents=True)
    uncovered_pkg.mkdir(parents=True)
    (mlflow_dir / "__init__.py").touch()
    (covered_pkg / "__init__.py").touch()
    (covered_pkg / "module1.py").touch()
    (uncovered_pkg / "module2.py").touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 1
    assert (
        "Error: The following directories contain Python files but lack __init__.py:"
        in proc.stdout
    )
    assert "mlflow/package2" in proc.stdout
    assert "mlflow/package1" not in proc.stdout
def test_checks_tests_directory_for_missing_init_py(temp_git_repo: Path) -> None:
    """test_*.py files under tests/ require __init__.py in every parent dir."""
    test_pkg = temp_git_repo / "tests" / "test_package"
    test_pkg.mkdir(parents=True)
    # Only test files (starting with test_) are checked
    (test_pkg / "test_module.py").touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 1
    assert (
        "Error: The following directories contain Python files but lack __init__.py:"
        in proc.stdout
    )
    assert "tests/test_package" in proc.stdout
    assert "tests" in proc.stdout  # Parent directory also needs __init__.py
def test_exits_with_0_when_tests_directories_have_init_py(temp_git_repo: Path) -> None:
    """A fully covered tests/ tree passes the check silently."""
    (temp_git_repo / "tests" / "test_package").mkdir(parents=True)
    for rel_path in (
        "tests/__init__.py",
        "tests/test_package/__init__.py",
        "tests/test_package/test_module.py",
    ):
        (temp_git_repo / rel_path).touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 0
    assert proc.stdout == ""
def test_ignores_non_test_files_in_tests_directory(temp_git_repo: Path) -> None:
    """Helper modules (not test_*.py) under tests/ never trigger the check."""
    test_pkg = temp_git_repo / "tests" / "test_package"
    test_pkg.mkdir(parents=True)
    # Non-test file (doesn't start with test_) should be ignored
    for helper_name in ("helper.py", "utils.py"):
        (test_pkg / helper_name).touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    # Should pass since no test files exist
    assert proc.returncode == 0
    assert proc.stdout == ""
def test_checks_all_parent_directories(temp_git_repo: Path) -> None:
    """Every ancestor of a Python file needs __init__.py; missing ones are reported."""
    mlflow_dir = temp_git_repo / "mlflow"
    deep_dir = mlflow_dir / "level1" / "level2" / "level3"
    deep_dir.mkdir(parents=True)
    (deep_dir / "module.py").touch()
    # __init__.py present only for mlflow/ and mlflow/level1/ —
    # level2 and level3 are deliberately left without one.
    (mlflow_dir / "__init__.py").touch()
    (mlflow_dir / "level1" / "__init__.py").touch()
    subprocess.check_call(["git", "add", "."], cwd=temp_git_repo)
    subprocess.check_call(["git", "commit", "-m", "Initial commit"], cwd=temp_git_repo)
    proc = subprocess.run(
        [sys.executable, get_check_init_py_script()],
        capture_output=True,
        text=True,
        cwd=temp_git_repo,
    )
    assert proc.returncode == 1
    header = "Error: The following directories contain Python files but lack __init__.py:"
    assert header in proc.stdout
    # Both directories missing __init__.py must be listed.
    assert "mlflow/level1/level2" in proc.stdout
    assert "mlflow/level1/level2/level3" in proc.stdout
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/dev/test_check_init_py.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/test_index.py | from pathlib import Path
from unittest.mock import patch
from clint.index import SymbolIndex
def test_symbol_index_build_basic(tmp_path: Path) -> None:
    """SymbolIndex.build indexes a tracked file under a patched repo root."""
    pkg = tmp_path / "mlflow"
    pkg.mkdir()
    (pkg / "test.py").write_text("def test_function(): pass")
    with (
        patch("clint.index.get_repo_root", return_value=tmp_path) as repo_root_mock,
        patch("subprocess.check_output", return_value="mlflow/test.py\n") as git_mock,
    ):
        index = SymbolIndex.build()
    assert isinstance(index, SymbolIndex)
    repo_root_mock.assert_called_once()
    git_mock.assert_called_once()
def test_symbol_index_build_skips_missing_files(tmp_path: Path) -> None:
    """Files reported by git but absent on disk are skipped without error."""
    pkg = tmp_path / "mlflow"
    pkg.mkdir()
    (pkg / "existing.py").write_text("def existing_function(): pass")
    # git reports deleted.py, which does not exist on disk.
    listing = "mlflow/existing.py\nmlflow/deleted.py\n"
    with (
        patch("clint.index.get_repo_root", return_value=tmp_path) as repo_root_mock,
        patch("subprocess.check_output", return_value=listing) as git_mock,
    ):
        index = SymbolIndex.build()
    assert isinstance(index, SymbolIndex)
    repo_root_mock.assert_called_once()
    git_mock.assert_called_once()
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/test_index.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/temp_dir_in_test.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class TempDirInTest(Rule):
    """Flags direct use of ``tempfile.TemporaryDirectory`` inside test functions."""

    def _message(self) -> str:
        return "Do not use `tempfile.TemporaryDirectory` in test directly. Use `tmp_path` fixture (https://docs.pytest.org/en/stable/reference/reference.html#tmp-path)."

    @staticmethod
    def check(node: ast.Call, resolver: Resolver) -> bool:
        """Return True when the call resolves to tempfile.TemporaryDirectory()."""
        resolved = resolver.resolve(node)
        return resolved == ["tempfile", "TemporaryDirectory"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/temp_dir_in_test.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_temp_dir_in_test.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.temp_dir_in_test import TempDirInTest
def test_temp_dir_in_test(index_path: Path) -> None:
    """TemporaryDirectory calls are flagged only inside test functions."""
    code = """
import tempfile
# Bad
def test_func():
    tempfile.TemporaryDirectory()
# Good
def non_test_func():
    tempfile.TemporaryDirectory()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, TempDirInTest) for v in results)
    assert results[0].range == Range(Position(5, 4))
def test_temp_dir_in_test_with_from_import(index_path: Path) -> None:
    """The rule also catches the from-import form of TemporaryDirectory."""
    code = """
from tempfile import TemporaryDirectory
# Bad
def test_func():
    TemporaryDirectory()
# Good
def non_test_func():
    TemporaryDirectory()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, TempDirInTest) for v in results)
    assert results[0].range == Range(Position(5, 4))
def test_temp_dir_in_test_no_violation_outside_test(index_path: Path) -> None:
    """Non-test files are never flagged by this rule."""
    code = """
import tempfile
def normal_function():
    tempfile.TemporaryDirectory()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("non_test_file.py"), code, cfg, index_path)
    assert len(results) == 0
def test_temp_dir_in_test_with_alias(index_path: Path) -> None:
    """Aliased imports of tempfile are still resolved and flagged."""
    code = """
import tempfile as tf
# Bad - should still catch aliased import
def test_func():
    tf.TemporaryDirectory()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, TempDirInTest) for v in results)
    assert results[0].range == Range(Position(5, 4))
def test_temp_dir_in_test_nested_functions_not_caught(index_path: Path) -> None:
    """Calls inside nested (non-test) functions are not flagged.

    This mirrors the behavior of other test-only rules such as the
    os.environ one.
    """
    code = """
import tempfile
def test_outer():
    def inner_function():
        tempfile.TemporaryDirectory() # Not caught since inner_function is not a test function
    inner_function()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 0
def test_temp_dir_not_tempfile_module(index_path: Path) -> None:
    """A same-named method on an unrelated object does not trigger the rule."""
    code = """
class FakeTempfile:
    @staticmethod
    def TemporaryDirectory():
        pass
fake_tempfile = FakeTempfile()
def test_func():
    # Should not trigger since it's not tempfile.TemporaryDirectory
    fake_tempfile.TemporaryDirectory()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 0
def test_temp_dir_in_test_with_context_manager(index_path: Path) -> None:
    """The with-statement form is flagged at the call position."""
    code = """
import tempfile
# Bad - using with statement
def test_func():
    with tempfile.TemporaryDirectory() as tmpdir:
        pass
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, TempDirInTest) for v in results)
    assert results[0].range == Range(Position(5, 9))
def test_temp_dir_in_test_assigned_to_variable(index_path: Path) -> None:
    """Assigning the TemporaryDirectory result is still a violation."""
    code = """
import tempfile
# Bad - assigned to variable
def test_func():
    tmpdir = tempfile.TemporaryDirectory()
"""
    cfg = Config(select={TempDirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, TempDirInTest) for v in results)
    assert results[0].range == Range(Position(5, 13))
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_temp_dir_in_test.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/os_chdir_in_test.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class OsChdirInTest(Rule):
    """Flags direct use of ``os.chdir`` inside test functions."""

    def _message(self) -> str:
        return "Do not use `os.chdir` in test directly. Use `monkeypatch.chdir` (https://docs.pytest.org/en/stable/reference/reference.html#pytest.MonkeyPatch.chdir)."

    @staticmethod
    def check(node: ast.Call, resolver: Resolver) -> bool:
        """Return True when the call resolves to os.chdir()."""
        resolved = resolver.resolve(node)
        return resolved == ["os", "chdir"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/os_chdir_in_test.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/rules/test_os_chdir_in_test.py | from pathlib import Path
from clint.config import Config
from clint.linter import Position, Range, lint_file
from clint.rules.os_chdir_in_test import OsChdirInTest
def test_os_chdir_in_test(index_path: Path) -> None:
    """os.chdir calls are flagged only inside test functions."""
    code = """
import os
# Bad
def test_func():
    os.chdir("/tmp")
# Good
def non_test_func():
    os.chdir("/tmp")
"""
    cfg = Config(select={OsChdirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, OsChdirInTest) for v in results)
    assert results[0].range == Range(Position(5, 4))
def test_os_chdir_in_test_with_from_import(index_path: Path) -> None:
    """The rule also catches the from-import form of chdir."""
    code = """
from os import chdir
# Bad
def test_func():
    chdir("/tmp")
# Good
def non_test_func():
    chdir("/tmp")
"""
    cfg = Config(select={OsChdirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, OsChdirInTest) for v in results)
    assert results[0].range == Range(Position(5, 4))
def test_os_chdir_in_test_no_violation_outside_test(index_path: Path) -> None:
    """Non-test files are never flagged by this rule."""
    code = """
import os
def normal_function():
    os.chdir("/tmp")
"""
    cfg = Config(select={OsChdirInTest.name})
    results = lint_file(Path("non_test_file.py"), code, cfg, index_path)
    assert len(results) == 0
def test_os_chdir_in_test_with_alias(index_path: Path) -> None:
    """Aliased imports of os are still resolved and flagged."""
    code = """
import os as operating_system
# Bad - should still catch aliased import
def test_func():
    operating_system.chdir("/tmp")
"""
    cfg = Config(select={OsChdirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 1
    assert all(isinstance(v.rule, OsChdirInTest) for v in results)
    assert results[0].range == Range(Position(5, 4))
def test_os_chdir_in_test_nested_functions_not_caught(index_path: Path) -> None:
    """Calls inside nested (non-test) functions are not flagged.

    This mirrors the behavior of other test-only rules such as the
    os.environ one.
    """
    code = """
import os
def test_outer():
    def inner_function():
        os.chdir("/tmp") # Not caught since inner_function is not a test function
    inner_function()
"""
    cfg = Config(select={OsChdirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 0
def test_os_chdir_not_os_module(index_path: Path) -> None:
    """A same-named method on an unrelated object does not trigger the rule."""
    code = """
class FakeOs:
    @staticmethod
    def chdir(path):
        pass
fake_os = FakeOs()
def test_func():
    fake_os.chdir("/tmp") # Should not trigger since it's not os.chdir
"""
    cfg = Config(select={OsChdirInTest.name})
    results = lint_file(Path("test_file.py"), code, cfg, index_path)
    assert len(results) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_os_chdir_in_test.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/test_resolve_paths.py | from __future__ import annotations
import subprocess
from pathlib import Path
from unittest.mock import patch
import pytest
from clint.utils import ALLOWED_EXTS, _git_ls_files, resolve_paths
@pytest.fixture
def git_repo(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path:
    """Initialize a git repo with a test identity in tmp_path and chdir into it."""
    subprocess.check_call(["git", "init"], cwd=tmp_path, stdout=subprocess.DEVNULL)
    # Commit identity is required for `git commit` in the tests below.
    for key, value in [("user.email", "test@example.com"), ("user.name", "Test User")]:
        subprocess.check_call(["git", "config", key, value], cwd=tmp_path)
    monkeypatch.chdir(tmp_path)
    return tmp_path
def test_resolve_paths_with_real_git_repo_tracked_and_untracked(git_repo: Path) -> None:
    """Tracked and untracked files are returned; gitignored files are not."""
    for name, content in [
        ("tracked.py", "# tracked python file"),
        ("tracked.md", "# tracked markdown file"),
    ]:
        (git_repo / name).write_text(content)
    subprocess.check_call(["git", "add", "tracked.py", "tracked.md"])
    subprocess.check_call(["git", "commit", "-m", "Add tracked files"])
    (git_repo / "untracked.py").write_text("# untracked python file")
    (git_repo / "untracked.rst").write_text("# untracked rst file")
    # ignored.py is excluded via .gitignore and must not be resolved.
    (git_repo / ".gitignore").write_text("ignored.py\n")
    (git_repo / "ignored.py").write_text("# ignored file")
    resolved = resolve_paths([Path(".")])
    assert resolved == [
        Path("tracked.md"),
        Path("tracked.py"),
        Path("untracked.py"),
        Path("untracked.rst"),
    ]
    assert Path("ignored.py") not in resolved
def test_resolve_paths_with_real_git_repo_specific_pathspecs(git_repo: Path) -> None:
    """Only files under the given pathspec are returned."""
    subdir = git_repo / "subdir"
    subdir.mkdir()
    (git_repo / "root.py").write_text("# root file")
    (subdir / "sub.py").write_text("# subdir python file")
    (subdir / "sub.md").write_text("# subdir markdown file")
    subprocess.check_call(["git", "add", "root.py"])
    subprocess.check_call(["git", "commit", "-m", "Add root file"])
    resolved = resolve_paths([Path("subdir")])
    assert resolved == [Path("subdir/sub.md"), Path("subdir/sub.py")]
    # root.py lies outside the pathspec and must be excluded.
    assert Path("root.py") not in resolved
def test_resolve_paths_with_real_git_repo_untracked_only(git_repo: Path) -> None:
    """Untracked files alone are still resolved."""
    (git_repo / "untracked1.py").write_text("# untracked file 1")
    (git_repo / "untracked2.md").write_text("# untracked file 2")
    assert resolve_paths([Path(".")]) == [Path("untracked1.py"), Path("untracked2.md")]
def test_resolve_paths_with_real_git_repo_tracked_only(git_repo: Path) -> None:
    """Committed files alone are resolved."""
    (git_repo / "tracked1.py").write_text("# tracked file 1")
    (git_repo / "tracked2.md").write_text("# tracked file 2")
    subprocess.check_call(["git", "add", "tracked1.py", "tracked2.md"])
    subprocess.check_call(["git", "commit", "-m", "Add tracked files"])
    assert resolve_paths([Path(".")]) == [Path("tracked1.py"), Path("tracked2.md")]
def test_resolve_paths_with_real_git_repo_removed_tracked_file(git_repo: Path) -> None:
    """A tracked file deleted from disk is excluded from the results."""
    first = git_repo / "tracked1.py"
    second = git_repo / "tracked2.md"
    first.write_text("# tracked file 1")
    second.write_text("# tracked file 2")
    subprocess.check_call(["git", "add", "tracked1.py", "tracked2.md"])
    subprocess.check_call(["git", "commit", "-m", "Add tracked files"])
    # Delete one tracked file from the working tree without committing.
    first.unlink()
    assert resolve_paths([Path(".")]) == [Path("tracked2.md")]
def test_git_ls_files_success() -> None:
    """_git_ls_files parses newline-separated git output into Paths."""
    listing = "file1.py\ndir/file2.md\nfile3.ipynb\n"
    with patch("subprocess.check_output", return_value=listing) as git_mock:
        paths = _git_ls_files([Path(".")])
    git_mock.assert_called_once()
    assert paths == [Path("file1.py"), Path("dir/file2.md"), Path("file3.ipynb")]
def test_git_ls_files_empty_output() -> None:
    """Empty git output yields an empty list."""
    with patch("subprocess.check_output", return_value="") as git_mock:
        assert _git_ls_files([Path(".")]) == []
    git_mock.assert_called_once()
def test_git_ls_files_with_pathspecs() -> None:
    """Pathspecs are forwarded to git and its output is parsed."""
    with patch("subprocess.check_output", return_value="file1.py\n") as git_mock:
        paths = _git_ls_files([Path("dir1"), Path("file.py")])
    git_mock.assert_called_once()
    assert paths == [Path("file1.py")]
def test_git_ls_files_subprocess_error() -> None:
    """A failing git invocation surfaces as RuntimeError."""
    err = subprocess.CalledProcessError(1, "git")
    with patch("subprocess.check_output", side_effect=err) as git_mock:
        with pytest.raises(RuntimeError, match="Failed to list git files"):
            _git_ls_files([Path(".")])
    git_mock.assert_called_once()
def test_git_ls_files_os_error() -> None:
    """An OSError from subprocess (e.g. git missing) also surfaces as RuntimeError."""
    with patch("subprocess.check_output", side_effect=OSError("git not found")) as git_mock:
        with pytest.raises(RuntimeError, match="Failed to list git files"):
            _git_ls_files([Path(".")])
    git_mock.assert_called_once()
def test_resolve_paths_default_current_dir() -> None:
    """With no pathspecs, resolve_paths falls back to the current directory."""
    with (
        patch("subprocess.check_output", return_value="file1.py\nfile2.md\n") as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([])
    git_mock.assert_called_once()
    assert resolved == [Path("file1.py"), Path("file2.md")]
def test_resolve_paths_filters_by_extension() -> None:
    """Only files with allowed extensions survive filtering."""
    listing = "file1.py\nfile2.md\nfile3.txt\nfile4.ipynb\nfile5.mdx\nfile6.js\nfile7.rst\n"
    with (
        patch("subprocess.check_output", return_value=listing) as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([Path(".")])
    git_mock.assert_called_once()
    # .txt and .js are not in the allowed extension set.
    assert resolved == [
        Path("file1.py"),
        Path("file2.md"),
        Path("file4.ipynb"),
        Path("file5.mdx"),
        Path("file7.rst"),
    ]
def test_resolve_paths_case_insensitive_extensions() -> None:
    """Extension filtering ignores case; original casing is preserved."""
    listing = "file1.PY\nfile2.MD\nfile3.IPYNB\nfile4.py\nfile5.RST\n"
    with (
        patch("subprocess.check_output", return_value=listing) as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([Path(".")])
    git_mock.assert_called_once()
    assert resolved == [
        Path("file1.PY"),
        Path("file2.MD"),
        Path("file3.IPYNB"),
        Path("file4.py"),
        Path("file5.RST"),
    ]
def test_resolve_paths_returns_sorted_list() -> None:
    """Results come back sorted regardless of git's ordering."""
    listing = "z_file.py\na_file.md\nm_file.ipynb\n"
    with (
        patch("subprocess.check_output", return_value=listing) as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([Path(".")])
    git_mock.assert_called_once()
    assert resolved == [Path("a_file.md"), Path("m_file.ipynb"), Path("z_file.py")]
def test_resolve_paths_deduplicates_results() -> None:
    """Duplicate entries in git output appear once in the result."""
    listing = "file1.py\nfile1.py\nfile2.md\nfile2.md\n"
    with (
        patch("subprocess.check_output", return_value=listing) as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([Path(".")])
    git_mock.assert_called_once()
    assert resolved == [Path("file1.py"), Path("file2.md")]
def test_resolve_paths_with_multiple_pathspecs() -> None:
    """Multiple pathspecs are accepted in a single call."""
    listing = "dir1/file1.py\ndir2/file2.md\nfile3.ipynb\n"
    with (
        patch("subprocess.check_output", return_value=listing) as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([Path("dir1"), Path("file3.ipynb")])
    git_mock.assert_called_once()
    assert resolved == [Path("dir1/file1.py"), Path("dir2/file2.md"), Path("file3.ipynb")]
def test_resolve_paths_includes_rst_files() -> None:
    """.rst files are part of the allowed extensions."""
    listing = "README.rst\ndocs/index.rst\nsetup.py\n"
    with (
        patch("subprocess.check_output", return_value=listing) as git_mock,
        patch("pathlib.Path.exists", return_value=True),
    ):
        resolved = resolve_paths([Path(".")])
    git_mock.assert_called_once()
    assert resolved == [Path("README.rst"), Path("docs/index.rst"), Path("setup.py")]
def test_allowed_extensions_constant() -> None:
    """ALLOWED_EXTS covers exactly the lintable file extensions."""
    assert ALLOWED_EXTS == {".md", ".mdx", ".rst", ".py", ".ipynb"}
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/test_resolve_paths.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/tests/test_config.py | import subprocess
from pathlib import Path
from typing import Generator
import pytest
from clint.config import Config
from clint.utils import get_repo_root
@pytest.fixture(autouse=True)
def clear_repo_root_cache() -> Generator[None, None, None]:
    """Clear the get_repo_root cache before each test to avoid cross-test contamination."""
    # get_repo_root is lru-cached; a stale entry from a previous test's repo
    # would leak into this one, so clear on both setup and teardown.
    get_repo_root.cache_clear()
    yield
    get_repo_root.cache_clear()
@pytest.fixture
def tmp_git_repo(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path:
    """Initialize a quiet git repo with a test identity and chdir into it."""
    subprocess.check_call(
        ["git", "init"], cwd=tmp_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    )
    # A commit identity is required before any `git commit`.
    for key, value in [("user.name", "Test User"), ("user.email", "test@example.com")]:
        subprocess.check_call(["git", "config", key, value], cwd=tmp_path)
    monkeypatch.chdir(tmp_path)
    return tmp_path
def test_config_validate_exclude_paths_success(tmp_git_repo: Path) -> None:
    """Exclude entries that exist on disk validate and load."""
    test_file = tmp_git_repo / "test_file.py"
    test_file.touch()
    test_dir = tmp_git_repo / "test_dir"
    test_dir.mkdir()
    (tmp_git_repo / "pyproject.toml").write_text(f"""
[tool.clint]
exclude = [
    "{test_file.name}",
    "{test_dir.name}"
]
""")
    cfg = Config.load()
    assert len(cfg.exclude) == 2
    assert test_file.name in cfg.exclude
    assert test_dir.name in cfg.exclude
def test_config_validate_exclude_paths_failure(tmp_git_repo: Path) -> None:
    """Exclude entries that do not exist are rejected with a clear error."""
    (tmp_git_repo / "pyproject.toml").write_text("""
[tool.clint]
exclude = [
    "non_existing_file.py",
    "non_existing_dir"
]
""")
    with pytest.raises(ValueError, match="Non-existing paths found in exclude field") as exc_info:
        Config.load()
    message = str(exc_info.value)
    assert "non_existing_file.py" in message
    assert "non_existing_dir" in message
def test_config_validate_exclude_paths_mixed(tmp_git_repo: Path) -> None:
    """Only the missing entries are reported when some entries exist."""
    existing_file = tmp_git_repo / "existing_file.py"
    existing_file.touch()
    (tmp_git_repo / "pyproject.toml").write_text(f"""
[tool.clint]
exclude = [
    "{existing_file.name}",
    "non_existing_file.py"
]
""")
    with pytest.raises(ValueError, match="Non-existing paths found in exclude field") as exc_info:
        Config.load()
    message = str(exc_info.value)
    assert "non_existing_file.py" in message
    # The existing entry must not appear in the reported list.
    assert "['non_existing_file.py']" in message
def test_config_empty_exclude_list(tmp_git_repo: Path) -> None:
    """An explicitly empty exclude list loads as an empty list."""
    (tmp_git_repo / "pyproject.toml").write_text("""
[tool.clint]
exclude = []
""")
    assert Config.load().exclude == []
def test_config_no_exclude_field(tmp_git_repo: Path) -> None:
    """Omitting the exclude field defaults to an empty list."""
    (tmp_git_repo / "pyproject.toml").write_text("""
[tool.clint]
select = ["do-not-disable"]
""")
    assert Config.load().exclude == []
def test_config_loads_from_repo_root(tmp_git_repo: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    """Config.load finds pyproject.toml at the repo root even from a subdirectory."""
    subdir = tmp_git_repo / "subdir"
    subdir.mkdir()
    (tmp_git_repo / "pyproject.toml").write_text("""
[tool.clint]
select = ["do-not-disable"]
exclude = ["excluded_path"]
""")
    (tmp_git_repo / "excluded_path").mkdir()
    # Load from inside the subdirectory; the root config must still apply.
    monkeypatch.chdir(subdir)
    cfg = Config.load()
    assert "excluded_path" in cfg.exclude
    assert "do-not-disable" in cfg.select
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/test_config.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:examples/haystack/tracing.py | import os
from getpass import getpass
from haystack import Pipeline
from haystack.components.builders import ChatPromptBuilder
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
from haystack.components.routers import ConditionalRouter
from haystack.components.websearch.serper_dev import SerperDevWebSearch
from haystack.dataclasses import ChatMessage, Document
from haystack.document_stores.in_memory import InMemoryDocumentStore
import mlflow
# Log all runs under a dedicated MLflow experiment and enable automatic
# tracing of Haystack pipelines and components.
mlflow.set_experiment("Haystack Tracing")
mlflow.haystack.autolog()
# Prompt interactively for API keys only when they are not already set.
if "OPENAI_API_KEY" not in os.environ:
    os.environ["OPENAI_API_KEY"] = getpass("Enter OpenAI API key:")
if "SERPERDEV_API_KEY" not in os.environ:
    os.environ["SERPERDEV_API_KEY"] = getpass("Enter SerperDev API key:")
# Seed an in-memory store with a single document about Munich; queries it
# cannot answer will be routed to web search further below.
document_store = InMemoryDocumentStore()
documents = [
    Document(
        content="""Munich, the vibrant capital of Bavaria in southern Germany, exudes a perfect blend of rich cultural
        heritage and modern urban sophistication. Nestled along the banks of the Isar River, Munich is renowned
        for its splendid architecture, including the iconic Neues Rathaus (New Town Hall) at Marienplatz and
        the grandeur of Nymphenburg Palace. The city is a haven for art enthusiasts, with world-class museums like the
        Alte Pinakothek housing masterpieces by renowned artists. Munich is also famous for its lively beer gardens, where
        locals and tourists gather to enjoy the city's famed beers and traditional Bavarian cuisine. The city's annual
        Oktoberfest celebration, the world's largest beer festival, attracts millions of visitors from around the globe.
        Beyond its cultural and culinary delights, Munich offers picturesque parks like the English Garden, providing a
        serene escape within the heart of the bustling metropolis. Visitors are charmed by Munich's warm hospitality,
        making it a must-visit destination for travelers seeking a taste of both old-world charm and contemporary allure."""
    )
]
document_store.write_documents(documents)
# BM25 retrieval over the in-memory store feeds the first LLM attempt.
retriever = InMemoryBM25Retriever(document_store)
# Prompt for the document-grounded attempt; the LLM answers 'no_answer'
# when the documents do not contain the answer (drives the router below).
prompt_template = [
    ChatMessage.from_user(
        """
Answer the following query given the documents.
If the answer is not contained within the documents reply with 'no_answer'
Documents:
{% for document in documents %}
{{document.content}}
{% endfor %}
Query: {{query}}
"""
    )
]
prompt_builder = ChatPromptBuilder(template=prompt_template, required_variables="*")
llm = OpenAIChatGenerator(model="gpt-4o-mini")
# Fallback prompt used when the router sends the query to web search.
prompt_for_websearch = [
    ChatMessage.from_user(
        """
Answer the following query given the documents retrieved from the web.
Your answer should indicate that your answer was generated from websearch.
Documents:
{% for document in documents %}
{{document.content}}
{% endfor %}
Query: {{query}}
"""
    )
]
websearch = SerperDevWebSearch()
prompt_builder_for_websearch = ChatPromptBuilder(
    template=prompt_for_websearch, required_variables="*"
)
llm_for_websearch = OpenAIChatGenerator(model="gpt-4o-mini")
# Route on the first LLM's reply: 'no_answer' -> web search, else final answer.
routes = [
    {
        "condition": "{{'no_answer' in replies[0].text}}",
        "output": "{{query}}",
        "output_name": "go_to_websearch",
        "output_type": str,
    },
    {
        "condition": "{{'no_answer' not in replies[0].text}}",
        "output": "{{replies[0].text}}",
        "output_name": "answer",
        "output_type": str,
    },
]
router = ConditionalRouter(routes)
# Assemble the agentic RAG pipeline: retrieve -> prompt -> LLM -> route,
# with the web-search branch wired off the router.
agentic_rag_pipe = Pipeline()
agentic_rag_pipe.add_component("retriever", retriever)
agentic_rag_pipe.add_component("prompt_builder", prompt_builder)
agentic_rag_pipe.add_component("llm", llm)
agentic_rag_pipe.add_component("router", router)
agentic_rag_pipe.add_component("websearch", websearch)
agentic_rag_pipe.add_component("prompt_builder_for_websearch", prompt_builder_for_websearch)
agentic_rag_pipe.add_component("llm_for_websearch", llm_for_websearch)
agentic_rag_pipe.connect("retriever", "prompt_builder.documents")
agentic_rag_pipe.connect("prompt_builder.prompt", "llm.messages")
agentic_rag_pipe.connect("llm.replies", "router.replies")
agentic_rag_pipe.connect("router.go_to_websearch", "websearch.query")
agentic_rag_pipe.connect("router.go_to_websearch", "prompt_builder_for_websearch.query")
agentic_rag_pipe.connect("websearch.documents", "prompt_builder_for_websearch.documents")
agentic_rag_pipe.connect("prompt_builder_for_websearch", "llm_for_websearch")
# The seeded document does not state Munich's population, so this query is
# expected to take the web-search branch.
query = "How many people live in Munich?"
result = agentic_rag_pipe.run(
    {"retriever": {"query": query}, "prompt_builder": {"query": query}, "router": {"query": query}}
)
# Print the `replies` generated using the web searched Documents
print(result["llm_for_websearch"]["replies"][0].text)
# Fetch the trace autologged for the pipeline run above.
last_trace_id = mlflow.get_last_active_trace_id()
trace = mlflow.get_trace(trace_id=last_trace_id)
# Print the token usage
total_usage = trace.info.token_usage
print("== Total token usage: ==")
print(f"  Input tokens: {total_usage['input_tokens']}")
print(f"  Output tokens: {total_usage['output_tokens']}")
print(f"  Total tokens: {total_usage['total_tokens']}")
# Print the token usage for each LLM call
print("\n== Detailed usage for each LLM call: ==")
for span in trace.data.spans:
    # Only spans that recorded chat token usage are reported.
    if usage := span.get_attribute("mlflow.chat.tokenUsage"):
        print(f"{span.name}:")
        print(f"  Input tokens: {usage['input_tokens']}")
        print(f"  Output tokens: {usage['output_tokens']}")
        print(f"  Total tokens: {usage['total_tokens']}")
| {
"repo_id": "mlflow/mlflow",
"file_path": "examples/haystack/tracing.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/haystack/autolog.py | import json
import logging
import threading
from typing import Any
from haystack.tracing import OpenTelemetryTracer, enable_tracing
from opentelemetry import trace
from opentelemetry.context import Context
from opentelemetry.sdk.trace import ReadableSpan as OTelReadableSpan
from opentelemetry.sdk.trace import Span as OTelSpan
from opentelemetry.sdk.trace import TracerProvider as SDKTracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from opentelemetry.trace import (
NoOpTracerProvider,
ProxyTracerProvider,
get_tracer_provider,
set_tracer_provider,
)
from mlflow.entities import LiveSpan, SpanType
from mlflow.entities.span import create_mlflow_span
from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey
from mlflow.tracing.provider import _get_tracer
from mlflow.tracing.trace_manager import InMemoryTraceManager
from mlflow.tracing.utils import (
_bypass_attribute_guard,
generate_trace_id_v3,
get_mlflow_span_for_otel_span,
set_span_cost_attribute,
set_span_model_attribute,
should_compute_cost_client_side,
)
_logger = logging.getLogger(__name__)
def setup_haystack_tracing():
    """Route Haystack's OpenTelemetry spans into MLflow tracing.

    Enables content tracing in Haystack, ensures a HaystackSpanProcessor is
    registered on the active (or a newly installed) tracer provider, and
    points Haystack at an OpenTelemetryTracer backed by that provider.
    """
    from haystack import tracing as hs_tracing

    # Capture component inputs/outputs in spans, not just timings.
    hs_tracing.tracer.is_content_tracing_enabled = True
    provider = get_tracer_provider()
    hs_processor = HaystackSpanProcessor()
    if isinstance(provider, (NoOpTracerProvider, ProxyTracerProvider)):
        # No real provider installed yet: install an SDK provider carrying our
        # processor. This mutates global OpenTelemetry state.
        new_provider = SDKTracerProvider()
        new_provider.add_span_processor(hs_processor)
        set_tracer_provider(new_provider)
    else:
        # A real provider already exists; add our processor at most once.
        # NOTE(review): relies on private OTel SDK attributes
        # (_active_span_processor._span_processors) — verify against the
        # pinned opentelemetry-sdk version.
        if not any(
            isinstance(p, HaystackSpanProcessor)
            for p in provider._active_span_processor._span_processors
        ):
            provider.add_span_processor(hs_processor)
    tracer = trace.get_tracer(__name__)
    enable_tracing(OpenTelemetryTracer(tracer))
def _infer_span_type_from_haystack(
    comp_type: str | None,
    comp_alias: str | None,
    span: OTelReadableSpan,
) -> SpanType:
    """Map a Haystack component to an MLflow span type via keyword matching.

    The component type is preferred, then its alias, then the span name;
    the first non-empty one is matched case-insensitively. Unrecognized
    components fall back to SpanType.TOOL.
    """
    label = (comp_type or comp_alias or span.name or "").lower()
    llm_markers = (
        "llm",
        "chat",
        "generator",
        "completion",
        "textgen",
        "chatgenerator",
        "openai",
        "anthropic",
        "mistral",
        "cohere",
        "gemini",
    )
    if any(marker in label for marker in llm_markers):
        return SpanType.LLM
    # Checked in order; e.g. "reranker" matches "ranker" only after the
    # "retriever" check has failed.
    for keyword, span_type in (
        ("embedder", SpanType.EMBEDDING),
        ("retriever", SpanType.RETRIEVER),
        ("ranker", SpanType.RERANKER),
        ("agent", SpanType.AGENT),
    ):
        if keyword in label:
            return span_type
    return SpanType.TOOL
class HaystackSpanProcessor(SimpleSpanProcessor):
    def __init__(self):
        """Set up a placeholder exporter and per-thread re-entrancy guards."""
        # SimpleSpanProcessor expects an exporter attribute; spans are actually
        # forwarded to MLflow's span processor in on_start/on_end, so a bare
        # SpanExporter suffices here.
        self.span_exporter = SpanExporter()
        # Maps pipeline identifiers to (input, output) dicts.
        # NOTE(review): populated/read outside this view — confirm usage.
        self._pipeline_io: dict[str, tuple[dict[str, Any], dict[str, Any]]] = {}
        # Thread-local flags guarding on_start/on_end against re-entrant calls.
        self._processing_local = threading.local()
def on_start(self, span: OTelSpan, parent_context: Context | None = None):
# Recursion guard: with MLFLOW_USE_DEFAULT_TRACER_PROVIDER=false (shared provider),
# tracer.span_processor.on_start() routes back through the same composite processor,
# re-entering this method and causing infinite recursion.
if getattr(self._processing_local, "in_on_start", False):
return
self._processing_local.in_on_start = True
try:
tracer = _get_tracer(__name__)
tracer.span_processor.on_start(span, parent_context)
trace_id = generate_trace_id_v3(span)
mlflow_span = create_mlflow_span(span, trace_id)
InMemoryTraceManager.get_instance().register_span(mlflow_span)
finally:
self._processing_local.in_on_start = False
def on_end(self, span: OTelReadableSpan) -> None:
# Recursion guard: with MLFLOW_USE_DEFAULT_TRACER_PROVIDER=false (shared provider),
# tracer.span_processor.on_end() routes back through the same composite processor,
# re-entering this method and causing infinite recursion.
if getattr(self._processing_local, "in_on_end", False):
return
self._processing_local.in_on_end = True
try:
mlflow_span = get_mlflow_span_for_otel_span(span)
if mlflow_span is None:
_logger.debug("Span not found in the map. Skipping end.")
return
with _bypass_attribute_guard(mlflow_span._span):
if span.name in ("haystack.pipeline.run", "haystack.async_pipeline.run"):
self.set_pipeline_info(mlflow_span, span)
elif span.name in ("haystack.component.run"):
self.set_component_info(mlflow_span, span)
tracer = _get_tracer(__name__)
tracer.span_processor.on_end(span)
finally:
self._processing_local.in_on_end = False
def set_component_info(self, mlflow_span: LiveSpan, span: OTelReadableSpan) -> None:
comp_alias = span.attributes.get("haystack.component.name")
comp_type = span.attributes.get("haystack.component.type")
mlflow_span.set_span_type(_infer_span_type_from_haystack(comp_type, comp_alias, span))
# Haystack spans originally have name='haystack.component.run'. We need to update both the
# _name field of the Otel span and the _original_name field of the MLflow span to
# customize the span name here, as otherwise it would be overwritten in the
# deduplication process
span_name = comp_type or comp_alias or span.name
mlflow_span._span._name = span_name
mlflow_span._original_name = span_name
if (inputs := span.attributes.get("haystack.component.input")) is not None:
try:
mlflow_span.set_inputs(json.loads(inputs))
except Exception:
mlflow_span.set_inputs(inputs)
if (outputs := span.attributes.get("haystack.component.output")) is not None:
try:
mlflow_span.set_outputs(json.loads(outputs))
except Exception:
mlflow_span.set_outputs(outputs)
if isinstance(mlflow_span.inputs, dict):
set_span_model_attribute(mlflow_span, mlflow_span.inputs)
if usage := _parse_token_usage(mlflow_span.outputs):
mlflow_span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage)
if should_compute_cost_client_side():
set_span_cost_attribute(mlflow_span)
if parent_id := mlflow_span.parent_id:
key = comp_alias or comp_type or mlflow_span.name
inputs_agg, outputs_agg = self._pipeline_io.setdefault(parent_id, ({}, {}))
if mlflow_span.inputs is not None:
inputs_agg[key] = mlflow_span.inputs
if mlflow_span.outputs is not None:
outputs_agg[key] = mlflow_span.outputs
def set_pipeline_info(self, mlflow_span: LiveSpan, span: OTelReadableSpan) -> None:
# Pipelines are CHAINs
mlflow_span.set_span_type(SpanType.CHAIN)
if pipe_name := span.attributes.get("haystack.pipeline.name"):
mlflow_span._span._name = pipe_name
if (inputs := span.attributes.get("haystack.pipeline.input")) is not None:
try:
mlflow_span.set_inputs(json.loads(inputs))
except Exception:
mlflow_span.set_inputs(inputs)
if (outputs := span.attributes.get("haystack.pipeline.output")) is not None:
try:
mlflow_span.set_outputs(json.loads(outputs))
except Exception:
mlflow_span.set_outputs(outputs)
if mlflow_span.span_id in self._pipeline_io:
inputs_agg, outputs_agg = self._pipeline_io.pop(mlflow_span.span_id)
if mlflow_span.inputs is None and inputs_agg:
mlflow_span.set_inputs(inputs_agg)
if mlflow_span.outputs is None and outputs_agg:
mlflow_span.set_outputs(outputs_agg)
def _parse_token_usage(outputs: Any) -> dict[str, int] | None:
try:
if not isinstance(outputs, dict):
return None
replies = outputs.get("replies")
if isinstance(replies, list) and len(replies) > 0:
usage = (
replies[0].get("meta", {}).get("usage", {}) if isinstance(replies[0], dict) else {}
)
meta = outputs.get("meta")
if isinstance(meta, list) and len(meta) > 0:
usage = meta[0].get("usage", {}) if isinstance(meta[0], dict) else {}
if isinstance(usage, dict):
in_tok = usage.get("prompt_tokens", 0)
out_tok = usage.get("completion_tokens", 0)
tot_tok = usage.get("total_tokens", 0)
return {
TokenUsageKey.INPUT_TOKENS: in_tok,
TokenUsageKey.OUTPUT_TOKENS: out_tok,
TokenUsageKey.TOTAL_TOKENS: tot_tok,
}
except Exception:
_logger.debug("Failed to parse token usage from outputs.", exc_info=True)
def teardown_haystack_tracing():
    """Detach every HaystackSpanProcessor from the active SDK tracer provider."""
    tracer_provider = get_tracer_provider()
    if not isinstance(tracer_provider, SDKTracerProvider):
        # Nothing to clean up on no-op/proxy providers.
        return
    composite = tracer_provider._active_span_processor
    current = getattr(composite, "_span_processors", ())
    composite._span_processors = tuple(
        proc for proc in current if not isinstance(proc, HaystackSpanProcessor)
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/haystack/autolog.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/haystack/test_haystack_tracing.py | from unittest.mock import patch
from haystack import Document, Pipeline, component
from haystack.components.rankers import LostInTheMiddleRanker
from haystack.components.retrievers import InMemoryBM25Retriever
from haystack.document_stores.in_memory import InMemoryDocumentStore
import mlflow
from mlflow.entities import SpanType
from mlflow.environment_variables import MLFLOW_USE_DEFAULT_TRACER_PROVIDER
from mlflow.tracing.constant import SpanAttributeKey
from mlflow.version import IS_TRACING_SDK_ONLY
from tests.tracing.helper import get_traces
@component
class Add:
    # Minimal Haystack component: returns the sum of its two inputs.
    def run(self, a: int, b: int):
        return {"sum": a + b}
@component
class Multiply:
    # Minimal Haystack component: returns the product of its two inputs.
    def run(self, value: int, factor: int):
        return {"product": value * factor}
def test_haystack_autolog_single_trace():
    # A single pipeline run should yield one trace: a CHAIN root span for the
    # pipeline plus one TOOL span for the component.
    mlflow.haystack.autolog()
    pipe = Pipeline()
    pipe.add_component("adder", Add())
    pipe.run({"adder": {"a": 1, "b": 2}})

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert spans[0].span_type == SpanType.CHAIN
    assert spans[0].name == "haystack.pipeline.run"
    assert spans[0].inputs == {"adder": {"a": 1, "b": 2}}
    assert spans[0].outputs == {"adder": {"sum": 3}}
    assert spans[1].span_type == SpanType.TOOL
    assert spans[1].name == "Add"
    assert spans[1].inputs == {"a": 1, "b": 2}
    assert spans[1].outputs == {"sum": 3}

    # After disabling autolog, further runs must not create new traces.
    mlflow.haystack.autolog(disable=True)
    pipe.run({"adder": {"a": 3, "b": 4}})
    assert len(get_traces()) == 1
def test_pipeline_with_multiple_components_single_trace():
    # Two independent components in one pipeline run share a single trace,
    # each with its own TOOL span under the pipeline's CHAIN span.
    mlflow.haystack.autolog()
    pipe = Pipeline()
    pipe.add_component("adder", Add())
    pipe.add_component("multiplier", Multiply())
    pipe.run({"adder": {"a": 1, "b": 2}, "multiplier": {"value": 3, "factor": 4}})

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert spans[0].span_type == SpanType.CHAIN
    assert spans[0].name == "haystack.pipeline.run"
    assert spans[1].span_type == SpanType.TOOL
    assert spans[2].span_type == SpanType.TOOL
    assert spans[1].name == "Add"
    assert spans[2].name == "Multiply"
    assert spans[1].inputs == {"a": 1, "b": 2}
    assert spans[1].outputs == {"sum": 3}
    assert spans[2].inputs == {"value": 3, "factor": 4}
    assert spans[2].outputs == {"product": 12}

    # After disabling autolog, further runs must not create new traces.
    mlflow.haystack.autolog(disable=True)
    pipe.run({"adder": {"a": 1, "b": 2}, "multiplier": {"value": 3, "factor": 4}})
    traces = get_traces()
    assert len(traces) == 1
def test_token_usage_parsed_for_llm_component(mock_litellm_cost):
    """Token usage and model metadata from an LLM-style component reach the span."""
    mlflow.haystack.autolog()

    @component
    class MyLLM:
        def run(self, prompt: str, model: str):
            return {}

    pipe = Pipeline()
    pipe.add_component("my_llm", MyLLM())
    output = {
        "replies": [
            {
                "content": [{"text": "hi"}],
                "meta": {"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3}},
            }
        ]
    }
    with patch.object(MyLLM, "run", return_value=output):
        pipe.run({"my_llm": {"prompt": "hello", "model": "gpt-4"}})

    traces = get_traces()
    assert len(traces) == 1
    span = traces[0].data.spans[1]
    assert span.span_type == SpanType.LLM
    assert span.name == "MyLLM"
    assert span.attributes[SpanAttributeKey.CHAT_USAGE] == {
        "input_tokens": 1,
        "output_tokens": 2,
        "total_tokens": 3,
    }
    assert span.model_name == "gpt-4"
    if not IS_TRACING_SDK_ONLY:
        # Verify cost is calculated (1 input token * 1.0 + 2 output tokens * 2.0)
        assert span.llm_cost == {
            "input_cost": 1.0,
            "output_cost": 4.0,
            "total_cost": 5.0,
        }

    mlflow.haystack.autolog(disable=True)
    with patch.object(MyLLM, "run", return_value=output):
        pipe.run({"my_llm": {"prompt": "hello", "model": "gpt-4"}})
    # BUGFIX: fetch traces AFTER the disabled run. The original captured the
    # trace list before running, so the final assertion checked stale data and
    # could not detect a trace created while autolog was disabled.
    assert len(get_traces()) == 1
def test_autolog_disable():
    """Once autolog is disabled, new pipeline runs stop producing traces."""
    mlflow.haystack.autolog()

    enabled_pipe = Pipeline()
    enabled_pipe.add_component("adder", Add())
    enabled_pipe.run({"adder": {"a": 1, "b": 2}})
    assert len(get_traces()) == 1

    mlflow.haystack.autolog(disable=True)

    disabled_pipe = Pipeline()
    disabled_pipe.add_component("adder", Add())
    disabled_pipe.run({"adder": {"a": 2, "b": 3}})
    # The trace count is unchanged: the second run was not recorded.
    assert len(get_traces()) == 1
def test_in_memory_retriever_component_traced():
    # Retriever components should be classified as RETRIEVER spans and carry
    # the retrieved documents in their outputs.
    mlflow.set_experiment("haystack_retriever")
    mlflow.haystack.autolog()
    store = InMemoryDocumentStore()
    store.write_documents([Document(content="foo")])
    pipe = Pipeline()
    pipe.add_component("retriever", InMemoryBM25Retriever(document_store=store))
    pipe.run({"retriever": {"query": "foo"}})

    traces = get_traces()
    assert len(traces) == 1
    span = traces[0].data.spans[1]
    assert span.span_type == SpanType.RETRIEVER
    assert span.name == "InMemoryBM25Retriever"
    assert span.outputs["documents"][0]["content"] == "foo"
def test_multiple_components_in_pipeline_reranker():
    # A connected retriever -> reranker pipeline: spans follow pipeline order
    # and each component gets its role-specific span type.
    mlflow.haystack.autolog()
    pipe = Pipeline()
    store = InMemoryDocumentStore()
    store.write_documents([Document(content="foo")])
    pipe.add_component("retriever", InMemoryBM25Retriever(document_store=store))
    pipe.add_component("reranker", LostInTheMiddleRanker())
    pipe.connect("retriever.documents", "reranker.documents")
    pipe.run({"retriever": {"query": "foo"}})

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert spans[0].span_type == SpanType.CHAIN
    assert spans[0].name == "haystack.pipeline.run"
    assert spans[1].name == "InMemoryBM25Retriever"
    assert spans[2].name == "LostInTheMiddleRanker"
    assert spans[1].span_type == SpanType.RETRIEVER
    assert spans[2].span_type == SpanType.RERANKER
    assert spans[1].inputs["query"] == "foo"
    assert spans[2].inputs["documents"][0]["content"] == "foo"

    # After disabling autolog, further runs must not create new traces.
    mlflow.haystack.autolog(disable=True)
    pipe.run({"retriever": {"query": "foo"}})
    assert len(get_traces()) == 1
def test_haystack_autolog_shared_provider_no_recursion(monkeypatch):
    # Verify haystack.autolog() works with shared tracer provider (no RecursionError)
    # Exercises the recursion guards in HaystackSpanProcessor.on_start/on_end.
    monkeypatch.setenv(MLFLOW_USE_DEFAULT_TRACER_PROVIDER.name, "false")
    mlflow.haystack.autolog()
    pipe = Pipeline()
    pipe.add_component("adder", Add())
    pipe.run({"adder": {"a": 1, "b": 2}})

    traces = get_traces()
    assert len(traces) == 1
    spans = traces[0].data.spans
    assert spans[0].span_type == SpanType.CHAIN
    assert spans[0].inputs == {"adder": {"a": 1, "b": 2}}
    assert spans[0].outputs == {"adder": {"sum": 3}}
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/haystack/test_haystack_tracing.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/xtest_viz.py | # /// script
# dependencies = [
# "aiohttp",
# ]
# ///
"""
Script to visualize cross-version test results for MLflow autologging and models.
This script fetches scheduled workflow run results from GitHub Actions and generates
a markdown table showing the test status for different package versions across
different dates.
Usage:
uv run dev/xtest_viz.py # Fetch last 14 days from mlflow/dev
uv run dev/xtest_viz.py --days 30 # Fetch last 30 days
uv run dev/xtest_viz.py --repo mlflow/mlflow # Use different repo
Example output (truncated for brevity):
| Name | 2024-01-15 | 2024-01-14 | 2024-01-13 |
|----------------------------------------|------------|------------|------------|
| test1 (sklearn, 1.3.1, autologging...) | [✅](link) | [✅](link) | [❌](link) |
| test1 (pytorch, 2.1.0, models...) | [✅](link) | [⚠️](link) | [✅](link) |
| test2 (xgboost, 2.0.0, autologging...) | [❌](link) | [✅](link) | — |
Where:
✅ = success
❌ = failure
⚠️ = cancelled
❓ = unknown status
— = no data
"""
import argparse
import asyncio
import os
import re
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Any, cast
import aiohttp
@dataclass
class JobResult:
    """A single job's outcome: parsed test name, run date (MM/DD), and a markdown status link."""

    name: str
    date: str
    status: str
class XTestViz:
    """Fetch cross-version test results from GitHub Actions and render them.

    Collects completed, scheduled runs of the ``cross-version-tests.yml``
    workflow for a repository, gathers per-job conclusions, and pivots them
    into a markdown table of test name vs. run date.
    """

    def __init__(self, github_token: str | None = None, repo: str = "mlflow/dev"):
        """Initialize the client.

        Args:
            github_token: GitHub token; falls back to the GH_TOKEN env var.
            repo: Repository in ``owner/repo`` format.
        """
        self.github_token = github_token or os.environ.get("GH_TOKEN")
        self.repo = repo
        # Page size used for all GitHub REST API pagination below.
        self.per_page = 30
        self.headers: dict[str, str] = {}
        if self.github_token:
            self.headers["Authorization"] = f"token {self.github_token}"
            # NOTE(review): the Accept header is only sent when a token is
            # present — confirm this is intentional.
            self.headers["Accept"] = "application/vnd.github.v3+json"

    def status_to_emoji(self, status: str) -> str | None:
        """Convert job status to emoji representation.

        Returns None for skipped status to indicate it should be filtered out.
        """
        match status:
            case "success":
                return "✅"
            case "failure":
                return "❌"
            case "cancelled":
                return "⚠️"
            case "skipped":
                return None
            case _:
                return "❓"

    def parse_job_name(self, job_name: str) -> str:
        """Extract string inside parentheses from job name.

        Examples:
            - "test1 (sklearn / autologging / 1.3.1)" -> "sklearn / autologging / 1.3.1"
            - "test2 (pytorch / models / 2.1.0)" -> "pytorch / models / 2.1.0"

        Returns:
            str: Content inside parentheses, or original name if no parentheses found
        """
        # Pattern to match: anything (content)
        pattern = r"\(([^)]+)\)"
        if match := re.search(pattern, job_name.strip()):
            return match.group(1).strip()
        return job_name

    async def _make_request(
        self,
        session: aiohttp.ClientSession,
        url: str,
        params: dict[str, str] | None = None,
    ) -> dict[str, Any]:
        """Make an async HTTP GET request and return JSON response."""
        async with session.get(url, headers=self.headers, params=params) as response:
            response.raise_for_status()
            return cast(dict[str, Any], await response.json())

    async def get_workflow_runs(
        self, session: aiohttp.ClientSession, days_back: int = 30
    ) -> list[dict[str, Any]]:
        """Fetch cross-version test workflow runs from the last N days."""
        since_date = (datetime.now() - timedelta(days=days_back)).isoformat()
        print(f"Fetching scheduled workflow runs from last {days_back} days...", file=sys.stderr)
        all_runs: list[dict[str, Any]] = []
        page = 1
        while True:
            params = {
                "per_page": str(self.per_page),
                "page": str(page),
                "created": f">={since_date}",
                "status": "completed",
                "event": "schedule",
            }
            url = f"https://api.github.com/repos/{self.repo}/actions/workflows/cross-version-tests.yml/runs"
            data = await self._make_request(session, url, params=params)
            runs = data.get("workflow_runs", [])
            if not runs:
                break
            all_runs.extend(runs)
            print(f"  Fetched page {page} ({len(runs)} runs)", file=sys.stderr)
            # A short page means this was the last page.
            if len(runs) < self.per_page:
                break
            page += 1
        print(f"Found {len(all_runs)} scheduled workflow runs total", file=sys.stderr)
        return all_runs

    async def get_workflow_jobs(
        self, session: aiohttp.ClientSession, run_id: int
    ) -> list[dict[str, Any]]:
        """Get jobs for a specific workflow run."""
        all_jobs: list[dict[str, Any]] = []
        page = 1
        while True:
            params = {"per_page": str(self.per_page), "page": str(page)}
            url = f"https://api.github.com/repos/{self.repo}/actions/runs/{run_id}/jobs"
            data = await self._make_request(session, url, params=params)
            jobs = data.get("jobs", [])
            if not jobs:
                break
            all_jobs.extend(jobs)
            if len(jobs) < self.per_page:
                break
            page += 1
        return all_jobs

    async def _fetch_run_jobs(
        self, session: aiohttp.ClientSession, run: dict[str, Any]
    ) -> list[JobResult]:
        """Fetch jobs for a single workflow run."""
        run_id = run["id"]
        run_date = datetime.fromisoformat(run["created_at"].replace("Z", "+00:00")).strftime(
            "%m/%d"
        )
        jobs = await self.get_workflow_jobs(session, run_id)
        data_rows = []
        for job in jobs:
            emoji = self.status_to_emoji(job["conclusion"])
            if emoji is None:  # Skip this job
                continue
            job_url = job["html_url"]
            status_link = f"[{emoji}]({job_url})"
            parsed_name = self.parse_job_name(job["name"])
            data_rows.append(
                JobResult(
                    name=parsed_name,
                    date=run_date,
                    status=status_link,
                )
            )
        return data_rows

    async def fetch_all_jobs(self, days_back: int = 30) -> list[JobResult]:
        """Fetch all jobs from workflow runs in the specified time period."""
        async with aiohttp.ClientSession() as session:
            workflow_runs = await self.get_workflow_runs(session, days_back)
            if not workflow_runs:
                return []
            print(
                f"Fetching jobs for {len(workflow_runs)} workflow runs concurrently...",
                file=sys.stderr,
            )
            tasks = [self._fetch_run_jobs(session, run) for run in workflow_runs]
            # return_exceptions=True: one failed run must not abort the rest.
            results = await asyncio.gather(*tasks, return_exceptions=True)
            data_rows: list[JobResult] = []
            for i, result in enumerate(results, 1):
                if isinstance(result, BaseException):
                    print(f"  Error fetching jobs for run {i}: {result}", file=sys.stderr)
                else:
                    data_rows.extend(result)
                    print(
                        f"  Completed {i}/{len(workflow_runs)} ({len(result)} jobs)",
                        file=sys.stderr,
                    )
            return data_rows

    def _pivot_job_results(
        self, data_rows: list[JobResult]
    ) -> tuple[dict[str, dict[str, str]], list[str], list[str]]:
        """Pivot job results data into a format suitable for table rendering.

        Args:
            data_rows: List of job results to pivot

        Returns:
            Tuple of (pivot_data, sorted_dates, sorted_names) where:
            - pivot_data: Dictionary mapping name -> date -> status
            - sorted_dates: List of dates sorted in reverse chronological order
            - sorted_names: List of test names sorted alphabetically
        """
        pivot_data: dict[str, dict[str, str]] = {}
        all_dates: set[str] = set()
        for row in data_rows:
            if row.name not in pivot_data:
                pivot_data[row.name] = {}
            # Use first occurrence for each name-date combination
            if row.date not in pivot_data[row.name]:
                pivot_data[row.name][row.date] = row.status
                all_dates.add(row.date)
        # Sort dates in reverse order (newest first)
        sorted_dates = sorted(all_dates, reverse=True)
        # Sort names alphabetically
        sorted_names = sorted(pivot_data.keys())
        return pivot_data, sorted_dates, sorted_names

    def _build_markdown_table(
        self,
        pivot_data: dict[str, dict[str, str]],
        sorted_dates: list[str],
        sorted_names: list[str],
    ) -> str:
        """Build a markdown table from pivoted data.

        Args:
            pivot_data: Dictionary mapping name -> date -> status
            sorted_dates: List of dates (columns) in desired order
            sorted_names: List of test names (rows) in desired order

        Returns:
            Markdown-formatted table as a string
        """
        headers = ["Name"] + sorted_dates
        # Calculate column widths
        col_widths = [len(h) for h in headers]
        for name in sorted_names:
            col_widths[0] = max(col_widths[0], len(name))
            for i, date in enumerate(sorted_dates, 1):
                value = pivot_data[name].get(date, "—")
                col_widths[i] = max(col_widths[i], len(value))
        # Build table rows
        lines = []
        # Header row
        header_row = "| " + " | ".join(h.ljust(col_widths[i]) for i, h in enumerate(headers)) + " |"
        lines.append(header_row)
        # Separator row
        separator = "| " + " | ".join("-" * w for w in col_widths) + " |"
        lines.append(separator)
        # Data rows
        for name in sorted_names:
            row_values = [name.ljust(col_widths[0])]
            for i, date in enumerate(sorted_dates, 1):
                value = pivot_data[name].get(date, "—")
                row_values.append(value.ljust(col_widths[i]))
            lines.append("| " + " | ".join(row_values) + " |")
        return "\n".join(lines)

    def render_results_table(self, data_rows: list[JobResult]) -> str:
        """Render job data as a markdown table."""
        if not data_rows:
            return "No test jobs found."
        pivot_data, sorted_dates, sorted_names = self._pivot_job_results(data_rows)
        return self._build_markdown_table(pivot_data, sorted_dates, sorted_names)

    async def generate_results_table(self, days_back: int = 30) -> str:
        """Generate markdown table of cross-version test results."""
        data_rows = await self.fetch_all_jobs(days_back)
        if not data_rows:
            return "No workflow runs found in the specified time period."
        return self.render_results_table(data_rows)
async def main() -> None:
    """CLI entry point: parse arguments, fetch results, and print the table."""
    arg_parser = argparse.ArgumentParser(description="Visualize MLflow cross-version test results")
    arg_parser.add_argument(
        "--days", type=int, default=14, help="Number of days back to fetch results (default: 14)"
    )
    arg_parser.add_argument(
        "--repo",
        default="mlflow/dev",
        help="GitHub repository in owner/repo format (default: mlflow/dev)",
    )
    arg_parser.add_argument("--token", help="GitHub token (default: use GH_TOKEN env var)")
    opts = arg_parser.parse_args()

    gh_token = opts.token or os.environ.get("GH_TOKEN")
    if not gh_token:
        # Unauthenticated requests hit GitHub's much lower rate limits.
        print(
            "Warning: No GitHub token provided. API requests may be rate-limited.", file=sys.stderr
        )
        print("Set GH_TOKEN environment variable or use --token option.", file=sys.stderr)

    viz = XTestViz(github_token=gh_token, repo=opts.repo)
    print(await viz.generate_results_table(opts.days))
if __name__ == "__main__":
    # Script entry point: run the async CLI to completion.
    asyncio.run(main())
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/xtest_viz.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/base.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel, Field
from mlflow.entities.trace import Trace
from mlflow.genai.judges.constants import (
_RATIONALE_FIELD_DESCRIPTION,
_RESULT_FIELD_DESCRIPTION,
)
from mlflow.genai.judges.utils import get_default_optimizer
from mlflow.genai.scorers.base import Scorer, ScorerKind
from mlflow.telemetry.events import AlignJudgeEvent
from mlflow.telemetry.track import record_usage_event
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class AlignmentOptimizer(ABC):
    """
    Abstract base class for judge alignment optimizers.

    Alignment optimizers improve judge accuracy by learning from traces
    that contain human feedback.
    """

    # Implementations return a NEW aligned judge rather than mutating the input.
    @abstractmethod
    def align(self, judge: Judge, traces: list[Trace]) -> Judge:
        """
        Align a judge using the provided traces.

        Args:
            judge: The judge to be optimized
            traces: List of traces containing alignment data (feedback)

        Returns:
            A new Judge instance that is better aligned with the input traces.
        """
class JudgeField(BaseModel):
    """
    Represents a field definition for judges with name and description.

    Used to define input and output fields for judge evaluation signatures.
    """

    name: str = Field(..., description="Name of the field")
    description: str = Field(..., description="Description of what the field represents")
    # NOTE: value_type holds a Python type object (default: str), not an instance.
    value_type: Any = Field(default=str, description="Type of the field's value")
@experimental(version="3.4.0")
class Judge(Scorer):
    """
    Base class for LLM-as-a-judge scorers that can be aligned with human feedback.

    Judges are specialized scorers that use LLMs to evaluate outputs based on
    configurable criteria and the results of human-provided feedback alignment.
    """

    @property
    def kind(self) -> ScorerKind:
        # All judges are reported as built-in scorers.
        return ScorerKind.BUILTIN

    @property
    @abstractmethod
    def instructions(self) -> str:
        """
        Plain text instructions of what this judge evaluates.
        """

    @property
    @abstractmethod
    def feedback_value_type(self) -> Any:
        """
        Type of the feedback value.
        """

    @abstractmethod
    def get_input_fields(self) -> list[JudgeField]:
        """
        Get the input fields for this judge.

        Returns:
            List of JudgeField objects defining the input fields.
        """

    @classmethod
    def get_output_fields(cls) -> list[JudgeField]:
        """
        Get the standard output fields used by all judges.

        This is the source of truth for judge output field definitions.

        Returns:
            List of JudgeField objects defining the standard output fields.
        """
        return [
            JudgeField(name="result", description=_RESULT_FIELD_DESCRIPTION, value_type=str),
            JudgeField(
                name="rationale",
                description=_RATIONALE_FIELD_DESCRIPTION,
                value_type=str,
            ),
        ]

    @experimental(version="3.4.0")
    @record_usage_event(AlignJudgeEvent)
    def align(self, traces: list[Trace], optimizer: AlignmentOptimizer | None = None) -> Judge:
        """
        Align this judge with human preferences using the provided optimizer and traces.

        Args:
            traces: Training traces for alignment
            optimizer: The alignment optimizer to use. If None, uses the default SIMBA optimizer.

        Returns:
            A new Judge instance that is better aligned with the input traces.

        Raises:
            NotImplementedError: If called on a session-level scorer. Alignment is currently
                only supported for single-turn scorers.

        Note on Logging:
            By default, alignment optimization shows minimal progress information.
            To see detailed optimization output, set the optimizer's logger to DEBUG::

                import logging

                # For SIMBA optimizer (default)
                logging.getLogger("mlflow.genai.judges.optimizers.simba").setLevel(logging.DEBUG)
        """
        if self.is_session_level_scorer:
            raise NotImplementedError("Alignment is not supported for session-level scorers.")
        if optimizer is None:
            # Lazily fall back to the default optimizer (SIMBA, per docstring).
            optimizer = get_default_optimizer()
        return optimizer.align(self, traces)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/base.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/builtin_judges.py | from mlflow.genai.judges.base import Judge
from mlflow.genai.scorers.builtin_scorers import BuiltInScorer
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class BuiltinJudge(BuiltInScorer, Judge):
    """
    Base class for built-in AI judge scorers that use LLMs for evaluation.

    Combines the ``BuiltInScorer`` and ``Judge`` base classes via multiple
    inheritance; it adds no behavior of its own.
    """
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/builtin_judges.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/constants.py | _DATABRICKS_DEFAULT_JUDGE_MODEL = "databricks"
_DATABRICKS_AGENTIC_JUDGE_MODEL = "gpt-oss-120b"
# Use case constants for chat completions
USE_CASE_BUILTIN_JUDGE = "builtin_judge"
USE_CASE_AGENTIC_JUDGE = "agentic_judge"
USE_CASE_CUSTOM_PROMPT_JUDGE = "custom_prompt_judge"
USE_CASE_JUDGE_ALIGNMENT = "judge_alignment"
# Common affirmative values that should map to YES
# NOTE(review): entries are all lowercase — presumably matched against
# lowercased/stripped judge output; confirm normalization at the call site.
_AFFIRMATIVE_VALUES = frozenset(
    [
        "true",
        "pass",
        "passed",
        "correct",
        "success",
        "1",
        "1.0",
        "yes",
        "y",
        "yea",
        "yeah",
        "affirmative",
        "absolutely",
        "certainly",
        "indeed",
        "sure",
        "ok",
        "okay",
        "agree",
        "accepted",
        "right",
        "positive",
        "accurate",
        "valid",
        "validity",
        "confirmed",
        "approved",
        "complete",
        "completed",
        "good",
        "great",
        "excellent",
        "active",
        "enabled",
        "on",
        "present",
        "found",
        "match",
        "matched",
        "validated",
        "approve",
        "accept",
        "pos",
    ]
)
# Common negative values that should map to NO
# NOTE(review): entries are all lowercase — presumably matched against
# lowercased/stripped judge output; confirm normalization at the call site.
_NEGATIVE_VALUES = frozenset(
    [
        "false",
        "fail",
        "failed",
        "incorrect",
        "failure",
        "0",
        "0.0",
        "no",
        "n",
        "nah",
        "nope",
        "negative",
        "reject",
        "rejected",
        "disagree",
        "not approved",
        "invalid",
        "inaccurate",
        "wrong",
        "declined",
        "denied",
        "incomplete",
        "bad",
        "poor",
        "inactive",
        "disabled",
        "off",
        "missing",
        "absent",
        "notfound",
        "mismatch",
        "mismatched",
        "none",
        "null",
        "nil",
        "deny",
        "disapprove",
        "disapproved",
        "neg",
    ]
)
# Descriptions for the standard judge output fields ("result" / "rationale").
_RESULT_FIELD_DESCRIPTION = "The evaluation rating/result"
_RATIONALE_FIELD_DESCRIPTION = "Detailed explanation for the evaluation"
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/constants.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/instructions_judge/constants.py | """
Constants for the InstructionsJudge module.
This module contains constant values used by the InstructionsJudge class,
including the augmented prompt template for trace-based evaluation.
"""
# Common base prompt for all judge evaluations
JUDGE_BASE_PROMPT = """You are an expert judge tasked with evaluating the performance of an AI
agent on a particular query. You will be given instructions that describe the criteria and
methodology for evaluating the agent's performance on the query."""
# Simple system prompt for field-based evaluation
INSTRUCTIONS_JUDGE_SYSTEM_PROMPT = JUDGE_BASE_PROMPT + "\n\nYour task: {{instructions}}."
# Augmented prompt template for trace-based evaluation
INSTRUCTIONS_JUDGE_TRACE_PROMPT_TEMPLATE = (
JUDGE_BASE_PROMPT
+ """ Your job is to analyze a trace of the agent's execution on the
query and provide an evaluation rating in accordance with the instructions.
A *trace* is a step-by-step record of how the agent processed the query, including the input query
itself, all intermediate steps, decisions, and outputs. Each step in a trace is represented as a
*span*, which includes the inputs and outputs of that step, as well as latency information and
metadata.
The instructions containing the evaluation criteria and methodology are provided below, and they
refer to a placeholder called {{{{ trace }}}}. To read the actual trace, you will need to use the
tools provided to you. These tools enable you to 1. fetch trace metadata, timing, & execution
details, 2. list all spans in the trace with inputs and outputs, 3. search for specific text or
patterns across the entire trace, and much more. These tools do *not* require you to specify a
particular trace; the tools will select the relevant trace automatically (however, you *will* need
to specify *span* IDs when retrieving specific spans).
In order to follow the instructions precisely and correctly, you must think methodically and act
step-by-step:
1. Thoroughly read the instructions to understand what information you need to gather from the trace
in order to perform the evaluation, according to the criteria and methodology specified.
2. Look at the tools available to you, and use as many of them as necessary in order to gather the
information you need from the trace.
3. Carefully read and analyze the information you gathered.
4. Think critically about whether you have enough information to produce an evaluation rating in
accordance with the instructions. If you do not have enough information, or if you suspect that
there is additional relevant information in the trace that you haven't gathered, then go back
to steps 2 and 3.
5. Once you have gathered enough information, provide your evaluation rating in accordance with the
instructions.
You *must* format your evaluation rating as a JSON object with the following fields. Pay close
attention to the field type of the evaluation rating (string, boolean, numeric, etc.), and ensure
that it conforms to the instructions.
Evaluation Rating Fields
------------------------
{evaluation_rating_fields}
Instructions
------------------------
{instructions}
"""
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/instructions_judge/constants.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/make_judge.py | from typing import Any, Literal, get_args, get_origin
from mlflow.genai.judges.base import Judge
from mlflow.genai.judges.instructions_judge import InstructionsJudge
from mlflow.telemetry.events import MakeJudgeEvent
from mlflow.telemetry.track import record_usage_event
from mlflow.utils.annotations import experimental
def _validate_feedback_value_type(feedback_value_type: Any) -> None:
    """
    Validate that feedback_value_type is one of the supported types for serialization.

    Supported types match FeedbackValueType:
    - PbValueType: int, float, str, bool
    - Literal types with PbValueType values
    - dict[str, PbValueType]
    - list[PbValueType]

    Args:
        feedback_value_type: The candidate type annotation to validate.

    Raises:
        MlflowException: If the type is not a supported FeedbackValueType.
    """
    # Imported lazily to keep module import lightweight; hoisted here once instead
    # of being re-imported in every error branch.
    from mlflow.entities.assessment import PbValueType
    from mlflow.exceptions import MlflowException

    # Check for basic PbValueType (float, int, str, bool)
    pb_value_types = get_args(PbValueType)
    if feedback_value_type in pb_value_types:
        return
    origin = get_origin(feedback_value_type)
    # Check for Literal type: every literal value must itself be a primitive
    if origin is Literal:
        for value in get_args(feedback_value_type):
            if not isinstance(value, pb_value_types):
                # NOTE: trailing space added after "Literal type" — the previous
                # implicit string concatenation produced "typewith".
                raise MlflowException.invalid_parameter_value(
                    "The `feedback_value_type` argument does not support a Literal type "
                    f"with non-primitive types, but got {type(value).__name__}. "
                    f"Literal values must be str, int, float, or bool."
                )
        return
    # Check for dict[str, PbValueType]
    if origin is dict:
        args = get_args(feedback_value_type)
        if len(args) == 2:
            key_type, value_type = args
            # Key must be str
            if key_type != str:
                raise MlflowException.invalid_parameter_value(
                    f"dict key type must be str, got {key_type}"
                )
            # Value must be a PbValueType
            if value_type not in pb_value_types:
                raise MlflowException.invalid_parameter_value(
                    "The `feedback_value_type` argument does not support a dict type "
                    f"with non-primitive values, but got {value_type.__name__}"
                )
        return
    # Check for list[PbValueType]
    if origin is list:
        args = get_args(feedback_value_type)
        if len(args) == 1:
            element_type = args[0]
            # Element must be a PbValueType
            if element_type not in pb_value_types:
                raise MlflowException.invalid_parameter_value(
                    "The `feedback_value_type` argument does not support a list type "
                    f"with non-primitive values, but got {element_type.__name__}"
                )
        return
    # Anything else (Pydantic models, unparameterized generics, ...) is unsupported.
    raise MlflowException.invalid_parameter_value(
        f"Unsupported feedback_value_type: {feedback_value_type}. "
        f"Supported types (FeedbackValueType): str, int, float, bool, Literal[...], "
        f"as well as a dict and list of these types. "
        f"Pydantic BaseModel types are not supported."
    )
@experimental(version="3.4.0")
@record_usage_event(MakeJudgeEvent)
def make_judge(
    name: str,
    instructions: str,
    model: str | None = None,
    description: str | None = None,
    feedback_value_type: Any = None,
    inference_params: dict[str, Any] | None = None,
) -> Judge:
    """
    Create a custom MLflow judge instance.

    .. note::
        As of MLflow 3.4.0, this function is deprecated in favor of
        `mlflow.genai.make_judge` and may be removed in a future version.

    Args:
        name: The name of the judge.
        instructions: Natural language instructions for evaluation. Must contain at
            least one template variable: {{ inputs }}, {{ outputs }},
            {{ expectations }}, {{ conversation }}, or {{ trace }} to reference
            evaluation data. Custom variables are not supported.
            Note: {{ conversation }} can only coexist with {{ expectations }}; it
            cannot be combined with {{ inputs }}, {{ outputs }}, or {{ trace }}.
        model: The model identifier to use for evaluation (e.g., "openai:/gpt-4").
        description: A description of what the judge evaluates.
        feedback_value_type: Type specification for the 'value' field of the
            returned Feedback object, enforced via structured outputs. Supported
            (matching FeedbackValueType): int, float, str, bool, Literal[...] of
            those primitives, dict[str, primitive], and list[primitive]. Pydantic
            BaseModel types are not supported. Defaults to str when unspecified;
            explicitly specifying the type is recommended.
        inference_params: Optional dictionary of inference parameters passed to the
            model (e.g., temperature, top_p, max_tokens) for fine-grained control
            over evaluation behavior — for example, a lower temperature yields more
            deterministic, reproducible ratings.

    Returns:
        An InstructionsJudge instance configured with the provided parameters.

    Example:
        .. code-block:: python

            from typing import Literal
            from mlflow.genai.judges import make_judge

            # Judge that checks whether an answer addresses the question
            quality_judge = make_judge(
                name="response_quality",
                instructions=(
                    "Evaluate if the response in {{ outputs }} correctly answers "
                    "the question in {{ inputs }}."
                ),
                model="openai:/gpt-4",
                feedback_value_type=Literal["yes", "no"],
            )
            feedback = quality_judge(
                inputs={"question": "What is machine learning?"},
                outputs="ML lets computers learn patterns from data.",
            )

            # Judges may also reference {{ expectations }}, a whole {{ trace }},
            # or a {{ conversation }} built from session traces, and can be
            # aligned with human feedback via `judge.align(traces)`.
    """
    # Fall back to plain-string feedback when no type was given; this matches the
    # behavior of MLflow <= 3.5.x.
    # TODO: Implement logic to allow the LLM to choose the appropriate value type if not specified
    resolved_type = str if feedback_value_type is None else feedback_value_type
    _validate_feedback_value_type(resolved_type)
    return InstructionsJudge(
        name=name,
        instructions=instructions,
        model=model,
        description=description,
        feedback_value_type=resolved_type,
        inference_params=inference_params,
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/make_judge.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/optimizers/dspy.py | """DSPy-based alignment optimizer implementation."""
import logging
from abc import abstractmethod
from typing import Any, Callable, ClassVar, Collection
from mlflow.entities.assessment import Feedback
from mlflow.entities.trace import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.base import AlignmentOptimizer, Judge
from mlflow.genai.judges.optimizers.dspy_utils import (
_check_dspy_installed,
agreement_metric,
append_input_fields_section,
construct_dspy_lm,
create_dspy_signature,
format_demos_as_examples,
trace_to_dspy_example,
)
from mlflow.genai.judges.utils import (
_suppress_litellm_nonfatal_errors,
get_default_model,
)
from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE
from mlflow.utils.annotations import experimental
_check_dspy_installed()
import dspy
_logger = logging.getLogger(__name__)
@experimental(version="3.4.0")
class DSPyAlignmentOptimizer(AlignmentOptimizer):
    """
    Abstract base class for DSPy-based alignment optimizers.

    Provides common functionality for converting MLflow traces to DSPy examples
    and handling DSPy program compilation.
    """

    # Instance logger named after the concrete subclass (set in __init__).
    _logger: logging.Logger
    # Model URI used by the *optimizer* itself (may differ from the judge's model).
    _model: str
    # Lower bound on labeled traces needed before optimization is attempted.
    _MINIMUM_TRACES_REQUIRED_FOR_OPTIMIZATION: ClassVar[int] = 10

    @classmethod
    def get_min_traces_required(cls) -> int:
        """Get the minimum number of traces required for optimization.

        Returns:
            The minimum number of traces required for optimization.
        """
        return cls._MINIMUM_TRACES_REQUIRED_FOR_OPTIMIZATION

    @property
    def model(self) -> str:
        """Get the model used by this optimizer."""
        return self._model

    def __init__(self, model: str | None = None, **kwargs):
        """
        Initialize DSPy optimizer with common parameters.

        Args:
            model: Model to use for DSPy optimization. If None, uses get_default_model().
            kwargs: Additional keyword arguments.
        """
        super().__init__(**kwargs)
        self._logger = logging.getLogger(self.__class__.__name__)
        self._model = model if model is not None else get_default_model()

    @abstractmethod
    def _dspy_optimize(
        self,
        program: "dspy.Predict",
        examples: Collection["dspy.Example"],
        metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
    ) -> "dspy.Predict":
        """
        Perform DSPy optimization with algorithm-specific parameters.

        Each implementation can decide how to split the data internally if needed.

        Args:
            program: The DSPy Predict program to optimize
            examples: Examples for optimization (implementations decide how to split)
            metric_fn: Metric function for optimization

        Returns:
            Optimized DSPy Predict program
        """

    def _create_judge_from_dspy_program(
        self,
        optimized_program: "dspy.Predict",
        original_judge: Judge,
    ) -> Judge:
        """
        Create a judge from an optimized DSPy program.

        This method combines instruction post-processing (appending input fields section)
        and demo formatting into a single operation that returns a ready-to-use judge.

        Args:
            optimized_program: The optimized DSPy Predict program
            original_judge: The original judge (to get name, model, field names, etc.)

        Returns:
            A new Judge instance with processed instructions and demos included
        """
        optimized_instructions = optimized_program.signature.instructions
        # Ensure the input field names survive whatever rewriting DSPy did.
        instructions = append_input_fields_section(optimized_instructions, original_judge)
        # SIMBA-style optimizers attach few-shot demos; prepend them when present.
        demos = getattr(optimized_program, "demos", [])
        if demos_text := format_demos_as_examples(demos, original_judge):
            instructions = demos_text + "\n\n" + instructions
            self._logger.info(f"Including {len(demos)} demos from optimization")
        return make_judge(
            name=original_judge.name,
            instructions=instructions,
            model=original_judge.model,
            feedback_value_type=original_judge.feedback_value_type,
        )

    def _get_dspy_program_from_judge(self, judge: Judge) -> Any:
        """Convert a judge into a DSPy Predict module."""
        # Bound method captured so the nested class body doesn't close over `self`.
        create_judge_from_dspy_program = self._create_judge_from_dspy_program

        class CustomPredict(dspy.Predict):
            """
            Custom DSPy Predict class that uses the judge's model for evaluations.

            This ensures the optimized DSPy program uses the judge's model,
            while allowing the optimizer itself to use a different model.
            """

            def __init__(self, original_judge: Judge):
                super().__init__(create_dspy_signature(original_judge))
                self._original_judge: Judge = original_judge

            def forward(self, *args, **kwargs):
                # Extract _trace before filtering (DSPy convention for disabling trace)
                should_trace = kwargs.pop("_trace", True)
                # Filter kwargs to only include the judge's input fields
                input_field_names = {f.name for f in self._original_judge.get_input_fields()}
                judge_kwargs = {k: v for k, v in kwargs.items() if k in input_field_names}
                # Rebuild a judge from the *current* state of this program so that
                # instruction edits made during optimization take effect immediately.
                created_judge: Judge = create_judge_from_dspy_program(
                    optimized_program=self,
                    original_judge=self._original_judge,
                )
                feedback: Feedback = created_judge(**judge_kwargs)
                pred = dspy.Prediction(
                    result=feedback.value,
                    rationale=feedback.rationale,
                )
                # Manually record a consistent trace for optimizers that depend on it (e.g., GEPA)
                if should_trace and dspy.settings.trace is not None:
                    trace = dspy.settings.trace
                    max_trace_size = getattr(dspy.settings, "max_trace_size", float("inf"))
                    if max_trace_size > 0:
                        # Evict the oldest entry when the bounded trace is full.
                        if len(trace) >= max_trace_size:
                            trace.pop(0)
                        trace.append((self, {**kwargs}, pred))
                return pred

        return CustomPredict(judge)

    @_suppress_litellm_nonfatal_errors
    def align(self, judge: Judge, traces: list[Trace]) -> Judge:
        """
        Main alignment method that orchestrates the DSPy optimization process.

        1. Extract judge instructions and create DSPy signature
        2. Convert traces to DSPy examples
        3. Create and compile DSPy optimizer
        4. Generate optimized judge from results

        Args:
            judge: The judge to be optimized
            traces: List of traces containing alignment data.
                The implementation will split these traces internally for train/validation.

        Returns:
            A new optimized Judge instance
        """
        try:
            if not traces:
                raise MlflowException(
                    "No traces provided for alignment",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            self._logger.debug(f"Setting up DSPy context with model: {self._model}")
            # Configure DSPy to use the optimizer's model
            # This ensures the optimizer uses its own model, separate from the judge's model
            optimizer_lm = construct_dspy_lm(self._model)
            with dspy.context(lm=optimizer_lm):
                # Create DSPy program that will simulate the judge
                program = self._get_dspy_program_from_judge(judge)
                self._logger.debug("Created DSPy program with signature using judge's model")
                # Convert traces to DSPy format; traces without usable labels yield None.
                dspy_examples = []
                for trace in traces:
                    example = trace_to_dspy_example(trace, judge)
                    if example is not None:
                        dspy_examples.append(example)
                self._logger.info(
                    f"Preparing optimization with {len(dspy_examples)} examples "
                    f"from {len(traces)} traces"
                )
                if not dspy_examples:
                    raise MlflowException(
                        f"No valid examples could be created from traces. "
                        f"Ensure that the provided traces contain Feedback entries "
                        f"with name {judge.name}",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                min_traces = self.get_min_traces_required()
                if len(dspy_examples) < min_traces:
                    raise MlflowException(
                        f"At least {min_traces} valid traces are required for optimization. "
                        f"Label more traces with Feedback entries with name {judge.name}",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                self._logger.debug("Starting DSPy optimization...")
                # Use the algorithm-specific optimization method
                # Each implementation decides how to handle data splitting
                optimized_program = self._dspy_optimize(program, dspy_examples, agreement_metric)
                self._logger.debug("DSPy optimization completed")
                if not isinstance(optimized_program, dspy.Predict):
                    raise MlflowException(
                        f"Optimizer returned {type(optimized_program).__name__}, "
                        "expected dspy.Predict. Custom optimizers must return a "
                        "Predict instance from _dspy_optimize().",
                        error_code=INTERNAL_ERROR,
                    )
                return self._create_judge_from_dspy_program(
                    optimized_program=optimized_program,
                    original_judge=judge,
                )
        except Exception as e:
            # NOTE(review): this also re-wraps the MlflowExceptions raised above,
            # replacing their error codes with INTERNAL_ERROR.
            raise MlflowException(
                f"Alignment optimization failed: {e!s}", error_code=INTERNAL_ERROR
            ) from e
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/dspy.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/optimizers/dspy_utils.py | """Utility functions for DSPy-based alignment optimizers."""
import logging
import os
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional
from mlflow import __version__ as VERSION
from mlflow.entities.assessment_source import AssessmentSourceType
from mlflow.entities.trace import Trace
from mlflow.exceptions import INVALID_PARAMETER_VALUE, MlflowException
from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import (
call_chat_completions,
)
from mlflow.genai.judges.base import Judge
from mlflow.genai.judges.constants import (
_DATABRICKS_DEFAULT_JUDGE_MODEL,
USE_CASE_JUDGE_ALIGNMENT,
)
from mlflow.genai.utils.trace_utils import (
extract_expectations_from_trace,
extract_request_from_trace,
extract_response_from_trace,
)
from mlflow.metrics.genai.model_utils import (
_parse_model_uri,
convert_mlflow_uri_to_litellm,
)
from mlflow.utils import AttrDict
# Import dspy - raise exception if not installed
try:
import dspy
except ImportError:
raise MlflowException("DSPy library is required but not installed")
if TYPE_CHECKING:
from mlflow.genai.judges.base import Judge
_logger = logging.getLogger(__name__)
@contextmanager
def suppress_verbose_logging(
    logger_name: str, threshold_level: int = logging.DEBUG
) -> Iterator[None]:
    """
    Temporarily quiet a verbose logger for the duration of a ``with`` block.

    The target logger is raised to WARNING, but only when MLflow's own module
    logger is configured above ``threshold_level`` — so a user who explicitly
    enables DEBUG logging still sees the detailed output.

    Args:
        logger_name: Name of the logger to control.
        threshold_level: Only suppress if the MLflow logger is above this level.
            Defaults to logging.DEBUG.

    Example:
        >>> with suppress_verbose_logging("some.verbose.library"):
        ...     result = run_optimization()
    """
    target = logging.getLogger(logger_name)
    saved_level = target.level
    try:
        if _logger.getEffectiveLevel() > threshold_level:
            target.setLevel(logging.WARNING)
        yield
    finally:
        # Always restore whatever level the logger had before we touched it.
        target.setLevel(saved_level)
def create_gepa_metric_adapter(
metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
) -> Any:
"""
Create a metric adapter that bridges DSPy's standard metric to GEPA's format.
GEPA requires a metric with signature: (gold, pred, trace, pred_name, pred_trace)
but our standard metric_fn has signature: (example, pred, trace).
This function creates an adapter that bridges the two signatures.
Args:
metric_fn: Standard metric function with signature (example, pred, trace)
Returns:
Adapter function with GEPA's expected signature
"""
def gepa_metric_adapter(gold, pred, trace=None, pred_name=None, pred_trace=None):
"""Adapt DSPy's 3-argument metric to GEPA's 5-argument format."""
# gold is the dspy.Example
# pred is the prediction output
# trace/pred_name/pred_trace are optional GEPA-specific args
# We pass None for our metric's trace parameter since GEPA's trace is different
return metric_fn(gold, pred, trace=None)
return gepa_metric_adapter
def _check_dspy_installed():
    """Raise an MlflowException with install instructions when dspy is unavailable."""
    try:
        import dspy  # noqa: F401
    except ImportError as e:
        message = (
            "The DSPy library is required but not installed. "
            "Please install it using: `pip install dspy`"
        )
        raise MlflowException(message) from e
def construct_dspy_lm(model: str):
    """
    Create a dspy.LM instance from a given model.

    Args:
        model: The model identifier/URI

    Returns:
        A dspy.LM instance configured for the given model
    """
    if model == _DATABRICKS_DEFAULT_JUDGE_MODEL:
        # The Databricks managed judge goes through the managed-RAG-backed LM.
        return AgentEvalLM()
    litellm_model = convert_mlflow_uri_to_litellm(model)
    api_base, api_key = _get_api_base_key(model)
    if api_base:
        # Explicit endpoint configuration (e.g., Databricks serving endpoints).
        return dspy.LM(model=litellm_model, api_base=api_base, api_key=api_key)
    return dspy.LM(model=litellm_model)
def _get_api_base_key(model: str) -> tuple[str | None, str | None]:
    """
    Get the api_base URL and api_key for a model.

    Args:
        model: MLflow model URI (e.g., 'endpoints:/my-endpoint', 'databricks:/my-endpoint')

    Returns:
        Tuple of (api_base, api_key) - both None if not applicable
    """
    try:
        scheme, _ = _parse_model_uri(model)
        if scheme not in ("endpoints", "databricks"):
            return None, None
        return _get_databricks_api_base_key()
    except Exception:
        # Best-effort: unparsable URIs or credential failures yield no config.
        return None, None
def _get_databricks_api_base_key() -> tuple[str | None, str | None]:
"""
Get the api_base URL and api_key for Databricks serving endpoints.
For Databricks endpoints with OpenAI-compatible API, the URL format is:
- api_base: https://host/serving-endpoints
- model: endpoint-name (passed in request body)
LiteLLM appends /chat/completions to api_base, making the final URL:
https://host/serving-endpoints/chat/completions with model=endpoint-name in body.
Returns:
Tuple of (api_base, api_key) - both None if credentials cannot be determined
"""
# Get Databricks host and token from environment or SDK
host = os.environ.get("DATABRICKS_HOST")
api_key = os.environ.get("DATABRICKS_TOKEN")
if not host or not api_key:
try:
from databricks.sdk import WorkspaceClient
except ImportError:
_logger.warning(
"Could not determine Databricks credentials. "
"Set DATABRICKS_HOST and DATABRICKS_TOKEN environment variables."
)
return None, None
client = WorkspaceClient()
if not host:
host = client.config.host
if not api_key:
# Get token from SDK authentication (supports OAuth, PAT, etc.)
headers = client.config.authenticate()
if "Authorization" in headers:
api_key = headers["Authorization"].replace("Bearer ", "")
host = host.rstrip("/")
# Return api_base with just /serving-endpoints
# LiteLLM will append /chat/completions and pass the endpoint name as model
return f"{host}/serving-endpoints", api_key
def _to_attrdict(obj):
    """Recursively convert nested dicts/lists to AttrDicts; other values pass through."""
    if isinstance(obj, list):
        return [_to_attrdict(element) for element in obj]
    if isinstance(obj, dict):
        return AttrDict({key: _to_attrdict(value) for key, value in obj.items()})
    return obj
def _process_chat_completions(
    user_prompt: str, system_prompt: str | None = None
) -> AttrDict[str, Any]:
    """Call managed RAG client and return formatted response."""
    response = call_chat_completions(
        user_prompt=user_prompt,
        system_prompt=system_prompt,
        session_name=f"mlflow-judge-optimizer-v{VERSION}",
        use_case=USE_CASE_JUDGE_ALIGNMENT,
    )
    # Token usage is not reported by the managed client, so it is zeroed out.
    zero_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    if response.output is None:
        # Error path: surface the error message in a response-shaped payload.
        payload = {
            "object": "response",
            "error": response.error_message,
            "usage": zero_usage,
            "response_format": "json_object",
        }
    else:
        # Success path: mimic an OpenAI-style chat.completion payload.
        payload = {
            "object": "chat.completion",
            "model": "databricks",
            "choices": [
                {
                    "index": 0,
                    "finish_reason": "stop",
                    "message": {"role": "assistant", "content": response.output},
                }
            ],
            "usage": zero_usage,
            "response_format": "json_object",
        }
    return _to_attrdict(payload)
class AgentEvalLM(dspy.BaseLM):
    """Special DSPy LM for Databricks environment using managed RAG client."""

    def __init__(self):
        super().__init__("databricks")

    def dump_state(self):
        # The managed client holds no serializable state.
        return {}

    def load_state(self, state):
        # Nothing to restore.
        pass

    def forward(
        self,
        prompt: str | None = None,
        messages: list[dict[str, Any]] | None = None,
        **kwargs,
    ) -> AttrDict[str, Any]:
        """Forward pass for the language model."""
        user_prompt = None
        system_prompt = None
        # Take the last user/system message when several are present.
        for message in messages or []:
            role = message.get("role")
            if role == "user":
                user_prompt = message.get("content", "")
            elif role == "system":
                system_prompt = message.get("content", "")
        # Fall back to the bare prompt when no user message was supplied.
        if not user_prompt and prompt:
            user_prompt = prompt
        return _process_chat_completions(user_prompt, system_prompt)
def _sanitize_assessment_name(name: str) -> str:
    """Normalize an assessment name for comparison: strip whitespace and lowercase."""
    return name.strip().lower()
def convert_litellm_to_mlflow_uri(litellm_model: str) -> str:
    """
    Convert LiteLLM model format to MLflow URI format.

    LiteLLM uses formats like 'openai/gpt-4' while MLflow expects 'openai:/gpt-4'.

    Args:
        litellm_model: LiteLLM model string (e.g., 'openai/gpt-4')

    Returns:
        MLflow-compatible model URI (e.g., 'openai:/gpt-4')

    Raises:
        MlflowException: If the model string is not in the expected format

    Examples:
        >>> convert_litellm_to_mlflow_uri("openai/gpt-4")
        'openai:/gpt-4'
        >>> convert_litellm_to_mlflow_uri("anthropic/claude-3")
        'anthropic:/claude-3'
    """
    if not litellm_model:
        raise MlflowException(
            "Model string cannot be empty or None",
            error_code=INVALID_PARAMETER_VALUE,
        )
    if "/" not in litellm_model:
        raise MlflowException(
            f"Invalid LiteLLM model format: '{litellm_model}'. "
            "Expected format: 'provider/model' (e.g., 'openai/gpt-4')",
            error_code=INVALID_PARAMETER_VALUE,
        )
    # split("/", 1) cannot raise once "/" is known to be present, so the former
    # try/except ValueError wrapper was unreachable dead code and is removed.
    provider, model = litellm_model.split("/", 1)
    if not provider or not model:
        raise MlflowException(
            f"Invalid LiteLLM model format: '{litellm_model}'. "
            "Both provider and model name must be non-empty",
            error_code=INVALID_PARAMETER_VALUE,
        )
    return f"{provider}:/{model}"
def trace_to_dspy_example(trace: Trace, judge: Judge) -> Optional["dspy.Example"]:
    """
    Convert MLflow trace to DSPy example format.

    Extracts:
    - inputs/outputs from trace spans
    - expected result from human assessments
    - rationale from assessment feedback

    Args:
        trace: MLflow trace object
        judge: Judge instance to find assessments for

    Returns:
        DSPy example object or None if conversion fails
    """
    try:
        # Determine which of the judge's reserved input fields must be populated.
        judge_input_fields = judge.get_input_fields()
        judge_requires_trace = any(field.name == "trace" for field in judge_input_fields)
        judge_requires_inputs = any(field.name == "inputs" for field in judge_input_fields)
        judge_requires_outputs = any(field.name == "outputs" for field in judge_input_fields)
        judge_requires_expectations = any(
            field.name == "expectations" for field in judge_input_fields
        )
        request = extract_request_from_trace(trace)
        response = extract_response_from_trace(trace)
        expectations = extract_expectations_from_trace(trace)
        # Check for missing required fields
        if not request and judge_requires_inputs:
            _logger.warning(f"Missing required request in trace {trace.info.trace_id}")
            return None
        elif not response and judge_requires_outputs:
            _logger.warning(f"Missing required response in trace {trace.info.trace_id}")
            return None
        elif not expectations and judge_requires_expectations:
            _logger.warning(f"Missing required expectations in trace {trace.info.trace_id}")
            return None
        # Find human assessment for this judge
        expected_result = None
        if trace.info.assessments:
            # Sort assessments by creation time (most recent first) then process
            sorted_assessments = sorted(
                trace.info.assessments,
                key=lambda a: (
                    a.create_time_ms if hasattr(a, "create_time_ms") and a.create_time_ms else 0
                ),
                reverse=True,
            )
            # Names are compared case-insensitively after stripping whitespace.
            sanitized_judge_name = _sanitize_assessment_name(judge.name)
            matching_assessments = [
                a
                for a in sorted_assessments
                if _sanitize_assessment_name(a.name) == sanitized_judge_name
                and a.source.source_type == AssessmentSourceType.HUMAN
            ]
            if len(matching_assessments) > 1:
                _logger.warning(
                    f"Found {len(matching_assessments)} human assessments with name "
                    f"'{judge.name}' in trace {trace.info.trace_id}. "
                    f"Only the most recent one will be used for alignment."
                )
            if matching_assessments:
                # List is sorted newest-first, so [0] is the most recent label.
                expected_result = matching_assessments[0]
        if not expected_result:
            _logger.warning(
                f"No human assessment found for judge '{judge.name}' in trace {trace.info.trace_id}"
            )
            return None
        if not expected_result.feedback:
            _logger.warning(f"No feedback found in assessment for trace {trace.info.trace_id}")
            return None
        # Create DSPy example: only attach the fields this judge actually consumes.
        example_kwargs = {}
        example_inputs = []
        if judge_requires_trace:
            example_kwargs["trace"] = trace
            example_inputs.append("trace")
        if judge_requires_inputs:
            example_kwargs["inputs"] = request
            example_inputs.append("inputs")
        if judge_requires_outputs:
            example_kwargs["outputs"] = response
            example_inputs.append("outputs")
        if judge_requires_expectations:
            example_kwargs["expectations"] = expectations
            example_inputs.append("expectations")
        example = dspy.Example(
            # Lowercased string label matches agreement_metric's normalized comparison.
            result=str(expected_result.feedback.value).lower(),
            rationale=expected_result.rationale or "",
            **example_kwargs,
        )
        # Set inputs (what the model should use as input)
        return example.with_inputs(*example_inputs)
    except Exception as e:
        # Best-effort conversion: a bad trace is skipped rather than failing alignment.
        _logger.error(f"Failed to create DSPy example from trace: {e}")
        return None
def create_dspy_signature(judge: "Judge") -> "dspy.Signature":
    """
    Create DSPy signature for judge evaluation.

    Args:
        judge: The judge to create signature for

    Returns:
        DSPy signature object

    Raises:
        MlflowException: If building the signature fails for any reason.
    """
    try:
        # Map each judge field onto the corresponding DSPy input/output field,
        # preserving the declared value types and descriptions.
        signature_fields = {}
        for field in judge.get_input_fields():
            signature_fields[field.name] = (
                field.value_type,
                dspy.InputField(desc=field.description),
            )
        for field in judge.get_output_fields():
            signature_fields[field.name] = (
                field.value_type,
                dspy.OutputField(desc=field.description),
            )
        # The judge's instructions become the signature's docstring/prompt.
        return dspy.make_signature(signature_fields, judge.instructions)
    except Exception as e:
        # Chain the original error explicitly (consistent with the rest of this
        # module's `raise ... from e` handling) so the root cause is preserved.
        raise MlflowException(f"Failed to create DSPy signature: {e}") from e
def agreement_metric(example: "dspy.Example", pred: Any, trace: Any | None = None):
    """Simple agreement metric for judge optimization."""
    try:
        # Pull the labeled and predicted results; either may be absent.
        expected = getattr(example, "result", None)
        predicted = getattr(pred, "result", None)
        if expected is None or predicted is None:
            return False
        # Compare case- and whitespace-insensitively as strings.
        expected_norm = str(expected).lower().strip()
        predicted_norm = str(predicted).lower().strip()
        _logger.debug(f"expected_norm: {expected_norm}, predicted_norm: {predicted_norm}")
        return expected_norm == predicted_norm
    except Exception as e:
        # A failing comparison counts as disagreement rather than aborting.
        _logger.warning(f"Error in agreement_metric: {e}")
        return False
def append_input_fields_section(instructions: str, judge: "Judge") -> str:
    """
    Ensure instructions end with a section naming the judge's input fields.

    DSPy optimizers may rewrite instructions during optimization and drop the
    mustached field references. When any of the judge's input fields is missing
    from the text, a trailing "Inputs for assessment" line is appended listing
    all of them; otherwise the instructions are returned unchanged.

    Args:
        instructions: The optimized instructions
        judge: The original judge (to get field names)

    Returns:
        Instructions with input fields section appended, or original instructions
        if all input fields are already present
    """
    field_names = [field.name for field in judge.get_input_fields()]
    if not field_names:
        return instructions

    def _mentions(name: str) -> bool:
        # Accept both "{{field}}" and "{{ field }}" spellings.
        return f"{{{{{name}}}}}" in instructions or f"{{{{ {name} }}}}" in instructions

    if all(_mentions(name) for name in field_names):
        return instructions
    fields_list = ", ".join(f"{{{{ {name} }}}}" for name in field_names)
    return f"{instructions}\n\nInputs for assessment: {fields_list}"
def format_demos_as_examples(demos: list[Any], judge: "Judge") -> str:
    """Format demos as few-shot examples to include in judge instructions.

    SIMBA optimization adds successful examples to program.demos. This function
    converts them to a text format suitable for inclusion in the judge prompt.

    Args:
        demos: List of dspy.Example objects containing successful judge evaluations
        judge: The original judge (to get field names)

    Returns:
        Formatted text with examples, or empty string if no valid demos

    Raises:
        MlflowException: If a demo cannot be converted to a dict.
    """
    if not demos:
        return ""

    # Render input fields first, then output fields, in judge-declared order.
    ordered_fields = [f.name for f in judge.get_input_fields()] + [
        f.name for f in judge.get_output_fields()
    ]

    rendered = []
    for index, demo in enumerate(demos):
        # Demos must be dict-convertible; dict-style access avoids clashing
        # with dspy.Example's built-in methods such as inputs().
        if not hasattr(demo, "items"):
            raise MlflowException(
                f"Demo at index {index} cannot be converted to dict. "
                f"Expected dspy.Example, got {type(demo).__name__}",
                error_code=INVALID_PARAMETER_VALUE,
            )
        demo_dict = dict(demo)
        lines = [f"{name}: {demo_dict[name]}" for name in ordered_fields if name in demo_dict]
        if lines:
            rendered.append(f"Example {index + 1}:\n" + "\n".join(lines))

    if not rendered:
        return ""
    return "Here are some examples of good assessments:\n\n" + "\n\n".join(rendered)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/dspy_utils.py",
"license": "Apache License 2.0",
"lines": 478,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/optimizers/simba.py | """SIMBA alignment optimizer implementation."""
import logging
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Collection
from mlflow.genai.judges.optimizers.dspy import DSPyAlignmentOptimizer
from mlflow.genai.judges.optimizers.dspy_utils import (
_check_dspy_installed,
suppress_verbose_logging,
)
from mlflow.utils.annotations import experimental
if TYPE_CHECKING:
import dspy
_check_dspy_installed()
_logger = logging.getLogger(__name__)
@experimental(version="3.4.0")
class SIMBAAlignmentOptimizer(DSPyAlignmentOptimizer):
    """
    SIMBA (Simplified Multi-Bootstrap Aggregation) alignment optimizer.

    Optimizes judge prompts with DSPy's SIMBA algorithm, which performs
    bootstrap aggregation under a simplified parametrization.

    Note on Logging:
        By default, SIMBA optimization suppresses DSPy's verbose output.
        To see detailed optimization progress from DSPy, set the MLflow logger to DEBUG::

            import logging

            logging.getLogger("mlflow.genai.judges.optimizers.simba").setLevel(logging.DEBUG)
    """

    # Fallback random seed applied when the caller does not provide one.
    DEFAULT_SEED: ClassVar[int] = 42

    def __init__(
        self,
        model: str | None = None,
        batch_size: int | None = None,
        seed: int | None = None,
        simba_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        """
        Initialize SIMBA optimizer with customizable parameters.

        Args:
            model: Model to use for DSPy optimization. If None, uses get_default_model().
            batch_size: Batch size for SIMBA evaluation. If None, uses
                get_min_traces_required().
            seed: Random seed for reproducibility. If None (or 0), uses DEFAULT_SEED (42).
            simba_kwargs: Additional keyword arguments passed directly to dspy.SIMBA().
                Supported parameters include metric (overrides the default
                agreement_metric), max_demos, num_threads, and max_steps.
                See https://dspy.ai/api/optimizers/SIMBA/ for the full list.
            kwargs: Additional keyword arguments passed to parent class
        """
        super().__init__(model=model, **kwargs)
        self._batch_size = batch_size
        self._seed = seed or self.DEFAULT_SEED
        self._simba_kwargs = simba_kwargs or {}

    def _get_batch_size(self) -> int:
        """Return the configured batch size, falling back to the minimum trace count."""
        if self._batch_size is not None:
            return self._batch_size
        return self.get_min_traces_required()

    def _dspy_optimize(
        self,
        program: "dspy.Module",
        examples: Collection["dspy.Example"],
        metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
    ) -> "dspy.Module":
        """
        Run SIMBA optimization over the given program.

        SIMBA uses all examples as training data (no separate validation set).

        Args:
            program: The DSPy program to optimize
            examples: Examples for optimization
            metric_fn: Default metric function for optimization

        Returns:
            Optimized DSPy program
        """
        import dspy

        with suppress_verbose_logging("dspy.teleprompt.simba"):
            # NB: caller-supplied simba_kwargs intentionally take precedence
            # over the defaults below (e.g. a custom metric).
            simba_params = {
                "metric": metric_fn,
                "bsize": self._get_batch_size(),
                **self._simba_kwargs,
            }
            simba = dspy.SIMBA(**simba_params)
            _logger.info(
                f"Starting SIMBA optimization with {len(examples)} examples "
                f"(set logging to DEBUG for detailed output)"
            )
            optimized = simba.compile(
                student=program,
                trainset=examples,
                seed=self._seed,
            )
            _logger.info("SIMBA optimization completed")
            return optimized
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/simba.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/tools/base.py | """
Base classes for MLflow GenAI tools that can be used by judges.
This module provides the foundational interfaces for tools that judges can use
to enhance their evaluation capabilities.
"""
from abc import ABC, abstractmethod
from typing import Any
from mlflow.entities.trace import Trace
from mlflow.types.llm import ToolDefinition
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class JudgeTool(ABC):
    """
    Abstract base class for tools that can be used by MLflow judges.

    Tools provide additional capabilities to judges for analyzing traces,
    performing calculations, or accessing external data sources during evaluation.

    Concrete subclasses must implement ``name``, ``get_definition``, and
    ``invoke``.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """
        Return the unique name of the tool.

        Returns:
            Tool name used for registration and invocation
        """

    @abstractmethod
    def get_definition(self) -> ToolDefinition:
        """
        Get the tool definition in LiteLLM/OpenAI function calling format.

        Returns:
            ToolDefinition object containing the tool specification
        """

    @abstractmethod
    def invoke(self, trace: Trace, **kwargs) -> Any:
        """
        Invoke the tool with the provided trace and arguments.

        Args:
            trace: The MLflow trace object to analyze
            kwargs: Additional keyword arguments for the tool (tool-specific;
                interpreted by the concrete implementation)

        Returns:
            Result of the tool execution
        """
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/base.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/tools/constants.py | """
Constants for MLflow GenAI judge tools.
This module contains constant values used across the judge tools system,
providing a single reference point for tool names and other constants.
"""
from mlflow.utils.annotations import experimental
# Tool names
@experimental(version="3.4.0")
class ToolNames:
    """Registry of judge tool names.

    Each constant is the canonical string under which a tool is registered
    and referenced in function-calling definitions.
    """

    GET_TRACE_INFO = "get_trace_info"
    GET_ROOT_SPAN = "get_root_span"
    GET_SPAN = "get_span"
    LIST_SPANS = "list_spans"
    SEARCH_TRACE_REGEX = "search_trace_regex"
    GET_SPAN_PERFORMANCE_AND_TIMING_REPORT = "get_span_performance_and_timing_report"
    # Underscore-prefixed names mark tools that are private by convention.
    _GET_TRACES_IN_SESSION = "_get_traces_in_session"
    _SEARCH_TRACES = "_search_traces"
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/constants.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/tools/get_root_span.py | """
Get root span tool for MLflow GenAI judges.
This module provides a tool for retrieving the root span of a trace,
which contains the top-level inputs and outputs.
"""
from mlflow.entities.trace import Trace
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.genai.judges.tools.get_span import GetSpanTool
from mlflow.genai.judges.tools.types import SpanResult
from mlflow.types.llm import FunctionToolDefinition, ToolDefinition, ToolParamsSchema
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class GetRootSpanTool(JudgeTool):
    """
    Tool for retrieving the root span from a trace.

    The root span carries the top-level inputs to the agent and the final
    outputs; content extraction and pagination are delegated to ``GetSpanTool``.
    """

    @property
    def name(self) -> str:
        return ToolNames.GET_ROOT_SPAN

    def get_definition(self) -> ToolDefinition:
        properties = {
            "attributes_to_fetch": {
                "type": "array",
                "items": {"type": "string"},
                "description": (
                    "List of specific attributes to fetch from the span. If specified, "
                    "only these attributes will be returned. If not specified, all "
                    "attributes are returned. Use list_spans first to see available "
                    "attribute names, then select only the relevant ones."
                ),
            },
            "max_content_length": {
                "type": "integer",
                "description": "Maximum content size in bytes (default: 100000)",
            },
            "page_token": {
                "type": "string",
                "description": "Token to retrieve the next page of content",
            },
        }
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.GET_ROOT_SPAN,
                description=(
                    "Retrieve the root span of the trace, which contains the top-level inputs "
                    "to the agent and final outputs. Note that in some traces, the root span "
                    "may not contain outputs, but it typically should. If the root span doesn't "
                    "have outputs, you may need to look at other spans to find the final results. "
                    "The content is returned as a JSON string. Large content may be paginated. "
                    "Consider selecting only relevant attributes to reduce data size and improve "
                    "efficiency."
                ),
                parameters=ToolParamsSchema(type="object", properties=properties, required=[]),
            ),
            type="function",
        )

    def invoke(
        self,
        trace: Trace,
        attributes_to_fetch: list[str] | None = None,
        max_content_length: int = 100000,
        page_token: str | None = None,
    ) -> SpanResult:
        """
        Get the root span from the trace.

        Args:
            trace: The MLflow trace object to analyze
            attributes_to_fetch: List of specific attributes to fetch (None for all)
            max_content_length: Maximum content size in bytes to return
            page_token: Token to retrieve the next page (offset in bytes)

        Returns:
            SpanResult with the root span content as JSON string
        """
        if not trace or not trace.data or not trace.data.spans:
            return SpanResult(
                span_id=None, content=None, content_size_bytes=0, error="Trace has no spans"
            )
        # The root span is the first span without a parent.
        root_span_id = next(
            (span.span_id for span in trace.data.spans if span.parent_id is None), None
        )
        if not root_span_id:
            return SpanResult(
                span_id=None,
                content=None,
                content_size_bytes=0,
                error="No root span found in trace",
            )
        # Delegate content serialization and pagination to GetSpanTool.
        return GetSpanTool().invoke(
            trace, root_span_id, attributes_to_fetch, max_content_length, page_token
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/get_root_span.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/tools/get_span.py | """
Get span tool for MLflow GenAI judges.
This module provides a tool for retrieving a specific span by ID.
"""
import json
from mlflow.entities.trace import Trace
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.genai.judges.tools.types import SpanResult
from mlflow.genai.judges.tools.utils import create_page_token, parse_page_token
from mlflow.types.llm import FunctionToolDefinition, ToolDefinition, ToolParamsSchema
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class GetSpanTool(JudgeTool):
    """
    Tool for retrieving a specific span by its ID.

    Returns the complete span data including inputs, outputs, attributes, and events.
    """

    @property
    def name(self) -> str:
        return ToolNames.GET_SPAN

    def get_definition(self) -> ToolDefinition:
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.GET_SPAN,
                description=(
                    "Retrieve a specific span by its ID. Returns the complete span data "
                    "including inputs, outputs, attributes, events, and timing information. "
                    "Use this when you need to examine the full details of a particular span. "
                    "Large content may be paginated. Consider selecting only relevant attributes "
                    "to reduce data size and improve efficiency."
                ),
                parameters=ToolParamsSchema(
                    type="object",
                    properties={
                        "span_id": {
                            "type": "string",
                            "description": "The ID of the span to retrieve",
                        },
                        "attributes_to_fetch": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": (
                                "List of specific attributes to fetch from the span. If specified, "
                                "only these attributes will be returned. If not specified, all "
                                "attributes are returned. It is recommended to use list_spans "
                                "first to see available attribute names, then select relevant ones."
                            ),
                        },
                        "max_content_length": {
                            "type": "integer",
                            "description": "Maximum content size in bytes (default: 100000)",
                        },
                        "page_token": {
                            "type": "string",
                            "description": "Token to retrieve the next page of content",
                        },
                    },
                    required=["span_id"],
                ),
            ),
            type="function",
        )

    def invoke(
        self,
        trace: Trace,
        span_id: str,
        attributes_to_fetch: list[str] | None = None,
        max_content_length: int = 100000,
        page_token: str | None = None,
    ) -> SpanResult:
        """
        Get a specific span by ID from the trace.

        Args:
            trace: The MLflow trace object to analyze
            span_id: The ID of the span to retrieve
            attributes_to_fetch: List of specific attributes to fetch (None for all)
            max_content_length: Maximum number of characters of content to return
                per page (equals bytes for ASCII-only content)
            page_token: Token to retrieve the next page (character offset)

        Returns:
            SpanResult with the span content as JSON string
        """
        if not trace or not trace.data or not trace.data.spans:
            return SpanResult(
                span_id=None, content=None, content_size_bytes=0, error="Trace has no spans"
            )
        target_span = next((s for s in trace.data.spans if s.span_id == span_id), None)
        if not target_span:
            return SpanResult(
                span_id=None,
                content=None,
                content_size_bytes=0,
                error=f"Span with ID '{span_id}' not found in trace",
            )
        span_dict = target_span.to_dict()
        if attributes_to_fetch is not None and span_dict.get("attributes"):
            # Keep only the requested attributes to reduce payload size.
            span_dict["attributes"] = {
                attr: span_dict["attributes"][attr]
                for attr in attributes_to_fetch
                if attr in span_dict["attributes"]
            }
        full_content = json.dumps(span_dict, default=str, indent=2)
        # NB: pagination offsets are measured in *characters* of the JSON string.
        # The previous implementation compared character offsets against the UTF-8
        # byte length, which could emit a next_page_token pointing past the end of
        # the string (yielding empty trailing pages) whenever the content contained
        # multi-byte characters. Using one unit consistently fixes that.
        total_length = len(full_content)
        start_offset = parse_page_token(page_token)
        end_offset = min(start_offset + max_content_length, total_length)
        content_chunk = full_content[start_offset:end_offset]
        next_page_token = create_page_token(end_offset) if end_offset < total_length else None
        return SpanResult(
            span_id=target_span.span_id,
            content=content_chunk,
            content_size_bytes=len(content_chunk.encode("utf-8")),
            page_token=next_page_token,
            error=None,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/get_span.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/tools/get_trace_info.py | """
Get trace info tool for MLflow GenAI judges.
This module provides a tool for retrieving trace metadata including
timing, location, state, and other high-level information.
"""
from mlflow.entities.trace import Trace
from mlflow.entities.trace_info import TraceInfo
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.types.llm import (
FunctionToolDefinition,
ToolDefinition,
ToolParamsSchema,
)
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class GetTraceInfoTool(JudgeTool):
    """
    Tool for retrieving high-level metadata about a trace.

    Exposes the trace's ``TraceInfo`` (ID, timing, state, location) without
    any of the detailed span data.
    """

    @property
    def name(self) -> str:
        return ToolNames.GET_TRACE_INFO

    def get_definition(self) -> ToolDefinition:
        # This tool takes no arguments beyond the implicit trace.
        empty_schema = ToolParamsSchema(type="object", properties={}, required=[])
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.GET_TRACE_INFO,
                description=(
                    "Retrieve high-level metadata about the trace including ID, timing, state, "
                    "location, and request/response previews. This provides an overview of the "
                    "trace without detailed span data. Use this to understand the overall trace "
                    "context, execution duration, and whether the trace completed successfully."
                ),
                parameters=empty_schema,
            ),
            type="function",
        )

    def invoke(self, trace: Trace) -> TraceInfo:
        """
        Get metadata about the trace.

        Args:
            trace: The MLflow trace object to analyze

        Returns:
            TraceInfo object
        """
        return trace.info
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/get_trace_info.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/tools/list_spans.py | """
Tool definitions for MLflow GenAI judges.
This module provides concrete JudgeTool implementations that judges can use
to analyze traces and extract information during evaluation.
"""
from dataclasses import dataclass

from mlflow.entities.trace import Trace
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.genai.judges.tools.types import SpanInfo
from mlflow.genai.judges.tools.utils import create_page_token, parse_page_token
from mlflow.types.llm import (
    FunctionToolDefinition,
    ParamProperty,
    ToolDefinition,
    ToolParamsSchema,
)
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
@dataclass
class ListSpansResult:
    """Result from listing spans with optional pagination."""

    # Span summaries for the current page.
    spans: list[SpanInfo]
    # Token to fetch the next page; None when no more spans remain.
    next_page_token: str | None = None
def _create_span_info(span) -> SpanInfo:
    """Build a SpanInfo summary from a raw span object (nanosecond timestamps)."""
    # Convert nanosecond timestamps to milliseconds.
    start_ms = span.start_time_ns / 1_000_000
    end_ms = span.end_time_ns / 1_000_000
    return SpanInfo(
        span_id=span.span_id,
        name=span.name,
        span_type=span.span_type,
        start_time_ms=start_ms,
        end_time_ms=end_ms,
        duration_ms=end_ms - start_ms,
        parent_id=span.parent_id,
        status=span.status,
        # A span with no parent is the trace root.
        is_root=span.parent_id is None,
        # Only attribute *names* are surfaced here; values are fetched via get_span.
        attribute_names=list(span.attributes.keys()) if span.attributes else [],
    )
@experimental(version="3.4.0")
class ListSpansTool(JudgeTool):
    """
    Tool for listing and analyzing spans within a trace.

    This tool provides functionality to extract and analyze span information
    from MLflow traces, including span names, types, durations, and metadata.
    """

    @property
    def name(self) -> str:
        # Use the shared ToolNames constant instead of a hard-coded literal,
        # matching the convention followed by every other judge tool.
        return ToolNames.LIST_SPANS

    def get_definition(self) -> ToolDefinition:
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.LIST_SPANS,
                description=(
                    "List information about spans within a trace with pagination support. "
                    "Returns span metadata including span_id, name, span_type, timing data "
                    "(start_time_ms, end_time_ms, duration_ms), parent_id, status, and "
                    "attribute_names (list of attribute keys). This provides an overview of "
                    "all spans but does not fetch full span content."
                ),
                parameters=ToolParamsSchema(
                    type="object",
                    properties={
                        "max_results": ParamProperty(
                            type="integer",
                            description="Maximum number of spans to return (default: 100)",
                        ),
                        "page_token": ParamProperty(
                            type="string",
                            description="Token for retrieving the next page of results",
                        ),
                    },
                    required=[],
                ),
            ),
            type="function",
        )

    def invoke(
        self, trace: Trace, max_results: int = 100, page_token: str | None = None
    ) -> ListSpansResult:
        """
        List spans from a trace with pagination support.

        Args:
            trace: The MLflow trace object to analyze
            max_results: Maximum number of spans to return (default: 100)
            page_token: Token for retrieving the next page of results

        Returns:
            ListSpansResult containing spans list and optional next page token
        """
        if not trace or not trace.data or not trace.data.spans:
            return ListSpansResult(spans=[])

        start_index = parse_page_token(page_token)
        all_spans = trace.data.spans
        end_index = start_index + max_results

        spans_info = [_create_span_info(span) for span in all_spans[start_index:end_index]]

        # Only emit a token when more spans remain past this page.
        next_page_token = create_page_token(end_index) if end_index < len(all_spans) else None
        return ListSpansResult(spans=spans_info, next_page_token=next_page_token)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/list_spans.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/tools/registry.py | """
Tool registry for MLflow GenAI judges.
This module provides a registry system for managing and invoking JudgeTool instances.
"""
import json
import logging
from typing import Any

import mlflow
from mlflow.entities import SpanType, Trace
from mlflow.environment_variables import MLFLOW_GENAI_EVAL_ENABLE_SCORER_TRACING
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_DOES_NOT_EXIST
from mlflow.utils.annotations import experimental
_logger = logging.getLogger(__name__)
@experimental(version="3.4.0")
class JudgeToolRegistry:
    """Registry for managing and invoking JudgeTool instances."""

    def __init__(self):
        # Mapping of tool name -> JudgeTool instance.
        self._tools: dict[str, JudgeTool] = {}

    def register(self, tool: JudgeTool) -> None:
        """
        Register a judge tool in the registry.

        Args:
            tool: The JudgeTool instance to register
        """
        self._tools[tool.name] = tool

    def invoke(self, tool_call: Any, trace: Trace) -> Any:
        """
        Invoke a tool using a ToolCall instance and trace.

        Args:
            tool_call: The ToolCall containing function name and arguments
            trace: The MLflow trace object to analyze

        Returns:
            The result of the tool execution

        Raises:
            MlflowException: If the tool is not found or arguments are invalid
        """
        function_name = tool_call.function.name
        if function_name not in self._tools:
            raise MlflowException(
                f"Tool '{function_name}' not found in registry",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )
        tool = self._tools[function_name]
        try:
            arguments = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError as e:
            # NB: pass the proto constant (consistent with RESOURCE_DOES_NOT_EXIST
            # above); the raw string "INVALID_PARAMETER_VALUE" is not a valid
            # proto ErrorCode value and may not map to the intended code.
            raise MlflowException(
                f"Invalid JSON arguments for tool '{function_name}': {e}",
                error_code=INVALID_PARAMETER_VALUE,
            ) from e
        _logger.debug(f"Invoking tool '{function_name}' with args: {arguments}")
        try:
            # Optionally wrap the tool call in a TOOL span for scorer tracing.
            if MLFLOW_GENAI_EVAL_ENABLE_SCORER_TRACING.get():
                tool_func = mlflow.trace(name=tool.name, span_type=SpanType.TOOL)(tool.invoke)
            else:
                tool_func = tool.invoke
            result = tool_func(trace, **arguments)
            _logger.debug(f"Tool '{function_name}' returned: {result}")
            return result
        except TypeError as e:
            # A TypeError here indicates the LLM supplied arguments that do not
            # match the tool's signature.
            raise MlflowException(
                f"Invalid arguments for tool '{function_name}': {e}",
                error_code=INVALID_PARAMETER_VALUE,
            ) from e

    def list_tools(self) -> list[JudgeTool]:
        """
        List all registered tools.

        Returns:
            List of registered JudgeTool instances
        """
        return list(self._tools.values())
# Module-level singleton registry shared by the public helper functions below.
_judge_tool_registry = JudgeToolRegistry()
@experimental(version="3.4.0")
def register_judge_tool(tool: JudgeTool) -> None:
    """
    Register a judge tool in the global registry.

    Registration makes the tool available to ``invoke_judge_tool`` and
    ``list_judge_tools``.

    Args:
        tool: The JudgeTool instance to register
    """
    _judge_tool_registry.register(tool)
@experimental(version="3.4.0")
def invoke_judge_tool(tool_call: Any, trace: Trace) -> Any:
    """
    Invoke a judge tool using a ToolCall instance and trace.

    Args:
        tool_call: The ToolCall containing function name and arguments
        trace: The MLflow trace object to analyze

    Returns:
        The result of the tool execution

    Raises:
        MlflowException: If the tool is not registered or its arguments are invalid.
    """
    return _judge_tool_registry.invoke(tool_call, trace)
@experimental(version="3.4.0")
def list_judge_tools() -> list[JudgeTool]:
    """
    List all registered judge tools.

    Returns:
        List of registered JudgeTool instances (built-in tools plus any added
        via ``register_judge_tool``)
    """
    return _judge_tool_registry.list_tools()
# NB: Tool imports are at the bottom to avoid circular dependencies and ensure
# the registry is fully defined before tools attempt to register themselves.
from mlflow.genai.judges.tools.get_root_span import GetRootSpanTool
from mlflow.genai.judges.tools.get_span import GetSpanTool
from mlflow.genai.judges.tools.get_span_performance_and_timing_report import (
    GetSpanPerformanceAndTimingReportTool,
)
from mlflow.genai.judges.tools.get_trace_info import GetTraceInfoTool
from mlflow.genai.judges.tools.list_spans import ListSpansTool
from mlflow.genai.judges.tools.search_trace_regex import SearchTraceRegexTool

# Register the built-in tools at import time so they are immediately available
# through the module-level helpers (invoke_judge_tool, list_judge_tools).
_judge_tool_registry.register(GetTraceInfoTool())
_judge_tool_registry.register(GetRootSpanTool())
_judge_tool_registry.register(GetSpanTool())
_judge_tool_registry.register(ListSpansTool())
_judge_tool_registry.register(SearchTraceRegexTool())
_judge_tool_registry.register(GetSpanPerformanceAndTimingReportTool())
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/registry.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/tools/search_trace_regex.py | """
Tool for searching traces using regex patterns.
This module provides functionality to search through entire traces (including
spans, metadata, tags, requests, and responses) using regular expressions
with case-insensitive matching.
"""
import re
from dataclasses import dataclass
from mlflow.entities.trace import Trace
from mlflow.genai.judges.tools.base import JudgeTool
from mlflow.genai.judges.tools.constants import ToolNames
from mlflow.types.llm import FunctionToolDefinition, ToolDefinition, ToolParamsSchema
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
@dataclass
class RegexMatch:
    """Represents a single regex match found in a trace."""

    # Identifier of where the match was found; the current search implementation
    # always reports "trace" because it matches against the whole serialized trace.
    span_id: str
    # The exact text matched by the pattern.
    matched_text: str
    # Match plus surrounding characters, with "..." markers when truncated.
    surrounding_text: str
@experimental(version="3.4.0")
@dataclass
class SearchTraceRegexResult:
    """Result of searching a trace with a regex pattern."""

    # The regex pattern that was searched for.
    pattern: str
    # Number of matches returned (capped at the requested max_matches).
    total_matches: int
    # Individual matches with surrounding context.
    matches: list[RegexMatch]
    # Set when the pattern failed to compile; None on success.
    error: str | None = None
@experimental(version="3.4.0")
class SearchTraceRegexTool(JudgeTool):
    """
    Tool for searching through entire traces using regex patterns.

    Runs a case-insensitive regex over the full JSON serialization of the
    trace (spans, metadata, tags, requests, responses, and other fields) and
    reports every match together with a window of surrounding text.
    """

    @property
    def name(self) -> str:
        """Return the tool name."""
        return ToolNames.SEARCH_TRACE_REGEX

    def get_definition(self) -> ToolDefinition:
        """Get the tool definition for LiteLLM/OpenAI function calling."""
        schema = ToolParamsSchema(
            type="object",
            properties={
                "pattern": {
                    "type": "string",
                    "description": (
                        "Regular expression pattern to search for. The search is "
                        "case-insensitive. Examples: 'error.*timeout', 'user_id:\\s*\\d+', "
                        "'function_name\\(.*\\)'"
                    ),
                },
                "max_matches": {
                    "type": "integer",
                    "description": "Maximum number of matches to return (default: 50)",
                    "default": 50,
                },
                "surrounding_content_length": {
                    "type": "integer",
                    "description": (
                        "Number of characters to include before and after each match "
                        "for context (default: 100)"
                    ),
                    "default": 100,
                },
            },
            required=["pattern"],
        )
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.SEARCH_TRACE_REGEX,
                description=(
                    "Search through the entire trace using a regular expression pattern. "
                    "Performs case-insensitive matching across all trace fields including spans, "
                    "metadata, tags, requests, and responses. Returns all matches with surrounding "
                    "context. Useful for finding specific patterns, values, or text anywhere in "
                    "the trace."
                ),
                parameters=schema,
            ),
            type="function",
        )

    def invoke(
        self,
        trace: Trace,
        pattern: str,
        max_matches: int = 50,
        surrounding_content_length: int = 100,
    ) -> SearchTraceRegexResult:
        """
        Search through the trace using a regex pattern.

        Args:
            trace: The MLflow trace object to search through
            pattern: Regular expression pattern to search for
            max_matches: Maximum number of matches to return
            surrounding_content_length: Number of characters to include before and after each
                match for context

        Returns:
            SearchTraceRegexResult containing the search results
        """
        try:
            compiled = re.compile(pattern, re.IGNORECASE)
        except re.error as e:
            # Report a bad pattern as a structured error instead of raising.
            return SearchTraceRegexResult(
                pattern=pattern,
                total_matches=0,
                matches=[],
                error=f"Invalid regex pattern: {e}",
            )

        # Match against the full JSON serialization so every field is covered.
        haystack = trace.to_json()
        collected: list[RegexMatch] = []
        for hit in compiled.finditer(haystack):
            if len(collected) >= max_matches:
                break
            collected.append(
                self._create_regex_match(
                    hit, haystack, surrounding_content_length=surrounding_content_length
                )
            )

        return SearchTraceRegexResult(
            pattern=pattern,
            total_matches=len(collected),
            matches=collected,
        )

    def _create_regex_match(
        self,
        match: re.Match[str],
        text: str,
        span_id: str = "trace",
        surrounding_content_length: int = 100,
    ) -> RegexMatch:
        """Create a RegexMatch with surrounding context from a regex match object."""
        start, end = match.span()
        window_start = max(0, start - surrounding_content_length)
        window_end = min(len(text), end + surrounding_content_length)
        # Ellipses mark where the context window was clipped.
        prefix = "..." if window_start > 0 else ""
        suffix = "..." if window_end < len(text) else ""
        return RegexMatch(
            span_id=span_id,
            matched_text=match.group(),
            surrounding_text=prefix + text[window_start:window_end] + suffix,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/search_trace_regex.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/tools/types.py | """
Shared types for MLflow GenAI judge tools.
This module provides common data structures and types that can be reused
across multiple judge tools for consistent data representation.
"""
from dataclasses import dataclass
from typing import Any
from mlflow.entities.assessment import FeedbackValueType
from mlflow.entities.span_status import SpanStatus
from mlflow.entities.trace_state import TraceState
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
@dataclass
class SpanResult:
    """Result from getting span content."""

    # ID of the retrieved span; None when the lookup failed.
    span_id: str | None
    # Serialized span content (possibly one page of it); None on error.
    content: str | None
    # UTF-8 size of `content` in bytes.
    content_size_bytes: int
    # Token for fetching the next page of content; None when exhausted.
    page_token: str | None = None
    # Human-readable error description; None on success.
    error: str | None = None
@experimental(version="3.4.0")
@dataclass
class SpanInfo:
    """Information about a single span."""

    # Unique identifier of the span within its trace.
    span_id: str
    # Display name of the span.
    name: str
    # Span category (string form of the span's type).
    span_type: str
    # Wall-clock timing in milliseconds.
    start_time_ms: float
    end_time_ms: float
    duration_ms: float
    # Parent span ID; None for the root span.
    parent_id: str | None
    # Completion status of the span.
    status: SpanStatus
    # True when this span has no parent (i.e. it is the trace root).
    is_root: bool
    # Names (keys) of the span's attributes; values are not included here.
    attribute_names: list[str]
@experimental(version="3.5.0")
@dataclass
class JudgeToolExpectation:
    """Expectation for a trace (simplified for judge tools)."""

    # Name of the expectation assessment.
    name: str
    # Origin of the assessment, serialized as a string.
    source: str
    # Optional free-text justification for the expectation.
    rationale: str | None
    # Span the expectation is attached to, if any.
    span_id: str | None
    # Identifier of the underlying assessment record, if any.
    assessment_id: str | None
    # The expected value itself.
    value: Any
@experimental(version="3.5.0")
@dataclass
class JudgeToolFeedback:
    """Feedback for a trace (simplified for judge tools)."""

    # Name of the feedback assessment.
    name: str
    # Origin of the assessment, serialized as a string.
    source: str
    # Optional free-text justification for the feedback.
    rationale: str | None
    # Span the feedback is attached to, if any.
    span_id: str | None
    # Identifier of the underlying assessment record, if any.
    assessment_id: str | None
    # The feedback value; None when feedback generation failed.
    value: FeedbackValueType | None
    # Error details populated when the feedback could not be produced.
    error_code: str | None
    error_message: str | None
    stack_trace: str | None
    # Assessment this feedback overrides, if any — serialized as a string.
    overrides: str | None
    # Validity flag for the feedback; None when not set.
    valid: bool | None
@experimental(version="3.5.0")
@dataclass
class JudgeToolTraceInfo:
    """Information about a single trace (simplified for judge tools)."""
    trace_id: str
    # Epoch timestamp of the request; units (ms vs s) not shown here -- confirm.
    request_time: int
    state: TraceState
    request: str | None
    response: str | None
    # Duration of the trace execution; units not shown here -- confirm.
    execution_duration: int | None
    # Mixed list of expectations and feedback attached to the trace.
    assessments: list[JudgeToolExpectation | JudgeToolFeedback]
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/types.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/tools/utils.py | """
Utilities for MLflow GenAI judge tools.
This module contains utility functions and classes used across
different judge tool implementations.
"""
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
def create_page_token(offset: int) -> str:
    """
    Encode a byte offset as an opaque pagination token.

    Args:
        offset: The byte offset for pagination

    Returns:
        String representation of the offset to use as a page token
    """
    # The token format is simply the decimal offset; parse_page_token reverses it.
    return f"{offset}"
@experimental(version="3.4.0")
def parse_page_token(page_token: str | None) -> int:
    """
    Decode a pagination token back into its byte offset.

    Args:
        page_token: The page token string to parse, or None

    Returns:
        The offset value, or 0 if token is None

    Raises:
        MlflowException: If page_token is invalid
    """
    # A missing token means "start from the beginning".
    if page_token is None:
        return 0
    try:
        offset = int(page_token)
    except (ValueError, TypeError) as exc:
        raise MlflowException(
            f"Invalid page_token '{page_token}': must be a valid integer",
            error_code=INVALID_PARAMETER_VALUE,
        ) from exc
    return offset
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/tools/utils.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/genai/judges/optimizers/test_dspy_base.py | from typing import Any, Callable, Collection
from unittest.mock import MagicMock, Mock, patch
import dspy
import litellm
import pytest
from mlflow.entities.trace import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.optimizers.dspy import DSPyAlignmentOptimizer
from mlflow.genai.judges.optimizers.dspy_utils import AgentEvalLM
from mlflow.metrics.genai.model_utils import convert_mlflow_uri_to_litellm
from tests.genai.judges.optimizers.conftest import MockDSPyLM, MockJudge
def make_judge_mock_builder(
expected_model: str | None = None, track_calls: list[str] | None = None
) -> Callable[[str, str, str], MagicMock]:
"""Create a mock make_judge function for testing.
Args:
expected_model: If provided, track calls with this model to track_calls list
track_calls: List to append model to when make_judge is called with expected_model
Returns:
Mock function that can be used to patch make_judge
"""
mock_feedback = MagicMock()
mock_feedback.value = "pass"
mock_feedback.rationale = "Test rationale"
def mock_make_judge(name, instructions, model, feedback_value_type):
if track_calls is not None and model == expected_model:
track_calls.append(model)
return MagicMock(return_value=mock_feedback)
return mock_make_judge
class ConcreteDSPyOptimizer(DSPyAlignmentOptimizer):
    """Concrete implementation for testing."""
    def _dspy_optimize(
        self,
        program: "dspy.Predict",
        examples: Collection["dspy.Example"],
        metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
    ) -> "dspy.Predict":
        # Returns a canned "optimized" program so tests can assert on the
        # resulting instructions without running a real optimization loop.
        mock_program = dspy.Predict("inputs, outputs -> result, rationale")
        mock_program.signature.instructions = (
            "Optimized instructions with {{inputs}} and {{outputs}}"
        )
        return mock_program
def test_dspy_optimizer_abstract():
    """The abstract base class cannot be instantiated directly."""
    with pytest.raises(TypeError, match="Can't instantiate abstract class"):
        DSPyAlignmentOptimizer()
def test_concrete_implementation_required():
    """A subclass that omits _dspy_optimize is still abstract."""
    class IncompleteDSPyOptimizer(DSPyAlignmentOptimizer):
        pass
    with pytest.raises(TypeError, match="Can't instantiate abstract class"):
        IncompleteDSPyOptimizer()
def test_concrete_implementation_works():
    """A subclass implementing the abstract hook instantiates fine."""
    assert ConcreteDSPyOptimizer() is not None
def test_align_success(sample_traces_with_assessments):
    """align() returns a judge carrying the optimized instructions."""
    mock_judge = MockJudge(name="mock_judge", model="openai:/gpt-4")
    with patch("dspy.LM", MagicMock()):
        optimizer = ConcreteDSPyOptimizer()
        with patch.object(ConcreteDSPyOptimizer, "get_min_traces_required", return_value=5):
            result = optimizer.align(mock_judge, sample_traces_with_assessments)
    assert result is not None
    assert result.model == mock_judge.model
    assert "Optimized instructions with {{inputs}} and {{outputs}}" in result.instructions
    assert "Inputs for assessment:" not in result.instructions
def test_align_no_traces(mock_judge):
    """Aligning with an empty trace list fails and chains the root cause."""
    optimizer = ConcreteDSPyOptimizer()
    with pytest.raises(MlflowException, match="Alignment optimization failed") as exc_info:
        optimizer.align(mock_judge, [])
    assert "No traces provided" in str(exc_info.value)
    assert exc_info.value.__cause__ is not None
    assert "No traces provided" in str(exc_info.value.__cause__)
def test_align_no_valid_examples(mock_judge, sample_trace_without_assessment):
    """Traces without assessments yield no examples, so alignment fails."""
    with patch("dspy.LM", MagicMock()):
        optimizer = ConcreteDSPyOptimizer()
        with pytest.raises(MlflowException, match="Alignment optimization failed") as exc_info:
            optimizer.align(mock_judge, [sample_trace_without_assessment])
    assert "No valid examples could be created" in str(exc_info.value)
    assert exc_info.value.__cause__ is not None
    assert "No valid examples could be created" in str(exc_info.value.__cause__)
def test_align_insufficient_examples(mock_judge, sample_trace_with_assessment):
    """Fewer valid traces than the minimum fails with a count in the message."""
    optimizer = ConcreteDSPyOptimizer()
    min_traces = optimizer.get_min_traces_required()
    with patch("dspy.LM", MagicMock()):
        with pytest.raises(MlflowException, match="Alignment optimization failed") as exc_info:
            optimizer.align(mock_judge, [sample_trace_with_assessment])
    assert f"At least {min_traces} valid traces are required" in str(exc_info.value)
    assert exc_info.value.__cause__ is not None
    assert f"At least {min_traces} valid traces are required" in str(exc_info.value.__cause__)
def _create_mock_dspy_lm_factory(optimizer_lm, judge_lm):
"""Factory function to create MockDSPyLM instances that track calls to LMs."""
def mock_lm_factory(model=None, **kwargs):
"""Internal factory method to carry the input models"""
if model == optimizer_lm.model:
return optimizer_lm
elif model == judge_lm.model:
return judge_lm
else:
raise ValueError(f"Invalid model: {model}")
return mock_lm_factory
def test_optimizer_and_judge_use_different_models(sample_traces_with_assessments):
    """The optimizer LM and the judge LM are configured and used independently."""
    judge_model = "openai:/gpt-4"
    optimizer_model = "anthropic:/claude-3"
    mock_judge = MockJudge(name="mock_judge", model=judge_model)
    traces = sample_traces_with_assessments
    optimizer_lm = MockDSPyLM(convert_mlflow_uri_to_litellm(optimizer_model))
    judge_lm = MockDSPyLM(convert_mlflow_uri_to_litellm(judge_model))
    mock_lm_factory = _create_mock_dspy_lm_factory(optimizer_lm, judge_lm)
    mock_make_judge = make_judge_mock_builder(
        expected_model=judge_model, track_calls=judge_lm.context_calls
    )
    with (
        patch.object(dspy, "LM", side_effect=mock_lm_factory),
        patch(
            "mlflow.genai.judges.optimizers.dspy.make_judge",
            side_effect=mock_make_judge,
        ),
    ):
        class TestDSPyOptimizer(ConcreteDSPyOptimizer):
            def _dspy_optimize(
                self,
                program: "dspy.Module",
                examples: Collection["dspy.Example"],
                metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
            ) -> "dspy.Module":
                # The LM active while optimizing must be the optimizer's, not the judge's.
                lm_in_context = dspy.settings.lm
                assert lm_in_context == optimizer_lm
                program(inputs=examples[0].inputs, outputs=examples[0].outputs)
                return super()._dspy_optimize(program, examples, metric_fn)
        optimizer = TestDSPyOptimizer(model=optimizer_model)
        with patch.object(TestDSPyOptimizer, "get_min_traces_required", return_value=5):
            optimizer.align(mock_judge, traces)
    assert len(judge_lm.context_calls) > 0, (
        f"Expected judge LM to be called, but got {len(judge_lm.context_calls)} calls. "
        f"Optimizer calls: {len(optimizer_lm.context_calls)}"
    )
    assert len(optimizer_lm.context_calls) == 0, (
        f"Expected optimizer LM to not be called, but got "
        f"{len(optimizer_lm.context_calls)} calls. "
        f"Judge calls: {len(judge_lm.context_calls)}"
    )
def test_optimizer_default_model_initialization():
    """Without an explicit model, the optimizer asks get_default_model()."""
    with patch("mlflow.genai.judges.optimizers.dspy.get_default_model") as mock_get_default:
        mock_get_default.return_value = "whichever default model is used"
        optimizer = ConcreteDSPyOptimizer()
        assert optimizer.model == "whichever default model is used"
        mock_get_default.assert_called_once()
def test_optimizer_custom_model_initialization():
    """An explicitly supplied model URI is stored verbatim."""
    chosen = "anthropic:/claude-3.5-sonnet"
    assert ConcreteDSPyOptimizer(model=chosen).model == chosen
def test_different_models_no_interference():
    """Two optimizer instances keep independent model configurations."""
    first = ConcreteDSPyOptimizer(model="openai:/gpt-3.5-turbo")
    second = ConcreteDSPyOptimizer(model="anthropic:/claude-3")
    assert first.model == "openai:/gpt-3.5-turbo"
    assert second.model == "anthropic:/claude-3"
    assert first.model != second.model
def test_mlflow_to_litellm_uri_conversion_in_optimizer(sample_traces_with_assessments):
    """The optimizer passes a LiteLLM-format model name to dspy.LM."""
    judge_model = "openai:/gpt-4"
    optimizer_model = "anthropic:/claude-3.5-sonnet"
    mock_judge = MockJudge(name="mock_judge", model=judge_model)
    lm_calls = []
    def mock_lm_init(model=None, **kwargs):
        # Record every model name dspy.LM is constructed with.
        lm_calls.append(model)
        return MagicMock()
    with patch("dspy.LM", side_effect=mock_lm_init):
        optimizer = ConcreteDSPyOptimizer(model=optimizer_model)
        with patch.object(ConcreteDSPyOptimizer, "get_min_traces_required", return_value=5):
            optimizer.align(mock_judge, sample_traces_with_assessments)
    assert lm_calls == ["anthropic/claude-3.5-sonnet"]
def test_mlflow_to_litellm_uri_conversion_in_judge_program():
    """forward() with a LiteLLM-format lm still calls make_judge once with the MLflow URI."""
    mock_judge = MockJudge(name="test_judge", model="openai:/gpt-4o-mini")
    optimizer = ConcreteDSPyOptimizer()
    make_judge_calls = []
    mock_make_judge = make_judge_mock_builder(
        expected_model=mock_judge.model, track_calls=make_judge_calls
    )
    program = optimizer._get_dspy_program_from_judge(mock_judge)
    with patch("mlflow.genai.judges.optimizers.dspy.make_judge", side_effect=mock_make_judge):
        mock_lm = MagicMock()
        mock_lm.model = convert_mlflow_uri_to_litellm(mock_judge.model)
        program.forward(inputs="test", outputs="test", lm=mock_lm)
    assert len(make_judge_calls) == 1
    assert make_judge_calls[0] == mock_judge.model
def test_dspy_align_litellm_nonfatal_error_messages_suppressed():
    """litellm debug output is suppressed while the optimization runs."""
    suppression_state_during_call = {}
    def mock_dspy_optimize(program, examples, metric_fn):
        # Capture litellm's verbosity flags at the moment optimization executes.
        suppression_state_during_call["set_verbose"] = litellm.set_verbose
        suppression_state_during_call["suppress_debug_info"] = litellm.suppress_debug_info
        mock_program = dspy.Predict("inputs, outputs -> result, rationale")
        mock_program.signature.instructions = "Optimized instructions"
        return mock_program
    optimizer = ConcreteDSPyOptimizer()
    min_traces = optimizer.get_min_traces_required()
    mock_traces = [Mock(spec=Trace) for _ in range(min_traces)]
    mock_judge = MockJudge(name="test_judge", model="openai:/gpt-4o-mini")
    with (
        patch("dspy.LM"),
        patch(
            "mlflow.genai.judges.optimizers.dspy.trace_to_dspy_example",
            return_value=Mock(),
        ),
        patch("mlflow.genai.judges.optimizers.dspy.make_judge", return_value=Mock()),
        patch.object(optimizer, "_dspy_optimize", mock_dspy_optimize),
    ):
        optimizer.align(mock_judge, mock_traces)
    assert suppression_state_during_call["set_verbose"] is False
    assert suppression_state_during_call["suppress_debug_info"] is True
def test_align_configures_databricks_lm_in_context(sample_traces_with_assessments):
    """With model="databricks", align() puts an AgentEvalLM into the dspy context."""
    mock_judge = MockJudge(name="mock_judge", model="openai:/gpt-4")
    optimizer = ConcreteDSPyOptimizer(model="databricks")
    def check_context(*args, **kwargs):
        # Assert from inside _dspy_optimize, while the context is active.
        assert isinstance(dspy.settings["lm"], AgentEvalLM)
        mock_program = dspy.Predict("inputs, outputs -> result, rationale")
        mock_program.signature.instructions = "Optimized instructions"
        return mock_program
    with (
        patch("mlflow.genai.judges.optimizers.dspy.make_judge", return_value=MagicMock()),
        patch.object(optimizer, "_dspy_optimize", side_effect=check_context),
        patch.object(optimizer, "get_min_traces_required", return_value=0),
    ):
        optimizer.align(mock_judge, sample_traces_with_assessments)
def test_align_configures_openai_lm_in_context(sample_traces_with_assessments):
    """With an OpenAI URI, align() configures a plain dspy.LM in the context."""
    mock_judge = MockJudge(name="mock_judge", model="openai:/gpt-4")
    optimizer = ConcreteDSPyOptimizer(model="openai:/gpt-4.1")
    def check_context(*args, **kwargs):
        # Assert from inside _dspy_optimize, while the context is active.
        assert isinstance(dspy.settings["lm"], dspy.LM)
        assert dspy.settings["lm"].model == "openai/gpt-4.1"
        mock_program = dspy.Predict("inputs, outputs -> result, rationale")
        mock_program.signature.instructions = "Optimized instructions"
        return mock_program
    with (
        patch(
            "mlflow.genai.judges.optimizers.dspy.trace_to_dspy_example",
            return_value=MagicMock(),
        ),
        patch("mlflow.genai.judges.optimizers.dspy.make_judge", return_value=MagicMock()),
        patch.object(optimizer, "_dspy_optimize", side_effect=check_context),
        patch.object(optimizer, "get_min_traces_required", return_value=0),
    ):
        optimizer.align(mock_judge, sample_traces_with_assessments)
def test_dspy_program_forward_always_uses_original_judge_model():
    """forward() builds the judge with its original model, ignoring any lm kwarg."""
    original_judge_model = "openai:/gpt-4"
    mock_judge = MockJudge(name="test_judge", model=original_judge_model)
    optimizer = ConcreteDSPyOptimizer()
    program = optimizer._get_dspy_program_from_judge(mock_judge)
    make_judge_calls = []
    captured_args = {}
    def track_make_judge(name, instructions, model, feedback_value_type):
        make_judge_calls.append(model)
        captured_args["name"] = name
        captured_args["instructions"] = instructions
        mock_feedback = MagicMock()
        mock_feedback.value = "pass"
        mock_feedback.rationale = "Test"
        return MagicMock(return_value=mock_feedback)
    # Test with no lm parameter
    with patch("mlflow.genai.judges.optimizers.dspy.make_judge", side_effect=track_make_judge):
        program.forward(inputs="test", outputs="test")
    assert make_judge_calls[0] == original_judge_model
    # Test with lm parameter (should still use original judge model)
    make_judge_calls.clear()
    with patch("mlflow.genai.judges.optimizers.dspy.make_judge", side_effect=track_make_judge):
        mock_lm = MagicMock()
        mock_lm.model = "anthropic/claude-3"
        program.forward(inputs="test", outputs="test", lm=mock_lm)
    # Should still use the original judge model, not the lm parameter
    assert make_judge_calls[0] == original_judge_model
    assert "Inputs for assessment:" not in captured_args["instructions"]
def test_dspy_program_uses_make_judge_with_optimized_instructions(
    sample_traces_with_assessments,
):
    """After optimization, forward() builds the judge with the new instructions."""
    original_instructions = (
        "Original judge instructions for evaluation of {{inputs}} and {{outputs}}"
    )
    optimized_instructions = (
        "Optimized instructions after DSPy alignment for {{inputs}} and {{outputs}}"
    )
    mock_judge = MockJudge(
        name="mock_judge", model="openai:/gpt-4", instructions=original_instructions
    )
    captured_instructions = None
    def capture_make_judge(name, instructions, model, feedback_value_type):
        # Remember what instructions the judge was (re)built with.
        nonlocal captured_instructions
        captured_instructions = instructions
        mock_feedback = MagicMock()
        mock_feedback.value = "pass"
        mock_feedback.rationale = "Test"
        return MagicMock(return_value=mock_feedback)
    class TestOptimizer(ConcreteDSPyOptimizer):
        def _dspy_optimize(self, program, examples, metric_fn):
            # Mutate the instructions, then invoke forward() under the patch
            # so the capture sees the post-optimization state.
            program.signature.instructions = optimized_instructions
            with patch(
                "mlflow.genai.judges.optimizers.dspy.make_judge",
                side_effect=capture_make_judge,
            ):
                program.forward(inputs="test input", outputs="test output")
            return program
    optimizer = TestOptimizer()
    with (
        patch("dspy.LM", MagicMock()),
        patch.object(TestOptimizer, "get_min_traces_required", return_value=5),
    ):
        optimizer.align(mock_judge, sample_traces_with_assessments)
    assert optimized_instructions in captured_instructions
    assert "Inputs for assessment:" not in captured_instructions
def test_align_includes_demos_in_judge_instructions(sample_traces_with_assessments):
    """Demos on the optimized program are appended to the judge's instructions."""
    mock_judge = MockJudge(name="mock_judge", model="openai:/gpt-4")
    class OptimizerWithDemos(ConcreteDSPyOptimizer):
        def _dspy_optimize(self, program, examples, metric_fn):
            # Create a program with demos (like SIMBA produces)
            optimized = dspy.Predict("inputs, outputs -> result, rationale")
            optimized.signature.instructions = (
                "Optimized instructions for evaluating {{inputs}} and {{outputs}}"
            )
            optimized.demos = [
                dspy.Example(
                    inputs="Example question",
                    outputs="Example answer",
                    result="pass",
                    rationale="Good answer",
                ),
                dspy.Example(
                    inputs="Another question",
                    outputs="Another answer",
                    result="fail",
                    rationale="Poor answer",
                ),
            ]
            return optimized
    optimizer = OptimizerWithDemos()
    with (
        patch("dspy.LM", MagicMock()),
        patch.object(OptimizerWithDemos, "get_min_traces_required", return_value=5),
    ):
        result = optimizer.align(mock_judge, sample_traces_with_assessments)
    assert "Here are some examples of good assessments:" in result.instructions
    assert "Example 1:" in result.instructions
    assert "Example question" in result.instructions
    assert "Example answer" in result.instructions
    assert "pass" in result.instructions
    assert "Good answer" in result.instructions
    assert "Example 2:" in result.instructions
    assert "Another question" in result.instructions
def test_create_judge_from_dspy_program_uses_optimized_instructions():
    """The rebuilt judge keeps its identity but takes the program's instructions."""
    optimizer = ConcreteDSPyOptimizer()
    mock_judge = MockJudge(name="test_judge", model="openai:/gpt-4")
    program = dspy.Predict("inputs, outputs -> result, rationale")
    program.signature.instructions = "New optimized instructions for {{inputs}} and {{outputs}}"
    result = optimizer._create_judge_from_dspy_program(program, mock_judge)
    assert result.name == "test_judge"
    assert result.model == "openai:/gpt-4"
    assert "New optimized instructions" in result.instructions
    assert "Inputs for assessment:" not in result.instructions
def test_create_judge_from_dspy_program_with_empty_demos():
    """A program without demos produces no examples section in the instructions."""
    optimizer = ConcreteDSPyOptimizer()
    mock_judge = MockJudge(name="test_judge", model="openai:/gpt-4")
    program = dspy.Predict("inputs, outputs -> result, rationale")
    program.signature.instructions = "Instructions for {{inputs}} and {{outputs}}"
    result = optimizer._create_judge_from_dspy_program(program, mock_judge)
    assert "Here are some examples" not in result.instructions
    assert "Instructions for {{inputs}}" in result.instructions
def test_create_judge_from_dspy_program_with_demos():
    """Demos attached to the program are rendered as numbered examples."""
    optimizer = ConcreteDSPyOptimizer()
    mock_judge = MockJudge(name="test_judge", model="openai:/gpt-4")
    program = dspy.Predict("inputs, outputs -> result, rationale")
    program.signature.instructions = "Judge the {{inputs}} and {{outputs}}"
    program.demos = [
        dspy.Example(inputs="Q1", outputs="A1", result="pass", rationale="Good"),
    ]
    result = optimizer._create_judge_from_dspy_program(program, mock_judge)
    assert "Here are some examples of good assessments:" in result.instructions
    assert "Example 1:" in result.instructions
    assert "inputs: Q1" in result.instructions
def test_create_judge_from_dspy_program_preserves_feedback_value_type():
    """feedback_value_type from the original judge survives the rebuild."""
    optimizer = ConcreteDSPyOptimizer()
    judge = make_judge(
        name="test_judge",
        instructions="Check {{inputs}} vs {{outputs}}",
        model="openai:/gpt-4",
        feedback_value_type=bool,
    )
    program = dspy.Predict("inputs, outputs -> result, rationale")
    program.signature.instructions = "Check {{inputs}} vs {{outputs}}"
    result = optimizer._create_judge_from_dspy_program(program, judge)
    assert result.feedback_value_type == bool
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/test_dspy_base.py",
"license": "Apache License 2.0",
"lines": 385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/judges/optimizers/test_dspy_utils.py | from unittest.mock import MagicMock, Mock, patch
import dspy
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.base import JudgeField
from mlflow.genai.judges.optimizers.dspy_utils import (
AgentEvalLM,
agreement_metric,
append_input_fields_section,
construct_dspy_lm,
convert_litellm_to_mlflow_uri,
create_dspy_signature,
format_demos_as_examples,
trace_to_dspy_example,
)
from mlflow.genai.utils.trace_utils import (
extract_expectations_from_trace,
extract_request_from_trace,
extract_response_from_trace,
)
from mlflow.metrics.genai.model_utils import convert_mlflow_uri_to_litellm
from tests.genai.judges.optimizers.conftest import MockJudge
def test_sanitize_judge_name(sample_trace_with_assessment, mock_judge):
    """Whitespace/case variants of the judge name still match the assessment."""
    # The sanitization is now done inside trace_to_dspy_example
    # Test that it correctly handles different judge name formats
    mock_dspy = MagicMock()
    mock_example = MagicMock()
    mock_example.with_inputs.return_value = mock_example
    mock_dspy.Example.return_value = mock_example
    with patch.dict("sys.modules", {"dspy": mock_dspy}):
        judge1 = MockJudge(name=" mock_judge ")
        judge2 = MockJudge(name="Mock_Judge")
        judge3 = MockJudge(name="MOCK_JUDGE")
        assert trace_to_dspy_example(sample_trace_with_assessment, judge1) is not None
        assert trace_to_dspy_example(sample_trace_with_assessment, judge2) is not None
        assert trace_to_dspy_example(sample_trace_with_assessment, judge3) is not None
def test_trace_to_dspy_example_two_human_assessments(trace_with_two_human_assessments, mock_judge):
    """When two human assessments exist, the more recent one wins."""
    dspy = pytest.importorskip("dspy", reason="DSPy not installed")
    trace = trace_with_two_human_assessments
    result = trace_to_dspy_example(trace, mock_judge)
    assert isinstance(result, dspy.Example)
    # Should use the newer assessment with value="pass" and specific rationale
    assert result["result"] == "pass"
    assert result["rationale"] == "Second assessment - should be used (more recent)"
def test_trace_to_dspy_example_human_vs_llm_priority(
    trace_with_human_and_llm_assessments, mock_judge
):
    """Human assessments are preferred over LLM ones, even when older."""
    dspy = pytest.importorskip("dspy", reason="DSPy not installed")
    trace = trace_with_human_and_llm_assessments
    result = trace_to_dspy_example(trace, mock_judge)
    assert isinstance(result, dspy.Example)
    # Should use the HUMAN assessment despite being older
    assert result["result"] == "fail"
    assert result["rationale"] == "Human assessment - should be prioritized"
@pytest.mark.parametrize(
    ("trace_fixture", "required_fields", "expected_inputs"),
    [
        # Test different combinations of required fields
        ("sample_trace_with_assessment", ["inputs"], ["inputs"]),
        ("sample_trace_with_assessment", ["outputs"], ["outputs"]),
        ("sample_trace_with_assessment", ["inputs", "outputs"], ["inputs", "outputs"]),
        (
            "sample_trace_with_assessment",
            ["trace", "inputs", "outputs"],
            ["trace", "inputs", "outputs"],
        ),
        ("trace_with_expectations", ["expectations"], ["expectations"]),
        (
            "trace_with_expectations",
            ["inputs", "expectations"],
            ["inputs", "expectations"],
        ),
        (
            "trace_with_expectations",
            ["outputs", "expectations"],
            ["outputs", "expectations"],
        ),
        (
            "trace_with_expectations",
            ["inputs", "outputs", "expectations"],
            ["inputs", "outputs", "expectations"],
        ),
        (
            "trace_with_expectations",
            ["trace", "inputs", "outputs", "expectations"],
            ["trace", "inputs", "outputs", "expectations"],
        ),
    ],
)
def test_trace_to_dspy_example_success(request, trace_fixture, required_fields, expected_inputs):
    """Examples are built with exactly the input fields the judge requires."""
    dspy = pytest.importorskip("dspy", reason="DSPy not installed")
    trace = request.getfixturevalue(trace_fixture)
    class TestJudge(MockJudge):
        def __init__(self, fields):
            super().__init__(name="mock_judge")
            self._fields = fields
        def get_input_fields(self):
            return [JudgeField(name=field, description=f"Test {field}") for field in self._fields]
    judge = TestJudge(required_fields)
    # Use real DSPy since we've skipped if it's not available
    result = trace_to_dspy_example(trace, judge)
    assert isinstance(result, dspy.Example)
    # Build expected kwargs based on required fields
    expected_kwargs = {}
    if "trace" in required_fields:
        expected_kwargs["trace"] = trace
    if "inputs" in required_fields:
        expected_kwargs["inputs"] = extract_request_from_trace(trace)
    if "outputs" in required_fields:
        expected_kwargs["outputs"] = extract_response_from_trace(trace)
    if "expectations" in required_fields:
        expected_kwargs["expectations"] = extract_expectations_from_trace(trace)
    # Determine expected rationale based on fixture
    if trace_fixture == "trace_with_expectations":
        expected_rationale = "Meets expectations"
    else:
        expected_rationale = "This looks good"
    # Construct an expected example and assert that the result is the same
    expected_example = dspy.Example(
        result="pass",
        rationale=expected_rationale,
        **expected_kwargs,
    ).with_inputs(*expected_inputs)
    # Compare the examples
    assert result == expected_example
@pytest.mark.parametrize(
    ("trace_fixture", "required_fields"),
    [
        ("sample_trace_with_assessment", ["expectations"]),
        ("sample_trace_with_assessment", ["inputs", "expectations"]),
        ("sample_trace_with_assessment", ["outputs", "expectations"]),
        ("sample_trace_with_assessment", ["inputs", "outputs", "expectations"]),
        (
            "sample_trace_with_assessment",
            ["trace", "inputs", "outputs", "expectations"],
        ),
    ],
)
def test_trace_to_dspy_example_missing_required_fields(request, trace_fixture, required_fields):
    """A trace missing a judge-required field yields no example (returns None)."""
    trace = request.getfixturevalue(trace_fixture)
    class TestJudge(MockJudge):
        def __init__(self, fields):
            super().__init__(name="mock_judge")
            self._fields = fields
        def get_input_fields(self):
            return [JudgeField(name=field, description=f"Test {field}") for field in self._fields]
    judge = TestJudge(required_fields)
    result = trace_to_dspy_example(trace, judge)
    assert result is None
def test_trace_to_dspy_example_no_assessment(sample_trace_without_assessment, mock_judge):
    """A trace without a matching assessment yields no example."""
    # Use the fixture for trace without assessment
    trace = sample_trace_without_assessment
    # This should return None since there's no matching assessment
    result = trace_to_dspy_example(trace, mock_judge)
    assert result is None
def test_create_dspy_signature(mock_judge):
    """The DSPy signature mirrors the judge's instructions and I/O fields."""
    pytest.importorskip("dspy", reason="DSPy not installed")
    signature = create_dspy_signature(mock_judge)
    assert signature.instructions == mock_judge.instructions
    judge_input_fields = mock_judge.get_input_fields()
    for field in judge_input_fields:
        assert field.name in signature.input_fields
        assert signature.input_fields[field.name].json_schema_extra["desc"] == field.description
    judge_output_fields = mock_judge.get_output_fields()
    for field in judge_output_fields:
        assert field.name in signature.output_fields
        assert signature.output_fields[field.name].json_schema_extra["desc"] == field.description
def test_agreement_metric():
    """agreement_metric is True iff example and prediction results match."""
    # Test metric with matching results
    example = Mock()
    example.result = "pass"
    pred = Mock()
    pred.result = "pass"
    assert agreement_metric(example, pred) is True
    # Test metric with different results
    pred.result = "fail"
    assert agreement_metric(example, pred) is False
def test_agreement_metric_error_handling():
    """Invalid inputs are treated as disagreement rather than raising."""
    # Test with invalid inputs
    result = agreement_metric(None, None)
    assert result is False
@pytest.mark.parametrize(
    ("mlflow_uri", "expected_litellm_uri"),
    [
        ("openai:/gpt-4", "openai/gpt-4"),
        ("openai:/gpt-3.5-turbo", "openai/gpt-3.5-turbo"),
        ("anthropic:/claude-3", "anthropic/claude-3"),
        ("anthropic:/claude-3.5-sonnet", "anthropic/claude-3.5-sonnet"),
        ("cohere:/command", "cohere/command"),
        ("databricks:/dbrx", "databricks/dbrx"),
    ],
)
def test_convert_mlflow_uri_to_litellm(mlflow_uri, expected_litellm_uri):
    """MLflow 'provider:/model' URIs convert to LiteLLM 'provider/model' names."""
    assert convert_mlflow_uri_to_litellm(mlflow_uri) == expected_litellm_uri
@pytest.mark.parametrize(
    "invalid_uri",
    [
        "openai-gpt-4", # Invalid format (missing colon-slash)
        "", # Empty string
        None, # None value
    ],
)
def test_convert_mlflow_uri_to_litellm_invalid(invalid_uri):
    """Malformed MLflow URIs raise an MlflowException."""
    with pytest.raises(MlflowException, match="Failed to convert MLflow model URI"):
        convert_mlflow_uri_to_litellm(invalid_uri)
@pytest.mark.parametrize(
    ("litellm_model", "expected_uri"),
    [
        ("openai/gpt-4", "openai:/gpt-4"),
        ("openai/gpt-3.5-turbo", "openai:/gpt-3.5-turbo"),
        ("anthropic/claude-3", "anthropic:/claude-3"),
        ("anthropic/claude-3.5-sonnet", "anthropic:/claude-3.5-sonnet"),
        ("cohere/command", "cohere:/command"),
        ("databricks/dbrx", "databricks:/dbrx"),
    ],
)
def test_convert_litellm_to_mlflow_uri(litellm_model, expected_uri):
    """LiteLLM 'provider/model' names convert back to MLflow URIs."""
    result = convert_litellm_to_mlflow_uri(litellm_model)
    assert result == expected_uri
@pytest.mark.parametrize(
    "invalid_model",
    [
        "openai-gpt-4", # Missing slash
        "", # Empty string
        None, # None value
        "openai/", # Missing model name
        "/gpt-4", # Missing provider
        "//", # Empty provider and model
    ],
)
def test_convert_litellm_to_mlflow_uri_invalid(invalid_model):
    """Malformed LiteLLM model names raise with a case-specific message."""
    with pytest.raises(MlflowException, match="LiteLLM|empty|None") as exc_info:
        convert_litellm_to_mlflow_uri(invalid_model)
    if invalid_model is None or invalid_model == "":
        assert "cannot be empty or None" in str(exc_info.value)
    elif "/" not in invalid_model:
        assert "Expected format: 'provider/model'" in str(exc_info.value)
@pytest.mark.parametrize(
    "mlflow_uri",
    [
        "openai:/gpt-4",
        "anthropic:/claude-3.5-sonnet",
        "cohere:/command",
        "databricks:/dbrx",
    ],
)
def test_mlflow_to_litellm_uri_round_trip_conversion(mlflow_uri):
    """MLflow -> LiteLLM -> MLflow conversion is lossless."""
    # Convert MLflow -> LiteLLM
    litellm_format = convert_mlflow_uri_to_litellm(mlflow_uri)
    # Convert LiteLLM -> MLflow
    result = convert_litellm_to_mlflow_uri(litellm_format)
    # Should get back the original
    assert result == mlflow_uri, f"Round-trip failed for {mlflow_uri}"
@pytest.mark.parametrize(
    ("model", "expected_type"),
    [
        ("databricks", "AgentEvalLM"),
        ("openai:/gpt-4", "dspy.LM"),
        ("anthropic:/claude-3", "dspy.LM"),
    ],
)
def test_construct_dspy_lm_utility_method(model, expected_type):
    """construct_dspy_lm picks the LM class based on the model URI."""
    result = construct_dspy_lm(model)
    if expected_type == "AgentEvalLM":
        assert isinstance(result, AgentEvalLM)
    elif expected_type == "dspy.LM":
        assert isinstance(result, dspy.LM)
        # Ensure MLflow URI format is converted (no :/ in the model)
        assert ":/" not in result.model
def test_agent_eval_lm_uses_optimizer_session_name():
    """AgentEvalLM tags chat-completion calls with the optimizer session name."""
    from mlflow.utils import AttrDict
    pytest.importorskip("dspy", reason="DSPy not installed")
    mock_response = AttrDict({"output": "test response", "error_message": None})
    with (
        patch("mlflow.genai.judges.optimizers.dspy_utils.call_chat_completions") as mock_call,
        patch("mlflow.genai.judges.optimizers.dspy_utils.VERSION", "1.0.0"),
    ):
        mock_call.return_value = mock_response
        agent_lm = AgentEvalLM()
        agent_lm.forward(prompt="test prompt")
        # Verify call_chat_completions was called with the optimizer session name
        mock_call.assert_called_once_with(
            user_prompt="test prompt",
            system_prompt=None,
            session_name="mlflow-judge-optimizer-v1.0.0",
            use_case="judge_alignment",
        )
@pytest.mark.parametrize(
    ("instructions", "field_names", "should_append"),
    [
        # Fields already present - should NOT append
        (
            "Evaluate {{inputs}} and {{outputs}} for quality",
            ["inputs", "outputs"],
            False,
        ),
        # Fields NOT present - should append
        (
            "Evaluate the response for quality",
            ["inputs", "outputs"],
            True,
        ),
        # No fields defined - should NOT append
        (
            "Some instructions",
            [],
            False,
        ),
        # Plain field names present but not mustached - should append
        (
            "Check the inputs and outputs carefully",
            ["inputs", "outputs"],
            True,
        ),
    ],
)
def test_append_input_fields_section(instructions, field_names, should_append):
    """An inputs section is appended only when mustached field refs are absent."""
    class TestJudge(MockJudge):
        def __init__(self, fields):
            super().__init__(name="test_judge")
            self._fields = fields
        def get_input_fields(self):
            return [JudgeField(name=f, description=f"The {f}") for f in self._fields]
    judge = TestJudge(field_names)
    result = append_input_fields_section(instructions, judge)
    if should_append:
        assert result != instructions
        assert "Inputs for assessment:" in result
        for field in field_names:
            assert f"{{{{ {field} }}}}" in result
    else:
        assert result == instructions
def test_format_demos_empty_list(mock_judge):
    """An empty demo list formats to an empty string."""
    result = format_demos_as_examples([], mock_judge)
    assert result == ""
def test_format_demos_multiple_demos(mock_judge):
    """All demos are numbered in order and values are rendered without truncation."""
    oversized_value = "x" * 600
    examples = [
        dspy.Example(inputs="Q1", outputs="A1", result="pass", rationale="Good"),
        dspy.Example(inputs="Q2", outputs="A2", result="fail", rationale="Bad"),
        dspy.Example(inputs=oversized_value, outputs="short", result="pass", rationale="Test"),
    ]
    rendered = format_demos_as_examples(examples, mock_judge)
    for expected in ("Example 1:", "Example 2:", "Example 3:", "inputs: Q1", "inputs: Q2"):
        assert expected in rendered
    # Long values should NOT be truncated
    assert oversized_value in rendered
def test_format_demos_respects_judge_fields():
    """Only fields declared by the judge are rendered; extras are dropped."""
    class _TwoInputJudge(MockJudge):
        def get_input_fields(self):
            return [
                JudgeField(name="query", description="The query"),
                JudgeField(name="context", description="The context"),
            ]
        def get_output_fields(self):
            return [JudgeField(name="verdict", description="The verdict")]
    demo = dspy.Example(
        query="What is AI?",
        context="AI is artificial intelligence",
        verdict="pass",
        extra_field="should not appear",  # Not in judge fields
    )
    rendered = format_demos_as_examples([demo], _TwoInputJudge(name="custom_judge"))
    assert "query: What is AI?" in rendered
    assert "context: AI is artificial intelligence" in rendered
    assert "verdict: pass" in rendered
    assert "extra_field" not in rendered
def test_format_demos_raises_on_invalid_demo(mock_judge):
    """A demo that cannot be converted to a dict raises MlflowException with its index."""
    class _Unconvertible:
        pass
    bad_demos = [
        dspy.Example(inputs="Q1", outputs="A1", result="pass", rationale="Good"),
        _Unconvertible(),  # has no dict representation, so formatting must fail
    ]
    with pytest.raises(MlflowException, match="Demo at index 1 cannot be converted to dict"):
        format_demos_as_examples(bad_demos, mock_judge)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/test_dspy_utils.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/judges/optimizers/test_simba.py | from importlib import reload
from unittest.mock import MagicMock, patch
import dspy
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.optimizers import SIMBAAlignmentOptimizer
def test_dspy_optimize_no_dspy():
    """Importing/reloading the simba module without dspy raises MlflowException."""
    # dspy is imported at module level, so the failure surfaces at import/reload
    # time rather than when optimizer methods are called.
    with patch.dict("sys.modules", {"dspy": None}):
        with pytest.raises(MlflowException, match="DSPy library is required"):
            import mlflow.genai.judges.optimizers.simba as simba_module
            reload(simba_module)
def test_full_alignment_workflow(mock_judge, sample_traces_with_assessments):
    """End-to-end align(): the compiled program's instructions are adopted, and no
    fields section is appended when the fields are already mustache-templated."""
    optimized_text = "Optimized instructions with {{inputs}} and {{outputs}}"
    compiled_program = dspy.Predict("inputs, outputs -> result, rationale")
    compiled_program.signature.instructions = optimized_text
    simba_instance = MagicMock()
    simba_instance.compile.return_value = compiled_program
    with (
        patch("dspy.SIMBA", MagicMock()) as simba_cls,
        patch("dspy.LM", MagicMock()),
    ):
        simba_cls.return_value = simba_instance
        optimizer = SIMBAAlignmentOptimizer()
        # The fixture provides 5 traces; lower the minimum so align() accepts them.
        with patch.object(SIMBAAlignmentOptimizer, "get_min_traces_required", return_value=5):
            aligned = optimizer.align(mock_judge, sample_traces_with_assessments)
        assert aligned is not None
        assert aligned.model == mock_judge.model
        # The judge instructions should include the optimized instructions
        assert optimized_text in aligned.instructions
        # {{inputs}}/{{outputs}} already templated, so no fields section is appended
        assert "Inputs for assessment:" not in aligned.instructions
def test_custom_simba_parameters(mock_judge, sample_traces_with_assessments):
    """Custom batch_size, seed, and simba_kwargs flow through to dspy.SIMBA and
    to its compile() call."""
    compiled_program = dspy.Predict("inputs, outputs -> result, rationale")
    compiled_program.signature.instructions = (
        "Optimized instructions with {{inputs}} and {{outputs}}"
    )
    simba_instance = MagicMock()
    simba_instance.compile.return_value = compiled_program

    def passthrough_metric(example, pred, trace=None):
        return True

    batch = 15
    extra_kwargs = {
        "metric": passthrough_metric,
        "max_demos": 5,
        "num_threads": 2,
        "max_steps": 10,
    }
    with patch("dspy.SIMBA") as simba_cls, patch("dspy.LM", MagicMock()):
        simba_cls.return_value = simba_instance
        optimizer = SIMBAAlignmentOptimizer(
            batch_size=batch,
            seed=123,
            simba_kwargs=extra_kwargs,
        )
        with patch.object(SIMBAAlignmentOptimizer, "get_min_traces_required", return_value=5):
            optimizer.align(mock_judge, sample_traces_with_assessments)
        # SIMBA's constructor received every custom parameter
        simba_cls.assert_called_once()
        init_kwargs = simba_cls.call_args.kwargs
        assert init_kwargs["bsize"] == batch
        assert init_kwargs["metric"] == passthrough_metric
        assert init_kwargs["max_demos"] == 5
        assert init_kwargs["num_threads"] == 2
        assert init_kwargs["max_steps"] == 10
        # The seed is forwarded to compile()
        simba_instance.compile.assert_called_once()
        assert simba_instance.compile.call_args.kwargs["seed"] == 123
def test_default_parameters_not_passed(mock_judge, sample_traces_with_assessments):
    """A default-constructed optimizer passes only metric and bsize to dspy.SIMBA."""
    compiled_program = dspy.Predict("inputs, outputs -> result, rationale")
    compiled_program.signature.instructions = (
        "Optimized instructions with {{inputs}} and {{outputs}}"
    )
    simba_instance = MagicMock()
    simba_instance.compile.return_value = compiled_program
    with patch("dspy.SIMBA") as simba_cls, patch("dspy.LM", MagicMock()):
        simba_cls.return_value = simba_instance
        optimizer = SIMBAAlignmentOptimizer()
        with patch.object(SIMBAAlignmentOptimizer, "get_min_traces_required", return_value=5):
            optimizer.align(mock_judge, sample_traces_with_assessments)
        # Exactly the two required kwargs, nothing else
        simba_cls.assert_called_once()
        init_kwargs = simba_cls.call_args.kwargs
        assert set(init_kwargs) >= {"metric", "bsize"}
        assert len(init_kwargs) == 2
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/test_simba.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.