sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
mlflow/mlflow:mlflow/demo/generators/traces.py | from __future__ import annotations
import hashlib
import logging
import random
import re
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Literal
import mlflow
from mlflow.demo.base import (
DEMO_EXPERIMENT_NAME,
DEMO_PROMPT_PREFIX,
BaseDemoGenerator,
DemoFeature,
DemoResult,
)
from mlflow.demo.data import (
AGENT_TRACES,
PROMPT_TRACES,
RAG_TRACES,
SESSION_TRACES,
DemoTrace,
)
from mlflow.entities import SpanType
from mlflow.tracing.constant import SpanAttributeKey, TraceMetadataKey
from mlflow.tracking._tracking_service.utils import _get_store
_logger = logging.getLogger(__name__)
DEMO_VERSION_TAG = "mlflow.demo.version"
DEMO_TRACE_TYPE_TAG = "mlflow.demo.trace_type"
_TOTAL_TRACES_PER_VERSION = 17
def _get_trace_timestamps(trace_index: int, version: str) -> tuple[int, int]:
"""Get deterministic start and end timestamps for a trace.
Distributes traces over the last 7 days with a deterministic pattern
based on the trace index and version. This ensures the demo dashboard
shows activity across the time range.
Args:
trace_index: Index of the trace (0-based) within its version set.
version: "v1" or "v2" - v1 traces are earlier, v2 traces are later.
Returns:
Tuple of (start_time_ns, end_time_ns).
"""
now = datetime.now(timezone.utc)
seven_days_ago = now - timedelta(days=7)
if version == "v1":
day_offset = (trace_index * 3.5) / _TOTAL_TRACES_PER_VERSION
else:
day_offset = 3.5 + (trace_index * 3.5) / _TOTAL_TRACES_PER_VERSION
hash_input = f"{trace_index}:{version}"
hash_val = int(hashlib.md5(hash_input.encode(), usedforsecurity=False).hexdigest()[:8], 16)
hour_offset = (hash_val % 24) / 24
minute_offset = ((hash_val >> 8) % 60) / (60 * 24)
trace_time = seven_days_ago + timedelta(days=day_offset + hour_offset + minute_offset)
duration_ms = 50 + (hash_val % 1950)
start_ns = int(trace_time.timestamp() * 1_000_000_000)
end_ns = start_ns + (duration_ms * 1_000_000)
return start_ns, end_ns
def _estimate_tokens(text: str) -> int:
"""Estimate token count for text (rough approximation: ~4 chars per token)."""
return max(1, len(text) // 4)
@dataclass(frozen=True)
class _Model:
    """Model configuration with name, provider, and pricing."""

    name: str  # model identifier shown in span attributes, e.g. "gpt-5.2"
    provider: str  # vendor key, e.g. "openai"
    pricing: tuple[float, float]  # (input $/1M tokens, output $/1M tokens)
# Using three distinct models so the cost breakdown chart shows a nice distribution.
# The pricing figures are illustrative only; they drive the synthetic cost
# attributes computed by _compute_cost below.
GPT_5_2 = _Model(name="gpt-5.2", provider="openai", pricing=(1.75, 14.00))
CLAUDE_SONNET_4_5 = _Model(name="claude-sonnet-4-5", provider="anthropic", pricing=(3.00, 15.00))
GEMINI_3_PRO = _Model(name="gemini-3-pro", provider="google", pricing=(2.00, 12.00))
_DEMO_MODELS = (GPT_5_2, CLAUDE_SONNET_4_5, GEMINI_3_PRO)
def _compute_cost(model: _Model, prompt_tokens: int, completion_tokens: int) -> dict[str, float]:
"""Compute synthetic cost using approximate per-model pricing."""
input_rate, output_rate = model.pricing
input_cost = prompt_tokens * input_rate / 1_000_000
output_cost = completion_tokens * output_rate / 1_000_000
return {
"input_cost": input_cost,
"output_cost": output_cost,
"total_cost": input_cost + output_cost,
}
class TracesDemoGenerator(BaseDemoGenerator):
    """Generates demo traces for the MLflow UI.

    Creates two sets of traces showing agent improvement:
    - V1 traces: Initial/baseline agent (uses v1_response)
    - V2 traces: Improved agent after updates (uses v2_response)

    Both versions use the same inputs but produce different outputs,
    simulating an agent improvement workflow.

    Trace types generated:
    - RAG: Document retrieval and generation pipeline
    - Agent: Tool-using agent with function calls
    - Prompt: Prompt template-based generation
    - Session: Multi-turn conversation sessions
    """

    # Feature key identifying this generator.
    name = DemoFeature.TRACES
    # Schema version of the generated demo data; a mismatch with the stored
    # version triggers regeneration (handled by BaseDemoGenerator).
    version = 1
def generate(self) -> DemoResult:
    """Create the demo experiment (if needed) and both trace sets.

    Returns:
        DemoResult listing every created trace id and a UI navigation URL.
    """
    self._restore_experiment_if_deleted()
    experiment = mlflow.set_experiment(DEMO_EXPERIMENT_NAME)
    # Tag the experiment so the UI treats it as a GenAI development workspace.
    mlflow.MlflowClient().set_experiment_tag(
        experiment.experiment_id, "mlflow.experimentKind", "genai_development"
    )
    mlflow.set_experiment_tag(
        "mlflow.note.content",
        "Sample experiment with pre-populated demo data including traces, evaluations, "
        "and prompts. Explore MLflow's GenAI features with this experiment.",
    )
    created_ids = [
        *self._generate_trace_set("v1"),
        *self._generate_trace_set("v2"),
    ]
    return DemoResult(
        feature=self.name,
        entity_ids=created_ids,
        navigation_url=f"#/experiments/{experiment.experiment_id}",
    )
def _generate_trace_set(self, version: Literal["v1", "v2"]) -> list[str]:
    """Generate a complete set of traces for the given version."""
    created: list[str] = []
    position = 0
    for rag_def in RAG_TRACES:
        start_ns, end_ns = _get_trace_timestamps(position, version)
        if rag_id := self._create_rag_trace(rag_def, version, start_ns, end_ns):
            created.append(rag_id)
        position += 1
    for agent_def in AGENT_TRACES:
        start_ns, end_ns = _get_trace_timestamps(position, version)
        if agent_id := self._create_agent_trace(agent_def, version, start_ns, end_ns):
            created.append(agent_id)
        position += 1
    # v1 traces alternate between prompt versions 1-2, v2 between versions 3-4.
    version_base = 1 if version == "v1" else 3
    for idx, prompt_def in enumerate(PROMPT_TRACES):
        start_ns, end_ns = _get_trace_timestamps(position, version)
        prompt_version_num = str(idx % 2 + version_base)
        if prompt_id := self._create_prompt_trace(
            prompt_def, version, start_ns, end_ns, prompt_version_num
        ):
            created.append(prompt_id)
        position += 1
    created.extend(self._create_session_traces(version, position))
    return created
def _data_exists(self) -> bool:
    """Return True if the demo experiment already contains at least one trace."""
    store = _get_store()
    try:
        experiment = store.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
        if experiment is None:
            return False
        if experiment.lifecycle_stage != "active":
            return False
        found = mlflow.search_traces(
            locations=[experiment.experiment_id],
            max_results=1,
        )
        return len(found) > 0
    except Exception:
        # Best-effort check: treat any lookup failure as "no data".
        _logger.debug("Failed to check if demo data exists", exc_info=True)
        return False
def delete_demo(self) -> None:
    """Best-effort removal of all demo traces from the demo experiment.

    Failures are logged at debug level and never raised, so demo cleanup
    cannot break the caller.
    """
    store = _get_store()
    try:
        experiment = store.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
        if experiment is None:
            return
        client = mlflow.MlflowClient()
        traces = client.search_traces(
            locations=[experiment.experiment_id],
            max_results=200,
        )
        if trace_ids := [trace.info.trace_id for trace in traces]:
            try:
                client.delete_traces(
                    experiment_id=experiment.experiment_id,
                    trace_ids=trace_ids,
                )
            except Exception:
                # Previously swallowed silently; log like the module's other
                # best-effort paths so cleanup failures are diagnosable.
                _logger.debug(
                    "Failed to delete %d demo traces", len(trace_ids), exc_info=True
                )
    except Exception:
        _logger.debug("Failed to delete demo traces", exc_info=True)
def _restore_experiment_if_deleted(self) -> None:
    """Restore the demo experiment if it was soft-deleted."""
    try:
        experiment = _get_store().get_experiment_by_name(DEMO_EXPERIMENT_NAME)
        if experiment is None:
            return
        if experiment.lifecycle_stage == "deleted":
            _logger.info("Restoring soft-deleted demo experiment")
            mlflow.MlflowClient().restore_experiment(experiment.experiment_id)
    except Exception:
        # Best-effort: a failed restore just means generation starts fresh.
        _logger.debug("Failed to check/restore demo experiment", exc_info=True)
def _get_response(self, trace_def: DemoTrace, version: Literal["v1", "v2"]) -> str:
    """Return the canned response matching the requested demo version."""
    if version == "v1":
        return trace_def.v1_response
    return trace_def.v2_response
def _create_rag_trace(
    self,
    trace_def: DemoTrace,
    version: Literal["v1", "v2"],
    start_ns: int,
    end_ns: int,
) -> str | None:
    """Create a RAG pipeline trace: embed -> retrieve -> generate.

    Args:
        trace_def: Demo trace definition supplying the query and responses.
        version: "v1" or "v2"; selects which canned response is used.
        start_ns: Trace start time in nanoseconds.
        end_ns: Trace end time in nanoseconds.

    Returns:
        The created trace id.
    """
    response = self._get_response(trace_def, version)
    # +50 models the token overhead of the synthetic system prompt/context.
    prompt_tokens = _estimate_tokens(trace_def.query) + 50
    completion_tokens = _estimate_tokens(response)
    total_duration = end_ns - start_ns
    # Timing budget: ~10% embedding, ~20% retrieval, ~65% generation, ~5% tail.
    embed_end = start_ns + int(total_duration * 0.1)
    retrieve_end = embed_end + int(total_duration * 0.2)
    llm_start = retrieve_end
    llm_end = end_ns - int(total_duration * 0.05)
    root = mlflow.start_span_no_context(
        name="rag_pipeline",
        span_type=SpanType.CHAIN,
        inputs={"query": trace_def.query},
        metadata={DEMO_VERSION_TAG: version, DEMO_TRACE_TYPE_TAG: "rag"},
        start_time_ns=start_ns,
    )
    embed = mlflow.start_span_no_context(
        name="embed_query",
        span_type=SpanType.EMBEDDING,
        parent_span=root,
        inputs={"text": trace_def.query},
        start_time_ns=start_ns + 1000,
    )
    # Fake 384-dim embedding; only the first 5 values are recorded as output.
    embedding = [random.uniform(-1, 1) for _ in range(384)]
    embed.set_outputs({"embedding": embedding[:5], "dimensions": 384})
    embed.end(end_time_ns=embed_end)
    retrieve = mlflow.start_span_no_context(
        name="retrieve_docs",
        span_type=SpanType.RETRIEVER,
        parent_span=root,
        inputs={"embedding": embedding[:5], "top_k": 3},
        start_time_ns=embed_end + 1000,
    )
    # Synthetic retrieval scores in the 0.70-0.95 range.
    docs = [
        {"id": f"doc_{i}", "score": round(0.7 + random.uniform(0, 0.25), 2)} for i in range(3)
    ]
    retrieve.set_outputs({"documents": docs})
    retrieve.end(end_time_ns=retrieve_end)
    model = GPT_5_2
    llm = mlflow.start_span_no_context(
        name="generate_response",
        span_type=SpanType.LLM,
        parent_span=root,
        inputs={
            "messages": [
                {"role": "system", "content": "You are an MLflow assistant."},
                {"role": "user", "content": trace_def.query},
            ],
            "context": docs,
            "model": model.name,
        },
        attributes={
            SpanAttributeKey.CHAT_USAGE: {
                "input_tokens": prompt_tokens,
                "output_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
            SpanAttributeKey.MODEL: model.name,
            SpanAttributeKey.MODEL_PROVIDER: model.provider,
            SpanAttributeKey.LLM_COST: _compute_cost(model, prompt_tokens, completion_tokens),
        },
        start_time_ns=llm_start,
    )
    llm.set_outputs({"response": response})
    llm.end(end_time_ns=llm_end)
    root.set_outputs({"response": response})
    root.end(end_time_ns=end_ns)
    return root.trace_id
def _create_agent_trace(
    self,
    trace_def: DemoTrace,
    version: Literal["v1", "v2"],
    start_ns: int,
    end_ns: int,
) -> str | None:
    """Create a tool-using agent trace: tool call(s) followed by LLM generation.

    Args:
        trace_def: Demo trace definition supplying query, tools, and responses.
        version: "v1" or "v2"; selects which canned response is used.
        start_ns: Trace start time in nanoseconds.
        end_ns: Trace end time in nanoseconds.

    Returns:
        The created trace id.
    """
    response = self._get_response(trace_def, version)
    # +100 models the token overhead of the system prompt and tool schemas.
    prompt_tokens = _estimate_tokens(trace_def.query) + 100
    completion_tokens = _estimate_tokens(response)
    total_duration = end_ns - start_ns
    # First ~30% of the trace is the tool phase; the LLM span starts after it.
    tool_duration = int(total_duration * 0.3)
    llm_start = start_ns + tool_duration + 10000
    root = mlflow.start_span_no_context(
        name="agent",
        span_type=SpanType.AGENT,
        inputs={"query": trace_def.query},
        metadata={DEMO_VERSION_TAG: version, DEMO_TRACE_TYPE_TAG: "agent"},
        start_time_ns=start_ns,
    )
    tool_start = start_ns + 5000
    if trace_def.tools:
        # Hoist the loop-invariant per-tool share; the guard also avoids a
        # ZeroDivisionError for definitions with no tools.
        per_tool_ns = tool_duration // len(trace_def.tools)
        for tool in trace_def.tools:
            tool_span = mlflow.start_span_no_context(
                name=tool.name,
                span_type=SpanType.TOOL,
                parent_span=root,
                inputs=tool.input,
                start_time_ns=tool_start,
            )
            tool_span.set_outputs(tool.output)
            tool_span.end(end_time_ns=tool_start + per_tool_ns)
            tool_start += per_tool_ns + 1000
    model = CLAUDE_SONNET_4_5
    llm = mlflow.start_span_no_context(
        name="generate_response",
        span_type=SpanType.LLM,
        parent_span=root,
        inputs={
            "messages": [
                {"role": "system", "content": "You are a helpful assistant with tools."},
                {"role": "user", "content": trace_def.query},
            ],
            "tool_results": [t.output for t in trace_def.tools],
            "model": model.name,
        },
        attributes={
            SpanAttributeKey.CHAT_USAGE: {
                "input_tokens": prompt_tokens,
                "output_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
            SpanAttributeKey.MODEL: model.name,
            SpanAttributeKey.MODEL_PROVIDER: model.provider,
            SpanAttributeKey.LLM_COST: _compute_cost(model, prompt_tokens, completion_tokens),
        },
        start_time_ns=llm_start,
    )
    llm.set_outputs({"response": response})
    llm.end(end_time_ns=end_ns - 5000)
    root.set_outputs({"response": response})
    root.end(end_time_ns=end_ns)
    return root.trace_id
def _create_prompt_trace(
    self,
    trace_def: DemoTrace,
    version: Literal["v1", "v2"],
    start_ns: int,
    end_ns: int,
    prompt_version: str = "1",
) -> str | None:
    """Create a prompt-based trace showing template rendering and generation.

    Fetches the actual registered prompt template and renders it with appropriate
    variables to ensure trace contents match the linked prompt version.

    Returns:
        The created trace id, or None if the definition has no prompt template.
    """
    response = self._get_response(trace_def, version)
    # Prompt traces require a template; skip definitions without one.
    if trace_def.prompt_template is None:
        return None
    full_prompt_name = f"{DEMO_PROMPT_PREFIX}.prompts.{trace_def.prompt_template.prompt_name}"
    try:
        client = mlflow.MlflowClient()
        prompt_version_obj = client.get_prompt_version(
            name=full_prompt_name,
            version=prompt_version,
        )
        actual_template = prompt_version_obj.template
    except Exception:
        # Fall back to the locally-defined template if the registry lookup fails.
        actual_template = trace_def.prompt_template.template
    variables = self._get_prompt_variables(
        trace_def.prompt_template.prompt_name,
        trace_def.query,
        trace_def.prompt_template.variables,
    )
    rendered_prompt = self._render_template(actual_template, variables)
    # +20 models the token overhead beyond the rendered prompt itself.
    prompt_tokens = _estimate_tokens(rendered_prompt) + 20
    completion_tokens = _estimate_tokens(response)
    total_duration = end_ns - start_ns
    # Timing budget: first ~10% rendering, the rest LLM generation.
    render_end = start_ns + int(total_duration * 0.1)
    llm_start = render_end + 1000
    root = mlflow.start_span_no_context(
        name="prompt_chain",
        span_type=SpanType.CHAIN,
        inputs={
            "query": trace_def.query,
            "template_variables": variables,
        },
        metadata={DEMO_VERSION_TAG: version, DEMO_TRACE_TYPE_TAG: "prompt"},
        start_time_ns=start_ns,
    )
    render = mlflow.start_span_no_context(
        name="render_prompt",
        span_type=SpanType.CHAIN,
        parent_span=root,
        inputs={
            "template": actual_template,
            "variables": variables,
        },
        start_time_ns=start_ns + 1000,
    )
    render.set_outputs({"rendered_prompt": rendered_prompt})
    render.end(end_time_ns=render_end)
    model = GEMINI_3_PRO
    llm = mlflow.start_span_no_context(
        name="generate_response",
        span_type=SpanType.LLM,
        parent_span=root,
        inputs={
            "messages": [
                {"role": "user", "content": rendered_prompt},
            ],
            "model": model.name,
        },
        attributes={
            SpanAttributeKey.CHAT_USAGE: {
                "input_tokens": prompt_tokens,
                "output_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
            SpanAttributeKey.MODEL: model.name,
            SpanAttributeKey.MODEL_PROVIDER: model.provider,
            SpanAttributeKey.LLM_COST: _compute_cost(model, prompt_tokens, completion_tokens),
        },
        start_time_ns=llm_start,
    )
    llm.set_outputs({"response": response})
    llm.end(end_time_ns=end_ns - 5000)
    root.set_outputs({"response": response})
    root.end(end_time_ns=end_ns)
    trace_id = root.trace_id
    # Link the registered prompt version so the UI can cross-reference it.
    self._link_prompt_to_trace(trace_def.prompt_template.prompt_name, trace_id, prompt_version)
    return trace_id
def _link_prompt_to_trace(
    self, short_prompt_name: str, trace_id: str, prompt_version: str = "1"
) -> None:
    """Best-effort: attach a registered prompt version to the given trace."""
    full_prompt_name = f"{DEMO_PROMPT_PREFIX}.prompts.{short_prompt_name}"
    try:
        client = mlflow.MlflowClient()
        version_obj = client.get_prompt_version(
            name=full_prompt_name,
            version=prompt_version,
        )
        client.link_prompt_versions_to_trace(
            prompt_versions=[version_obj],
            trace_id=trace_id,
        )
    except Exception:
        # Linking is cosmetic; never let a failure abort trace generation.
        _logger.debug(
            "Failed to link prompt %s v%s to trace %s",
            full_prompt_name,
            prompt_version,
            trace_id,
            exc_info=True,
        )
def _get_prompt_variables(
    self, prompt_name: str, query: str, base_variables: dict[str, str]
) -> dict[str, str]:
    """Get complete variable set for a prompt type.

    Combines base variables from the trace definition with additional
    variables that may be needed for more advanced prompt versions.
    """
    variables = {**base_variables}
    variables.setdefault("query", query)
    # Per-prompt defaults, applied only where the base variables are missing.
    prompt_defaults = {
        "customer-support": {
            "company_name": "TechCorp",
            "context": "Customer has been with us for 2 years, premium tier.",
        },
        "document-summarizer": {
            "max_words": "150",
            "audience": "technical professionals",
            "document": variables.get("query", "Sample document content for summarization."),
        },
        "code-reviewer": {
            "language": "python",
            "focus_areas": "security, performance, readability",
            "severity_levels": "critical, warning, suggestion",
            "code": variables.get("query", "def example(): pass"),
        },
    }
    for key, value in prompt_defaults.get(prompt_name, {}).items():
        variables.setdefault(key, value)
    return variables
def _render_template(
    self, template: str | list[dict[str, str]], variables: dict[str, str]
) -> str:
    """Render a prompt template with variables.

    Handles both plain-string templates and chat-format templates
    (lists of role/content message dicts).
    """

    def fill(text: str) -> str:
        # Replace each {{ var }} placeholder, tolerating inner whitespace.
        for name, value in variables.items():
            text = re.sub(r"\{\{\s*" + name + r"\s*\}\}", str(value), text)
        return text

    if isinstance(template, str):
        return fill(template)
    if isinstance(template, list):
        rendered_parts = [
            f"[{msg.get('role', 'user')}]: {fill(msg.get('content', ''))}" for msg in template
        ]
        return "\n\n".join(rendered_parts)
    return str(template)
def _create_session_traces(self, version: Literal["v1", "v2"], start_index: int) -> list[str]:
    """Create multi-turn conversation session traces."""
    created: list[str] = []
    active_session = None
    turn = 0
    for offset, session_def in enumerate(SESSION_TRACES):
        if session_def.session_id != active_session:
            # A new conversation starts: reset the turn counter.
            active_session = session_def.session_id
            turn = 0
        turn += 1
        start_ns, end_ns = _get_trace_timestamps(start_index + offset, version)
        # Suffix the session id so v1 and v2 produce distinct sessions.
        if trace_id := self._create_session_turn_trace(
            session_def,
            turn,
            version,
            f"{session_def.session_id}-{version}",
            start_ns,
            end_ns,
        ):
            created.append(trace_id)
    return created
def _create_session_turn_trace(
    self,
    trace_def: DemoTrace,
    turn: int,
    version: Literal["v1", "v2"],
    versioned_session_id: str,
    start_ns: int,
    end_ns: int,
) -> str | None:
    """Create a single turn in a conversation session.

    Args:
        trace_def: Demo trace definition supplying query, tools, and responses.
        turn: 1-based turn number within the session.
        version: "v1" or "v2"; selects which canned response is used.
        versioned_session_id: Session id suffixed with the demo version.
        start_ns: Trace start time in nanoseconds.
        end_ns: Trace end time in nanoseconds.

    Returns:
        The created trace id.
    """
    response = self._get_response(trace_def, version)
    # +80 models the token overhead of system prompt and conversation history.
    prompt_tokens = _estimate_tokens(trace_def.query) + 80
    completion_tokens = _estimate_tokens(response)
    total_duration = end_ns - start_ns
    # First ~30% of the trace is reserved for tool calls; the LLM span follows.
    tool_end = start_ns + int(total_duration * 0.3)
    llm_start = tool_end + 1000
    root = mlflow.start_span_no_context(
        name="chat_agent",
        span_type=SpanType.AGENT,
        inputs={"message": trace_def.query, "turn": turn},
        metadata={
            TraceMetadataKey.TRACE_SESSION: versioned_session_id,
            TraceMetadataKey.TRACE_USER: trace_def.session_user or "user",
            DEMO_VERSION_TAG: version,
            DEMO_TRACE_TYPE_TAG: "session",
        },
        start_time_ns=start_ns,
    )
    tool_start = start_ns + 5000
    if trace_def.tools:
        # Split the tool window evenly so every tool span ends after it starts.
        # (Previously each span ended at the fixed `tool_end`, which gave the
        # second and later tool spans a negative duration.)
        per_tool_ns = max((tool_end - tool_start) // len(trace_def.tools), 1)
        for tool in trace_def.tools:
            tool_span = mlflow.start_span_no_context(
                name=tool.name,
                span_type=SpanType.TOOL,
                parent_span=root,
                inputs=tool.input,
                start_time_ns=tool_start,
            )
            tool_span.set_outputs(tool.output)
            tool_span.end(end_time_ns=tool_start + per_tool_ns)
            tool_start += per_tool_ns + 1000
    # Rotate through the demo models per turn so costs are spread across vendors.
    model = _DEMO_MODELS[turn % len(_DEMO_MODELS)]
    llm = mlflow.start_span_no_context(
        name="generate_response",
        span_type=SpanType.LLM,
        parent_span=root,
        inputs={
            "messages": [
                {"role": "system", "content": "You are an MLflow assistant."},
                {"role": "user", "content": trace_def.query},
            ],
            "model": model.name,
        },
        attributes={
            SpanAttributeKey.CHAT_USAGE: {
                "input_tokens": prompt_tokens,
                "output_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
            SpanAttributeKey.MODEL: model.name,
            SpanAttributeKey.MODEL_PROVIDER: model.provider,
            SpanAttributeKey.LLM_COST: _compute_cost(model, prompt_tokens, completion_tokens),
        },
        start_time_ns=llm_start,
    )
    llm.set_outputs({"role": "assistant", "content": response})
    llm.end(end_time_ns=end_ns - 5000)
    root.set_outputs({"response": response})
    root.end(end_time_ns=end_ns)
    return root.trace_id
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/demo/generators/traces.py",
"license": "Apache License 2.0",
"lines": 569,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/demo/test_demo_integration.py | """Integration tests for the demo data framework.
These tests run against a real MLflow tracking server to verify that demo data
is correctly persisted, retrieved, and cleaned up on version bumps.
"""
from pathlib import Path
import pytest
from mlflow import MlflowClient, set_tracking_uri
from mlflow.demo import generate_all_demos
from mlflow.demo.base import DEMO_EXPERIMENT_NAME, DEMO_PROMPT_PREFIX
from mlflow.demo.data import DEMO_PROMPTS
from mlflow.demo.generators.evaluation import (
DEMO_DATASET_BASELINE_SESSION_NAME,
DEMO_DATASET_IMPROVED_SESSION_NAME,
DEMO_DATASET_TRACE_LEVEL_NAME,
EvaluationDemoGenerator,
)
from mlflow.demo.generators.judges import DEMO_JUDGE_PREFIX, JudgesDemoGenerator
from mlflow.demo.generators.prompts import PromptsDemoGenerator
from mlflow.demo.generators.traces import (
DEMO_TRACE_TYPE_TAG,
DEMO_VERSION_TAG,
TracesDemoGenerator,
)
from mlflow.demo.registry import demo_registry
from mlflow.genai.datasets import search_datasets
from mlflow.genai.prompts import load_prompt, search_prompts
from mlflow.genai.scorers.registry import list_scorers
@pytest.fixture
def client(db_uri: str, tmp_path: Path):
    # Point the tracking URI directly at the SQLite database to avoid HTTP
    # overhead during data generation (3,000+ requests per test run).
    set_tracking_uri(db_uri)
    yield MlflowClient(db_uri)
    # Reset the global tracking URI so later tests are unaffected.
    set_tracking_uri(None)
@pytest.fixture
def traces_generator():
    # Yield a generator and restore its class-level `version` afterwards,
    # since some tests bump it to simulate a demo-data schema upgrade.
    generator = TracesDemoGenerator()
    original_version = generator.version
    yield generator
    TracesDemoGenerator.version = original_version


@pytest.fixture
def evaluation_generator():
    generator = EvaluationDemoGenerator()
    original_version = generator.version
    yield generator
    EvaluationDemoGenerator.version = original_version


@pytest.fixture
def prompts_generator():
    generator = PromptsDemoGenerator()
    original_version = generator.version
    yield generator
    PromptsDemoGenerator.version = original_version


@pytest.fixture
def judges_generator():
    generator = JudgesDemoGenerator()
    original_version = generator.version
    yield generator
    JudgesDemoGenerator.version = original_version
def test_generate_all_demos_generates_all_registered(client):
    results = generate_all_demos()
    registered_names = set(demo_registry.list_generators())
    generated_names = {r.feature for r in results}
    assert generated_names == registered_names


def test_generate_all_demos_is_idempotent(client):
    # A second invocation must detect existing demo data and generate nothing.
    results_first = generate_all_demos()
    assert len(results_first) > 0
    results_second = generate_all_demos()
    assert len(results_second) == 0


def test_generate_all_demos_creates_experiment(client):
    generate_all_demos()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    assert experiment is not None
    assert experiment.lifecycle_stage == "active"
def test_version_mismatch_triggers_cleanup_and_regeneration(client, traces_generator):
    result = traces_generator.generate()
    traces_generator.store_version()
    original_entity_count = len(result.entity_ids)
    # Bumping the class version should invalidate the stored demo data...
    TracesDemoGenerator.version = traces_generator.version + 1
    assert traces_generator.is_generated() is False
    # ...and regeneration should yield the same number of entities as before.
    result = traces_generator.generate()
    traces_generator.store_version()
    assert len(result.entity_ids) == original_entity_count
    assert traces_generator.is_generated() is True
def test_traces_creates_on_server(client, traces_generator):
    result = traces_generator.generate()
    traces_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    traces = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    # 34 traces total = 17 per version (v1 + v2).
    assert len(traces) == len(result.entity_ids)
    assert len(traces) == 34


def test_traces_have_expected_span_types(client, traces_generator):
    traces_generator.generate()
    traces_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    traces = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    all_span_names = {span.name for trace in traces for span in trace.data.spans}
    # RAG pipeline spans
    assert "rag_pipeline" in all_span_names
    assert "embed_query" in all_span_names
    assert "retrieve_docs" in all_span_names
    assert "generate_response" in all_span_names
    # Agent, session, and prompt-chain spans
    assert "agent" in all_span_names
    assert "chat_agent" in all_span_names
    assert "prompt_chain" in all_span_names
    assert "render_prompt" in all_span_names


def test_traces_session_metadata(client, traces_generator):
    traces_generator.generate()
    traces_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    traces = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    session_traces = [t for t in traces if t.info.trace_metadata.get("mlflow.trace.session")]
    # 7 session turns per version; 6 distinct versioned session ids (3 x 2 versions).
    assert len(session_traces) == 14
    session_ids = {t.info.trace_metadata.get("mlflow.trace.session") for t in session_traces}
    assert len(session_ids) == 6


def test_traces_version_metadata(client, traces_generator):
    traces_generator.generate()
    traces_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    traces = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    v1_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_VERSION_TAG) == "v1"]
    v2_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_VERSION_TAG) == "v2"]
    assert len(v1_traces) == 17
    assert len(v2_traces) == 17


def test_traces_type_metadata(client, traces_generator):
    traces_generator.generate()
    traces_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    traces = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    rag_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "rag"]
    agent_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "agent"]
    prompt_traces = [
        t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "prompt"
    ]
    session_traces = [
        t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "session"
    ]
    # Per-type counts across both versions: 4 + 4 + 12 + 14 = 34.
    assert len(rag_traces) == 4
    assert len(agent_traces) == 4
    assert len(prompt_traces) == 12
    assert len(session_traces) == 14


def test_traces_delete_removes_all(client, traces_generator):
    traces_generator.generate()
    traces_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    traces_before = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    assert len(traces_before) == 34
    traces_generator.delete_demo()
    traces_after = client.search_traces(locations=[experiment.experiment_id], max_results=200)
    assert len(traces_after) == 0
def test_evaluation_creates_three_datasets(client, evaluation_generator):
    result = evaluation_generator.generate()
    evaluation_generator.store_version()
    assert len(result.entity_ids) == 3  # Three evaluation run IDs
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    trace_level_datasets = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string=f"name = '{DEMO_DATASET_TRACE_LEVEL_NAME}'",
        max_results=10,
    )
    baseline_session_datasets = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string=f"name = '{DEMO_DATASET_BASELINE_SESSION_NAME}'",
        max_results=10,
    )
    improved_session_datasets = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string=f"name = '{DEMO_DATASET_IMPROVED_SESSION_NAME}'",
        max_results=10,
    )
    # Exactly one dataset per name, with the expected names.
    assert len(trace_level_datasets) == 1
    assert len(baseline_session_datasets) == 1
    assert len(improved_session_datasets) == 1
    assert trace_level_datasets[0].name == DEMO_DATASET_TRACE_LEVEL_NAME
    assert baseline_session_datasets[0].name == DEMO_DATASET_BASELINE_SESSION_NAME
    assert improved_session_datasets[0].name == DEMO_DATASET_IMPROVED_SESSION_NAME


def test_evaluation_datasets_have_records(client, evaluation_generator):
    evaluation_generator.generate()
    evaluation_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    trace_level_datasets = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string=f"name = '{DEMO_DATASET_TRACE_LEVEL_NAME}'",
        max_results=10,
    )
    baseline_session_datasets = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string=f"name = '{DEMO_DATASET_BASELINE_SESSION_NAME}'",
        max_results=10,
    )
    improved_session_datasets = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string=f"name = '{DEMO_DATASET_IMPROVED_SESSION_NAME}'",
        max_results=10,
    )
    assert len(trace_level_datasets) == 1
    assert len(baseline_session_datasets) == 1
    assert len(improved_session_datasets) == 1
    trace_level_df = trace_level_datasets[0].to_df()
    baseline_session_df = baseline_session_datasets[0].to_df()
    improved_session_df = improved_session_datasets[0].to_df()
    # Trace-level dataset merges v1 + v2 non-session traces (10 unique queries,
    # merge_records deduplicates by inputs so v2 records overwrite v1)
    assert len(trace_level_df) == 10
    # Session datasets have 7 traces each (v1 and v2)
    assert len(baseline_session_df) == 7
    assert len(improved_session_df) == 7


def test_evaluation_delete_removes_datasets(client, evaluation_generator):
    evaluation_generator.generate()
    evaluation_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    datasets_before = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string="name LIKE 'demo-%'",
        max_results=10,
    )
    assert len(datasets_before) == 3
    evaluation_generator.delete_demo()
    datasets_after = search_datasets(
        experiment_ids=[experiment.experiment_id],
        filter_string="name LIKE 'demo-%'",
        max_results=10,
    )
    assert len(datasets_after) == 0
def test_prompts_creates_on_server(client, prompts_generator):
    result = prompts_generator.generate()
    prompts_generator.store_version()
    prompts = search_prompts(
        filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'",
        max_results=100,
    )
    assert len(prompts) == len(DEMO_PROMPTS)
    # Entity ids encode both prompt and version counts.
    assert any("prompts:" in e for e in result.entity_ids)
    assert any("versions:" in e for e in result.entity_ids)


def test_prompts_have_multiple_versions(client, prompts_generator):
    prompts_generator.generate()
    prompts_generator.store_version()
    for prompt_def in DEMO_PROMPTS:
        # The highest registered version must match the definition's version count.
        expected_versions = len(prompt_def.versions)
        prompt = load_prompt(prompt_def.name, version=expected_versions)
        assert prompt is not None
        assert prompt.version == expected_versions


def test_prompts_have_production_alias(client, prompts_generator):
    prompts_generator.generate()
    prompts_generator.store_version()
    for prompt_def in DEMO_PROMPTS:
        for version_num, version_def in enumerate(prompt_def.versions, start=1):
            # The "production" alias must resolve to the version that declared it.
            if "production" in version_def.aliases:
                prompt = load_prompt(f"prompts:/{prompt_def.name}@production")
                assert prompt.version == version_num


def test_prompts_delete_removes_all(client, prompts_generator):
    prompts_generator.generate()
    prompts_generator.store_version()
    prompts_before = search_prompts(
        filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'",
        max_results=100,
    )
    assert len(prompts_before) == len(DEMO_PROMPTS)
    prompts_generator.delete_demo()
    prompts_after = search_prompts(
        filter_string=f"name LIKE '{DEMO_PROMPT_PREFIX}.%'",
        max_results=100,
    )
    assert len(prompts_after) == 0
def test_judges_creates_on_server(client, judges_generator):
    result = judges_generator.generate()
    judges_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    scorers = list_scorers(experiment_id=experiment.experiment_id)
    demo_judges = [s for s in scorers if s.name.startswith(DEMO_JUDGE_PREFIX)]
    assert len(demo_judges) == 4
    assert "judges:4" in result.entity_ids


def test_judges_have_expected_names(client, judges_generator):
    judges_generator.generate()
    judges_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    scorers = list_scorers(experiment_id=experiment.experiment_id)
    demo_judges = [s for s in scorers if s.name.startswith(DEMO_JUDGE_PREFIX)]
    judge_names = {s.name for s in demo_judges}
    expected_names = {
        f"{DEMO_JUDGE_PREFIX}.relevance",
        f"{DEMO_JUDGE_PREFIX}.correctness",
        f"{DEMO_JUDGE_PREFIX}.groundedness",
        f"{DEMO_JUDGE_PREFIX}.safety",
    }
    assert judge_names == expected_names


def test_judges_delete_removes_all(client, judges_generator):
    judges_generator.generate()
    judges_generator.store_version()
    experiment = client.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
    scorers_before = list_scorers(experiment_id=experiment.experiment_id)
    demo_judges_before = [s for s in scorers_before if s.name.startswith(DEMO_JUDGE_PREFIX)]
    assert len(demo_judges_before) == 4
    judges_generator.delete_demo()
    scorers_after = list_scorers(experiment_id=experiment.experiment_id)
    demo_judges_after = [s for s in scorers_after if s.name.startswith(DEMO_JUDGE_PREFIX)]
    assert len(demo_judges_after) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_demo_integration.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/demo/test_traces_generator.py | import pytest
from mlflow import MlflowClient, get_experiment_by_name, set_experiment
from mlflow.demo.base import DEMO_EXPERIMENT_NAME, DemoFeature, DemoResult
from mlflow.demo.generators.traces import (
DEMO_TRACE_TYPE_TAG,
DEMO_VERSION_TAG,
TracesDemoGenerator,
)
@pytest.fixture
def traces_generator():
generator = TracesDemoGenerator()
original_version = generator.version
yield generator
TracesDemoGenerator.version = original_version
def test_generator_attributes():
generator = TracesDemoGenerator()
assert generator.name == DemoFeature.TRACES
assert generator.version == 1
def test_data_exists_false_when_no_experiment():
generator = TracesDemoGenerator()
assert generator._data_exists() is False
def test_data_exists_false_when_experiment_empty():
set_experiment(DEMO_EXPERIMENT_NAME)
generator = TracesDemoGenerator()
assert generator._data_exists() is False
def test_generate_creates_traces():
generator = TracesDemoGenerator()
result = generator.generate()
assert isinstance(result, DemoResult)
assert result.feature == DemoFeature.TRACES
assert len(result.entity_ids) > 0
assert "experiments" in result.navigation_url
def test_generate_creates_experiment():
generator = TracesDemoGenerator()
generator.generate()
experiment = get_experiment_by_name(DEMO_EXPERIMENT_NAME)
assert experiment is not None
assert experiment.lifecycle_stage == "active"
def test_data_exists_true_after_generate():
generator = TracesDemoGenerator()
assert generator._data_exists() is False
generator.generate()
assert generator._data_exists() is True
def test_delete_demo_removes_traces():
generator = TracesDemoGenerator()
generator.generate()
assert generator._data_exists() is True
generator.delete_demo()
assert generator._data_exists() is False
def test_traces_have_expected_structure():
generator = TracesDemoGenerator()
generator.generate()
experiment = get_experiment_by_name(DEMO_EXPERIMENT_NAME)
client = MlflowClient()
traces = client.search_traces(locations=[experiment.experiment_id], max_results=100)
assert len(traces) > 0
all_span_names = set()
for trace in traces:
all_span_names.update(span.name for span in trace.data.spans)
assert "rag_pipeline" in all_span_names
assert "agent" in all_span_names
assert "chat_agent" in all_span_names
assert "prompt_chain" in all_span_names
assert "render_prompt" in all_span_names
assert "embed_query" in all_span_names
assert "retrieve_docs" in all_span_names
assert "generate_response" in all_span_names
def test_traces_have_version_metadata():
generator = TracesDemoGenerator()
generator.generate()
experiment = get_experiment_by_name(DEMO_EXPERIMENT_NAME)
client = MlflowClient()
traces = client.search_traces(locations=[experiment.experiment_id], max_results=100)
v1_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_VERSION_TAG) == "v1"]
v2_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_VERSION_TAG) == "v2"]
# 2 RAG + 2 agent + 6 prompt + 7 session = 17 per version
assert len(v1_traces) == 17
assert len(v2_traces) == 17
assert len(traces) == 34
def test_traces_have_type_metadata():
generator = TracesDemoGenerator()
generator.generate()
experiment = get_experiment_by_name(DEMO_EXPERIMENT_NAME)
client = MlflowClient()
traces = client.search_traces(locations=[experiment.experiment_id], max_results=50)
rag_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "rag"]
agent_traces = [t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "agent"]
prompt_traces = [
t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "prompt"
]
session_traces = [
t for t in traces if t.info.trace_metadata.get(DEMO_TRACE_TYPE_TAG) == "session"
]
# 2 RAG per version = 4 total
# 2 agent per version = 4 total
# 6 prompt per version = 12 total
# 7 session per version = 14 total
assert len(rag_traces) == 4
assert len(agent_traces) == 4
assert len(prompt_traces) == 12
assert len(session_traces) == 14
def test_is_generated_checks_version(traces_generator):
traces_generator.generate()
traces_generator.store_version()
assert traces_generator.is_generated() is True
TracesDemoGenerator.version = 99
assert traces_generator.is_generated() is False
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_traces_generator.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/entities/test_job_status.py | import pytest
from mlflow.entities._job_status import JobStatus
from mlflow.protos.jobs_pb2 import JobStatus as ProtoJobStatus
@pytest.mark.parametrize(
("status", "expected_proto"),
[
(JobStatus.PENDING, ProtoJobStatus.JOB_STATUS_PENDING),
(JobStatus.RUNNING, ProtoJobStatus.JOB_STATUS_IN_PROGRESS),
(JobStatus.SUCCEEDED, ProtoJobStatus.JOB_STATUS_COMPLETED),
(JobStatus.FAILED, ProtoJobStatus.JOB_STATUS_FAILED),
(JobStatus.TIMEOUT, ProtoJobStatus.JOB_STATUS_FAILED),
(JobStatus.CANCELED, ProtoJobStatus.JOB_STATUS_CANCELED),
],
)
def test_job_status_to_proto(status, expected_proto):
assert status.to_proto() == expected_proto
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/entities/test_job_status.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/assistant/skill_installer.py | """
Manage skill installation
Skills are maintained in the mlflow/assistant/skills subtree in the MLflow repository,
which points to the https://github.com/mlflow/skills repository.
"""
import shutil
from importlib import resources
from pathlib import Path
SKILL_MANIFEST_FILE = "SKILL.md"
def _find_skill_directories(path: Path) -> list[Path]:
return [item.parent for item in path.rglob(SKILL_MANIFEST_FILE)]
def install_skills(destination_path: Path) -> list[str]:
"""
Install MLflow skills to the specified destination path (e.g., ~/.claude/skills).
Args:
destination_path: The path where skills should be installed.
Returns:
A list of installed skill names.
"""
destination_dir = destination_path.expanduser()
skills_pkg = resources.files("mlflow.assistant.skills")
installed_skills = []
for item in skills_pkg.iterdir():
if not item.is_dir():
continue
skill_manifest = item.joinpath(SKILL_MANIFEST_FILE)
if not skill_manifest.is_file():
continue
# Use resources.as_file() on the manifest to get a real path
with resources.as_file(skill_manifest) as manifest_path:
skill_dir = manifest_path.parent
target_dir = destination_dir / skill_dir.name
destination_dir.mkdir(parents=True, exist_ok=True)
shutil.copytree(skill_dir, target_dir, dirs_exist_ok=True)
installed_skills.append(skill_dir.name)
return sorted(installed_skills)
def list_installed_skills(destination_path: Path) -> list[str]:
"""
List installed skills in the specified destination path.
Args:
destination_path: The path where skills are installed.
Returns:
A list of installed skill names.
"""
if not destination_path.exists():
return []
return sorted(d.name for d in _find_skill_directories(destination_path))
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/skill_installer.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/assistant/test_skill_installer.py | from mlflow.assistant.skill_installer import install_skills, list_installed_skills
def test_install_skills_copies_to_destination(tmp_path):
destination = tmp_path / "skills"
installed = install_skills(destination)
assert destination.exists()
assert "agent-evaluation" in installed
assert (destination / "agent-evaluation" / "SKILL.md").exists()
def test_install_skills_overwrites_existing(tmp_path):
destination = tmp_path / "skills"
destination.mkdir(parents=True)
install_skills(destination)
assert (destination / "agent-evaluation" / "SKILL.md").exists()
def test_list_installed_skills(tmp_path):
# Create mock installed skills
skill1 = tmp_path / "alpha-skill"
skill1.mkdir()
(skill1 / "SKILL.md").touch()
skill2 = tmp_path / "beta-skill"
skill2.mkdir()
(skill2 / "SKILL.md").touch()
skills = list_installed_skills(tmp_path)
assert skills == ["alpha-skill", "beta-skill"] # Sorted
def test_list_installed_skills_empty(tmp_path):
skills = list_installed_skills(tmp_path)
assert skills == []
def test_list_installed_skills_nonexistent_path(tmp_path):
nonexistent = tmp_path / "does-not-exist"
skills = list_installed_skills(nonexistent)
assert skills == []
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/assistant/test_skill_installer.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/demo/base.py | import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from mlflow.tracking._tracking_service.utils import _get_store
_logger = logging.getLogger(__name__)
DEMO_EXPERIMENT_NAME = "MLflow Demo"
DEMO_PROMPT_PREFIX = "mlflow-demo"
class DemoFeature(str, Enum):
"""Enumeration of demo features that can be generated."""
TRACES = "traces"
EVALUATION = "evaluation"
PROMPTS = "prompts"
JUDGES = "judges"
@dataclass
class DemoResult:
"""Result returned by a demo generator after creating demo data.
Attributes:
feature: The demo feature that was generated. Use DemoFeature enum values.
entity_ids: List of identifiers for created entities (e.g., trace IDs, dataset names).
navigation_url: URL path to navigate to view the demo data in the UI.
"""
feature: DemoFeature
entity_ids: list[str]
navigation_url: str
class BaseDemoGenerator(ABC):
"""Abstract base class for demo data generators.
Subclasses must define a `name` class attribute and implement the `generate()`
and `_data_exists()` methods. Generators are registered with the `demo_registry`
and invoked during server startup to populate demo data.
Versioning:
Each generator has a `version` class attribute (default: 1). When demo data
is generated, the version is stored as a tag on the MLflow Demo experiment.
On subsequent startups, if the stored version doesn't match the generator's
current version, stale data is cleaned up and regenerated.
Bump the version when making breaking changes to demo data format.
Example:
class MyDemoGenerator(BaseDemoGenerator):
name = DemoFeature.TRACES
version = 1 # Bump when demo format changes
def generate(self) -> DemoResult:
# Create demo data using MLflow APIs
return DemoResult(...)
def _data_exists(self) -> bool:
# Check if demo data exists (version handled by base class)
return True/False
def delete_demo(self) -> None:
# Optional: delete demo data (called on version mismatch or via UI)
pass
"""
name: DemoFeature | None = None
version: int = 1
def __init__(self):
if self.name is None:
raise ValueError(f"{self.__class__.__name__} must define 'name' class attribute")
@abstractmethod
def generate(self) -> DemoResult:
"""Generate demo data for this feature. Returns a DemoResult with details."""
@abstractmethod
def _data_exists(self) -> bool:
"""Check if demo data exists (regardless of version)."""
def delete_demo(self) -> None:
"""Delete demo data created by this generator.
Called automatically when version mismatches on startup, or can be called
directly via API for user-initiated deletion. Override to implement cleanup.
"""
def is_generated(self) -> bool:
"""Check if demo data exists with a matching version.
Returns True only if data exists AND the stored version matches the current
generator version. If version mismatches, calls delete_demo() and
returns False to trigger regeneration.
"""
if not self._data_exists():
return False
stored_version = self._get_stored_version()
if stored_version is None or stored_version != self.version:
self.delete_demo()
return False
return True
def _get_stored_version(self) -> int | None:
"""Get the stored version for this generator from experiment tags."""
store = _get_store()
try:
experiment = store.get_experiment_by_name(DEMO_EXPERIMENT_NAME)
if experiment is None:
return None
version_tag = experiment.tags.get(f"mlflow.demo.version.{self.name}")
return int(version_tag) if version_tag else None
except Exception:
_logger.debug("Failed to get stored version for %s", self.name, exc_info=True)
return None
def store_version(self) -> None:
"""Store the current version in experiment tags. Called after successful generation."""
from mlflow.entities import ExperimentTag
store = _get_store()
if experiment := store.get_experiment_by_name(DEMO_EXPERIMENT_NAME):
tag = ExperimentTag(
key=f"mlflow.demo.version.{self.name}",
value=str(self.version),
)
store.set_experiment_tag(experiment.experiment_id, tag)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/demo/base.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/demo/registry.py | from __future__ import annotations
from typing import TYPE_CHECKING
from mlflow.demo.base import DemoFeature
if TYPE_CHECKING:
from mlflow.demo.base import BaseDemoGenerator
class DemoRegistry:
"""Registry for demo data generators.
Provides registration and lookup of BaseDemoGenerator subclasses by name.
The global `demo_registry` instance is used by `generate_all_demos()` to
discover and run all registered generators.
"""
def __init__(self):
self._generators: dict[DemoFeature, type[BaseDemoGenerator]] = {}
def register(self, generator_cls: type[BaseDemoGenerator]) -> None:
name = generator_cls.name
if not name:
raise ValueError(f"{generator_cls.__name__} must define 'name' class attribute")
if name in self._generators:
raise ValueError(f"Generator '{name}' is already registered")
self._generators[name] = generator_cls
def get(self, name: DemoFeature) -> type[BaseDemoGenerator]:
if name not in self._generators:
available = list(self._generators.keys())
raise ValueError(f"Generator '{name}' not found. Available: {available}")
return self._generators[name]
def list_generators(self) -> list[DemoFeature]:
return list(self._generators.keys())
def __contains__(self, name: DemoFeature) -> bool:
return name in self._generators
demo_registry = DemoRegistry()
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/demo/registry.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/demo/test_base.py | import pytest
from mlflow.demo.base import (
DEMO_EXPERIMENT_NAME,
DEMO_PROMPT_PREFIX,
BaseDemoGenerator,
DemoFeature,
DemoResult,
)
def test_demo_feature_enum():
assert DemoFeature.TRACES == "traces"
assert DemoFeature.EVALUATION == "evaluation"
assert isinstance(DemoFeature.TRACES, str)
def test_demo_result_with_enum():
result = DemoResult(
feature=DemoFeature.TRACES,
entity_ids=["trace-1"],
navigation_url="/traces",
)
assert result.feature == DemoFeature.TRACES
assert result.feature == "traces"
def test_demo_result_fields():
result = DemoResult(
feature=DemoFeature.TRACES,
entity_ids=["a", "b"],
navigation_url="/test",
)
assert result.feature == DemoFeature.TRACES
assert result.entity_ids == ["a", "b"]
assert result.navigation_url == "/test"
def test_constants():
assert DEMO_EXPERIMENT_NAME == "MLflow Demo"
assert DEMO_PROMPT_PREFIX == "mlflow-demo"
def test_generator_requires_name():
class NoNameGenerator(BaseDemoGenerator):
def generate(self):
pass
def _data_exists(self):
return False
with pytest.raises(ValueError, match="must define 'name'"):
NoNameGenerator()
def test_generator_with_name(stub_generator):
assert stub_generator.name == DemoFeature.TRACES
def test_generator_default_version():
class VersionedGenerator(BaseDemoGenerator):
name = DemoFeature.TRACES
def generate(self):
pass
def _data_exists(self):
return False
generator = VersionedGenerator()
assert generator.version == 1
def test_generator_custom_version():
class CustomVersionGenerator(BaseDemoGenerator):
name = DemoFeature.EVALUATION
version = 5
def generate(self):
pass
def _data_exists(self):
return False
generator = CustomVersionGenerator()
assert generator.version == 5
def test_generator_generate_returns_result(stub_generator):
result = stub_generator.generate()
assert isinstance(result, DemoResult)
assert result.feature == DemoFeature.TRACES
assert stub_generator.generate_called
def test_is_generated_false_when_no_data(stub_generator):
stub_generator.data_exists_value = False
assert stub_generator.is_generated() is False
def test_is_generated_true_when_data_and_version_match(stub_generator):
stub_generator.data_exists_value = True
stub_generator.stored_version_value = 1
stub_generator.version = 1
assert stub_generator.is_generated() is True
def test_is_generated_false_when_version_mismatch(stub_generator):
stub_generator.data_exists_value = True
stub_generator.stored_version_value = 1
stub_generator.version = 2
assert stub_generator.is_generated() is False
def test_is_generated_calls_delete_demo_on_version_mismatch(stub_generator):
stub_generator.data_exists_value = True
stub_generator.stored_version_value = 1
stub_generator.version = 2
stub_generator.is_generated()
assert stub_generator.delete_demo_called is True
def test_is_generated_no_delete_when_no_data(stub_generator):
stub_generator.data_exists_value = False
stub_generator.is_generated()
assert stub_generator.delete_demo_called is False
def test_is_generated_false_when_no_stored_version(stub_generator):
stub_generator.data_exists_value = True
stub_generator.stored_version_value = None
assert stub_generator.is_generated() is False
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_base.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/demo/test_generate.py | import threading
from unittest import mock
from mlflow.demo import generate_all_demos
from mlflow.demo.base import BaseDemoGenerator, DemoFeature, DemoResult
from mlflow.environment_variables import MLFLOW_WORKSPACE
from mlflow.utils.workspace_context import (
clear_server_request_workspace,
get_request_workspace,
set_server_request_workspace,
)
def test_generate_all_demos_calls_generators(stub_generator, another_stub_generator):
with mock.patch("mlflow.demo.demo_registry") as mock_registry:
mock_registry.list_generators.return_value = ["stub", "another"]
mock_registry.get.side_effect = lambda name: {
"stub": lambda: stub_generator,
"another": lambda: another_stub_generator,
}[name]
results = generate_all_demos()
assert len(results) == 2
assert all(isinstance(r, DemoResult) for r in results)
def test_generate_all_demos_skips_existing(stub_generator):
stub_generator.data_exists_value = True
stub_generator.stored_version_value = stub_generator.version
with mock.patch("mlflow.demo.demo_registry") as mock_registry:
mock_registry.list_generators.return_value = ["stub"]
mock_registry.get.return_value = lambda: stub_generator
results = generate_all_demos()
assert len(results) == 0
assert not stub_generator.generate_called
def test_generate_all_demos_empty_registry():
with mock.patch("mlflow.demo.demo_registry") as mock_registry:
mock_registry.list_generators.return_value = []
results = generate_all_demos()
assert results == []
def test_generate_all_demos_stores_version(stub_generator):
stub_generator.data_exists_value = False
stub_generator.stored_version_value = None
with mock.patch("mlflow.demo.demo_registry") as mock_registry:
mock_registry.list_generators.return_value = ["stub"]
mock_registry.get.return_value = lambda: stub_generator
generate_all_demos()
assert stub_generator.stored_version_value == stub_generator.version
def test_generate_all_demos_regenerates_on_version_mismatch(stub_generator):
stub_generator.data_exists_value = True
stub_generator.stored_version_value = 1
stub_generator.version = 2
with mock.patch("mlflow.demo.demo_registry") as mock_registry:
mock_registry.list_generators.return_value = ["stub"]
mock_registry.get.return_value = lambda: stub_generator
results = generate_all_demos()
assert len(results) == 1
assert stub_generator.generate_called
assert stub_generator.delete_demo_called
assert stub_generator.stored_version_value == 2
def test_generate_all_demos_propagates_workspace_to_child_threads():
workspace_seen_in_thread = [None]
class ThreadSpawningGenerator(BaseDemoGenerator):
name = DemoFeature.TRACES
def generate(self) -> DemoResult:
def _worker():
workspace_seen_in_thread[0] = get_request_workspace()
t = threading.Thread(target=_worker)
t.start()
t.join()
return DemoResult(
feature=self.name,
entity_ids=["t1"],
navigation_url="/test",
)
def _data_exists(self) -> bool:
return False
def store_version(self) -> None:
pass
generator = ThreadSpawningGenerator()
# Simulate server middleware: set workspace via ContextVar only (no env var)
set_server_request_workspace("test-workspace")
try:
assert MLFLOW_WORKSPACE.get_raw() is None
with mock.patch("mlflow.demo.demo_registry") as mock_registry:
mock_registry.list_generators.return_value = ["traces"]
mock_registry.get.return_value = lambda: generator
generate_all_demos()
assert workspace_seen_in_thread[0] == "test-workspace"
finally:
clear_server_request_workspace()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_generate.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/demo/test_registry.py | import pytest
from mlflow.demo.base import BaseDemoGenerator, DemoFeature
def test_register_and_get(fresh_registry, stub_generator):
fresh_registry.register(stub_generator)
assert fresh_registry.get(DemoFeature.TRACES) is stub_generator
def test_register_duplicate_raises(fresh_registry, stub_generator):
fresh_registry.register(stub_generator)
with pytest.raises(ValueError, match="already registered"):
fresh_registry.register(stub_generator)
def test_get_unknown_raises(fresh_registry):
with pytest.raises(ValueError, match="not found"):
fresh_registry.get("unknown")
def test_list_generators(fresh_registry, stub_generator, another_stub_generator):
assert fresh_registry.list_generators() == []
fresh_registry.register(stub_generator)
assert fresh_registry.list_generators() == [DemoFeature.TRACES]
fresh_registry.register(another_stub_generator)
assert set(fresh_registry.list_generators()) == {DemoFeature.TRACES, DemoFeature.EVALUATION}
def test_contains(fresh_registry, stub_generator):
assert DemoFeature.TRACES not in fresh_registry
fresh_registry.register(stub_generator)
assert DemoFeature.TRACES in fresh_registry
def test_register_requires_name(fresh_registry):
class NoNameGenerator(BaseDemoGenerator):
def generate(self):
pass
def _data_exists(self):
return False
with pytest.raises(ValueError, match="must define 'name'"):
fresh_registry.register(NoNameGenerator)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_registry.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/utils/test_providers.py | from unittest import mock
from mlflow.utils.providers import (
_normalize_provider,
get_all_providers,
get_models,
)
def test_normalize_provider_normalizes_vertex_ai_variants():
assert _normalize_provider("vertex_ai") == "vertex_ai"
assert _normalize_provider("vertex_ai-anthropic") == "vertex_ai"
assert _normalize_provider("vertex_ai-llama_models") == "vertex_ai"
assert _normalize_provider("vertex_ai-mistral") == "vertex_ai"
def test_normalize_provider_does_not_normalize_other_providers():
assert _normalize_provider("openai") == "openai"
assert _normalize_provider("anthropic") == "anthropic"
assert _normalize_provider("bedrock") == "bedrock"
assert _normalize_provider("gemini") == "gemini"
def test_get_all_providers_consolidates_vertex_ai_variants():
with mock.patch("mlflow.utils.providers._get_model_cost") as mock_model_cost:
mock_model_cost.return_value = {
"gpt-4o": {"litellm_provider": "openai", "mode": "chat"},
"claude-3-5-sonnet": {"litellm_provider": "anthropic", "mode": "chat"},
"gemini-1.5-pro": {"litellm_provider": "vertex_ai", "mode": "chat"},
"vertex_ai/meta/llama-4-scout": {
"litellm_provider": "vertex_ai-llama_models",
"mode": "chat",
},
"vertex_ai/claude-3-5-sonnet": {
"litellm_provider": "vertex_ai-anthropic",
"mode": "chat",
},
}
providers = get_all_providers()
# vertex_ai-* variants should be consolidated into vertex_ai
assert "vertex_ai" in providers
assert "vertex_ai-llama_models" not in providers
assert "vertex_ai-anthropic" not in providers
assert "openai" in providers
assert "anthropic" in providers
def test_get_models_normalizes_vertex_ai_provider_and_strips_prefix():
with mock.patch("mlflow.utils.providers._get_model_cost") as mock_model_cost:
mock_model_cost.return_value = {
"vertex_ai/meta/llama-4-scout-17b-16e-instruct-maas": {
"litellm_provider": "vertex_ai-llama_models",
"mode": "chat",
"supports_function_calling": True,
},
"vertex_ai/claude-3-5-sonnet": {
"litellm_provider": "vertex_ai-anthropic",
"mode": "chat",
"supports_function_calling": True,
},
"gemini-1.5-pro": {
"litellm_provider": "vertex_ai",
"mode": "chat",
"supports_function_calling": True,
},
}
models = get_models(provider="vertex_ai")
assert len(models) == 3
# Check that all providers are normalized to vertex_ai
for model in models:
assert model["provider"] == "vertex_ai"
# Check that vertex_ai/ prefix is stripped from model names
model_names = [m["model"] for m in models]
assert "meta/llama-4-scout-17b-16e-instruct-maas" in model_names
assert "claude-3-5-sonnet" in model_names
assert "gemini-1.5-pro" in model_names
# Ensure the original prefixed names are not present
assert "vertex_ai/meta/llama-4-scout-17b-16e-instruct-maas" not in model_names
assert "vertex_ai/claude-3-5-sonnet" not in model_names
def test_get_models_filters_by_consolidated_provider():
with mock.patch("mlflow.utils.providers._get_model_cost") as mock_model_cost:
mock_model_cost.return_value = {
"gpt-4o": {"litellm_provider": "openai", "mode": "chat"},
"vertex_ai/meta/llama-4-scout": {
"litellm_provider": "vertex_ai-llama_models",
"mode": "chat",
},
}
# Filtering by vertex_ai should include vertex_ai-* variants
vertex_models = get_models(provider="vertex_ai")
assert len(vertex_models) == 1
assert vertex_models[0]["model"] == "meta/llama-4-scout"
# Filtering by openai should not include vertex_ai models
openai_models = get_models(provider="openai")
assert len(openai_models) == 1
assert openai_models[0]["model"] == "gpt-4o"
def test_get_models_does_not_modify_other_providers():
with mock.patch("mlflow.utils.providers._get_model_cost") as mock_model_cost:
mock_model_cost.return_value = {
"gpt-4o": {
"litellm_provider": "openai",
"mode": "chat",
"supports_function_calling": True,
},
"claude-3-5-sonnet": {
"litellm_provider": "anthropic",
"mode": "chat",
"supports_function_calling": True,
},
}
models = get_models()
openai_model = next(m for m in models if m["provider"] == "openai")
assert openai_model["model"] == "gpt-4o"
anthropic_model = next(m for m in models if m["provider"] == "anthropic")
assert anthropic_model["model"] == "claude-3-5-sonnet"
def test_get_models_dedupes_models_after_normalization():
with mock.patch("mlflow.utils.providers._get_model_cost") as mock_model_cost:
# Same model appearing under different vertex_ai variants should be deduped
mock_model_cost.return_value = {
"gemini-3-flash-preview": {
"litellm_provider": "vertex_ai",
"mode": "chat",
"supports_function_calling": True,
},
"vertex_ai/gemini-3-flash-preview": {
"litellm_provider": "vertex_ai-chat-models",
"mode": "chat",
"supports_function_calling": True,
},
}
models = get_models(provider="vertex_ai")
# Should only have one gemini-3-flash-preview, not two
model_names = [m["model"] for m in models]
assert model_names.count("gemini-3-flash-preview") == 1
assert len(models) == 1
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/utils/test_providers.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/utils/telemetry_utils.py | from __future__ import annotations
import logging
_logger = logging.getLogger(__name__)
def _record_judge_model_usage_success_databricks_telemetry(
*,
request_id: str | None,
model_provider: str,
endpoint_name: str,
num_prompt_tokens: int | None,
num_completion_tokens: int | None,
) -> None:
try:
from databricks.agents.telemetry import record_judge_model_usage_success
except ImportError:
_logger.debug(
"Failed to import databricks.agents.telemetry.record_judge_model_usage_success; "
"databricks-agents needs to be installed."
)
return
from mlflow.tracking.fluent import _get_experiment_id
from mlflow.utils.databricks_utils import get_job_id, get_job_run_id, get_workspace_id
record_judge_model_usage_success(
request_id=request_id,
experiment_id=_get_experiment_id(),
job_id=get_job_id(),
job_run_id=get_job_run_id(),
workspace_id=get_workspace_id(),
model_provider=model_provider,
endpoint_name=endpoint_name,
num_prompt_tokens=num_prompt_tokens,
num_completion_tokens=num_completion_tokens,
)
def _record_judge_model_usage_failure_databricks_telemetry(
*,
model_provider: str,
endpoint_name: str,
error_code: str,
error_message: str,
) -> None:
try:
from databricks.agents.telemetry import record_judge_model_usage_failure
except ImportError:
_logger.debug(
"Failed to import databricks.agents.telemetry.record_judge_model_usage_failure; "
"databricks-agents needs to be installed."
)
return
from mlflow.tracking.fluent import _get_experiment_id
from mlflow.utils.databricks_utils import get_job_id, get_job_run_id, get_workspace_id
record_judge_model_usage_failure(
experiment_id=_get_experiment_id(),
job_id=get_job_id(),
job_run_id=get_job_run_id(),
workspace_id=get_workspace_id(),
model_provider=model_provider,
endpoint_name=endpoint_name,
error_code=error_code,
error_message=error_message,
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/utils/telemetry_utils.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/optimizers/gepa.py | """GEPA alignment optimizer implementation."""
import logging
from typing import Any, Callable, Collection
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.optimizers.dspy import DSPyAlignmentOptimizer
from mlflow.genai.judges.optimizers.dspy_utils import create_gepa_metric_adapter
from mlflow.protos.databricks_pb2 import INTERNAL_ERROR
from mlflow.utils.annotations import experimental
# Import dspy - raise exception if not installed
try:
import dspy
except ImportError:
raise MlflowException(
"DSPy library is required but not installed. Please install it with: `pip install dspy`",
error_code=INTERNAL_ERROR,
)
_logger = logging.getLogger(__name__)
@experimental(version="3.8.0")
class GEPAAlignmentOptimizer(DSPyAlignmentOptimizer):
    """
    GEPA (Genetic-Pareto) alignment optimizer for judges.

    Uses DSPy's GEPA algorithm to optimize judge instructions through
    genetic-pareto optimization, learning from human feedback in traces.

    GEPA uses iterative refinement to improve text components like judge instructions
    by reflecting on system behavior and proposing improvements based on human feedback.

    Args:
        model: Model to use for DSPy/GEPA optimization. If None, uses get_default_model().
        max_metric_calls: Maximum number of evaluation calls during optimization.
            Higher values may lead to better results but increase optimization time.
            If None (default), automatically set to 4x the number of training examples.
            This ensures sufficient budget for initial evaluation plus reflection iterations.
        gepa_kwargs: Additional keyword arguments to pass directly to dspy.GEPA().
            Useful for accessing advanced GEPA features not directly exposed
            through MLflow's GEPA interface.
            Note: Parameters already handled by MLflow's GEPA class will be overridden by the
            direct parameters and should not be passed through gepa_kwargs. List of
            predefined params:

            - max_metric_calls
            - metric

    Example:
        .. code-block:: python

            import mlflow
            from mlflow.genai.judges import make_judge
            from mlflow.genai.judges.optimizers import GEPAAlignmentOptimizer

            # Create a judge
            judge = make_judge(
                name="relevance",
                instructions="Evaluate if the {{ outputs }} is relevant to {{ inputs }}.",
                model="openai:/gpt-4o-mini",
            )

            # Get traces with human feedback for this judge
            traces = mlflow.search_traces()

            # Optimize the judge instructions
            optimizer = GEPAAlignmentOptimizer(
                model="openai:/gpt-4o",
                max_metric_calls=50,
            )
            optimized_judge = optimizer.align(judge, traces)
            print(optimized_judge.instructions)
    """

    # When max_metric_calls is unset, budget this many metric calls per
    # training example (initial evaluation + reflection iterations).
    _DEFAULT_BUDGET_MULTIPLIER: int = 4

    def __init__(
        self,
        model: str | None = None,
        max_metric_calls: int | None = None,
        gepa_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        """
        Args:
            model: Model to use for DSPy/GEPA optimization. If None, uses get_default_model().
            max_metric_calls: Maximum number of evaluation calls during optimization.
                Higher values may lead to better results but increase optimization time.
                If None (default), automatically set to 4x the number of training examples.
            gepa_kwargs: Additional keyword arguments to pass directly to dspy.GEPA().
            kwargs: Additional keyword arguments passed to parent class
        """
        super().__init__(model=model, **kwargs)
        self._max_metric_calls = max_metric_calls
        self._gepa_kwargs = gepa_kwargs or {}

    def _dspy_optimize(
        self,
        program: "dspy.Predict",
        examples: Collection["dspy.Example"],
        metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
    ) -> "dspy.Predict":
        """Run GEPA optimization of ``program`` over ``examples``.

        Args:
            program: DSPy predictor (the judge) to optimize.
            examples: Training examples built from human-labeled traces.
            metric_fn: Metric judging agreement between program output and label.

        Returns:
            The GEPA-optimized DSPy predictor.
        """
        # Adapt MLflow's metric signature to the shape dspy.GEPA expects.
        gepa_metric_adapter = create_gepa_metric_adapter(metric_fn)

        # Reuse the LM currently configured in DSPy settings for GEPA's
        # reflection step.
        reflection_lm = dspy.settings.lm

        # Calculate max_metric_calls if not explicitly set
        # GEPA needs at least num_examples calls for initial evaluation,
        # plus additional calls for reflection iterations
        max_metric_calls = self._max_metric_calls
        if max_metric_calls is None:
            max_metric_calls = len(examples) * self._DEFAULT_BUDGET_MULTIPLIER
            self._logger.info(
                f"max_metric_calls not specified, using {self._DEFAULT_BUDGET_MULTIPLIER}x "
                f"number of examples: {max_metric_calls}"
            )

        # User kwargs are merged first so the reserved keys below always win.
        optimizer_kwargs = self._gepa_kwargs | {
            "metric": gepa_metric_adapter,
            "max_metric_calls": max_metric_calls,
            "reflection_lm": reflection_lm,
        }
        optimizer = dspy.GEPA(**optimizer_kwargs)

        self._logger.info(
            f"Starting GEPA optimization with {len(examples)} examples "
            f"and max {max_metric_calls} metric calls"
        )
        result = optimizer.compile(
            student=program,
            trainset=examples,
        )
        self._logger.info("GEPA optimization completed")
        return result
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/gepa.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/genai/judges/optimizers/test_gepa.py | from importlib import reload
from unittest.mock import MagicMock, patch
import dspy
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.optimizers import GEPAAlignmentOptimizer
from tests.genai.judges.optimizers.conftest import create_mock_judge_invocator
def test_dspy_optimize_no_dspy():
    """Reloading the gepa module without dspy installed raises MlflowException."""
    # Since dspy import is now at module level, we need to test this differently
    # The error should be raised when importing the module, not when calling methods
    def _reload_module():
        import mlflow.genai.judges.optimizers.gepa as gepa_module

        reload(gepa_module)

    # Mapping a module name to None in sys.modules makes `import dspy` fail.
    with patch.dict("sys.modules", {"dspy": None}):
        with pytest.raises(MlflowException, match="DSPy library is required"):
            _reload_module()
def test_alignment_results(mock_judge, sample_traces_with_assessments):
    """align() returns a judge carrying the GEPA-optimized instructions."""
    mock_gepa = MagicMock()
    mock_compiled_program = dspy.Predict("inputs, outputs -> result, rationale")
    mock_compiled_program.signature.instructions = (
        "Optimized instructions with {{inputs}} and {{outputs}}"
    )
    mock_gepa.compile.return_value = mock_compiled_program

    with (
        patch("dspy.GEPA", MagicMock(), create=True) as mock_gepa_class,
        patch("dspy.LM", MagicMock()),
    ):
        mock_gepa_class.return_value = mock_gepa
        optimizer = GEPAAlignmentOptimizer()

        # Mock get_min_traces_required to work with 5 traces from fixture
        with patch.object(GEPAAlignmentOptimizer, "get_min_traces_required", return_value=5):
            result = optimizer.align(mock_judge, sample_traces_with_assessments)

        assert result is not None
        assert result.model == mock_judge.model
        # The judge instructions should include the optimized instructions
        assert "Optimized instructions with {{inputs}} and {{outputs}}" in result.instructions
        # Instructions already contain {{inputs}} and {{outputs}}, so fields section is not appended
        assert "Inputs for assessment:" not in result.instructions
def test_custom_gepa_parameters(mock_judge, sample_traces_with_assessments):
    """User gepa_kwargs are forwarded to dspy.GEPA; reserved keys stay MLflow-controlled."""
    mock_gepa = MagicMock()
    mock_compiled_program = dspy.Predict("inputs, outputs -> result, rationale")
    mock_compiled_program.signature.instructions = (
        "Optimized instructions with {{inputs}} and {{outputs}}"
    )
    mock_gepa.compile.return_value = mock_compiled_program

    with (
        patch("dspy.GEPA", create=True) as mock_gepa_class,
        patch("dspy.LM", MagicMock()),
    ):
        mock_gepa_class.return_value = mock_gepa
        optimizer = GEPAAlignmentOptimizer(
            max_metric_calls=50,
            gepa_kwargs={
                "candidate_pool_size": 10,
                "num_threads": 4,
            },
        )
        with patch.object(GEPAAlignmentOptimizer, "get_min_traces_required", return_value=5):
            optimizer.align(mock_judge, sample_traces_with_assessments)

        # Verify GEPA was initialized with custom parameters
        mock_gepa_class.assert_called_once()
        call_kwargs = mock_gepa_class.call_args.kwargs
        # max_metric_calls comes from the direct constructor parameter
        assert call_kwargs["max_metric_calls"] == 50
        # gepa_kwargs pass through non-critical parameters
        assert call_kwargs["candidate_pool_size"] == 10
        assert call_kwargs["num_threads"] == 4
        # metric is controlled internally, not from gepa_kwargs
        assert callable(call_kwargs["metric"])
def test_default_parameters(mock_judge, sample_traces_with_assessments):
    """With no options set, GEPA gets exactly metric, max_metric_calls (4x), reflection_lm."""
    mock_gepa = MagicMock()
    mock_compiled_program = dspy.Predict("inputs, outputs -> result, rationale")
    mock_compiled_program.signature.instructions = (
        "Optimized instructions with {{inputs}} and {{outputs}}"
    )
    mock_gepa.compile.return_value = mock_compiled_program

    with (
        patch("dspy.GEPA", create=True) as mock_gepa_class,
        patch("dspy.LM", MagicMock()),
    ):
        mock_gepa_class.return_value = mock_gepa
        optimizer = GEPAAlignmentOptimizer()
        with patch.object(GEPAAlignmentOptimizer, "get_min_traces_required", return_value=5):
            optimizer.align(mock_judge, sample_traces_with_assessments)

        # Verify only required parameters are passed with defaults
        mock_gepa_class.assert_called_once()
        call_kwargs = mock_gepa_class.call_args.kwargs
        assert "metric" in call_kwargs
        assert "max_metric_calls" in call_kwargs
        assert "reflection_lm" in call_kwargs
        # Default is 4x number of examples (5 traces * 4 = 20)
        assert call_kwargs["max_metric_calls"] == 20
        assert len(call_kwargs) == 3  # metric, max_metric_calls, reflection_lm
def test_gepa_e2e_run(mock_judge, sample_traces_with_assessments):
    """End-to-end GEPA alignment with a DummyLM yields a valid optimized Judge."""
    try:
        import dspy
        from dspy.utils.dummies import DummyLM

        if not hasattr(dspy, "GEPA"):
            pytest.skip("dspy.GEPA not available in installed dspy version")
    except ImportError:
        pytest.skip("dspy not installed")

    from mlflow.genai.judges.base import Judge

    # Configure DummyLM with deterministic instruction proposals
    # GEPA will request new instructions during reflection phase
    dummy_lm = DummyLM(
        [
            "Carefully evaluate whether the {{outputs}} effectively addresses {{inputs}}",
            "Assess if the {{outputs}} properly responds to the {{inputs}} query",
            "Determine whether {{outputs}} satisfactorily answers {{inputs}}",
            "Judge if {{outputs}} adequately resolves {{inputs}}",
            "Evaluate the quality of {{outputs}} in addressing {{inputs}}",
        ]
    )

    # Create optimizer with minimal budget for fast test
    # Note: Using max_metric_calls=10 to give GEPA enough budget to actually
    # run optimization iterations and modify instructions
    optimizer = GEPAAlignmentOptimizer(
        model="openai:/gpt-4o-mini",
        max_metric_calls=10,
    )

    mock_invoke_judge_model = create_mock_judge_invocator()

    with (
        dspy.context(lm=dummy_lm),
        patch(
            "mlflow.genai.judges.instructions_judge.invoke_judge_model",
            side_effect=mock_invoke_judge_model,
        ) as mock_invoke,
        patch.object(
            GEPAAlignmentOptimizer, "get_min_traces_required", return_value=5
        ) as mock_min_traces,
    ):
        result = optimizer.align(mock_judge, sample_traces_with_assessments)
        mock_invoke.assert_called()
        mock_min_traces.assert_called_once_with()

    # Verify optimization completed without errors
    assert result is not None
    assert isinstance(result, Judge)
    assert result.name == mock_judge.name
    assert result.model == mock_judge.model

    # Verify instructions are valid
    assert result.instructions is not None
    assert len(result.instructions) > 0

    # Verify input fields are referenced in instructions (either as template variables
    # or in fields section). DummyLM returns instructions with {{inputs}} and {{outputs}},
    # so fields section is not appended.
    assert "inputs" in result.instructions
    assert "outputs" in result.instructions
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/test_gepa.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/docker/test_integrations.py | import os
from datetime import timedelta
import pytest
from testcontainers.compose import DockerCompose
from testcontainers.core.wait_strategies import HttpWaitStrategy
import mlflow
@pytest.mark.parametrize(
    "compose_file",
    [
        "docker-compose.mssql-test.yaml",
        "docker-compose.mysql-test.yaml",
        "docker-compose.postgres-test.yaml",
    ],
)
def test_backend_and_artifact_store_integration(compose_file):
    """Smoke-test MLflow tracking against each SQL backend via docker compose."""
    compose = DockerCompose(
        context=os.path.dirname(os.path.abspath(__file__)),
        compose_file_name=[compose_file],
    )
    # Configure wait strategy before starting containers
    compose.waiting_for(
        {
            "mlflow": HttpWaitStrategy(5000, "/health")
            .for_status_code(200)
            .with_startup_timeout(timedelta(minutes=5))
        }
    )
    with compose:
        base_url = "http://localhost:5000"
        mlflow.set_tracking_uri(base_url)
        mlflow.set_experiment("integration-test")

        @mlflow.trace
        def predict(model_input: list[str]) -> list[str]:
            return model_input

        # Exercise params, metrics, and model logging against the live server.
        with mlflow.start_run():
            mlflow.log_param("param", 1)
            mlflow.log_metric("metric", 1.0)
            mlflow.pyfunc.log_model(
                name="test_model",
                python_model=predict,
                input_example=["a", "b", "c"],
            )
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/docker/test_integrations.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/jobs/logging_utils.py | """Shared logging utilities for MLflow job consumers."""
import logging
from mlflow.utils.logging_utils import get_mlflow_log_level
def configure_logging_for_jobs() -> None:
    """Configure Python logging for job consumers to reduce noise for log levels above DEBUG."""
    # Only silence alembic's chatty INFO output (e.g. "Context impl SQLiteImpl",
    # "Will assume...") when MLflow itself is not running at DEBUG verbosity.
    effective_level = logging.getLevelName(get_mlflow_log_level())
    if effective_level > logging.DEBUG:
        alembic_logger = logging.getLogger("alembic.runtime.migration")
        alembic_logger.setLevel(logging.WARNING)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/logging_utils.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/ragas/scorers/agentic_metrics.py | from __future__ import annotations
from typing import ClassVar
from mlflow.genai.judges.builtin import _MODEL_API_DOC
from mlflow.genai.scorers.ragas import RagasScorer
from mlflow.utils.annotations import experimental
from mlflow.utils.docstring_utils import format_docstring
@experimental(version="3.9.0")
@format_docstring(_MODEL_API_DOC)
class TopicAdherence(RagasScorer):
    """
    Evaluates whether the AI system adheres to specified topics during interaction.

    This metric assesses if the agent stays on topic and avoids answering queries
    outside its designated domain of interest.

    Args:
        model: {{ model }}
        **metric_kwargs: Additional metric-specific parameters

    Examples:
        .. code-block:: python

            from mlflow.genai.scorers.ragas import TopicAdherence

            scorer = TopicAdherence()
            feedback = scorer(
                trace=trace,
                expectations={
                    "reference_topics": ["machine learning", "data science"],
                },
            )

            # or for sessions:
            session = mlflow.search_traces(
                filter_string="request_metadata.mlflow.trace.session='{session_id}'",
                return_type="list",
            )
            feedback = scorer(
                session=session,
                expectations={
                    "reference_topics": ["machine learning", "data science"],
                },
            )
    """

    # Identifier of the underlying RAGAS metric backing this scorer.
    metric_name: ClassVar[str] = "TopicAdherence"
@experimental(version="3.9.0")
class ToolCallAccuracy(RagasScorer):
    """
    Evaluates the accuracy of tool calls made by an agent.

    This deterministic metric compares the actual tool calls made by the agent
    against expected tool calls, considering both the tool names and their
    arguments. It can evaluate in strict order or flexible order mode.

    Args:
        **metric_kwargs: Additional metric-specific parameters

    Examples:
        .. code-block:: python

            from mlflow.genai.scorers.ragas import ToolCallAccuracy

            scorer = ToolCallAccuracy()
            feedback = scorer(
                trace=trace,
                expectations={
                    "expected_tool_calls": [
                        {"name": "weather_check", "arguments": {"location": "Paris"}},
                        {"name": "uv_index_lookup", "arguments": {"location": "Paris"}},
                    ]
                },
            )

            # or for sessions:
            session = mlflow.search_traces(
                filter_string="request_metadata.mlflow.trace.session='{session_id}'",
                return_type="list",
            )
            feedback = scorer(
                session=session,
                expectations={
                    "expected_tool_calls": [
                        {"name": "weather_check", "arguments": {"location": "Paris"}},
                        {"name": "uv_index_lookup", "arguments": {"location": "Paris"}},
                    ]
                },
            )
    """

    # Identifier of the underlying RAGAS metric backing this scorer.
    metric_name: ClassVar[str] = "ToolCallAccuracy"
@experimental(version="3.9.0")
class ToolCallF1(RagasScorer):
    """
    Calculates F1 score between expected and actual tool calls.

    Args:
        **metric_kwargs: Additional metric-specific parameters

    Examples:
        .. code-block:: python

            from mlflow.genai.scorers.ragas import ToolCallF1

            scorer = ToolCallF1()
            feedback = scorer(
                trace=trace,
                expectations={
                    "expected_tool_calls": [
                        {"name": "weather_check", "arguments": {"location": "Paris"}},
                    ]
                },
            )

            # or for sessions:
            session = mlflow.search_traces(
                filter_string="request_metadata.mlflow.trace.session='{session_id}'",
                return_type="list",
            )
            feedback = scorer(
                session=session,
                expectations={
                    "expected_tool_calls": [
                        {"name": "weather_check", "arguments": {"location": "Paris"}},
                    ]
                },
            )
    """

    # Identifier of the underlying RAGAS metric backing this scorer.
    metric_name: ClassVar[str] = "ToolCallF1"
@experimental(version="3.9.0")
@format_docstring(_MODEL_API_DOC)
class AgentGoalAccuracyWithReference(RagasScorer):
    """
    Evaluates whether the agent achieved the user's goal compared to the expectations.

    Args:
        model: {{ model }}
        **metric_kwargs: Additional metric-specific parameters

    Examples:
        .. code-block:: python

            from mlflow.genai.scorers.ragas import AgentGoalAccuracyWithReference

            scorer = AgentGoalAccuracyWithReference(model="openai:/gpt-4")
            feedback = scorer(
                trace=trace,
                expectations={"expected_output": "Table booked at a Chinese restaurant for 8pm"},
            )

            # or for sessions:
            session = mlflow.search_traces(
                filter_string="request_metadata.mlflow.trace.session='{session_id}'",
                return_type="list",
            )
            feedback = scorer(
                session=session,
                expectations={"expected_output": "Table booked at a Chinese restaurant for 8pm"},
            )
    """

    # Identifier of the underlying RAGAS metric backing this scorer.
    metric_name: ClassVar[str] = "AgentGoalAccuracyWithReference"
@experimental(version="3.9.0")
@format_docstring(_MODEL_API_DOC)
class AgentGoalAccuracyWithoutReference(RagasScorer):
    """
    Evaluates whether the agent achieved the user's goal without expectations.

    Args:
        model: {{ model }}
        **metric_kwargs: Additional metric-specific parameters

    Examples:
        .. code-block:: python

            from mlflow.genai.scorers.ragas import AgentGoalAccuracyWithoutReference

            scorer = AgentGoalAccuracyWithoutReference(model="openai:/gpt-4")
            feedback = scorer(trace=trace)

            # or for sessions:
            session = mlflow.search_traces(
                filter_string="request_metadata.mlflow.trace.session='{session_id}'",
                return_type="list",
            )
            feedback = scorer(session=session)
    """

    # Identifier of the underlying RAGAS metric backing this scorer.
    metric_name: ClassVar[str] = "AgentGoalAccuracyWithoutReference"
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/ragas/scorers/agentic_metrics.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/optimize/job.py | import logging
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Callable
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets import get_dataset
from mlflow.genai.optimize import optimize_prompts
from mlflow.genai.optimize.optimizers import (
BasePromptOptimizer,
GepaPromptOptimizer,
MetaPromptOptimizer,
)
from mlflow.genai.prompts import load_prompt
from mlflow.genai.scorers import builtin_scorers
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.registry import get_scorer
from mlflow.protos.prompt_optimization_pb2 import (
OPTIMIZER_TYPE_GEPA,
OPTIMIZER_TYPE_METAPROMPT,
OPTIMIZER_TYPE_UNSPECIFIED,
)
from mlflow.protos.prompt_optimization_pb2 import OptimizerType as ProtoOptimizerType
from mlflow.server.jobs import job
from mlflow.telemetry.events import OptimizePromptsJobEvent
from mlflow.telemetry.track import record_usage_event
from mlflow.tracking.client import MlflowClient
from mlflow.tracking.fluent import set_experiment, start_run
_logger = logging.getLogger(__name__)
_DEFAULT_OPTIMIZATION_JOB_MAX_WORKERS = 2
class OptimizerType(str, Enum):
    """Supported prompt optimizer types."""

    GEPA = "gepa"
    METAPROMPT = "metaprompt"

    @staticmethod
    def _supported_proto_names() -> list[str]:
        # Every proto enum name except the "unspecified" sentinel; used only
        # to build error messages.
        return [
            name for name in ProtoOptimizerType.keys() if name != "OPTIMIZER_TYPE_UNSPECIFIED"
        ]

    @classmethod
    def from_proto(cls, proto_value: int) -> "OptimizerType":
        """
        Convert a proto OptimizerType enum value to the Python OptimizerType enum.

        Args:
            proto_value: The integer value from the proto OptimizerType enum.

        Returns:
            The corresponding OptimizerType enum member.

        Raises:
            MlflowException: If the proto value is unspecified or unsupported.
        """
        if proto_value == OPTIMIZER_TYPE_UNSPECIFIED:
            raise MlflowException.invalid_parameter_value(
                f"optimizer_type is required. Supported types: {cls._supported_proto_names()}"
            )
        proto_to_member = {
            OPTIMIZER_TYPE_GEPA: cls.GEPA,
            OPTIMIZER_TYPE_METAPROMPT: cls.METAPROMPT,
        }
        member = proto_to_member.get(proto_value)
        if member is None:
            raise MlflowException.invalid_parameter_value(
                f"Unsupported optimizer_type value: {proto_value}. "
                f"Supported types: {cls._supported_proto_names()}"
            )
        return member

    def to_proto(self) -> int:
        """
        Convert the Python OptimizerType enum to a proto OptimizerType enum value.

        Returns:
            The corresponding proto OptimizerType integer value.
        """
        if self is OptimizerType.GEPA:
            return OPTIMIZER_TYPE_GEPA
        if self is OptimizerType.METAPROMPT:
            return OPTIMIZER_TYPE_METAPROMPT
        return OPTIMIZER_TYPE_UNSPECIFIED
@dataclass
class PromptOptimizationJobResult:
    """JSON-serializable summary of a completed prompt-optimization job."""

    # MLflow run that tracked the optimization.
    run_id: str
    # URI of the prompt version the optimization started from.
    source_prompt_uri: str
    # URI of the newly registered optimized prompt, or None if none was produced.
    optimized_prompt_uri: str | None
    # Name of the optimizer that produced the result.
    optimizer_name: str
    # Evaluation score before optimization, if available.
    initial_eval_score: float | None
    # Evaluation score after optimization, if available.
    final_eval_score: float | None
    # EvaluationDataset ID the optimization trained on.
    dataset_id: str
    # Scorer names used during evaluation.
    scorer_names: list[str]
def _create_optimizer(
    optimizer_type: str,
    optimizer_config: dict[str, Any] | None,
) -> BasePromptOptimizer:
    """
    Create an optimizer instance from type string and configuration dict.

    Args:
        optimizer_type: The optimizer type string (e.g., "gepa", "metaprompt");
            matching is case-insensitive.
        optimizer_config: Optimizer-specific configuration dictionary.

    Returns:
        An instantiated optimizer.

    Raises:
        MlflowException: If the optimizer type is missing or not supported, or
            required configuration is absent.
    """
    config = optimizer_config or {}

    # Guard clause: a missing/empty type gets its own dedicated error message.
    if not optimizer_type:
        supported_types = [t.value for t in OptimizerType]
        raise MlflowException.invalid_parameter_value(
            f"Optimizer type must be specified. Supported types: {supported_types}"
        )

    normalized_type = optimizer_type.lower()

    if normalized_type == OptimizerType.GEPA:
        reflection_model = config.get("reflection_model")
        if not reflection_model:
            raise MlflowException.invalid_parameter_value(
                "Missing required optimizer configuration: 'reflection_model' must be specified "
                "in optimizer_config for the GEPA optimizer (e.g., 'openai:/gpt-4o')."
            )
        return GepaPromptOptimizer(
            reflection_model=reflection_model,
            max_metric_calls=config.get("max_metric_calls", 100),
            display_progress_bar=config.get("display_progress_bar", False),
            gepa_kwargs=config.get("gepa_kwargs"),
        )

    if normalized_type == OptimizerType.METAPROMPT:
        reflection_model = config.get("reflection_model")
        if not reflection_model:
            raise MlflowException.invalid_parameter_value(
                "Missing required optimizer configuration: 'reflection_model' must be specified "
                "in optimizer_config for the MetaPrompt optimizer (e.g., 'openai:/gpt-4o')."
            )
        return MetaPromptOptimizer(
            reflection_model=reflection_model,
            lm_kwargs=config.get("lm_kwargs"),
            guidelines=config.get("guidelines"),
        )

    supported_types = [t.value for t in OptimizerType]
    raise MlflowException.invalid_parameter_value(
        f"Unsupported optimizer type: '{optimizer_type}'. Supported types: {supported_types}"
    )
def _load_scorers(scorer_names: list[str], experiment_id: str) -> list[Scorer]:
    """
    Load scorers by name.

    Each name is first resolved as a built-in scorer class (by class name);
    if that fails, it is looked up in the registered scorer store for the
    given experiment.

    Args:
        scorer_names: List of scorer names. Can be built-in scorer class names
            (e.g., "Correctness", "Safety") or registered scorer names.
        experiment_id: The experiment ID to load registered scorers from.

    Returns:
        List of Scorer instances.

    Raises:
        MlflowException: If a scorer cannot be found as either built-in or registered.
    """
    loaded: list[Scorer] = []
    for name in scorer_names:
        builtin_cls = getattr(builtin_scorers, name, None)
        if builtin_cls is not None:
            try:
                loaded.append(builtin_cls())
                continue
            except Exception as e:
                # Fall through to the registered-scorer lookup below.
                _logger.debug(f"Failed to instantiate built-in scorer {name}: {e}")

        # Load from the registered scorer store if not a built-in scorer
        try:
            loaded.append(get_scorer(name=name, experiment_id=experiment_id))
        except Exception as e:
            raise MlflowException.invalid_parameter_value(
                f"Scorer '{name}' not found. It is neither a built-in scorer "
                f"(e.g., 'Correctness', 'Safety') nor a registered scorer in "
                f"experiment '{experiment_id}'. Error: {e}"
            )
    return loaded
def _build_predict_fn(prompt_uri: str) -> Callable[..., Any]:
    """
    Build a predict function for single-prompt optimization.

    This creates a simple LLM call using the prompt's model configuration.
    The predict function loads the prompt, formats it with inputs, and
    calls the LLM via litellm.

    Args:
        prompt_uri: The URI of the prompt to use for prediction.

    Returns:
        A callable that takes inputs dict and returns the LLM response.

    Raises:
        MlflowException: If litellm is not installed, or the prompt has no model
            configuration providing ``provider`` and ``model_name``.
    """
    # litellm is an optional dependency; fail fast with installation guidance.
    try:
        import litellm
    except ImportError as e:
        raise MlflowException(
            "The 'litellm' package is required for prompt optimization but is not installed. "
            "Please install it using: pip install litellm"
        ) from e

    prompt = load_prompt(prompt_uri)
    # model_config may be absent or malformed; surface one actionable error.
    try:
        model_config = prompt.model_config
        provider = model_config["provider"]
        model_name = model_config["model_name"]
    except (KeyError, TypeError, AttributeError) as e:
        raise MlflowException(
            f"Prompt {prompt_uri} doesn't have a model configuration that sets provider and "
            "model_name, which are required for optimization."
        ) from e

    # litellm addresses models as "<provider>/<model_name>".
    litellm_model = f"{provider}/{model_name}"

    def predict_fn(**kwargs: Any) -> Any:
        # Format the prompt with the row's inputs and send it as one user message.
        response = litellm.completion(
            model=litellm_model,
            messages=[{"role": "user", "content": prompt.format(**kwargs)}],
        )
        return response.choices[0].message.content

    return predict_fn
@record_usage_event(OptimizePromptsJobEvent)
@job(name="optimize_prompts", max_workers=_DEFAULT_OPTIMIZATION_JOB_MAX_WORKERS)
def optimize_prompts_job(
    run_id: str,
    experiment_id: str,
    prompt_uri: str,
    dataset_id: str,
    optimizer_type: str,
    optimizer_config: dict[str, Any] | None,
    scorer_names: list[str],
) -> dict[str, Any]:
    """
    Job function for async single-prompt optimization.

    This function is executed as a background job by the MLflow server.
    It resumes an existing MLflow run (created by the handler) and calls
    `mlflow.genai.optimize_prompts()` which reuses the active run.

    Note: This job only supports single-prompt optimization. The predict_fn
    is automatically built using the prompt's model_config (provider/model_name)
    via litellm, making the optimization self-contained without requiring users
    to serialize their own predict function.

    Args:
        run_id: The MLflow run ID to track the optimization configs and metrics.
        experiment_id: The experiment ID to track the optimization in.
        prompt_uri: The URI of the prompt to optimize.
        dataset_id: The ID of the EvaluationDataset containing training data.
        optimizer_type: The optimizer type string (e.g., "gepa", "metaprompt").
        optimizer_config: Optimizer-specific configuration dictionary.
        scorer_names: List of scorer names. Can be built-in scorer class names
            (e.g., "Correctness", "Safety") or registered scorer names.
            For custom scorers, use mlflow.genai.make_judge() to create a judge,
            then register it using scorer.register(experiment_id=experiment_id),
            and pass the registered scorer name here.

    Returns:
        Dict containing optimization results and metadata (JSON-serializable).
    """
    set_experiment(experiment_id=experiment_id)

    # Resolve job inputs into concrete objects before resuming the run.
    dataset = get_dataset(dataset_id=dataset_id) if dataset_id else None
    predict_fn = _build_predict_fn(prompt_uri)
    optimizer = _create_optimizer(optimizer_type, optimizer_config)
    loaded_scorers = _load_scorers(scorer_names, experiment_id)
    source_prompt = load_prompt(prompt_uri)

    # Resume the given run ID. Params have already been logged by the handler
    with start_run(run_id=run_id):
        # Link source prompt to run for lineage
        client = MlflowClient()
        client.link_prompt_version_to_run(run_id=run_id, prompt=source_prompt)

        result = optimize_prompts(
            predict_fn=predict_fn,
            train_data=dataset,
            prompt_uris=[prompt_uri],
            optimizer=optimizer,
            scorers=loaded_scorers,
            enable_tracking=True,
        )

    job_result = PromptOptimizationJobResult(
        run_id=run_id,
        source_prompt_uri=prompt_uri,
        optimized_prompt_uri=result.optimized_prompts[0].uri if result.optimized_prompts else None,
        optimizer_name=result.optimizer_name,
        initial_eval_score=result.initial_eval_score,
        final_eval_score=result.final_eval_score,
        dataset_id=dataset_id,
        scorer_names=scorer_names,
    )
    # Job results must be JSON-serializable, so return a plain dict.
    return asdict(job_result)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/job.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/optimize/test_job.py | """
Unit tests for the optimize_prompts_job wrapper.
These tests focus on the helper functions and job function logic without
requiring a full job execution infrastructure.
"""
import sys
from unittest import mock
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.job import (
OptimizerType,
_build_predict_fn,
_create_optimizer,
_load_scorers,
optimize_prompts_job,
)
from mlflow.genai.optimize.optimizers import GepaPromptOptimizer, MetaPromptOptimizer
from mlflow.genai.scorers import scorer
from mlflow.genai.scorers.builtin_scorers import Correctness, Safety
from mlflow.protos.prompt_optimization_pb2 import (
OPTIMIZER_TYPE_GEPA,
OPTIMIZER_TYPE_METAPROMPT,
OPTIMIZER_TYPE_UNSPECIFIED,
)
def test_create_gepa_optimizer_success():
    """GEPA optimizer is built with the reflection model and budget from the config."""
    config = {"reflection_model": "openai:/gpt-4o", "max_metric_calls": 50}
    optimizer = _create_optimizer("gepa", config)
    assert isinstance(optimizer, GepaPromptOptimizer)
    assert optimizer.reflection_model == "openai:/gpt-4o"
    assert optimizer.max_metric_calls == 50


def test_create_gepa_optimizer_case_insensitive():
    """Optimizer-type matching ignores case."""
    config = {"reflection_model": "openai:/gpt-4o"}
    optimizer = _create_optimizer("GEPA", config)
    assert isinstance(optimizer, GepaPromptOptimizer)


def test_create_gepa_optimizer_missing_reflection_model():
    """GEPA config without 'reflection_model' is rejected."""
    config = {"max_metric_calls": 50}
    with pytest.raises(MlflowException, match="'reflection_model' must be specified"):
        _create_optimizer("gepa", config)


def test_create_metaprompt_optimizer_success():
    """MetaPrompt optimizer is built from the reflection model in the config."""
    config = {"reflection_model": "openai:/gpt-4o", "guidelines": "Be concise"}
    optimizer = _create_optimizer("metaprompt", config)
    assert isinstance(optimizer, MetaPromptOptimizer)


def test_create_metaprompt_optimizer_missing_reflection_model():
    """MetaPrompt config without 'reflection_model' is rejected."""
    config = {"guidelines": "Be concise"}
    with pytest.raises(MlflowException, match="'reflection_model' must be specified"):
        _create_optimizer("metaprompt", config)


def test_create_optimizer_unsupported_type():
    """Unknown optimizer types raise a helpful error."""
    with pytest.raises(MlflowException, match="Unsupported optimizer type: 'invalid'"):
        _create_optimizer("invalid", None)
@pytest.mark.parametrize(
    ("proto_value", "expected_type", "expected_str", "error_match"),
    [
        (OPTIMIZER_TYPE_GEPA, OptimizerType.GEPA, "gepa", None),
        (OPTIMIZER_TYPE_METAPROMPT, OptimizerType.METAPROMPT, "metaprompt", None),
        (OPTIMIZER_TYPE_UNSPECIFIED, None, None, "optimizer_type is required"),
        (999, None, None, "Unsupported optimizer_type value"),
    ],
)
def test_optimizer_type_from_proto(proto_value, expected_type, expected_str, error_match):
    """from_proto maps proto enum values to OptimizerType and rejects bad values."""
    if error_match:
        with pytest.raises(MlflowException, match=error_match):
            OptimizerType.from_proto(proto_value)
    else:
        result = OptimizerType.from_proto(proto_value)
        assert result == expected_type
        # OptimizerType is a str enum, so members compare equal to their values.
        assert result == expected_str


@pytest.mark.parametrize(
    ("optimizer_type", "expected_proto"),
    [
        (OptimizerType.GEPA, OPTIMIZER_TYPE_GEPA),
        (OptimizerType.METAPROMPT, OPTIMIZER_TYPE_METAPROMPT),
    ],
)
def test_optimizer_type_to_proto(optimizer_type, expected_proto):
    """to_proto maps OptimizerType members back to proto enum values."""
    assert optimizer_type.to_proto() == expected_proto
def test_load_builtin_scorers():
    """Built-in scorer class names resolve to instantiated built-in scorers."""
    scorers = _load_scorers(["Correctness", "Safety"], "exp-123")
    assert len(scorers) == 2
    assert isinstance(scorers[0], Correctness)
    assert isinstance(scorers[1], Safety)


def test_load_custom_scorers():
    """Registered custom scorers are loaded from the experiment's scorer store."""
    with (
        mock.patch("mlflow.genai.scorers.base.is_databricks_uri", return_value=True),
    ):
        experiment_id = mlflow.create_experiment("test_load_custom_scorers")

        @scorer
        def custom_scorer_1(outputs) -> bool:
            return len(outputs) > 0

        @scorer
        def custom_scorer_2(outputs) -> bool:
            return len(outputs) > 0

        custom_scorer_1.register(experiment_id=experiment_id, name="custom_scorer_1")
        custom_scorer_2.register(experiment_id=experiment_id, name="custom_scorer_2")

        scorers = _load_scorers(["custom_scorer_1", "custom_scorer_2"], experiment_id)
        assert len(scorers) == 2
        assert scorers[0].name == "custom_scorer_1"
        assert scorers[1].name == "custom_scorer_2"

        mlflow.delete_experiment(experiment_id)


def test_load_scorer_not_found_raises_error():
    """An unknown scorer name raises MlflowException naming the scorer."""
    experiment_id = mlflow.create_experiment("test_load_scorer_not_found")
    with pytest.raises(MlflowException, match="Scorer 'unknown_scorer' not found"):
        _load_scorers(["unknown_scorer"], experiment_id)
    mlflow.delete_experiment(experiment_id)
def test_build_predict_fn_success():
mock_prompt = mock.MagicMock()
mock_prompt.model_config = {"provider": "openai", "model_name": "gpt-4o"}
mock_prompt.format.return_value = "formatted prompt"
mock_litellm = mock.MagicMock()
mock_response = mock.MagicMock()
mock_response.choices = [mock.MagicMock()]
mock_response.choices[0].message.content = "response text"
mock_litellm.completion.return_value = mock_response
with (
mock.patch("mlflow.genai.optimize.job.load_prompt", return_value=mock_prompt),
mock.patch.dict("sys.modules", {"litellm": mock_litellm}),
):
predict_fn = _build_predict_fn("prompts:/test/1")
result = predict_fn(question="What is AI?")
assert result == "response text"
mock_litellm.completion.assert_called_once()
call_args = mock_litellm.completion.call_args
assert call_args.kwargs["model"] == "openai/gpt-4o"
mock_prompt.format.assert_called_with(question="What is AI?")
def test_build_predict_fn_missing_model_config():
mock_prompt = mock.MagicMock()
mock_prompt.model_config = None
mock_litellm = mock.MagicMock()
with (
mock.patch("mlflow.genai.optimize.job.load_prompt", return_value=mock_prompt),
mock.patch.dict("sys.modules", {"litellm": mock_litellm}),
):
with pytest.raises(MlflowException, match="doesn't have a model configuration"):
_build_predict_fn("prompts:/test/1")
def test_build_predict_fn_missing_provider():
mock_prompt = mock.MagicMock()
mock_prompt.model_config = {"model_name": "gpt-4o"}
mock_litellm = mock.MagicMock()
with (
mock.patch("mlflow.genai.optimize.job.load_prompt", return_value=mock_prompt),
mock.patch.dict("sys.modules", {"litellm": mock_litellm}),
):
with pytest.raises(MlflowException, match="doesn't have a model configuration"):
_build_predict_fn("prompts:/test/1")
def test_build_predict_fn_missing_litellm():
# Simulate litellm not being installed
with mock.patch.dict(sys.modules, {"litellm": None}):
with pytest.raises(
MlflowException, match="'litellm' package is required for prompt optimization"
):
_build_predict_fn("prompts:/test/1")
def test_optimize_prompts_job_has_metadata():
assert hasattr(optimize_prompts_job, "_job_fn_metadata")
metadata = optimize_prompts_job._job_fn_metadata
assert metadata.name == "optimize_prompts"
assert metadata.max_workers == 2
def test_optimize_prompts_job_calls():
mock_dataset = mock.MagicMock()
mock_prompt = mock.MagicMock()
mock_prompt.model_config = {"provider": "openai", "model_name": "gpt-4o"}
mock_optimizer = mock.MagicMock()
mock_loaded_scorers = [mock.MagicMock(), mock.MagicMock()]
mock_predict_fn = mock.MagicMock()
mock_result = mock.MagicMock()
mock_result.optimized_prompts = [mock.MagicMock()]
mock_result.optimized_prompts[0].uri = "prompts:/test/2"
mock_result.optimizer_name = "GepaPromptOptimizer"
mock_result.initial_eval_score = 0.5
mock_result.final_eval_score = 0.9
optimizer_config = {"reflection_model": "openai:/gpt-4o"}
with (
mock.patch("mlflow.genai.optimize.job.get_dataset", return_value=mock_dataset),
mock.patch("mlflow.genai.optimize.job.load_prompt", return_value=mock_prompt),
mock.patch(
"mlflow.genai.optimize.job._create_optimizer", return_value=mock_optimizer
) as mock_create_optimizer,
mock.patch(
"mlflow.genai.optimize.job._load_scorers", return_value=mock_loaded_scorers
) as mock_load_scorers,
mock.patch(
"mlflow.genai.optimize.job._build_predict_fn", return_value=mock_predict_fn
) as mock_build_predict_fn,
mock.patch("mlflow.genai.optimize.job.set_experiment"),
mock.patch("mlflow.genai.optimize.job.start_run"),
mock.patch("mlflow.genai.optimize.job.MlflowClient"),
mock.patch(
"mlflow.genai.optimize.job.optimize_prompts", return_value=mock_result
) as mock_optimize_prompts,
):
optimize_prompts_job(
run_id="run-123",
experiment_id="exp-123",
prompt_uri="prompts:/test/1",
dataset_id="dataset-123",
optimizer_type="gepa",
optimizer_config=optimizer_config,
scorer_names=["Correctness", "Safety"],
)
# Verify _create_optimizer was called with correct args
mock_create_optimizer.assert_called_once_with("gepa", optimizer_config)
# Verify _load_scorers was called with correct args
mock_load_scorers.assert_called_once_with(["Correctness", "Safety"], "exp-123")
# Verify _build_predict_fn was called with correct args
mock_build_predict_fn.assert_called_once_with("prompts:/test/1")
# Verify optimize_prompts was called with correct args
mock_optimize_prompts.assert_called_once_with(
predict_fn=mock_predict_fn,
train_data=mock_dataset,
prompt_uris=["prompts:/test/1"],
optimizer=mock_optimizer,
scorers=mock_loaded_scorers,
enable_tracking=True,
)
def test_optimize_prompts_job_result_structure():
mock_dataset = mock.MagicMock()
mock_prompt = mock.MagicMock()
mock_prompt.model_config = {"provider": "openai", "model_name": "gpt-4o"}
mock_optimizer = mock.MagicMock()
mock_result = mock.MagicMock()
mock_result.optimized_prompts = [mock.MagicMock()]
mock_result.optimized_prompts[0].uri = "prompts:/test/2"
mock_result.optimizer_name = "GepaPromptOptimizer"
mock_result.initial_eval_score = 0.5
mock_result.final_eval_score = 0.9
optimizer_config = {"reflection_model": "openai:/gpt-4o"}
with (
mock.patch("mlflow.genai.optimize.job.get_dataset", return_value=mock_dataset),
mock.patch("mlflow.genai.optimize.job.load_prompt", return_value=mock_prompt),
mock.patch("mlflow.genai.optimize.job._create_optimizer", return_value=mock_optimizer),
mock.patch("mlflow.genai.optimize.job._load_scorers", return_value=[mock.MagicMock()]),
mock.patch("mlflow.genai.optimize.job._build_predict_fn", return_value=lambda **k: "r"),
mock.patch("mlflow.genai.optimize.job.set_experiment"),
mock.patch("mlflow.genai.optimize.job.start_run"),
mock.patch("mlflow.genai.optimize.job.MlflowClient"),
mock.patch("mlflow.genai.optimize.job.optimize_prompts", return_value=mock_result),
):
result = optimize_prompts_job(
run_id="run-123",
experiment_id="exp-123",
prompt_uri="prompts:/test/1",
dataset_id="dataset-123",
optimizer_type="gepa",
optimizer_config=optimizer_config,
scorer_names=["Correctness", "Safety"],
)
# Verify result structure (returned as dict from asdict())
assert result["run_id"] == "run-123"
assert result["source_prompt_uri"] == "prompts:/test/1"
assert result["optimized_prompt_uri"] == "prompts:/test/2"
assert result["optimizer_name"] == "GepaPromptOptimizer"
assert result["initial_eval_score"] == 0.5
assert result["final_eval_score"] == 0.9
assert result["dataset_id"] == "dataset-123"
assert result["scorer_names"] == ["Correctness", "Safety"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/optimize/test_job.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/use_gh_token.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class UseGhToken(Rule):
def _message(self) -> str:
return "Use GH_TOKEN instead of GITHUB_TOKEN for the environment variable name."
@staticmethod
def check(node: ast.Call, resolver: Resolver) -> bool:
"""
Returns True if the call reads the GITHUB_TOKEN environment variable.
Handles:
- os.getenv("GITHUB_TOKEN")
- os.environ.get("GITHUB_TOKEN")
"""
match node:
case ast.Call(args=[ast.Constant(value="GITHUB_TOKEN"), *_]):
match resolver.resolve(node.func):
case ["os", "getenv"] | ["os", "environ", "get"]:
return True
return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/use_gh_token.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_use_gh_token.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import lint_file
from clint.rules.use_gh_token import UseGhToken
@pytest.mark.parametrize(
"code",
[
pytest.param(
'import os\n\ntoken = os.getenv("GITHUB_TOKEN")',
id="os.getenv",
),
pytest.param(
'import os\n\ntoken = os.environ.get("GITHUB_TOKEN")',
id="os.environ.get",
),
pytest.param(
'import os\n\ntoken = os.getenv("GITHUB_TOKEN", "default")',
id="os.getenv with default",
),
pytest.param(
'import os\n\ntoken = os.environ.get("GITHUB_TOKEN", None)',
id="os.environ.get with default",
),
],
)
def test_violation(code: str, index_path: Path) -> None:
config = Config(select={UseGhToken.name})
violations = lint_file(Path("file.py"), code, config, index_path)
assert len(violations) == 1
assert isinstance(violations[0].rule, UseGhToken)
@pytest.mark.parametrize(
"code",
[
pytest.param(
'import os\n\ntoken = os.getenv("GH_TOKEN")',
id="os.getenv with GH_TOKEN",
),
pytest.param(
'import os\n\ntoken = os.environ.get("GH_TOKEN")',
id="os.environ.get with GH_TOKEN",
),
pytest.param(
'import os\n\ntoken = os.getenv("OTHER_VAR")',
id="os.getenv with other var",
),
],
)
def test_no_violation(code: str, index_path: Path) -> None:
config = Config(select={UseGhToken.name})
violations = lint_file(Path("file.py"), code, config, index_path)
assert len(violations) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_use_gh_token.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/otel/translation/spring_ai.py | """
Translation utilities for Spring AI semantic conventions.
Spring AI uses OpenTelemetry GenAI semantic conventions but stores
prompt/completion content in events rather than attributes:
- gen_ai.content.prompt event with gen_ai.prompt attribute
- gen_ai.content.completion event with gen_ai.completion attribute
Reference: https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-events/
"""
from typing import Any
from mlflow.entities.span import SpanType
from mlflow.tracing.otel.translation.base import OtelSchemaTranslator
class SpringAiTranslator(OtelSchemaTranslator):
"""
Translator for Spring AI spans.
Spring AI uses GenAI semantic conventions but stores prompt/completion
in events. This translator extends the base to handle event-based
input/output extraction.
"""
# Spring AI uses gen_ai.operation.name for span kind (same as GenAI)
SPAN_KIND_ATTRIBUTE_KEY = "gen_ai.operation.name"
SPAN_KIND_TO_MLFLOW_TYPE = {
"chat": SpanType.CHAT_MODEL,
"embeddings": SpanType.EMBEDDING,
}
# Token usage attribute keys (same as GenAI semantic conventions)
INPUT_TOKEN_KEY = "gen_ai.usage.input_tokens"
OUTPUT_TOKEN_KEY = "gen_ai.usage.output_tokens"
# Spring AI doesn't use attribute-based input/output
# Instead, it uses events (handled via get_input_value_from_events/get_output_value_from_events)
INPUT_VALUE_KEYS = None
OUTPUT_VALUE_KEYS = None
# Event names for Spring AI prompt/completion content
PROMPT_EVENT_NAME = "gen_ai.content.prompt"
COMPLETION_EVENT_NAME = "gen_ai.content.completion"
# Attribute keys within events
PROMPT_ATTRIBUTE_KEY = "gen_ai.prompt"
COMPLETION_ATTRIBUTE_KEY = "gen_ai.completion"
def get_input_value_from_events(self, events: list[dict[str, Any]]) -> Any:
"""
Get input value from Spring AI prompt events.
Args:
events: List of span events
Returns:
Input value or None if not found
"""
return self._get_value_from_event(events, self.PROMPT_EVENT_NAME, self.PROMPT_ATTRIBUTE_KEY)
def get_output_value_from_events(self, events: list[dict[str, Any]]) -> Any:
"""
Get output value from Spring AI completion events.
Args:
events: List of span events
Returns:
Output value or None if not found
"""
return self._get_value_from_event(
events, self.COMPLETION_EVENT_NAME, self.COMPLETION_ATTRIBUTE_KEY
)
def _get_value_from_event(
self, events: list[dict[str, Any]], event_name: str, attribute_key: str
) -> Any:
"""
Extract a value from a specific event.
Args:
events: List of span events
event_name: The event name to look for
attribute_key: The attribute key within the event
Returns:
The attribute value or None if not found
"""
for event in events:
if event.get("name") == event_name:
event_attrs = event.get("attributes", {})
if value := event_attrs.get(attribute_key):
return value
return None
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/otel/translation/spring_ai.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/assistant/cli.py | """MLflow CLI commands for Assistant integration."""
import shutil
import sys
import threading
import time
from pathlib import Path
import click
from mlflow.assistant.config import AssistantConfig, ProjectConfig, SkillsConfig
from mlflow.assistant.providers import AssistantProvider, list_providers
from mlflow.assistant.providers.base import ProviderNotConfiguredError
from mlflow.assistant.skill_installer import install_skills
class Spinner:
"""Simple spinner animation for long-running operations."""
def __init__(self, message: str = "Loading"):
self.message = message
self.spinning = False
self.thread = None
self.frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
def _spin(self):
i = 0
while self.spinning:
frame = self.frames[i % len(self.frames)]
sys.stdout.write(f"\r{frame} {self.message}")
sys.stdout.flush()
time.sleep(0.1)
i += 1
def __enter__(self):
self.spinning = True
self.thread = threading.Thread(target=self._spin, name="Spinner")
self.thread.start()
return self
def __exit__(self, *args):
self.spinning = False
if self.thread:
self.thread.join()
sys.stdout.write("\r" + " " * (len(self.message) + 4) + "\r")
sys.stdout.flush()
@click.command("assistant")
@click.option(
"--configure",
is_flag=True,
help="Configure or reconfigure the assistant settings",
)
def commands(configure: bool):
"""MLflow Assistant - AI-powered trace analysis.
Run 'mlflow assistant --configure' to set up the assistant.
"""
if configure:
_run_configuration()
else:
# Check if already configured
config = AssistantConfig.load()
if not config.providers:
click.secho(
"Assistant is not configured. Please run: mlflow assistant --configure",
fg="yellow",
)
else:
click.secho(
"Assistant launch is not yet implemented. To use Assistant, run `mlflow assistant "
"--configure` to setup, then launch the MLflow UI manually.",
fg="yellow",
)
def _run_configuration():
"""Configure MLflow Assistant for the UI.
This interactive command sets up the AI assistant feature that allows you
to analyze MLflow traces directly from the UI.
The command will:
1. Ask which provider to use (Claude Code for now)
2. Check provider availability
3. Optionally connect an experiment with code repository
4. Ask which model to use
5. Ask where to install skills (user-level or project-level)
6. Install provider-specific skills
7. Save configuration
Example:
mlflow assistant --configure
"""
click.echo()
click.secho("╔══════════════════════════════════════════╗", fg="cyan")
click.secho("║ * . * . * ║", fg="cyan")
click.secho("║ . * MLflow Assistant Setup * . ║", fg="cyan", bold=True)
click.secho("║ * . * . * ║", fg="cyan")
click.secho("╚══════════════════════════════════════════╝", fg="cyan")
click.echo()
# Step 1: Select provider
provider = _prompt_provider()
if provider is None:
return
# Step 2: Check provider availability
if not _check_provider(provider):
return
# Step 3: Optionally connect experiment with code repository
project_path = _prompt_experiment_path()
# Step 4: Ask for model
model = _prompt_model()
# Step 5: Ask for skill location
skills_config = _prompt_skill_location(project_path)
# Step 6: Install skills
skill_path = _install_skills(provider, skills_config, project_path)
# Step 7: Save configuration
_save_config(provider, model, skills_config)
# Show success message
_show_init_success(provider, model, skill_path)
def _prompt_provider() -> AssistantProvider | None:
"""Prompt user to select a provider."""
providers = list_providers()
click.secho("Step 1/4: Select AI Provider", fg="cyan", bold=True)
click.secho("-" * 30, fg="cyan")
click.echo()
for i, provider in enumerate(providers, 1):
marker = click.style(" [recommended]", fg="green") if i == 1 else ""
click.echo(f" {i}. {provider.display_name}{marker}")
click.secho(f" {provider.description}", dim=True)
click.echo()
click.secho(" More providers coming soon...", dim=True)
click.echo()
default_provider = providers[0]
choice = click.prompt(
click.style(f"Select provider [1: {default_provider.display_name}]", fg="bright_blue"),
default="1",
type=click.Choice([str(i) for i in range(1, len(providers) + 1)]),
show_choices=False,
show_default=False,
)
provider = providers[int(choice) - 1]
click.echo()
return provider
def _check_provider(provider: AssistantProvider) -> bool:
"""Check if the selected provider is available."""
click.secho("Step 2/4: Checking Provider", fg="cyan", bold=True)
click.secho("-" * 30, fg="cyan")
click.echo()
# First check if CLI is installed
claude_path = shutil.which("claude")
if not claude_path:
click.secho(
"Claude Code CLI is not installed. "
"Install it with: npm install -g @anthropic-ai/claude-code",
fg="red",
)
click.echo()
return False
click.echo(f"Claude CLI found: {claude_path}")
try:
spinner_msg = "Checking connection... " + click.style(
"(this may take a few seconds)", dim=True
)
with Spinner(spinner_msg):
provider.check_connection()
click.secho("Connection verified", fg="green")
click.echo()
return True
except ProviderNotConfiguredError as e:
click.secho(str(e), fg="red")
click.echo()
return False
def _fetch_recent_experiments(tracking_uri: str, max_results: int = 5) -> list[tuple[str, str]]:
"""Fetch recent experiments from the tracking server.
Returns:
List of (experiment_id, experiment_name) tuples.
"""
import mlflow
original_uri = mlflow.get_tracking_uri()
try:
mlflow.set_tracking_uri(tracking_uri)
client = mlflow.MlflowClient()
experiments = client.search_experiments(
max_results=max_results,
order_by=["last_update_time DESC"],
)
return [(exp.experiment_id, exp.name) for exp in experiments]
except Exception:
return []
finally:
mlflow.set_tracking_uri(original_uri)
def _resolve_experiment_id(tracking_uri: str, name_or_id: str) -> str | None:
"""Resolve experiment name or ID to experiment ID.
Args:
tracking_uri: MLflow tracking server URI.
name_or_id: Experiment name or ID.
Returns:
Experiment ID if found, None otherwise.
"""
import mlflow
original_uri = mlflow.get_tracking_uri()
try:
mlflow.set_tracking_uri(tracking_uri)
client = mlflow.MlflowClient()
# First try to get by ID (if it looks like an ID)
if name_or_id.isdigit():
try:
if exp := client.get_experiment(name_or_id):
return exp.experiment_id
except Exception:
pass
# Try to get by name
if exp := client.get_experiment_by_name(name_or_id):
return exp.experiment_id
return None
except Exception:
return None
finally:
mlflow.set_tracking_uri(original_uri)
def _prompt_experiment_path() -> Path | None:
"""Prompt user to optionally connect an experiment with code repository.
Returns:
The project path if configured, None otherwise.
"""
click.secho("Step 3/5: Experiment & Code Context ", fg="cyan", bold=True, nl=False)
click.secho("[Optional, Recommended]", fg="green", bold=True)
click.secho("-" * 30, fg="cyan")
click.echo()
click.echo("You can connect an experiment with a code repository to give")
click.echo("the assistant context about your source code for better analysis.")
click.secho("(You can also set this up later in the MLflow UI.)", dim=True)
click.echo()
connect = click.confirm(
click.style(
"Do you want to connect an experiment with a code repository?", fg="bright_blue"
),
default=True,
)
if not connect:
click.echo()
return None
click.echo()
# Ask for tracking URI to fetch experiments
tracking_uri = click.prompt(
click.style("Enter the MLflow tracking server URI", fg="bright_blue"),
default="http://localhost:5000",
)
click.echo()
click.secho("Fetching recent experiments...", dim=True)
# Fetch recent experiments
experiments = _fetch_recent_experiments(tracking_uri)
if not experiments:
click.secho("Could not fetch experiments from the server.", fg="yellow")
click.echo("You can set this up later in the MLflow UI.")
click.echo()
return None
click.echo()
click.echo(click.style("Select an experiment to connect:", fg="bright_blue"))
click.echo()
for i, (exp_id, exp_name) in enumerate(experiments, 1):
click.echo(f" {i}. {exp_name} (ID: {exp_id})")
other_option = len(experiments) + 1
click.echo(f" {other_option}. Enter experiment name or ID manually")
click.echo()
choice = click.prompt(
click.style("Select experiment", fg="bright_blue"),
type=click.IntRange(1, other_option),
default=1,
)
if choice == other_option:
while True:
click.echo()
name_or_id = click.prompt(
click.style("Experiment name or ID", fg="bright_blue"), default=""
)
if not name_or_id:
click.secho("No experiment specified. Please try again.", fg="yellow")
continue
experiment_id = _resolve_experiment_id(tracking_uri, name_or_id)
if experiment_id:
# Use the input as display name (could be name or ID)
experiment_name = name_or_id
break
click.secho(
f"Experiment '{name_or_id}' not found. Please try again.",
fg="red",
)
else:
experiment_id, experiment_name = experiments[choice - 1]
click.secho(
f"Experiment '{experiment_name}' selected",
fg="green",
)
click.echo()
# Ask for project path
default_path = str(Path.cwd())
while True:
raw_path = click.prompt(
click.style("Enter the path to your project directory:", fg="bright_blue"),
default=default_path,
)
# Expand ~ and resolve relative paths
expanded_path = Path(raw_path).expanduser().resolve()
if expanded_path.is_dir():
project_path = str(expanded_path)
break
click.secho(f"Directory '{raw_path}' does not exist. Please try again.", fg="red")
# Save the project path mapping locally
try:
config = AssistantConfig.load()
config.projects[experiment_id] = ProjectConfig(type="local", location=project_path)
config.save()
click.secho(
f"Project path {project_path} is saved for experiment '{experiment_name}'",
fg="green",
)
except Exception as e:
click.secho(f"Error saving project path: {e}", fg="red")
click.echo()
return expanded_path
def _prompt_model() -> str:
"""Prompt user for model selection."""
click.secho("Step 4/5: Model Selection", fg="cyan", bold=True)
click.secho("-" * 30, fg="cyan")
click.echo()
click.echo("Choose a model for analysis:")
click.secho(" - Press Enter to use the default model (recommended)", dim=True)
click.secho(" - Or type a specific model name (e.g., claude-sonnet-4-20250514)", dim=True)
click.echo()
model = click.prompt(click.style("Model", fg="bright_blue"), default="default")
click.echo()
return model
def _prompt_skill_location(project_path: Path | None) -> SkillsConfig:
"""Prompt user for skill installation location.
Args:
project_path: The project path from experiment setup, or None if skipped.
Returns:
SkillsConfig with the selected location type and optional custom path.
"""
click.secho("Step 5/5: Skill Installation Location", fg="cyan", bold=True)
click.secho("-" * 30, fg="cyan")
click.echo()
click.echo("Choose where to install MLflow skills for Assistant:")
click.echo()
# TODO: Update this when we support other providers
user_path = Path.home() / ".claude" / "skills"
click.echo(f" 1. User level ({user_path})")
click.secho(" Skills available globally across all projects", dim=True)
click.echo()
if project_path:
project_skill_path = project_path / ".claude" / "skills"
click.echo(f" 2. Project level ({project_skill_path})")
click.secho(" Skills available only in this project", dim=True)
click.echo()
click.echo(" 3. Custom location")
click.secho(" Specify a custom path for skills", dim=True)
click.echo()
valid_choices = ["1", "2", "3"]
else:
click.echo(" 2. Custom location")
click.secho(" Specify a custom path for skills", dim=True)
click.echo()
valid_choices = ["1", "2"]
choice = click.prompt(
click.style("Select location [1: User level]", fg="bright_blue"),
default="1",
type=click.Choice(valid_choices),
show_choices=False,
show_default=False,
)
click.echo()
if choice == "1":
return SkillsConfig(type="global")
elif choice == "2" and project_path:
return SkillsConfig(type="project")
else:
# Custom location
while True:
raw_path = click.prompt(
click.style("Enter the custom path for skills", fg="bright_blue"),
default=str(user_path),
)
expanded_path = Path(raw_path).expanduser().resolve()
# For custom paths, we'll create the directory, so just check parent exists
if expanded_path.parent.exists() or expanded_path.exists():
click.echo()
return SkillsConfig(type="custom", custom_path=str(expanded_path))
click.secho(
f"Parent directory '{expanded_path.parent}' does not exist. Please try again.",
fg="red",
)
def _install_skills(
provider: AssistantProvider, skills_config: SkillsConfig, project_path: Path | None
) -> Path:
"""Install skills bundled with MLflow.
Returns:
The resolved path where skills were installed.
"""
match skills_config.type:
case "global":
skill_path = provider.resolve_skills_path(Path.home())
case "project":
skill_path = provider.resolve_skills_path(project_path)
case "custom":
skill_path = Path(skills_config.custom_path).expanduser()
if installed_skills := install_skills(skill_path):
for skill in installed_skills:
click.secho(f" - {skill}")
else:
click.secho("No skills available to install.", fg="yellow")
click.echo()
return skill_path
def _save_config(provider: AssistantProvider, model: str, skills_config: SkillsConfig) -> None:
"""Save configuration to file."""
click.secho("Saving Configuration", fg="cyan", bold=True)
click.secho("-" * 30, fg="cyan")
config = AssistantConfig.load()
config.set_provider(provider.name, model)
config.providers[provider.name].skills = skills_config
config.save()
click.secho("Configuration saved", fg="green")
click.echo()
def _show_init_success(provider: AssistantProvider, model: str, skill_path: Path) -> None:
"""Show success message and next steps."""
click.secho(" ~ * ~ * ~ * ~ * ~ * ~ * ~ * ~", fg="green")
click.secho(" Setup Complete! ", fg="green", bold=True)
click.secho(" ~ * ~ * ~ * ~ * ~ * ~ * ~ * ~", fg="green")
click.echo()
click.secho("Configuration:", bold=True)
click.echo(f" Provider: {provider.display_name}")
click.echo(f" Model: {model}")
click.echo(f" Skills: {skill_path}")
click.echo()
click.secho("Next steps:", bold=True)
click.echo(" 1. Start MLflow server:")
click.secho(" $ mlflow server", fg="cyan")
click.echo()
click.echo(" 2. Open MLflow UI and navigate to an experiment")
click.echo()
click.echo(" 3. Click 'Ask Assistant'")
click.echo()
click.secho("To reconfigure, run: ", nl=False)
click.secho("mlflow assistant --configure", fg="cyan")
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/cli.py",
"license": "Apache License 2.0",
"lines": 428,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/assistant/config.py | from pathlib import Path
from typing import Literal
from pydantic import BaseModel, Field
MLFLOW_ASSISTANT_HOME = Path.home() / ".mlflow" / "assistant"
CONFIG_PATH = MLFLOW_ASSISTANT_HOME / "config.json"
class PermissionsConfig(BaseModel):
"""Permission settings for the assistant provider."""
allow_edit_files: bool = True
allow_read_docs: bool = True
full_access: bool = False
class SkillsConfig(BaseModel):
"""Skills configuration for a provider."""
type: Literal["global", "project", "custom"] = "global"
custom_path: str | None = None # Only used when type="custom"
class ProviderConfig(BaseModel):
model: str = "default"
selected: bool = False
permissions: PermissionsConfig = Field(default_factory=PermissionsConfig)
skills: SkillsConfig = Field(default_factory=SkillsConfig)
class ProjectConfig(BaseModel):
type: Literal["local"] = "local"
location: str
class AssistantConfig(BaseModel):
"""Main configuration for MLflow Assistant."""
projects: dict[str, ProjectConfig] = Field(
default_factory=dict,
description="Mapping of experiment ID to project path",
)
providers: dict[str, ProviderConfig] = Field(
default_factory=dict,
description="Mapping of provider name to their configuration",
)
@classmethod
def load(cls) -> "AssistantConfig":
"""Load the assistant configuration from disk.
Returns:
The loaded configuration, or a new empty config if file doesn't exist.
"""
if not CONFIG_PATH.exists():
return cls()
try:
with open(CONFIG_PATH) as f:
return cls.model_validate_json(f.read())
except Exception:
return cls()
def save(self) -> None:
"""Save the assistant configuration to disk."""
CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
with open(CONFIG_PATH, "w") as f:
f.write(self.model_dump_json(indent=2))
def get_project_path(self, experiment_id: str) -> str | None:
"""Get the project path for a given experiment ID.
Args:
experiment_id: The experiment ID to look up.
Returns:
The project path location if found, None otherwise.
"""
project = self.projects.get(experiment_id)
return project.location if project else None
def get_selected_provider(self) -> ProviderConfig | None:
"""Get the currently selected provider.
Returns:
The selected provider configuration, or None if no provider is selected.
"""
for provider in self.providers.values():
if provider.selected:
return provider
return None
def set_provider(
self,
provider_name: str,
model: str,
permissions: PermissionsConfig | None = None,
) -> None:
"""Set or update a provider configuration and mark it as selected.
Args:
provider_name: The provider name (e.g., "claude_code").
model: The model to use.
permissions: Permission settings (None = keep existing/use defaults).
"""
# Update or create the provider
if provider_name in self.providers:
self.providers[provider_name].model = model
if permissions is not None:
self.providers[provider_name].permissions = permissions
else:
self.providers[provider_name] = ProviderConfig(
model=model,
selected=False,
permissions=permissions or PermissionsConfig(),
)
# Mark this provider as selected and deselect others
for name, provider in self.providers.items():
provider.selected = name == provider_name
__all__ = [
"AssistantConfig",
"PermissionsConfig",
"ProjectConfig",
"ProviderConfig",
"SkillsConfig",
]
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/config.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/assistant/providers/base.py | from abc import ABC, abstractmethod
from functools import lru_cache
from pathlib import Path
from typing import Any, AsyncGenerator, Callable
from mlflow.assistant.config import AssistantConfig, ProviderConfig
@lru_cache(maxsize=10)
def load_config(name: str) -> ProviderConfig:
cfg = AssistantConfig.load()
if not cfg or name not in cfg.providers:
raise RuntimeError(f"Provider configuration not found for {name}")
return cfg.providers[name]
def clear_config_cache() -> None:
"""Clear the config cache to pick up config changes."""
load_config.cache_clear()
# Exception hierarchy: both specific failure modes subclass
# ProviderNotConfiguredError, so callers may catch the base class to handle
# "provider unusable" uniformly, or the subclasses to distinguish causes.
class ProviderNotConfiguredError(Exception):
    """Raised when a provider is not properly configured."""


class CLINotInstalledError(ProviderNotConfiguredError):
    """Raised when the provider CLI is not installed."""


class NotAuthenticatedError(ProviderNotConfiguredError):
    """Raised when the user is not authenticated with the provider."""
class AssistantProvider(ABC):
"""Abstract base class for assistant providers."""
@property
@abstractmethod
def name(self) -> str:
"""Return the provider identifier (e.g., 'claude_code')."""
@property
@abstractmethod
def display_name(self) -> str:
"""Return the human-readable provider name (e.g., 'Claude Code')."""
@property
@abstractmethod
def description(self) -> str:
"""Return a short description of the provider."""
@abstractmethod
def is_available(self) -> bool:
"""Check if the provider is available and ready to use."""
@abstractmethod
def check_connection(self, echo: Callable[[str], None] | None = None) -> None:
"""
Check if the provider is properly configured and can connect.
Args:
echo: Optional function to print status messages.
Raises:
ProviderNotConfiguredError: If the provider is not properly configured.
"""
@abstractmethod
def resolve_skills_path(self, base_directory: Path) -> Path:
"""Resolve the skills installation path.
Args:
base_directory: Base directory to resolve skills path from.
Returns:
Resolved absolute path for skills installation.
"""
@abstractmethod
def astream(
self,
prompt: str,
tracking_uri: str,
session_id: str | None = None,
cwd: Path | None = None,
context: dict[str, Any] | None = None,
) -> AsyncGenerator[dict[str, Any], None]:
"""
Stream responses from the assistant asynchronously.
Args:
prompt: The prompt to send to the assistant
tracking_uri: MLflow tracking server URI for the assistant to use
session_id: Session ID for conversation continuity
cwd: Working directory for the assistant
context: Additional context for the assistant, such as information from
the current UI page the user is viewing (e.g., experimentId, traceId)
Yields:
Event dictionaries with 'type' and 'data' keys.
Event types: 'message', 'status', 'done', 'error'
"""
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/providers/base.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/assistant/providers/claude_code.py | """
Claude Code provider for MLflow Assistant.
This module provides the Claude Code integration for the assistant API,
enabling AI-powered trace analysis through the Claude Code CLI.
"""
import asyncio
import json
import logging
import os
import shutil
import subprocess
from pathlib import Path
from typing import Any, AsyncGenerator, Callable
from mlflow.assistant.providers.base import (
AssistantProvider,
CLINotInstalledError,
NotAuthenticatedError,
load_config,
)
from mlflow.assistant.types import (
ContentBlock,
Event,
Message,
TextBlock,
ThinkingBlock,
ToolResultBlock,
ToolUseBlock,
)
from mlflow.server.assistant.session import clear_process_pid, save_process_pid
_logger = logging.getLogger(__name__)
# Allowed tools for Claude Code CLI
# Restrict to only Bash commands that use MLflow CLI
BASE_ALLOWED_TOOLS = [
    "Bash(mlflow:*)",
    "Skill",  # Skill tool needs to be explicitly allowed
]
# Extra tools granted only when `permissions.allow_edit_files` is enabled
# (see ClaudeCodeProvider.astream).
FILE_EDIT_TOOLS = [
    # Allow writing evaluation scripts, editing code, reading
    # project files, etc. in the project directory
    "Edit(*)",
    "Read(*)",
    "Write(*)",
    # Allow writing large command output to files in /tmp so it
    # can be analyzed with bash commands (e.g. grep, jq) without
    # loading full contents into context
    "Edit(//tmp/**)",
    "Read(//tmp/**)",
    "Write(//tmp/**)",
]
# Extra tools granted only when `permissions.allow_read_docs` is enabled.
DOCS_TOOLS = ["WebFetch(domain:mlflow.org)"]
CLAUDE_SYSTEM_PROMPT = """\
You are an MLflow assistant helping users with their MLflow projects. Users interact with
you through the MLflow UI. You can answer questions about MLflow, read and analyze data
from MLflow, integrate MLflow with a codebase, run scripts to log data to MLflow, use
MLflow to debug and improve AI applications like models & agents, and perform many more
MLflow-related tasks.
The following instructions are fundamental to your behavior. You MUST ALWAYS follow them
exactly as specified. You MUST re-read them carefully whenever you start a new response to the user.
Do NOT ignore or skip these instructions under any circumstances!
## CRITICAL: Be Proactive and Minimize User Effort
NEVER ask the user to do something manually that you can do for them.
You MUST always try to minimize the number of steps the user has to take manually. The user
is relying on you to accelerate their workflows. For example, if the user asks for a tutorial on
how to do something, find the answer and then offer to do it for them using MLflow commands or code,
rather than just telling them how to do it themselves.
## CRITICAL: Using Skills
You have Claude Code skills for MLflow tasks. Each skill listed in your available skills has a
description that explains when to use it.
You MUST use skills for anything relating to:
- Onboarding and getting started with MLflow (e.g. new user questions about MLflow)
- Reading or analyzing traces and chat sessions
- Searching for traces and chat sessions
- Searching for MLflow documentation
- Running MLflow GenAI evaluation to evaluate traces or agents
- Querying MLflow metrics
- Anything else explicitly covered by a skill
(you MUST read skill descriptions carefully before acting)
ALWAYS abide by the following rules:
- Before responding to any user message or request, YOU MUST consult your list of available skills
to determine if a relevant skill exists. If a relevant skill exists, you MUST try using it first.
Using the right skill leads to more effective outcomes.
Even if your conversation with the user has many previous messages, EVERY new message from the
user MUST trigger a skills check. Do NOT skip this step.
- When following a skill, you MUST read its instructions VERY carefully —
especially command syntax, which must be followed precisely.
- NEVER run ANY command before checking for a relevant skill. ALWAYS
check for skills first. For example, do not try to consult the CLI
reference for searching traces until you have read the skills for
trace search and analysis first.
## CRITICAL: Complete All Work Before Finishing Your Response
You may provide progress updates throughout the process, but do NOT finish your response until ALL
work — including work done by subagents — is fully complete. The user interacts with you
through a UI that does not support fetching results from async subagents. If you finish
responding before subagent work is done, the user will never see those results. Always wait for
all subagent tasks to finish and include their results in your final response.
## MLflow Server Connection (Pre-configured)
The MLflow tracking server is running at: `{tracking_uri}`
**CRITICAL**:
- The server is ALREADY RUNNING. Never ask the user to start or set up the MLflow server.
- ALL MLflow operations MUST target this server. You must assume MLFLOW_TRACKING_URI env var is.
always set. DO NOT try to override it or set custom env var to the bash command.
- Assume the server is available and operational at all times, unless you have good reason
to believe otherwise (e.g. an error that seems likely caused by server unavailability).
## User Context
The user has already installed MLflow and is working within the MLflow UI. Never instruct the
user to install MLflow or start the MLflow UI/server - these are already set up and running.
Under normal conditions, never verify that the server is running; if the user is using the
MLflow UI, the server is clearly operational. Only check server status when debugging or
investigating a suspected server error.
Since the user is already in the MLflow UI, do NOT unnecessarily reference the server URL in
your responses (e.g., "go to http://localhost:8888" or "refresh your MLflow UI at ...").
Only include URLs when they are specific, actionable links to a particular page in the UI
(e.g., a link to a specific experiment, run, or trace).
User messages may include a <context> block containing JSON that represents what the user is
currently viewing on screen (e.g., traceId, experimentId, selectedTraceIds). Use this context
to understand what entities the user is referring to when they ask questions, as well as
where the user wants to log (write) or update information.
## Command Preferences (IMPORTANT)
### MLflow Read-Only Operations
For querying and reading MLflow data (experiments, runs, traces, metrics, etc.):
* STRONGLY PREFER MLflow CLI commands directly. Try to use the CLI until you are certain
that it cannot accomplish the task. Do NOT mistake syntax errors or your own mistakes
for limitations of the CLI.
* When using MLflow CLI, always use `--help` to discover all available options.
Do not skip this step or you will not get the correct command.
* Trust that MLflow CLI commands will work. Do not add error handling or fallbacks to Python.
* Never combine two bash commands with `&&` or `||`. That will error out.
* If the CLI cannot accomplish the task, fall back to the MLflow SDK.
* When working with large output, write it to files /tmp and use
bash commands to analyze the files, rather than reading the full contents into context.
### MLflow Write Operations
For logging new data to MLflow (traces, runs, metrics, artifacts, etc.):
* The CLI does not support all write operations, so use an MLflow SDK instead.
* Use the appropriate SDK for your working directory's project language
(Python, TypeScript, etc.). Fall back to Python if no project is detected or if
MLflow does not offer an SDK for the detected language.
* Always set the tracking URI before logging (see "MLflow Server Connection" section above).
IMPORTANT: After writing data, always tell the user how to access it. Prefer directing them
to the MLflow UI (provide specific URLs where possible, e.g., `{tracking_uri}/#/experiments/123`).
If the data is not viewable in the UI, explain how to access it via MLflow CLI or API.
### Handling permissions issues
If you require additional permissions to execute a command or perform an action, ALWAYS tell the
user what specific permission(s) you need.
If the permissions are for the MLflow CLI, then the user likely has a permissions override in
their Claude Code settings JSON file or Claude Code hooks. In this case, tell the user to edit
their settings files or hooks to provide the exact permission(s) needed in order to proceed. Give
them the exact permission(s) require in Claude Code syntax.
Otherwise, tell the user to enable full access permissions from the Assistant Settings UI. Also tell
the user that, if full access permissions are already enabled, then they need to check their
Claude Code settings JSON file or Claude Code hooks to ensure there are no permission overrides that
conflict with full access (Claude Code's 'bypassPermissions' mode). Finally, tell the user how to
edit their Claude Code settings or hooks to enable the specific permission(s) needed to proceed.
This gives the user all of the available options and necessary information to resolve permission
issues.
### Data Access
NEVER access the MLflow server's backend storage directly. Always use MLflow APIs or CLIs and
let the server handle storage. Specifically:
- NEVER use the MLflow CLI or API with a database or file tracking URI - only use the configured
HTTP tracking URI (`{tracking_uri}`).
- NEVER use database CLI tools (e.g., sqlite3, psql) to connect directly to the MLflow database.
- NEVER read the filesystem or cloud storage to access MLflow artifact storage directly.
- ALWAYS let the MLflow server handle all storage operations through its APIs.
## MLflow Documentation
If you have a permission to fetch MLflow documentation, use the WebFetch tool to fetch
pages from mlflow.org to provide accurate information about MLflow.
### Accessing Documentation
When reading documentation, ALWAYS start from https://mlflow.org/docs/latest/llms.txt page that
lists links to each pages of the documentation. Start with that page and follow the links to the
relevant pages to get more information.
IMPORTANT: When accessing documentation pages or returning documentation links to users, always use
the latest version URL (https://mlflow.org/docs/latest/...) instead of version-specific URLs.
### CRITICAL: Presenting Documentation Results
IMPORTANT: ALWAYS offer to complete tasks from the documentation results yourself, on behalf of the
user. Since you are capable of executing code, debugging, logging data to MLflow, and much more, do
NOT just return documentation links or excerpts for the user to read and act on themselves.
Only ask the user to do something manually if you have tried and cannot do it yourself, or
if you truly do not know how.
IMPORTANT: When presenting information from documentation, you MUST adapt it to the user's
context (see "User Context" section above). Before responding, thoroughly re-read the User Context
section and adjust your response accordingly. Always consider what the user already has set up
and running. For example:
- Do NOT tell the user to install MLflow or how to install it - it is already installed.
- Do NOT tell the user to start the MLflow server or UI - they are already running.
- Do NOT tell the user to open a browser to view the MLflow UI - they are already using it.
- Skip any setup/installation steps that are already complete for this user.
Focus on the substantive content that is relevant to the user's actual question.
"""
def _build_system_prompt(tracking_uri: str) -> str:
    """
    Render the Claude Code system prompt for the given tracking server.

    Args:
        tracking_uri: The MLflow tracking server URI (e.g., "http://localhost:5000").

    Returns:
        The complete system prompt string.
    """
    template = CLAUDE_SYSTEM_PROMPT
    return template.format(tracking_uri=tracking_uri)
class ClaudeCodeProvider(AssistantProvider):
    """Assistant provider using Claude Code CLI."""

    @property
    def name(self) -> str:
        # Stable identifier; also used as the config lookup key
        # (see load_config(self.name) in astream).
        return "claude_code"

    @property
    def display_name(self) -> str:
        return "Claude Code"

    @property
    def description(self) -> str:
        return "AI-powered assistant using Claude Code CLI"

    def is_available(self) -> bool:
        # Available iff the `claude` executable is on PATH.
        return shutil.which("claude") is not None
def check_connection(self, echo: Callable[[str], None] | None = None) -> None:
"""
Check if Claude CLI is installed and authenticated.
Args:
echo: Optional function to print status messages.
Raises:
ProviderNotConfiguredError: If CLI is not installed or not authenticated.
"""
claude_path = shutil.which("claude")
if not claude_path:
if echo:
echo("Claude CLI not found")
raise CLINotInstalledError(
"Claude Code CLI is not installed. "
"Install it with: npm install -g @anthropic-ai/claude-code"
)
if echo:
echo(f"Claude CLI found: {claude_path}")
echo("Checking connection... (this may take a few seconds)")
# Check authentication by running a minimal test prompt
try:
result = subprocess.run(
["claude", "-p", "hi", "--max-turns", "1", "--output-format", "json"],
capture_output=True,
text=True,
timeout=30,
)
if result.returncode == 0:
if echo:
echo("Authentication verified")
return
# Check for common auth errors in stderr
stderr = result.stderr.lower()
if "auth" in stderr or "login" in stderr or "unauthorized" in stderr:
error_msg = "Not authenticated. Please run: claude login"
else:
error_msg = result.stderr.strip() or f"Process exited with code {result.returncode}"
if echo:
echo(f"Authentication failed: {error_msg}")
raise NotAuthenticatedError(error_msg)
except subprocess.TimeoutExpired:
if echo:
echo("Authentication check timed out")
raise NotAuthenticatedError("Authentication check timed out")
except subprocess.SubprocessError as e:
if echo:
echo(f"Error checking authentication: {e}")
raise NotAuthenticatedError(str(e))
def resolve_skills_path(self, base_directory: Path) -> Path:
"""Resolve the path to the skills directory."""
return base_directory / ".claude" / "skills"
    async def astream(
        self,
        prompt: str,
        tracking_uri: str,
        session_id: str | None = None,
        mlflow_session_id: str | None = None,
        cwd: Path | None = None,
        context: dict[str, Any] | None = None,
    ) -> AsyncGenerator[Event, None]:
        """
        Stream responses from Claude Code CLI asynchronously.

        Builds a `claude -p ... --output-format stream-json` command from the
        stored provider config, spawns it as a subprocess, and yields one
        parsed Event per stdout line.

        Args:
            prompt: The prompt to send to Claude
            tracking_uri: MLflow tracking server URI for the assistant to use
            session_id: Claude session ID for resume
            mlflow_session_id: MLflow session ID for PID tracking (enables cancellation)
            cwd: Working directory for Claude Code CLI
            context: Additional context for the assistant, such as information from
                the current UI page the user is viewing (e.g., experimentId, traceId)

        Yields:
            Event objects
        """
        claude_path = shutil.which("claude")
        if not claude_path:
            yield Event.from_error(
                "Claude CLI not found. Please install Claude Code CLI and ensure it's in your PATH."
            )
            return
        # Build user message with context
        if context:
            user_message = f"<context>\n{json.dumps(context)}\n</context>\n\n{prompt}"
        else:
            user_message = prompt
        # Build command
        # Note: --verbose is required when using --output-format=stream-json with -p
        cmd = [claude_path, "-p", user_message, "--output-format", "stream-json", "--verbose"]
        # Add system prompt with tracking URI context
        system_prompt = _build_system_prompt(tracking_uri)
        cmd.extend(["--append-system-prompt", system_prompt])
        config = load_config(self.name)
        # Handle permission mode
        if config.permissions.full_access:
            # Full access mode - bypass all permission checks
            cmd.extend(["--permission-mode", "bypassPermissions"])
        else:
            # Build allowed tools list based on permissions
            allowed_tools = list(BASE_ALLOWED_TOOLS)
            if config.permissions.allow_edit_files:
                allowed_tools.extend(FILE_EDIT_TOOLS)
            if config.permissions.allow_read_docs:
                allowed_tools.extend(DOCS_TOOLS)
            for tool in allowed_tools:
                cmd.extend(["--allowed-tools", tool])
        if config.model and config.model != "default":
            cmd.extend(["--model", config.model])
        if session_id:
            # Resume an existing Claude conversation for continuity.
            cmd.extend(["--resume", session_id])
        process = None
        try:
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=cwd,
                # Increase buffer limit from default 64KB to handle large JSON responses
                # from Claude Code CLI (e.g., tool results containing large file contents)
                limit=100 * 1024 * 1024,  # 100 MB
                # Specify tracking URI to let Claude Code CLI inherit it
                # NB: `env` arg in `create_subprocess_exec` does not merge with the parent process's
                # environment so we need to copy the parent process's environment explicitly.
                env={**os.environ.copy(), "MLFLOW_TRACKING_URI": tracking_uri},
            )
            # Save PID for cancellation support
            if mlflow_session_id and process.pid:
                save_process_pid(mlflow_session_id, process.pid)
            try:
                # The CLI emits one JSON document per line (stream-json format).
                async for line in process.stdout:
                    line_str = line.decode("utf-8").strip()
                    if not line_str:
                        continue
                    try:
                        data = json.loads(line_str)
                        if self._should_filter_out_message(data):
                            continue
                        if msg := self._parse_message_to_event(data):
                            yield msg
                    except json.JSONDecodeError:
                        # Non-JSON output, treat as plain text
                        yield Event.from_message(Message(role="user", content=line_str))
            finally:
                # Clear PID when done (regardless of how we exit)
                if mlflow_session_id:
                    clear_process_pid(mlflow_session_id)
            # Wait for process to complete
            await process.wait()
            # Check if killed by interrupt (SIGKILL = -9)
            if process.returncode == -9:
                yield Event.from_interrupted()
                return
            if process.returncode != 0:
                stderr = await process.stderr.read()
                error_msg = (
                    stderr.decode("utf-8").strip()
                    or f"Process exited with code {process.returncode}"
                )
                yield Event.from_error(error_msg)
        except Exception as e:
            _logger.exception("Error running Claude Code CLI")
            yield Event.from_error(str(e))
        finally:
            # Ensure the subprocess never outlives this generator (e.g. if the
            # consumer closes the stream mid-way and the generator is finalized).
            if process is not None and process.returncode is None:
                process.kill()
                await process.wait()
    def _parse_message_to_event(self, data: dict[str, Any]) -> Event | None:
        """
        Parse json message from Claude Code CLI output.

        Reference: https://github.com/anthropics/claude-agent-sdk-python/blob/29c12cd80b256e88f321b2b8f1f5a88445077aa5/src/claude_agent_sdk/_internal/message_parser.py#L24

        Args:
            data: Raw message dictionary from CLI output

        Returns:
            Parsed Event object, or None for messages that are intentionally
            dropped (currently only "system" messages). Malformed input is
            mapped to an error Event rather than raising.
        """
        message_type = data.get("type")
        if not message_type:
            return Event.from_error("Message missing 'type' field")
        match message_type:
            case "user":
                try:
                    # User content is either a plain string or a list of
                    # structured blocks (text / tool_use / tool_result).
                    if isinstance(data["message"]["content"], list):
                        user_content_blocks = []
                        for block in data["message"]["content"]:
                            match block["type"]:
                                case "text":
                                    user_content_blocks.append(TextBlock(text=block["text"]))
                                case "tool_use":
                                    user_content_blocks.append(
                                        ToolUseBlock(
                                            id=block["id"],
                                            name=block["name"],
                                            input=block["input"],
                                        )
                                    )
                                case "tool_result":
                                    user_content_blocks.append(
                                        ToolResultBlock(
                                            tool_use_id=block["tool_use_id"],
                                            content=block.get("content"),
                                            is_error=block.get("is_error"),
                                        )
                                    )
                        msg = Message(role="user", content=user_content_blocks)
                    else:
                        msg = Message(role="user", content=data["message"]["content"])
                    return Event.from_message(msg)
                except KeyError as e:
                    return Event.from_error(f"Failed to parse user message: {e}")
            case "assistant":
                try:
                    if data["message"].get("error"):
                        return Event.from_error(data["message"]["error"])
                    content_blocks: list[ContentBlock] = []
                    for block in data["message"]["content"]:
                        match block["type"]:
                            case "text":
                                content_blocks.append(TextBlock(text=block["text"]))
                            case "thinking":
                                content_blocks.append(
                                    ThinkingBlock(
                                        thinking=block["thinking"],
                                        signature=block["signature"],
                                    )
                                )
                            case "tool_use":
                                content_blocks.append(
                                    ToolUseBlock(
                                        id=block["id"],
                                        name=block["name"],
                                        input=block["input"],
                                    )
                                )
                            case "tool_result":
                                content_blocks.append(
                                    ToolResultBlock(
                                        tool_use_id=block["tool_use_id"],
                                        content=block.get("content"),
                                        is_error=block.get("is_error"),
                                    )
                                )
                    msg = Message(role="assistant", content=content_blocks)
                    return Event.from_message(msg)
                except KeyError as e:
                    return Event.from_error(f"Failed to parse assistant message: {e}")
            case "system":
                # NB: Skip system message. The system message from Claude Code CLI contains
                # the various metadata about runtime, which is not used by the assistant UX.
                return None
            case "error":
                try:
                    error_msg = data.get("error", {}).get("message", str(data.get("error")))
                    return Event.from_error(error_msg)
                except Exception as e:
                    return Event.from_error(f"Failed to parse error message: {e}")
            case "result":
                try:
                    # Terminal message; carries the provider session id needed
                    # to resume the conversation later.
                    return Event.from_result(
                        result=data.get("result"),
                        session_id=data["session_id"],
                    )
                except KeyError as e:
                    return Event.from_error(f"Failed to parse result message: {e}")
            case "stream_event":
                try:
                    return Event.from_stream_event(event=data["event"])
                except KeyError as e:
                    return Event.from_error(f"Failed to parse stream_event message: {e}")
            case _:
                return Event.from_error(f"Unknown message type: {message_type}")
def _should_filter_out_message(self, data: dict[str, Any]) -> bool:
"""
Check if an internal message that should be filtered out before being displayed to the user.
Currently filters:
- Skill prompt messages: When a Skill tool is called, Claude Code sends an internal
user message containing the full skill instructions (starting with "Base directory
for this skill:"). These messages are internal and should not be displayed to users.
"""
if data.get("type") != "user":
return False
content = data.get("message", {}).get("content", [])
if not isinstance(content, list):
return False
return any(
block.get("type") == "text"
# TODO: This prefix is not guaranteed to be stable. We should find a better way to
# filter out these messages.
and block.get("text", "").startswith("Base directory for this skill:")
for block in content
)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/providers/claude_code.py",
"license": "Apache License 2.0",
"lines": 497,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/assistant/types.py | import json
from enum import Enum
from typing import Any, Literal
from pydantic import BaseModel, Field
# Message interface between assistant providers and the assistant client
# Inspired by https://github.com/anthropics/claude-agent-sdk-python/blob/29c12cd80b256e88f321b2b8f1f5a88445077aa5/src/claude_agent_sdk/types.py
class TextBlock(BaseModel):
    """Text content block."""

    # Plain text payload.
    text: str


class ThinkingBlock(BaseModel):
    """Thinking content block."""

    # The model's reasoning text.
    thinking: str
    # Opaque provider signature that accompanies the thinking content.
    signature: str


class ToolUseBlock(BaseModel):
    """Tool use content block."""

    # Provider-assigned id; a later ToolResultBlock references it via tool_use_id.
    id: str
    name: str
    input: dict[str, Any]


class ToolResultBlock(BaseModel):
    """Tool result content block."""

    # Matches the `id` of the originating ToolUseBlock.
    tool_use_id: str
    content: str | list[dict[str, Any]] | None = None
    is_error: bool | None = None


# Union of all structured content block types a Message may carry.
ContentBlock = TextBlock | ThinkingBlock | ToolUseBlock | ToolResultBlock
class Message(BaseModel):
    """Structured message representation for assistant conversations.

    Uses standard chat message format with role and content fields.
    Can be extended in the future to support multi-modal content.
    """

    role: Literal["user", "assistant", "system"] = Field(description="Role of the message sender")
    # Either a plain string or a list of structured content blocks.
    content: str | list[ContentBlock] = Field(description="Content of the message")
class EventType(str, Enum):
    """Kinds of events emitted by assistant providers."""

    MESSAGE = "message"
    STREAM_EVENT = "stream_event"
    DONE = "done"
    ERROR = "error"
    INTERRUPTED = "interrupted"

    def __str__(self):
        # Return the bare value so f-string interpolation produces the SSE
        # event name (e.g. "message") instead of "EventType.MESSAGE".
        return self.value
class Event(BaseModel):
    """A common event format parsed from the raw assistant provider output."""

    # Discriminator telling the client how to interpret `data`.
    type: EventType
    # Event payload; its shape depends on `type` (see factory methods below).
    data: dict[str, Any]

    def to_sse_event(self) -> str:
        """Convert the event to an SSE event string."""
        # EventType.__str__ yields the raw value, so this renders e.g.
        # "event: message", not "event: EventType.MESSAGE".
        return f"event: {self.type}\ndata: {json.dumps(self.data)}\n\n"

    @classmethod
    def from_error(cls, error: str) -> "Event":
        """Build an ERROR event carrying a human-readable error string."""
        return cls(type=EventType.ERROR, data={"error": error})

    @classmethod
    def from_message(cls, message: Message) -> "Event":
        """Build a MESSAGE event with the serialized message payload."""
        return cls(type=EventType.MESSAGE, data={"message": message.model_dump()})

    @classmethod
    def from_stream_event(cls, event: dict[str, Any]) -> "Event":
        """Build a STREAM_EVENT event wrapping a raw provider stream chunk."""
        return cls(type=EventType.STREAM_EVENT, data={"event": event})

    @classmethod
    def from_result(cls, result: Any, session_id: str) -> "Event":
        """Build a DONE event with the final result and the provider session id."""
        return cls(type=EventType.DONE, data={"result": result, "session_id": session_id})

    @classmethod
    def from_interrupted(cls) -> "Event":
        """Build an INTERRUPTED event signalling the assistant run was stopped."""
        return cls(type=EventType.INTERRUPTED, data={"message": "Assistant was interrupted"})
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/types.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/server/assistant/api.py | """
Assistant API endpoints for MLflow Server.
This module provides endpoints for integrating AI assistants with MLflow UI,
enabling AI-powered helper through a chat interface.
"""
import ipaddress
import uuid
from pathlib import Path
from typing import Any, AsyncGenerator, Literal
from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field
from mlflow.assistant import clear_project_path_cache, get_project_path
from mlflow.assistant.config import AssistantConfig, PermissionsConfig, ProjectConfig
from mlflow.assistant.providers.base import (
CLINotInstalledError,
NotAuthenticatedError,
clear_config_cache,
)
from mlflow.assistant.providers.claude_code import ClaudeCodeProvider
from mlflow.assistant.skill_installer import install_skills, list_installed_skills
from mlflow.assistant.types import EventType
from mlflow.server.assistant.session import SessionManager, terminate_session_process
# TODO: Hardcoded provider until supporting multiple providers
_provider = ClaudeCodeProvider()

# Update the message when we support proxy access.
# NB: fixed "mLflow" -> "MLflow" typo in this user-facing error message.
_BLOCK_REMOTE_ACCESS_ERROR_MSG = (
    "Assistant API is only accessible from the same host where the MLflow server is running."
)
async def _require_localhost(request: Request) -> None:
    """
    Dependency that restricts access to localhost only.

    Uses the ipaddress library for robust loopback detection (covers all of
    127.0.0.0/8 and ::1 instead of comparing against a hard-coded string).

    Args:
        request: The incoming FastAPI request.

    Raises:
        HTTPException: 403 if the request does not originate from a loopback address.
    """
    client_host = request.client.host if request.client else None
    if not client_host:
        raise HTTPException(status_code=403, detail=_BLOCK_REMOTE_ACCESS_ERROR_MSG)
    try:
        ip = ipaddress.ip_address(client_host)
    except ValueError as e:
        # Client host is not a parseable IP address -> treat as remote.
        raise HTTPException(status_code=403, detail=_BLOCK_REMOTE_ACCESS_ERROR_MSG) from e
    if not ip.is_loopback:
        raise HTTPException(status_code=403, detail=_BLOCK_REMOTE_ACCESS_ERROR_MSG)
assistant_router = APIRouter(
    prefix="/ajax-api/3.0/mlflow/assistant",
    tags=["assistant"],
    # Every route on this router is localhost-only (see _require_localhost).
    dependencies=[Depends(_require_localhost)],
)


class MessageRequest(BaseModel):
    """Payload for sending a message to the assistant."""

    message: str
    session_id: str | None = None  # empty for the first message
    experiment_id: str | None = None
    context: dict[str, Any] = Field(default_factory=dict)


class MessageResponse(BaseModel):
    """Response of the message endpoint: where to stream the assistant reply."""

    session_id: str
    stream_url: str


# Config-related models
class ConfigResponse(BaseModel):
    providers: dict[str, Any] = Field(default_factory=dict)
    projects: dict[str, Any] = Field(default_factory=dict)


class ConfigUpdateRequest(BaseModel):
    providers: dict[str, Any] | None = None
    projects: dict[str, Any] | None = None


class SessionPatchRequest(BaseModel):
    # Only supported transition today; widen the Literal to add more.
    status: Literal["cancelled"]


class SessionPatchResponse(BaseModel):
    message: str


# Skills-related models
class SkillsInstallRequest(BaseModel):
    type: Literal["global", "project", "custom"] = "global"
    custom_path: str | None = None  # Required if type="custom"
    experiment_id: str | None = None  # Used to get project_path for type="project"


class SkillsInstallResponse(BaseModel):
    installed_skills: list[str]
    skills_directory: str
@assistant_router.post("/message")
async def send_message(request: MessageRequest) -> MessageResponse:
"""
Send a message to the assistant and get a session for streaming the response.
Args:
request: MessageRequest with message, context, and optional session_id
Returns:
MessageResponse with session_id and stream_url
"""
# Generate or use existing session ID
session_id = request.session_id or str(uuid.uuid4())
project_path = get_project_path(request.experiment_id) if request.experiment_id else None
# Create or update session
session = SessionManager.load(session_id)
if session is None:
session = SessionManager.create(
context=request.context, working_dir=Path(project_path) if project_path else None
)
elif request.context:
session.update_context(request.context)
# Store the pending message with role
session.set_pending_message(role="user", content=request.message)
session.add_message(role="user", content=request.message)
SessionManager.save(session_id, session)
return MessageResponse(
session_id=session_id,
stream_url=f"/ajax-api/3.0/mlflow/assistant/stream/{session_id}",
)
@assistant_router.get("/sessions/{session_id}/stream")
async def stream_response(request: Request, session_id: str) -> StreamingResponse:
"""
Stream the assistant's response via Server-Sent Events.
Args:
request: The FastAPI request object
session_id: The session ID returned from /message
Returns:
StreamingResponse with SSE events
"""
session = SessionManager.load(session_id)
if session is None:
raise HTTPException(status_code=404, detail="Session not found")
# Get and clear the pending message
pending_message = session.clear_pending_message()
if not pending_message:
raise HTTPException(status_code=400, detail="No pending message to process")
SessionManager.save(session_id, session)
# Extract the MLflow server URL from the request for the assistant to use.
# This assumes the assistant is accessing the same MLflow server that serves this API,
# which works because the assistant endpoint is localhost-only.
# TODO: Extend this to support remote/proxy scenarios where the tracking URI may differ.
tracking_uri = str(request.base_url).rstrip("/")
async def event_generator() -> AsyncGenerator[str, None]:
nonlocal session
async for event in _provider.astream(
prompt=pending_message.content,
tracking_uri=tracking_uri,
session_id=session.provider_session_id,
mlflow_session_id=session_id,
cwd=session.working_dir,
context=session.context,
):
# Store provider session ID if returned (for conversation continuity)
if event.type == EventType.DONE:
session.provider_session_id = event.data.get("session_id")
SessionManager.save(session_id, session)
yield event.to_sse_event()
return StreamingResponse(
event_generator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
@assistant_router.patch("/sessions/{session_id}")
async def patch_session(session_id: str, request: SessionPatchRequest) -> SessionPatchResponse:
    """
    Update session status.
    Currently supports cancelling an active session, which terminates
    the running assistant process.
    Args:
        session_id: The session ID
        request: SessionPatchRequest with status to set
    Returns:
        SessionPatchResponse indicating success
    """
    if SessionManager.load(session_id) is None:
        raise HTTPException(status_code=404, detail="Session not found")
    if request.status != "cancelled":
        # This branch is unreachable due to Literal type, but satisfies type checker
        raise HTTPException(status_code=400, detail=f"Unknown status: {request.status}")
    terminated = terminate_session_process(session_id)
    msg = "Session cancelled and process terminated" if terminated else "Session cancelled"
    return SessionPatchResponse(message=msg)
@assistant_router.get("/providers/{provider}/health")
async def provider_health_check(provider: str) -> dict[str, str]:
    """
    Check if a specific provider is ready (CLI installed and authenticated).
    Args:
        provider: The provider name (e.g., "claude_code").
    Returns:
        200 with { status: "ok" } if ready.
    Raises:
        HTTPException 404: If provider is not found.
        HTTPException 412: If the provider CLI is not installed.
        HTTPException 401: If the CLI is installed but the user is not authenticated.
    """
    # TODO: Support multiple providers via registry
    if provider != _provider.name:
        raise HTTPException(status_code=404, detail=f"Provider '{provider}' not found")
    try:
        _provider.check_connection()
    except CLINotInstalledError as e:
        # 412 Precondition Failed: the CLI binary is missing entirely.
        raise HTTPException(status_code=412, detail=str(e)) from e
    except NotAuthenticatedError as e:
        # 401 Unauthorized: the CLI exists but has no valid credentials.
        raise HTTPException(status_code=401, detail=str(e)) from e
    return {"status": "ok"}
@assistant_router.get("/config")
async def get_config() -> ConfigResponse:
    """
    Get the current assistant configuration.
    Returns:
        Current configuration including providers and projects.
    """
    cfg = AssistantConfig.load()
    provider_payload = {name: provider.model_dump() for name, provider in cfg.providers.items()}
    project_payload = {exp_id: project.model_dump() for exp_id, project in cfg.projects.items()}
    return ConfigResponse(providers=provider_payload, projects=project_payload)
@assistant_router.put("/config")
async def update_config(request: ConfigUpdateRequest) -> ConfigResponse:
    """
    Update the assistant configuration.
    Args:
        request: Partial configuration update.
    Returns:
        Updated configuration.
    Raises:
        HTTPException 400: If a project location is empty or does not exist.
    """
    config = AssistantConfig.load()
    # Update providers
    if request.providers:
        for name, provider_data in request.providers.items():
            model = provider_data.get("model", "default")
            permissions = None
            if "permissions" in provider_data:
                perm_data = provider_data["permissions"]
                permissions = PermissionsConfig(
                    allow_edit_files=perm_data.get("allow_edit_files", True),
                    allow_read_docs=perm_data.get("allow_read_docs", True),
                    full_access=perm_data.get("full_access", False),
                )
            config.set_provider(name, model, permissions)
    # Update projects
    if request.projects:
        for exp_id, project_data in request.projects.items():
            if project_data is None:
                # Remove project mapping
                config.projects.pop(exp_id, None)
            else:
                location = project_data.get("location", "")
                project_path = Path(location).expanduser()
                # Path("") normalizes to "." (truthy and existing), so an empty
                # location must be rejected explicitly, not via exists() alone.
                if not location or not project_path.exists():
                    raise HTTPException(
                        status_code=400,
                        detail=f"Project path does not exist: {location}",
                    )
                config.projects[exp_id] = ProjectConfig(
                    type=project_data.get("type", "local"),
                    location=str(project_path),
                )
    config.save()
    # Clear caches so provider and project path lookups pick up new settings
    clear_config_cache()
    clear_project_path_cache()
    return ConfigResponse(
        providers={name: p.model_dump() for name, p in config.providers.items()},
        projects={exp_id: p.model_dump() for exp_id, p in config.projects.items()},
    )
@assistant_router.post("/skills/install")
async def install_skills_endpoint(request: SkillsInstallRequest) -> SkillsInstallResponse:
"""
Install skills bundled with MLflow.
This endpoint only handles installation. Config updates should be done via PUT /config.
Args:
request: SkillsInstallRequest with type, custom_path, and experiment_id.
Returns:
SkillsInstallResponse with installed skill names and directory.
Raises:
HTTPException 400: If custom type without custom_path or project type without experiment_id.
"""
config = AssistantConfig.load()
# Resolve project_path for "project" type
project_path: Path | None = None
if request.type == "project":
if not request.experiment_id:
raise HTTPException(status_code=400, detail="experiment_id required for 'project' type")
project_location = config.get_project_path(request.experiment_id)
if not project_location:
raise HTTPException(
status_code=400,
detail=f"No project path configured for experiment {request.experiment_id}",
)
project_path = Path(project_location)
# Get the destination path to install skills to
match request.type:
case "global":
destination = _provider.resolve_skills_path(Path.home())
case "project":
destination = _provider.resolve_skills_path(project_path)
case "custom":
destination = Path(request.custom_path).expanduser()
# Check if skills already exist - skip re-installation
if destination.exists():
if current_skills := list_installed_skills(destination):
return SkillsInstallResponse(
installed_skills=current_skills, skills_directory=str(destination)
)
installed = install_skills(destination)
return SkillsInstallResponse(installed_skills=installed, skills_directory=str(destination))
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/assistant/api.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/server/assistant/session.py | import json
import os
import signal
import tempfile
import uuid
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
from mlflow.assistant.types import Message
SESSION_DIR = Path(tempfile.gettempdir()) / "mlflow-assistant-sessions"
@dataclass
class Session:
    """Session state for assistant conversations."""
    context: dict[str, Any] = field(default_factory=dict)
    messages: list[Message] = field(default_factory=list)
    pending_message: Message | None = None
    provider_session_id: str | None = None
    working_dir: Path | None = None  # Working directory for the session (e.g. project path)
    def add_message(self, role: str, content: str) -> None:
        """Append a message with the given role and content to the history."""
        self.messages.append(Message(role=role, content=content))
    def set_pending_message(self, role: str, content: str) -> None:
        """Record the message to be processed by the next stream request."""
        self.pending_message = Message(role=role, content=content)
    def clear_pending_message(self) -> Message | None:
        """Return the pending message (or None) and reset it."""
        pending, self.pending_message = self.pending_message, None
        return pending
    def update_context(self, context: dict[str, Any]) -> None:
        """Merge the given context data into the session context."""
        self.context.update(context)
    def to_dict(self) -> dict[str, Any]:
        """Serialize the session into a JSON-compatible dictionary."""
        pending = self.pending_message
        return {
            "context": self.context,
            "messages": [message.model_dump() for message in self.messages],
            "pending_message": pending.model_dump() if pending else None,
            "provider_session_id": self.provider_session_id,
            "working_dir": self.working_dir.as_posix() if self.working_dir else None,
        }
    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "Session":
        """Reconstruct a Session from its dictionary representation."""
        raw_pending = data.get("pending_message")
        raw_working_dir = data.get("working_dir")
        return cls(
            context=data.get("context", {}),
            messages=[Message.model_validate(m) for m in data.get("messages", [])],
            pending_message=Message.model_validate(raw_pending) if raw_pending else None,
            provider_session_id=data.get("provider_session_id"),
            working_dir=Path(raw_working_dir) if raw_working_dir else None,
        )
class SessionManager:
    """Manages session storage and retrieval.
    Provides static methods for session operations, keeping
    Session as a simple data container.
    """
    @staticmethod
    def validate_session_id(session_id: str) -> None:
        """Reject any session id that is not a valid UUID (path-traversal guard).
        Raises:
            ValueError: If session ID is not a valid UUID
        """
        try:
            uuid.UUID(session_id)
        except (ValueError, TypeError) as e:
            raise ValueError("Invalid session ID format") from e
    @staticmethod
    def get_session_file(session_id: str) -> Path:
        """Return the on-disk JSON file backing the given session.
        Raises:
            ValueError: If session ID is invalid
        """
        SessionManager.validate_session_id(session_id)
        return SESSION_DIR / f"{session_id}.json"
    @staticmethod
    def save(session_id: str, session: Session) -> None:
        """Atomically persist a session to disk.
        Raises:
            ValueError: If session ID is invalid
        """
        SessionManager.validate_session_id(session_id)
        SESSION_DIR.mkdir(parents=True, exist_ok=True)
        target = SessionManager.get_session_file(session_id)
        # Write into a scratch file first, then rename over the target so
        # concurrent readers never see a partially written session
        # (os.replace is atomic on POSIX).
        fd, scratch = tempfile.mkstemp(dir=SESSION_DIR, suffix=".tmp")
        try:
            with os.fdopen(fd, "w") as handle:
                json.dump(session.to_dict(), handle)
            os.replace(scratch, target)
        except Exception:
            os.unlink(scratch)
            raise
    @staticmethod
    def load(session_id: str) -> Session | None:
        """Load session from disk. Returns a Session instance, or None if not found"""
        try:
            source = SessionManager.get_session_file(session_id)
        except ValueError:
            return None
        if not source.exists():
            return None
        return Session.from_dict(json.loads(source.read_text()))
    @staticmethod
    def create(context: dict[str, Any] | None = None, working_dir: Path | None = None) -> Session:
        """Build a new Session with the given initial context and working directory."""
        return Session(context=context or {}, working_dir=working_dir)
def get_process_file(session_id: str) -> Path:
    """Get the file path for storing process PID.
    Args:
        session_id: Session ID (must be a valid UUID).
    Raises:
        ValueError: If the session ID is not a valid UUID.
    """
    # Reuse the UUID check so an attacker-controlled id cannot escape SESSION_DIR.
    SessionManager.validate_session_id(session_id)
    return SESSION_DIR / f"{session_id}.process.json"
def save_process_pid(session_id: str, pid: int) -> None:
    """Persist the PID of a running provider process so it can be cancelled later."""
    SESSION_DIR.mkdir(parents=True, exist_ok=True)
    get_process_file(session_id).write_text(json.dumps({"pid": pid}))
def get_process_pid(session_id: str) -> int | None:
    """Return the recorded PID for a session, or None if unknown.
    Args:
        session_id: Session ID
    Returns:
        The stored PID, or None if the id is invalid or no PID was recorded.
    """
    try:
        process_file = get_process_file(session_id)
    except ValueError:
        # Malformed session id — treat as "no process" rather than erroring.
        return None
    if not process_file.exists():
        return None
    data = json.loads(process_file.read_text())
    return data.get("pid")
def clear_process_pid(session_id: str) -> None:
    """Remove the recorded PID file for a session, if any.
    Invalid session ids and missing files are silently ignored — this is a
    best-effort cleanup helper.
    """
    try:
        process_file = get_process_file(session_id)
    except ValueError:
        return
    if process_file.exists():
        process_file.unlink()
def terminate_session_process(session_id: str) -> bool:
    """Terminate the process associated with a session.
    Args:
        session_id: Session ID
    Returns:
        True if process was terminated, False otherwise
    """
    pid = get_process_pid(session_id)
    if not pid:
        return False
    try:
        os.kill(pid, signal.SIGTERM)
    except ProcessLookupError:
        # Process already exited; drop the stale PID record.
        clear_process_pid(session_id)
        return False
    clear_process_pid(session_id)
    return True
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/assistant/session.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/assistant/providers/test_claude_code_provider.py | from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from mlflow.assistant.providers.claude_code import ClaudeCodeProvider
from mlflow.assistant.types import EventType
class AsyncIterator:
    """Wrap a plain iterable so it can be consumed with ``async for`` in tests."""
    def __init__(self, items):
        self.items = iter(items)
    def __aiter__(self):
        return self
    async def __anext__(self):
        # Pull one value from the underlying iterator; translate exhaustion
        # into the async-iteration stop signal.
        for value in self.items:
            return value
        raise StopAsyncIteration
@pytest.fixture(autouse=True)
def config(tmp_path):
    """Point the assistant config path at a temp file with a claude_code entry."""
    config_file = tmp_path / "config.json"
    config_file.write_text('{"providers": {"claude_code": {"model": "claude-opus-4"}}}')
    # Patch stays active for the whole test thanks to the yield inside the with.
    with patch("mlflow.assistant.config.CONFIG_PATH", config_file):
        yield config_file
@pytest.mark.parametrize(
    ("which_return", "expected"),
    [
        ("/usr/bin/claude", True),
        (None, False),
    ],
)
def test_is_available(which_return, expected):
    # The provider is available exactly when the `claude` binary is on PATH.
    with patch(
        "mlflow.assistant.providers.claude_code.shutil.which",
        return_value=which_return,
    ):
        assert ClaudeCodeProvider().is_available() is expected
@pytest.mark.asyncio
async def test_astream_yields_error_when_claude_not_found():
    with patch(
        "mlflow.assistant.providers.claude_code.shutil.which",
        return_value=None,
    ):
        events = [
            e
            async for e in ClaudeCodeProvider().astream("test prompt", "http://localhost:5000")
        ]
    # A missing CLI produces exactly one ERROR event mentioning PATH.
    assert len(events) == 1
    (event,) = events
    assert event.type == EventType.ERROR
    assert "not found" in event.data["error"]
    assert "PATH" in event.data["error"]
@pytest.mark.asyncio
async def test_astream_builds_correct_command(tmp_path, monkeypatch):
    # Verifies the full subprocess invocation: CLI flags, system prompt content,
    # default Skill permission, cwd, and environment propagation.
    monkeypatch.setenv("TEST_ENV_VAR", "test_value")
    mock_process = MagicMock()
    mock_process.stdout = AsyncIterator([b'{"type": "result"}\n'])
    mock_process.stderr = MagicMock()
    mock_process.stderr.read = AsyncMock(return_value=b"")
    mock_process.wait = AsyncMock()
    mock_process.returncode = 0
    with (
        patch(
            "mlflow.assistant.providers.claude_code.shutil.which",
            return_value="/usr/bin/claude",
        ),
        patch(
            "mlflow.assistant.providers.claude_code.asyncio.create_subprocess_exec",
            return_value=mock_process,
        ) as mock_exec,
    ):
        provider = ClaudeCodeProvider()
        # Drain the stream; we only care about the recorded subprocess call.
        _ = [
            e async for e in provider.astream("test prompt", "http://localhost:5000", cwd=tmp_path)
        ]
        call_args = mock_exec.call_args[0]
        assert "/usr/bin/claude" in call_args
        assert "-p" in call_args
        assert "test prompt" in call_args
        assert "--output-format" in call_args
        assert "stream-json" in call_args
        assert "--verbose" in call_args
        assert "--append-system-prompt" in call_args
        # Verify system prompt contains tracking URI
        system_prompt_idx = call_args.index("--append-system-prompt") + 1
        system_prompt = call_args[system_prompt_idx]
        assert "http://localhost:5000" in system_prompt
        # Verify Skill permission is granted by default
        allowed_tools = [
            call_args[i + 1] for i, arg in enumerate(call_args) if arg == "--allowed-tools"
        ]
        assert "Skill" in allowed_tools
        # Verify cwd and tracking URI env var are passed correctly
        call_kwargs = mock_exec.call_args[1]
        assert call_kwargs["cwd"] == tmp_path
        assert call_kwargs["env"]["MLFLOW_TRACKING_URI"] == "http://localhost:5000"
        assert call_kwargs["env"]["TEST_ENV_VAR"] == "test_value"
@pytest.mark.asyncio
async def test_astream_streams_assistant_messages():
    proc = MagicMock()
    proc.stdout = AsyncIterator(
        [
            b'{"type": "assistant", "message": {"content": [{"type": "text", "text": "Hi!"}]}}\n',
            b'{"type": "result", "session_id": "session-123"}\n',
        ]
    )
    proc.stderr = MagicMock()
    proc.stderr.read = AsyncMock(return_value=b"")
    proc.wait = AsyncMock()
    proc.returncode = 0
    with (
        patch(
            "mlflow.assistant.providers.claude_code.shutil.which",
            return_value="/usr/bin/claude",
        ),
        patch(
            "mlflow.assistant.providers.claude_code.asyncio.create_subprocess_exec",
            return_value=proc,
        ),
    ):
        events = [
            e
            async for e in ClaudeCodeProvider().astream("test prompt", "http://localhost:5000")
        ]
    # One MESSAGE event for the assistant text, then a DONE carrying the session id.
    assert len(events) == 2
    message_event, done_event = events
    assert message_event.type == EventType.MESSAGE
    assert message_event.data["message"]["content"][0]["text"] == "Hi!"
    assert done_event.type == EventType.DONE
    assert done_event.data["session_id"] == "session-123"
@pytest.mark.asyncio
async def test_astream_handles_process_error():
    proc = MagicMock()
    proc.stdout = AsyncIterator([])
    proc.stderr = MagicMock()
    proc.stderr.read = AsyncMock(return_value=b"Command failed")
    proc.wait = AsyncMock()
    proc.returncode = 1
    with (
        patch(
            "mlflow.assistant.providers.claude_code.shutil.which",
            return_value="/usr/bin/claude",
        ),
        patch(
            "mlflow.assistant.providers.claude_code.asyncio.create_subprocess_exec",
            return_value=proc,
        ),
    ):
        events = [
            e
            async for e in ClaudeCodeProvider().astream("test prompt", "http://localhost:5000")
        ]
    # A non-zero exit code turns the stderr contents into a trailing ERROR event.
    final = events[-1]
    assert final.type == EventType.ERROR
    assert "Command failed" in final.data["error"]
@pytest.mark.asyncio
async def test_astream_passes_session_id_for_resume():
    proc = MagicMock()
    proc.stdout = AsyncIterator([b'{"type": "result"}\n'])
    proc.stderr = MagicMock()
    proc.stderr.read = AsyncMock(return_value=b"")
    proc.wait = AsyncMock()
    proc.returncode = 0
    with (
        patch(
            "mlflow.assistant.providers.claude_code.shutil.which",
            return_value="/usr/bin/claude",
        ),
        patch(
            "mlflow.assistant.providers.claude_code.asyncio.create_subprocess_exec",
            return_value=proc,
        ) as mock_exec,
    ):
        # Drain the stream; only the recorded argv matters here.
        async for _ in ClaudeCodeProvider().astream(
            "prompt", "http://localhost:5000", session_id="existing-session"
        ):
            pass
    argv = mock_exec.call_args[0]
    assert "--resume" in argv
    assert "existing-session" in argv
@pytest.mark.asyncio
async def test_astream_handles_non_json_output():
    proc = MagicMock()
    proc.stdout = AsyncIterator(
        [
            b"Some plain text output\n",
            b'{"type": "result"}\n',
        ]
    )
    proc.stderr = MagicMock()
    proc.stderr.read = AsyncMock(return_value=b"")
    proc.wait = AsyncMock()
    proc.returncode = 0
    with (
        patch(
            "mlflow.assistant.providers.claude_code.shutil.which",
            return_value="/usr/bin/claude",
        ),
        patch(
            "mlflow.assistant.providers.claude_code.asyncio.create_subprocess_exec",
            return_value=proc,
        ),
    ):
        events = [
            e
            async for e in ClaudeCodeProvider().astream("test prompt", "http://localhost:5000")
        ]
    # Non-JSON stdout lines are surfaced as MESSAGE events with the raw text.
    assert events[0].type == EventType.MESSAGE
    assert events[0].data["message"]["content"] == "Some plain text output"
@pytest.mark.asyncio
async def test_astream_handles_error_message_type():
    proc = MagicMock()
    proc.stdout = AsyncIterator(
        [
            b'{"type": "error", "error": {"message": "API rate limit exceeded"}}\n',
        ]
    )
    proc.stderr = MagicMock()
    proc.stderr.read = AsyncMock(return_value=b"")
    proc.wait = AsyncMock()
    proc.returncode = 0
    with (
        patch(
            "mlflow.assistant.providers.claude_code.shutil.which",
            return_value="/usr/bin/claude",
        ),
        patch(
            "mlflow.assistant.providers.claude_code.asyncio.create_subprocess_exec",
            return_value=proc,
        ),
    ):
        events = [
            e
            async for e in ClaudeCodeProvider().astream("test prompt", "http://localhost:5000")
        ]
    # An "error"-typed payload maps to an ERROR event carrying the message text.
    assert events[0].type == EventType.ERROR
    assert "rate limit" in events[0].data["error"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/assistant/providers/test_claude_code_provider.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/assistant/test_cli.py | import os
from unittest import mock
import pytest
from click.testing import CliRunner
from mlflow.assistant.cli import commands
from mlflow.assistant.config import ProviderConfig
@pytest.fixture
def runner():
    """Provide a fresh Click CLI test runner for each test."""
    return CliRunner()
def test_assistant_help(runner):
    outcome = runner.invoke(commands, ["--help"])
    assert outcome.exit_code == 0
    # Help text must advertise both the command purpose and the configure flag.
    for expected in ("AI-powered trace analysis", "--configure"):
        assert expected in outcome.output
def test_configure_cli_not_found(runner):
    # With no CLI binary on PATH, configure reports the missing installation.
    with mock.patch("mlflow.assistant.cli.shutil.which", return_value=None):
        outcome = runner.invoke(commands, ["--configure"], input="1\n")
    assert "not installed" in outcome.output
def test_configure_auth_failure(runner):
    failed_run = mock.Mock(returncode=1, stderr="unauthorized")
    with (
        mock.patch("mlflow.assistant.cli.shutil.which", return_value="/usr/bin/claude"),
        mock.patch(
            "mlflow.assistant.providers.claude_code.subprocess.run",
            return_value=failed_run,
        ),
    ):
        outcome = runner.invoke(commands, ["--configure"], input="1\n")
    assert outcome.exit_code == 0
    # Should show error about authentication
    assert "Not authenticated" in outcome.output or "not installed" in outcome.output.lower()
def test_configure_experiment_fetch_failure(runner):
    ok_run = mock.Mock(returncode=0, stderr="")
    with (
        mock.patch("mlflow.assistant.cli.shutil.which", return_value="/usr/bin/claude"),
        mock.patch(
            "mlflow.assistant.providers.claude_code.subprocess.run",
            return_value=ok_run,
        ),
        mock.patch("mlflow.assistant.cli._fetch_recent_experiments", return_value=[]),
    ):
        # Input: provider=1, connect=y, tracking_uri=default
        outcome = runner.invoke(
            commands,
            ["--configure"],
            input="1\ny\nhttp://localhost:5000\n",
        )
    assert "Could not fetch experiments" in outcome.output
def test_configure_success(runner, tmp_path):
    ok_run = mock.Mock(returncode=0, stderr="")
    cfg = mock.Mock()
    cfg.providers = {"claude_code": ProviderConfig()}
    cfg.projects = {}
    # Mimic AssistantConfig.set_provider on the mock.
    cfg.set_provider = lambda name, model: cfg.providers.__setitem__(
        name, ProviderConfig(model=model, selected=True)
    )
    with (
        mock.patch("mlflow.assistant.cli.shutil.which", return_value="/usr/bin/claude"),
        mock.patch(
            "mlflow.assistant.providers.claude_code.subprocess.run",
            return_value=ok_run,
        ),
        mock.patch(
            "mlflow.assistant.cli._fetch_recent_experiments",
            return_value=[("1", "Test Experiment")],
        ),
        mock.patch("mlflow.assistant.cli.AssistantConfig.load", return_value=cfg),
        mock.patch.object(cfg, "save"),
        runner.isolated_filesystem(temp_dir=tmp_path),
    ):
        # Input: provider=1, connect=y, experiment=1, project_path, model=default, skill_location=1
        outcome = runner.invoke(
            commands,
            ["--configure"],
            input=f"1\ny\nhttp://localhost:5000\n1\n{tmp_path}\ndefault\n1\n",
        )
    assert "Setup Complete" in outcome.output
def test_configure_tilde_expansion(runner):
    ok_run = mock.Mock(returncode=0, stderr="")
    cfg = mock.Mock()
    cfg.providers = {"claude_code": ProviderConfig()}
    saved_projects = {}
    cfg.projects = saved_projects
    # Mimic AssistantConfig.set_provider on the mock.
    cfg.set_provider = lambda name, model: cfg.providers.__setitem__(
        name, ProviderConfig(model=model, selected=True)
    )
    home_dir = os.path.expanduser("~")
    with (
        mock.patch("mlflow.assistant.cli.shutil.which", return_value="/usr/bin/claude"),
        mock.patch(
            "mlflow.assistant.providers.claude_code.subprocess.run",
            return_value=ok_run,
        ),
        mock.patch(
            "mlflow.assistant.cli._fetch_recent_experiments",
            return_value=[("1", "Test Experiment")],
        ),
        mock.patch("mlflow.assistant.cli.AssistantConfig.load", return_value=cfg),
        mock.patch.object(cfg, "save"),
    ):
        # Input: provider=1, connect=y, tracking_uri, experiment=1, project_path=~,
        # model=default, skill_location=1
        outcome = runner.invoke(
            commands,
            ["--configure"],
            input="1\ny\nhttp://localhost:5000\n1\n~\ndefault\n1\n",
        )
    # Succeeds because ~ expands to the home dir, which exists.
    assert "Setup Complete" in outcome.output
    # The stored location must be the expanded absolute path, not the literal "~".
    assert "1" in saved_projects
    assert saved_projects["1"].location == home_dir
def test_configure_relative_path(runner):
    ok_run = mock.Mock(returncode=0, stderr="")
    cfg = mock.Mock()
    cfg.providers = {"claude_code": ProviderConfig()}
    saved_projects = {}
    cfg.projects = saved_projects
    # Mimic AssistantConfig.set_provider on the mock.
    cfg.set_provider = lambda name, model: cfg.providers.__setitem__(
        name, ProviderConfig(model=model, selected=True)
    )
    with (
        mock.patch("mlflow.assistant.cli.shutil.which", return_value="/usr/bin/claude"),
        mock.patch(
            "mlflow.assistant.providers.claude_code.subprocess.run",
            return_value=ok_run,
        ),
        mock.patch(
            "mlflow.assistant.cli._fetch_recent_experiments",
            return_value=[("1", "Test Experiment")],
        ),
        mock.patch("mlflow.assistant.cli.AssistantConfig.load", return_value=cfg),
        mock.patch.object(cfg, "save"),
    ):
        # Use "." which should resolve to current directory
        # Input: provider=1, connect=y, tracking_uri, experiment=1, project_path=.,
        # model=default, skill_location=1
        outcome = runner.invoke(
            commands,
            ["--configure"],
            input="1\ny\nhttp://localhost:5000\n1\n.\ndefault\n1\n",
        )
    assert "Setup Complete" in outcome.output
    # The stored location must be an absolute resolution of ".".
    assert "1" in saved_projects
    assert os.path.isabs(saved_projects["1"].location)
    assert saved_projects["1"].location != "."
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/assistant/test_cli.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/assistant/test_api.py | import os
import shutil
import subprocess
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from fastapi import FastAPI, HTTPException
from fastapi.testclient import TestClient
from mlflow.assistant.config import AssistantConfig, ProjectConfig
from mlflow.assistant.config import ProviderConfig as AssistantProviderConfig
from mlflow.assistant.providers.base import (
AssistantProvider,
CLINotInstalledError,
NotAuthenticatedError,
ProviderConfig,
)
from mlflow.assistant.types import Event, Message
from mlflow.server.assistant.api import _require_localhost, assistant_router
from mlflow.server.assistant.session import SESSION_DIR, SessionManager, save_process_pid
from mlflow.utils.os import is_windows
class MockProvider(AssistantProvider):
    """Mock provider for testing.

    Always reports itself as available and connected, and streams a fixed
    two-event conversation (one message, one done/result).
    """
    @property
    def name(self) -> str:
        return "mock_provider"
    @property
    def display_name(self) -> str:
        return "Mock Provider"
    @property
    def description(self) -> str:
        return "Mock provider for testing"
    @property
    def config_path(self) -> Path:
        return Path.home() / ".mlflow" / "assistant" / "mock-config.json"
    def is_available(self) -> bool:
        return True
    def load_config(self) -> ProviderConfig:
        return ProviderConfig()
    def check_connection(self, echo=print) -> None:
        # Never raises: this provider is always "installed and authenticated".
        pass
    def resolve_skills_path(self, base_directory: Path) -> Path:
        return base_directory / ".mock" / "skills"
    async def astream(
        self,
        prompt: str,
        tracking_uri: str,
        session_id: str | None = None,
        cwd: Path | None = None,
        context: dict[str, Any] | None = None,
        mlflow_session_id: str | None = None,
    ):
        # Fixed script: one assistant message, then a terminal result event.
        yield Event.from_message(message=Message(role="user", content="Hello from mock"))
        yield Event.from_result(result="complete", session_id="mock-session-123")
@pytest.fixture(autouse=True)
def isolated_config(tmp_path, monkeypatch):
    """Redirect config to tmp_path to avoid modifying real user config."""
    import mlflow.assistant.config as config_module
    config_home = tmp_path / ".mlflow" / "assistant"
    monkeypatch.setattr(config_module, "MLFLOW_ASSISTANT_HOME", config_home)
    monkeypatch.setattr(config_module, "CONFIG_PATH", config_home / "config.json")
    return config_home
@pytest.fixture(autouse=True)
def clear_sessions():
    """Clear session storage before and after each test."""
    def _wipe():
        if SESSION_DIR.exists():
            shutil.rmtree(SESSION_DIR)
    _wipe()
    yield
    _wipe()
@pytest.fixture
def client():
    """Create test client with mock provider and bypassed localhost check."""
    app = FastAPI()
    app.include_router(assistant_router)
    # Override localhost dependency to allow TestClient requests
    async def mock_require_localhost():
        pass
    app.dependency_overrides[_require_localhost] = mock_require_localhost
    # Patch stays active for the duration of the test thanks to the yield.
    with patch("mlflow.server.assistant.api._provider", MockProvider()):
        yield TestClient(app)
def test_message(client):
    first = client.post(
        "/ajax-api/3.0/mlflow/assistant/message",
        json={
            "message": "Hello",
            "context": {"trace_id": "tr-123", "experiment_id": "exp-456"},
        },
    )
    assert first.status_code == 200
    payload = first.json()
    session_id = payload["session_id"]
    assert session_id is not None
    assert payload["stream_url"] == f"/ajax-api/3.0/mlflow/assistant/stream/{session_id}"
    # A follow-up message with the same session_id continues the conversation.
    second = client.post(
        "/ajax-api/3.0/mlflow/assistant/message",
        json={"message": "Second message", "session_id": session_id},
    )
    assert second.status_code == 200
    assert second.json()["session_id"] == session_id
def test_stream_not_found_for_invalid_session(client):
    resp = client.get("/ajax-api/3.0/mlflow/assistant/sessions/invalid-session-id/stream")
    assert resp.status_code == 404
    assert "Session not found" in resp.json()["detail"]
def test_stream_bad_request_when_no_pending_message(client):
    # Create a session and let the first stream call consume its pending message.
    session_id = client.post(
        "/ajax-api/3.0/mlflow/assistant/message", json={"message": "Hi"}
    ).json()["session_id"]
    stream_url = f"/ajax-api/3.0/mlflow/assistant/sessions/{session_id}/stream"
    client.get(stream_url)
    # Streaming again without posting a new message must fail.
    resp = client.get(stream_url)
    assert resp.status_code == 400
    assert "No pending message" in resp.json()["detail"]
def test_stream_returns_sse_events(client):
    session_id = client.post(
        "/ajax-api/3.0/mlflow/assistant/message", json={"message": "Hi"}
    ).json()["session_id"]
    resp = client.get(f"/ajax-api/3.0/mlflow/assistant/sessions/{session_id}/stream")
    assert resp.status_code == 200
    assert "text/event-stream" in resp.headers["content-type"]
    # The mock provider's message and done events must both appear in the body.
    body = resp.text
    for fragment in ("event: message", "event: done", "Hello from mock"):
        assert fragment in body
def test_health_check_returns_ok_when_healthy(client):
    resp = client.get("/ajax-api/3.0/mlflow/assistant/providers/mock_provider/health")
    assert resp.status_code == 200
    assert resp.json() == {"status": "ok"}
def test_health_check_returns_404_for_unknown_provider(client):
    resp = client.get("/ajax-api/3.0/mlflow/assistant/providers/unknown_provider/health")
    assert resp.status_code == 404
    assert "not found" in resp.json()["detail"]
def test_health_check_returns_412_when_cli_not_installed():
    class CLINotInstalledProvider(MockProvider):
        def check_connection(self, echo=None):
            raise CLINotInstalledError("CLI not installed")
    app = FastAPI()
    app.include_router(assistant_router)
    async def mock_require_localhost():
        pass
    app.dependency_overrides[_require_localhost] = mock_require_localhost
    with patch("mlflow.server.assistant.api._provider", CLINotInstalledProvider()):
        resp = TestClient(app).get(
            "/ajax-api/3.0/mlflow/assistant/providers/mock_provider/health"
        )
    # A missing CLI maps to 412 Precondition Failed.
    assert resp.status_code == 412
    assert "CLI not installed" in resp.json()["detail"]
def test_health_check_returns_401_when_not_authenticated():
    class NotAuthenticatedProvider(MockProvider):
        def check_connection(self, echo=None):
            raise NotAuthenticatedError("Not authenticated")
    app = FastAPI()
    app.include_router(assistant_router)
    async def mock_require_localhost():
        pass
    app.dependency_overrides[_require_localhost] = mock_require_localhost
    with patch("mlflow.server.assistant.api._provider", NotAuthenticatedProvider()):
        resp = TestClient(app).get(
            "/ajax-api/3.0/mlflow/assistant/providers/mock_provider/health"
        )
    # Missing credentials map to 401 Unauthorized.
    assert resp.status_code == 401
    assert "Not authenticated" in resp.json()["detail"]
def test_get_config_returns_empty_config(client):
    resp = client.get("/ajax-api/3.0/mlflow/assistant/config")
    assert resp.status_code == 200
    body = resp.json()
    assert body["providers"] == {}
    assert body["projects"] == {}
def test_get_config_returns_existing_config(client, tmp_path):
# Set up existing config by saving it first
project_dir = tmp_path / "project"
project_dir.mkdir()
config = AssistantConfig(
providers={"claude_code": AssistantProviderConfig(model="default", selected=True)},
projects={"exp-123": ProjectConfig(type="local", location=str(project_dir))},
)
config.save()
response = client.get("/ajax-api/3.0/mlflow/assistant/config")
assert response.status_code == 200
data = response.json()
assert data["providers"]["claude_code"]["model"] == "default"
assert data["providers"]["claude_code"]["selected"] is True
assert data["projects"]["exp-123"]["location"] == str(project_dir)
def test_update_config_sets_provider(client):
    """PUT /config persists provider settings and echoes them back."""
    response = client.put(
        "/ajax-api/3.0/mlflow/assistant/config",
        json={"providers": {"claude_code": {"model": "opus", "selected": True}}},
    )
    assert response.status_code == 200
    data = response.json()
    assert data["providers"]["claude_code"]["selected"] is True
def test_update_config_sets_project(client, tmp_path):
    """PUT /config persists a project mapping keyed by experiment id."""
    project_dir = tmp_path / "my_project"
    project_dir.mkdir()
    response = client.put(
        "/ajax-api/3.0/mlflow/assistant/config",
        json={"projects": {"exp-456": {"type": "local", "location": str(project_dir)}}},
    )
    assert response.status_code == 200
    data = response.json()
    assert data["projects"]["exp-456"]["location"] == str(project_dir)
def test_update_config_expand_user_home(client, tmp_path):
    """A "~" in a project location is expanded to the user's home directory."""
    # Create a directory under a "fake home" structure to test ~ expansion
    fake_home = tmp_path / "home" / "user"
    project_dir = fake_home / "my_project"
    project_dir.mkdir(parents=True)
    with patch("mlflow.server.assistant.api.Path.expanduser") as mock_expanduser:
        # Make expanduser return our tmp_path directory
        mock_expanduser.return_value = project_dir
        response = client.put(
            "/ajax-api/3.0/mlflow/assistant/config",
            json={"projects": {"exp-456": {"type": "local", "location": "~/my_project"}}},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["projects"]["exp-456"]["location"] == str(project_dir)
@pytest.mark.asyncio
async def test_localhost_allows_ipv4():
    """IPv4 loopback clients pass the localhost-only dependency."""
    mock_request = MagicMock()
    mock_request.client.host = "127.0.0.1"
    await _require_localhost(mock_request)
@pytest.mark.asyncio
async def test_localhost_allows_ipv6():
    """IPv6 loopback clients pass the localhost-only dependency."""
    mock_request = MagicMock()
    mock_request.client.host = "::1"
    await _require_localhost(mock_request)
@pytest.mark.asyncio
async def test_localhost_blocks_external_ip():
    """Non-loopback IP addresses are rejected with an HTTPException."""
    mock_request = MagicMock()
    mock_request.client.host = "192.168.1.100"
    with pytest.raises(HTTPException, match="same host"):
        await _require_localhost(mock_request)
@pytest.mark.asyncio
async def test_localhost_blocks_external_hostname():
    """Non-loopback hostnames are rejected with an HTTPException."""
    mock_request = MagicMock()
    mock_request.client.host = "external.example.com"
    with pytest.raises(HTTPException, match="same host"):
        await _require_localhost(mock_request)
@pytest.mark.asyncio
async def test_localhost_blocks_when_no_client():
    """Requests with no client info (request.client is None) are rejected."""
    mock_request = MagicMock()
    mock_request.client = None
    with pytest.raises(HTTPException, match="same host"):
        await _require_localhost(mock_request)
def test_validate_session_id_accepts_valid_uuid():
    """A canonical UUID string passes session-id validation."""
    valid_uuid = "f5f28c66-5ec6-46a1-9a2e-ca55fb64bf47"
    SessionManager.validate_session_id(valid_uuid)  # Should not raise
def test_validate_session_id_rejects_invalid_format():
    """Non-UUID strings are rejected with a ValueError."""
    with pytest.raises(ValueError, match="Invalid session ID format"):
        SessionManager.validate_session_id("invalid-session-id")
def test_validate_session_id_rejects_path_traversal():
    """Path-traversal payloads must not be accepted as session ids."""
    with pytest.raises(ValueError, match="Invalid session ID format"):
        SessionManager.validate_session_id("../../../etc/passwd")
def _is_process_running(pid: int) -> bool:
try:
os.kill(pid, 0)
return True
except (OSError, ValueError): # ValueError is raised on Windows
return False
def test_patch_session_cancel_with_process(client):
    """Cancelling a session via PATCH terminates the subprocess registered for it."""
    r = client.post("/ajax-api/3.0/mlflow/assistant/message", json={"message": "Hi"})
    session_id = r.json()["session_id"]
    # Start a real subprocess and register it with the session
    with subprocess.Popen(["sleep", "10"]) as proc:
        save_process_pid(session_id, proc.pid)
        assert _is_process_running(proc.pid)
        response = client.patch(
            f"/ajax-api/3.0/mlflow/assistant/sessions/{session_id}",
            json={"status": "cancelled"},
        )
        assert response.status_code == 200
        data = response.json()
        assert "terminated" in data["message"]
        # Wait for the process to actually terminate
        proc.wait(timeout=5)
        assert proc.returncode is not None
        # On non-Windows, verify the process is no longer running via PID check.
        # Skip on Windows because PIDs are reused more aggressively.
        if not is_windows():
            assert not _is_process_running(proc.pid)
def test_install_skills_success(client):
    """POST /skills/install installs into the given path and reports the skills."""
    with patch(
        "mlflow.server.assistant.api.install_skills", return_value=["skill1", "skill2"]
    ) as mock_install:
        response = client.post(
            "/ajax-api/3.0/mlflow/assistant/skills/install",
            json={"type": "custom", "custom_path": "/tmp/test-skills"},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["installed_skills"] == ["skill1", "skill2"]
        # Build the expected path portably rather than hard-coding separators.
        expected_path = os.path.join(os.sep, "tmp", "test-skills")
        assert data["skills_directory"] == expected_path
        mock_install.assert_called_once_with(Path(expected_path))
def test_install_skills_skips_when_already_installed(client):
    """If the skills directory already exists, installation is skipped entirely."""
    with (
        patch("mlflow.server.assistant.api.Path.exists", return_value=True),
        patch(
            "mlflow.server.assistant.api.list_installed_skills",
            return_value=["existing_skill"],
        ) as mock_list,
        patch("mlflow.server.assistant.api.install_skills") as mock_install,
    ):
        response = client.post(
            "/ajax-api/3.0/mlflow/assistant/skills/install",
            json={"type": "custom", "custom_path": "/tmp/test-skills"},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["installed_skills"] == ["existing_skill"]
        mock_install.assert_not_called()
        mock_list.assert_called_once()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/assistant/test_api.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/assistant/test_session.py | import shutil
import uuid
import pytest
from mlflow.assistant.types import Message
from mlflow.server.assistant.session import Session, SessionManager
def test_session_add_message():
    """add_message appends a message with the given role and content."""
    session = Session()
    session.add_message("user", "Hello")
    assert len(session.messages) == 1
    assert session.messages[0].role == "user"
    assert session.messages[0].content == "Hello"
def test_session_add_multiple_messages():
    """Messages are kept in insertion order."""
    session = Session()
    session.add_message("user", "Hello")
    session.add_message("assistant", "Hi there")
    session.add_message("user", "How are you?")
    assert len(session.messages) == 3
    assert session.messages[0].role == "user"
    assert session.messages[1].role == "assistant"
    assert session.messages[2].role == "user"
def test_session_pending_message_lifecycle():
    """set_pending_message stores a message; clear_pending_message returns and removes it."""
    session = Session()
    session.set_pending_message("user", "Test")
    assert session.pending_message is not None
    assert session.pending_message.content == "Test"
    assert session.pending_message.role == "user"
    msg = session.clear_pending_message()
    assert msg.content == "Test"
    assert session.pending_message is None
def test_session_clear_pending_message_returns_none_when_none():
    """Clearing when nothing is pending is a no-op that returns None."""
    session = Session()
    msg = session.clear_pending_message()
    assert msg is None
def test_session_update_context():
    """update_context merges new keys into the existing context."""
    session = Session(context={"key1": "value1"})
    session.update_context({"key2": "value2"})
    assert session.context["key1"] == "value1"
    assert session.context["key2"] == "value2"
def test_session_update_context_overwrites():
    """update_context overwrites values for keys that already exist."""
    session = Session(context={"key": "old"})
    session.update_context({"key": "new"})
    assert session.context["key"] == "new"
def test_session_serialization():
    """to_dict/from_dict round-trips messages, pending message, context, and provider id."""
    session = Session()
    session.add_message("user", "Hello")
    session.add_message("assistant", "Hi")
    session.set_pending_message("user", "Pending")
    session.update_context({"trace_id": "tr-123"})
    session.provider_session_id = "provider-session-456"
    data = session.to_dict()
    restored = Session.from_dict(data)
    assert len(restored.messages) == 2
    assert restored.messages[0].content == "Hello"
    assert restored.messages[1].content == "Hi"
    assert restored.pending_message.content == "Pending"
    assert restored.context["trace_id"] == "tr-123"
    assert restored.provider_session_id == "provider-session-456"
def test_session_serialization_with_no_pending_message():
    """Round-trip works when no pending message was ever set."""
    session = Session()
    session.add_message("user", "Hello")
    data = session.to_dict()
    restored = Session.from_dict(data)
    assert restored.pending_message is None
    assert len(restored.messages) == 1
def test_session_manager_validates_uuid():
    """validate_session_id rejects non-UUID strings and accepts canonical UUIDs."""
    with pytest.raises(ValueError, match="Invalid session ID"):
        SessionManager.validate_session_id("not-a-uuid")
    # Should not raise
    SessionManager.validate_session_id("f5f28c66-5ec6-46a1-9a2e-ca55fb64bf47")
def test_session_manager_rejects_path_traversal():
    """Path-traversal payloads are rejected as session ids."""
    with pytest.raises(ValueError, match="Invalid session ID"):
        SessionManager.validate_session_id("../../../etc/passwd")
def test_session_manager_save_and_load(tmp_path):
    """save() persists a session to disk and load() restores all of its state."""
    import mlflow.server.assistant.session as session_module
    # Override SESSION_DIR for test
    original_dir = session_module.SESSION_DIR
    session_module.SESSION_DIR = tmp_path / "sessions"
    try:
        session_id = str(uuid.uuid4())
        session = SessionManager.create(context={"key": "value"})
        session.add_message("user", "Hello")
        session.set_pending_message("user", "Pending")
        SessionManager.save(session_id, session)
        loaded = SessionManager.load(session_id)
        assert loaded is not None
        assert loaded.context["key"] == "value"
        assert len(loaded.messages) == 1
        assert loaded.messages[0].content == "Hello"
        assert loaded.pending_message.content == "Pending"
    finally:
        # Restore the module-level directory so other tests are unaffected.
        session_module.SESSION_DIR = original_dir
def test_session_manager_load_nonexistent():
    """Loading an unknown (but well-formed) session id returns None."""
    loaded = SessionManager.load(str(uuid.uuid4()))
    assert loaded is None
def test_session_manager_load_invalid_id():
    """Loading a malformed session id returns None rather than raising."""
    loaded = SessionManager.load("invalid-id")
    assert loaded is None
def test_session_manager_create():
    """create() returns an empty session with default state."""
    session = SessionManager.create()
    assert len(session.messages) == 0
    assert session.pending_message is None
    assert session.context == {}
    assert session.provider_session_id is None
def test_session_manager_create_with_context():
    """create(context=...) seeds the session context."""
    session = SessionManager.create(context={"key": "value"})
    assert session.context["key"] == "value"
def test_session_manager_atomic_save(tmp_path):
    """Repeated saves replace the file atomically, leaving no *.tmp files behind."""
    import mlflow.server.assistant.session as session_module
    # Override SESSION_DIR for test
    original_dir = session_module.SESSION_DIR
    session_module.SESSION_DIR = tmp_path / "sessions"
    try:
        session_id = str(uuid.uuid4())
        session = SessionManager.create(context={"key": "value1"})
        SessionManager.save(session_id, session)
        # Update and save again
        session.update_context({"key": "value2"})
        SessionManager.save(session_id, session)
        # Load and verify latest value
        loaded = SessionManager.load(session_id)
        assert loaded.context["key"] == "value2"
        # Verify no temp files remain
        session_dir = tmp_path / "sessions"
        temp_files = list(session_dir.glob("*.tmp"))
        assert len(temp_files) == 0
    finally:
        session_module.SESSION_DIR = original_dir
        if (tmp_path / "sessions").exists():
            shutil.rmtree(tmp_path / "sessions")
def test_message_serialization():
    """A Message survives a model_dump / model_validate round trip."""
    original = Message(role="user", content="Hello")
    payload = original.model_dump()
    assert payload["role"] == "user"
    assert payload["content"] == "Hello"
    round_tripped = Message.model_validate(payload)
    assert round_tripped.role == "user"
    assert round_tripped.content == "Hello"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/assistant/test_session.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/fixtures/flask_tracing_server.py | """Flask server for distributed tracing tests."""
import sys
import requests
from flask import Flask, jsonify, request
import mlflow
from mlflow.tracing.distributed import (
get_tracing_context_headers_for_http_request,
set_tracing_context_from_http_request_headers,
)
REQUEST_TIMEOUT = 20
app = Flask(__name__)
@app.get("/health")
def health():
return "ok", 200
@app.post("/handle")
def handle():
headers = dict(request.headers)
with set_tracing_context_from_http_request_headers(headers):
with mlflow.start_span("server-handler") as span:
return jsonify(
{
"trace_id": span.trace_id,
"span_id": span.span_id,
"parent_id": span.parent_id,
}
)
@app.post("/handle1")
def handle1():
headers = dict(request.headers)
with set_tracing_context_from_http_request_headers(headers):
with mlflow.start_span("server-handler1") as span:
# Get the URL for the second handler from environment or command line
# In nested tests, this will be passed via environment
second_server_url = request.args.get("second_server_url")
if not second_server_url:
return jsonify({"error": "second_server_url parameter required"}), 400
headers2 = get_tracing_context_headers_for_http_request()
resp2 = requests.post(
f"{second_server_url}/handle2", headers=headers2, timeout=REQUEST_TIMEOUT
)
if not resp2.ok:
return jsonify({"error": f"Nested call failed: {resp2.status_code}"}), 502
payload2 = resp2.json()
return jsonify(
{
"trace_id": span.trace_id,
"span_id": span.span_id,
"parent_id": span.parent_id,
"nested_call_resp": payload2,
}
)
@app.post("/handle2")
def handle2():
headers = dict(request.headers)
with set_tracing_context_from_http_request_headers(headers):
with mlflow.start_span("server-handler2") as span:
return jsonify(
{
"trace_id": span.trace_id,
"span_id": span.span_id,
"parent_id": span.parent_id,
}
)
if __name__ == "__main__":
if len(sys.argv) < 2:
raise SystemExit("Usage: flask_tracing_server.py <port>")
port = int(sys.argv[1])
app.run(host="127.0.0.1", port=port, debug=False, use_reloader=False)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/fixtures/flask_tracing_server.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/test_distributed.py | import re
import subprocess
import sys
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
import requests
import mlflow
from mlflow.tracing.distributed import (
get_tracing_context_headers_for_http_request,
set_tracing_context_from_http_request_headers,
)
from tests.helper_functions import get_safe_port
from tests.tracing.helper import skip_when_testing_trace_sdk
REQUEST_TIMEOUT = 30
@contextmanager
def flask_server(
    server_script_path: Path,
    port: int,
    *,
    wait_timeout: int = 30,
    health_endpoint: str = "/health",
) -> Iterator[str]:
    """Context manager that runs a Flask server script in a subprocess.

    Polls ``health_endpoint`` until it responds OK or ``wait_timeout`` seconds
    elapse, then yields the server's base URL. The process is terminated on exit.

    Fix over the previous version: the readiness loop counted iterations rather
    than seconds (each failed probe could take up to ~1.2s, so "30" could mean
    ~36s), and a server that responded with a non-OK status was polled in a
    tight loop with no sleep. This version uses a monotonic deadline and always
    sleeps between attempts.

    Args:
        server_script_path: Path to the server script; invoked as
            ``python <script> <port>``.
        port: TCP port the server should listen on.
        wait_timeout: Maximum seconds to wait for the health check to pass.
        health_endpoint: Path polled to detect readiness.

    Yields:
        The base URL, e.g. ``http://127.0.0.1:<port>``.

    Raises:
        RuntimeError: If the server does not become healthy within the timeout.
    """
    with subprocess.Popen([sys.executable, str(server_script_path), str(port)]) as proc:
        base_url = f"http://127.0.0.1:{port}"
        try:
            deadline = time.monotonic() + wait_timeout
            while True:
                try:
                    if requests.get(f"{base_url}{health_endpoint}", timeout=1.0).ok:
                        break
                except requests.exceptions.RequestException:
                    pass
                if time.monotonic() >= deadline:
                    raise RuntimeError(
                        f"Flask server failed to start within {wait_timeout} seconds"
                    )
                time.sleep(0.2)
            yield base_url
        finally:
            proc.terminate()
def _parse_traceparent(header_value: str) -> tuple[int, int]:
"""
Parse W3C traceparent header into (trace_id_int, span_id_int).
Format: version-traceid-spanid-flags (all lowercase hex, no 0x prefix).
"""
parts = header_value.split("-")
assert len(parts) == 4, f"Invalid traceparent format: {header_value}"
version, trace_id_hex, span_id_hex, flags = parts
assert re.fullmatch(r"[0-9a-f]{2}", version), f"Invalid version: {version}"
assert re.fullmatch(r"[0-9a-f]{32}", trace_id_hex), f"Invalid trace id: {trace_id_hex}"
assert re.fullmatch(r"[0-9a-f]{16}", span_id_hex), f"Invalid span id: {span_id_hex}"
assert re.fullmatch(r"[0-9a-f]{2}", flags), f"Invalid flags: {flags}"
return int(trace_id_hex, 16), int(span_id_hex, 16)
def test_get_tracing_context_headers_for_http_request_in_active_span():
    """Inside an active span, the generated traceparent header encodes that span."""
    with mlflow.start_span("client-span"):
        current_span = mlflow.get_current_active_span()._span
        assert current_span.get_span_context().is_valid
        client_trace_id = current_span.get_span_context().trace_id
        client_span_id = current_span.get_span_context().span_id
        headers = get_tracing_context_headers_for_http_request()
        assert isinstance(headers, dict)
        assert "traceparent" in headers
        # Validate that the header encodes the same trace and span IDs
        header_trace_id, header_span_id = _parse_traceparent(headers["traceparent"])
        assert header_trace_id == client_trace_id
        assert header_span_id == client_span_id
def test_get_tracing_context_headers_for_http_request_without_active_span():
    """With no active span there is no context to propagate: headers are empty."""
    headers = get_tracing_context_headers_for_http_request()
    assert headers == {}
def test_set_tracing_context_from_http_request_headers():
    """Attaching headers makes subsequent spans children of the remote span."""
    # Create headers from a client context first
    with mlflow.start_span("client-to-generate-headers") as client_span:
        client_headers = get_tracing_context_headers_for_http_request()
        client_trace_id = client_span.trace_id
        client_span_id = client_span.span_id
    assert mlflow.get_current_active_span() is None
    # Attach the context from headers and verify it becomes current inside the block
    with set_tracing_context_from_http_request_headers(client_headers):
        # get_current_active_span returns None because it is a `NonRecordingSpan`
        assert mlflow.get_current_active_span() is None
        with mlflow.start_span("child-span") as child_span:
            assert child_span.parent_id == client_span_id
            assert child_span.trace_id == client_trace_id
@skip_when_testing_trace_sdk
def test_distributed_tracing_e2e(tmp_path):
    """End-to-end: a client span propagated over HTTP becomes the server span's parent."""
    # Path to the Flask server script
    server_path = Path(__file__).parent / "fixtures" / "flask_tracing_server.py"
    port = get_safe_port()
    # Start Flask server using the context manager
    with flask_server(server_path, port) as base_url:
        # Client side: create a span and send headers to server
        with mlflow.start_span("client-root") as client_span:
            headers = get_tracing_context_headers_for_http_request()
            resp = requests.post(f"{base_url}/handle", headers=headers, timeout=REQUEST_TIMEOUT)
            assert resp.ok, f"Server returned {resp.status_code}: {resp.text}"
            payload = resp.json()
            # Validate server span is a child in the same trace
            assert payload["trace_id"] == client_span.trace_id
            assert payload["parent_id"] == client_span.span_id
    mlflow.flush_trace_async_logging()
    trace = mlflow.get_trace(client_span.trace_id)
    assert trace is not None, "Trace not found"
    spans = trace.data.spans
    assert len(spans) == 2
    # Identify root and child
    root_span = next(s for s in spans if s.parent_id is None)
    child_span = next(s for s in spans if s.parent_id is not None)
    assert root_span.name == "client-root"
    assert child_span.name == "server-handler"
    assert child_span.parent_id == root_span.span_id
@skip_when_testing_trace_sdk
def test_distributed_tracing_e2e_nested_call(tmp_path):
    """End-to-end: tracing context propagates across two chained servers into one trace."""
    # Path to the Flask server script
    server_path = Path(__file__).parent / "fixtures" / "flask_tracing_server.py"
    # Start first Flask server, then get port for second server to avoid port conflicts
    port = get_safe_port()
    with flask_server(server_path, port) as base_url:
        port2 = get_safe_port()
        with flask_server(server_path, port2) as base_url2:
            # Client side: create a span and send headers to server
            with mlflow.start_span("client-root") as client_span:
                headers = get_tracing_context_headers_for_http_request()
                # Pass the second server URL as a query parameter
                resp = requests.post(
                    f"{base_url}/handle1",
                    headers=headers,
                    params={"second_server_url": base_url2},
                    timeout=REQUEST_TIMEOUT,
                )
                assert resp.ok, f"Server returned {resp.status_code}: {resp.text}"
                payload = resp.json()
                # Validate server span is a child in the same trace
                assert payload["trace_id"] == client_span.trace_id
                assert payload["parent_id"] == client_span.span_id
                child_span1_id = payload["span_id"]
                assert payload["nested_call_resp"]["trace_id"] == client_span.trace_id
                assert payload["nested_call_resp"]["parent_id"] == child_span1_id
                child_span2_id = payload["nested_call_resp"]["span_id"]
    mlflow.flush_trace_async_logging()
    trace = mlflow.get_trace(client_span.trace_id)
    assert trace is not None, "Trace not found"
    spans = trace.data.spans
    assert len(spans) == 3
    # Identify root and child
    root_span = next(s for s in spans if s.parent_id is None)
    child_span1 = next(s for s in spans if s.parent_id == root_span.span_id)
    child_span2 = next(s for s in spans if s.parent_id == child_span1.span_id)
    assert root_span.name == "client-root"
    assert child_span1.name == "server-handler1"
    assert child_span2.name == "server-handler2"
    assert child_span1.span_id == child_span1_id
    assert child_span2.span_id == child_span2_id
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/test_distributed.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/phoenix/models.py | from __future__ import annotations
from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import (
call_chat_completions,
)
from mlflow.genai.judges.constants import _DATABRICKS_DEFAULT_JUDGE_MODEL
from mlflow.genai.scorers.phoenix.utils import _NoOpRateLimiter, check_phoenix_installed
from mlflow.metrics.genai.model_utils import _parse_model_uri
# Phoenix has BaseModel in phoenix.evals.models.base, but it requires implementing
# _generate_with_extra and _async_generate_with_extra abstract methods which add complexity.
# Phoenix evaluators only require __call__ for model compatibility, so we use duck typing
# to keep the adapters simple. This mirrors how the deepeval integration works.
class DatabricksPhoenixModel:
    """
    Phoenix-compatible model adapter backed by the Databricks managed judge.

    Phoenix evaluators only require the model object to be callable, so this
    class implements ``__call__`` and routes prompts through
    ``call_chat_completions``.
    """

    def __init__(self):
        self._model_name = _DATABRICKS_DEFAULT_JUDGE_MODEL
        # Attributes Phoenix model code expects to find on model objects.
        self._verbose = False
        self._rate_limiter = _NoOpRateLimiter()

    def __call__(self, prompt, **kwargs) -> str:
        """Send *prompt* to the managed judge endpoint and return its text output."""
        if isinstance(prompt, str):
            text = prompt
        else:
            text = str(prompt)
        response = call_chat_completions(user_prompt=text, system_prompt="")
        return response.output

    def get_model_name(self) -> str:
        """Return the identifier of the underlying judge model."""
        return self._model_name
def create_phoenix_model(model_uri: str):
    """
    Create a Phoenix model adapter from a model URI.

    Args:
        model_uri: Model URI in one of these formats:
            - "databricks" - Use default Databricks managed judge
            - "databricks:/endpoint" - Use Databricks serving endpoint
            - "provider:/model" - Use LiteLLM model (e.g., "openai:/gpt-4")

    Returns:
        A Phoenix-compatible model adapter

    Raises:
        MlflowException: If the model URI format is invalid
    """
    check_phoenix_installed()
    if model_uri == "databricks":
        return DatabricksPhoenixModel()
    # Any other URI is delegated to LiteLLM via Phoenix's adapter; the shared
    # helper splits "provider:/model" into its two parts.
    provider, model_name = _parse_model_uri(model_uri)
    from phoenix.evals import LiteLLMModel

    return LiteLLMModel(
        model=f"{provider}/{model_name}",
        model_kwargs={"drop_params": True},
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/phoenix/models.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/phoenix/registry.py | from __future__ import annotations
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers.phoenix.utils import check_phoenix_installed
_METRIC_REGISTRY = {
"Hallucination": "HallucinationEvaluator",
"Relevance": "RelevanceEvaluator",
"Toxicity": "ToxicityEvaluator",
"QA": "QAEvaluator",
"Summarization": "SummarizationEvaluator",
}
def get_evaluator_class(metric_name: str):
    """
    Get the Phoenix evaluator class for a given metric name.

    For metrics in the registry, uses the registered class name. For unknown metrics,
    attempts to dynamically import <MetricName>Evaluator from phoenix.evals.

    Args:
        metric_name: Name of the metric (e.g., "Hallucination")

    Returns:
        The Phoenix evaluator class

    Raises:
        MlflowException: If the metric cannot be imported or phoenix is not installed
    """
    check_phoenix_installed()
    import phoenix.evals as phoenix_evals

    # Registered metrics map to their known class name; anything else falls back
    # to the "<MetricName>Evaluator" naming convention.
    evaluator_class_name = _METRIC_REGISTRY.get(metric_name, f"{metric_name}Evaluator")
    try:
        return getattr(phoenix_evals, evaluator_class_name)
    except AttributeError:
        available_metrics = ", ".join(sorted(_METRIC_REGISTRY))
        # `from None`: the AttributeError carries no information beyond the
        # message below, so suppress implicit exception chaining in tracebacks.
        raise MlflowException.invalid_parameter_value(
            f"Unknown Phoenix metric: '{metric_name}'. Could not find '{evaluator_class_name}' "
            f"in 'phoenix.evals'. Available pre-configured metrics: {available_metrics}"
        ) from None
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/phoenix/registry.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/phoenix/utils.py | from __future__ import annotations
from typing import Any
from mlflow.entities.trace import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.utils.trace_utils import (
extract_retrieval_context_from_trace,
parse_inputs_to_str,
parse_outputs_to_str,
resolve_expectations_from_trace,
resolve_inputs_from_trace,
resolve_outputs_from_trace,
)
class _NoOpRateLimiter:
    """Minimal rate limiter stub for Phoenix model compatibility."""
    def __init__(self):
        # Model adapters attach an instance as `_rate_limiter`, and tests set
        # `_rate_limiter._verbose`; keep the attribute so that access succeeds.
        # No actual rate limiting is performed.
        self._verbose = False
def check_phoenix_installed():
    """Raise an MlflowException if the 'arize-phoenix-evals' package is unavailable.

    Raises:
        MlflowException: With installation instructions when `phoenix.evals`
            cannot be imported.
    """
    try:
        import phoenix.evals  # noqa: F401
    except ImportError as e:
        # Chain the original ImportError so the real failure (e.g. a broken
        # transitive dependency rather than a missing package) stays visible.
        raise MlflowException.invalid_parameter_value(
            "Phoenix evaluators require the 'arize-phoenix-evals' package. "
            "Install it with: pip install arize-phoenix-evals"
        ) from e
def map_scorer_inputs_to_phoenix_record(
    inputs: Any = None,
    outputs: Any = None,
    expectations: dict[str, Any] | None = None,
    trace: Trace | None = None,
) -> dict[str, Any]:
    """
    Convert MLflow scorer inputs to Phoenix evaluator record format.

    Args:
        inputs: The input to evaluate
        outputs: The output to evaluate
        expectations: Expected values and context for evaluation
        trace: MLflow trace for evaluation

    Returns:
        Dictionary formatted for Phoenix evaluator
    """
    if trace:
        # A trace can fill in any argument that was not passed explicitly.
        inputs = resolve_inputs_from_trace(inputs, trace)
        outputs = resolve_outputs_from_trace(outputs, trace)
        expectations = resolve_expectations_from_trace(expectations, trace)
    record = {}
    if inputs is not None:
        record["input"] = parse_inputs_to_str(inputs)
    if outputs is not None:
        record["output"] = parse_outputs_to_str(outputs)
    # Extract reference from expectations using standard MLflow keys
    # (first truthy value wins; falsy expectation values are skipped).
    reference = None
    if expectations:
        reference = (
            expectations.get("expected_response")
            or expectations.get("context")
            or expectations.get("reference")
            or expectations.get("expected_output")
        )
    # Fall back to extracting context from trace retrieval spans
    if not reference and trace:
        if span_id_to_context := extract_retrieval_context_from_trace(trace):
            contexts = []
            for ctx_list in span_id_to_context.values():
                for ctx in ctx_list:
                    # Retrieval chunks may be dicts with a "content" field or
                    # arbitrary objects; stringify the latter.
                    if isinstance(ctx, dict) and "content" in ctx:
                        contexts.append(ctx["content"])
                    else:
                        contexts.append(str(ctx))
            reference = "\n".join(contexts) if contexts else None
    if reference:
        record["reference"] = reference
    return record
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/phoenix/utils.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/phoenix/test_models.py | from unittest.mock import Mock, patch
import phoenix.evals as phoenix_evals
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers.phoenix.models import (
DatabricksPhoenixModel,
create_phoenix_model,
)
@pytest.fixture
def mock_call_chat_completions():
    """Patch call_chat_completions to return a canned result with `.output`."""
    with patch("mlflow.genai.scorers.phoenix.models.call_chat_completions") as mock:
        result = Mock()
        result.output = "Test output"
        mock.return_value = result
        yield mock
def test_databricks_phoenix_model_call(mock_call_chat_completions):
    """__call__ forwards the prompt to the judge endpoint and returns its output."""
    model = DatabricksPhoenixModel()
    result = model("Test prompt")
    assert result == "Test output"
    mock_call_chat_completions.assert_called_once_with(
        user_prompt="Test prompt",
        system_prompt="",
    )
def test_databricks_phoenix_model_get_model_name():
    """The adapter reports the default Databricks judge model name."""
    model = DatabricksPhoenixModel()
    assert model.get_model_name() == "databricks"
def test_create_phoenix_model_databricks():
    """The bare "databricks" URI selects the managed-judge adapter."""
    model = create_phoenix_model("databricks")
    assert isinstance(model, DatabricksPhoenixModel)
    assert model.get_model_name() == "databricks"
def test_create_phoenix_model_databricks_endpoint():
    """"databricks:/endpoint" URIs are routed through LiteLLM."""
    model = create_phoenix_model("databricks:/my-endpoint")
    assert isinstance(model, phoenix_evals.LiteLLMModel)
    assert model.model == "databricks/my-endpoint"
def test_create_phoenix_model_openai(monkeypatch):
    """Provider URIs such as "openai:/gpt-4" build a LiteLLM model."""
    monkeypatch.setenv("OPENAI_API_KEY", "test-key")
    model = create_phoenix_model("openai:/gpt-4")
    assert isinstance(model, phoenix_evals.LiteLLMModel)
def test_create_phoenix_model_invalid_format():
    """URIs without a provider prefix are rejected."""
    with pytest.raises(MlflowException, match="Malformed model uri"):
        create_phoenix_model("gpt-4")
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_models.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/phoenix/test_phoenix.py | from unittest.mock import Mock, patch
import phoenix.evals as phoenix_evals
import pytest
from mlflow.entities.assessment import Feedback
@pytest.fixture
def mock_model():
    """A stand-in Phoenix model exposing the attributes evaluators touch."""
    mock = Mock()
    mock._verbose = False
    mock._rate_limiter = Mock()
    mock._rate_limiter._verbose = False
    return mock
@pytest.mark.parametrize(
    ("scorer_class", "metric_name", "label", "score", "explanation"),
    [
        ("Hallucination", "Hallucination", "factual", 0.9, "Output is grounded."),
        ("Relevance", "Relevance", "relevant", 0.85, "Context is relevant."),
        ("Toxicity", "Toxicity", "non-toxic", 0.95, "Safe content."),
        ("QA", "QA", "correct", 1.0, "Answer is correct."),
        ("Summarization", "Summarization", "good", 0.9, "Good summary."),
    ],
)
def test_phoenix_scorer(mock_model, scorer_class, metric_name, label, score, explanation):
    """Each built-in scorer converts the evaluator's (label, score, explanation) into Feedback."""
    with patch("mlflow.genai.scorers.phoenix.create_phoenix_model", return_value=mock_model):
        from mlflow.genai.scorers import phoenix
        scorer_cls = getattr(phoenix, scorer_class)
        scorer = scorer_cls(model="openai:/gpt-4")
    with patch.object(scorer._evaluator, "evaluate", return_value=(label, score, explanation)):
        result = scorer(
            inputs="test input",
            outputs="test output",
            expectations={"expected_response": "test reference"},
        )
    assert isinstance(result, Feedback)
    assert result.name == metric_name
    assert result.value == label
    assert result.metadata["score"] == score
    assert result.source.source_id == "openai:/gpt-4"
def test_phoenix_scorer_negative_label(mock_model):
    """A failing label with no score still produces Feedback carrying the rationale."""
    with patch("mlflow.genai.scorers.phoenix.create_phoenix_model", return_value=mock_model):
        from mlflow.genai.scorers.phoenix import Hallucination
        scorer = Hallucination(model="openai:/gpt-4")
    with patch.object(
        scorer._evaluator, "evaluate", return_value=("hallucinated", None, "Made-up info.")
    ):
        result = scorer(
            inputs="test",
            outputs="test output",
            expectations={"expected_response": "test context"},
        )
    assert isinstance(result, Feedback)
    assert result.value == "hallucinated"
    assert result.rationale == "Made-up info."
def test_phoenix_scorer_none_explanation(mock_model):
    """A missing explanation maps to a None rationale on the Feedback."""
    with patch("mlflow.genai.scorers.phoenix.create_phoenix_model", return_value=mock_model):
        from mlflow.genai.scorers.phoenix import Hallucination
        scorer = Hallucination(model="openai:/gpt-4")
    with patch.object(scorer._evaluator, "evaluate", return_value=("factual", 0.9, None)):
        result = scorer(
            inputs="test",
            outputs="test output",
            expectations={"expected_response": "test context"},
        )
    assert result.rationale is None
def test_phoenix_get_scorer(mock_model):
with patch("mlflow.genai.scorers.phoenix.create_phoenix_model", return_value=mock_model):
from mlflow.genai.scorers.phoenix import get_scorer
scorer = get_scorer("Hallucination", model="openai:/gpt-4")
with patch.object(scorer._evaluator, "evaluate", return_value=("factual", 0.9, "Grounded.")):
result = scorer(
inputs="test",
outputs="test output",
expectations={"expected_response": "test context"},
)
assert isinstance(result, Feedback)
assert result.name == "Hallucination"
def test_phoenix_scorer_evaluator_is_real_instance(mock_model):
with patch("mlflow.genai.scorers.phoenix.create_phoenix_model", return_value=mock_model):
from mlflow.genai.scorers.phoenix import Hallucination
scorer = Hallucination(model="openai:/gpt-4")
assert isinstance(scorer._evaluator, phoenix_evals.HallucinationEvaluator)
def test_phoenix_scorer_error_handling(mock_model):
with patch("mlflow.genai.scorers.phoenix.create_phoenix_model", return_value=mock_model):
from mlflow.genai.scorers.phoenix import Hallucination
scorer = Hallucination(model="openai:/gpt-4")
with patch.object(scorer._evaluator, "evaluate", side_effect=RuntimeError("Evaluation failed")):
result = scorer(
inputs="test",
outputs="test output",
expectations={"expected_response": "test context"},
)
assert isinstance(result, Feedback)
assert result.error is not None
assert "Evaluation failed" in str(result.error)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_phoenix.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/phoenix/test_registry.py | from unittest import mock
import pytest
from mlflow.exceptions import MlflowException
phoenix_evals = pytest.importorskip("phoenix.evals")
@pytest.mark.parametrize(
("metric_name", "evaluator_name"),
[
("Hallucination", "HallucinationEvaluator"),
("Relevance", "RelevanceEvaluator"),
("Toxicity", "ToxicityEvaluator"),
("QA", "QAEvaluator"),
("Summarization", "SummarizationEvaluator"),
],
)
def test_get_evaluator_class(metric_name, evaluator_name):
from mlflow.genai.scorers.phoenix.registry import get_evaluator_class
result = get_evaluator_class(metric_name)
expected = getattr(phoenix_evals, evaluator_name)
assert result is expected
def test_get_evaluator_class_invalid_metric():
from mlflow.genai.scorers.phoenix.registry import get_evaluator_class
with pytest.raises(MlflowException, match="Unknown Phoenix metric"):
get_evaluator_class("InvalidMetric")
def test_get_evaluator_class_dynamic_import():
from mlflow.genai.scorers.phoenix.registry import get_evaluator_class
mock_evaluator = mock.MagicMock()
mock_evaluator.__name__ = "NewMetricEvaluator"
with mock.patch.object(phoenix_evals, "NewMetricEvaluator", mock_evaluator, create=True):
result = get_evaluator_class("NewMetric")
assert result is mock_evaluator
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_registry.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/phoenix/test_utils.py | import json
import sys
import time
from unittest.mock import patch
import pytest
from opentelemetry.sdk.trace import ReadableSpan as OTelReadableSpan
from mlflow.entities.span import Span
from mlflow.entities.trace import Trace, TraceData, TraceInfo
from mlflow.entities.trace_location import TraceLocation
from mlflow.entities.trace_state import TraceState
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers.phoenix.utils import (
map_scorer_inputs_to_phoenix_record,
)
from mlflow.tracing.constant import TRACE_SCHEMA_VERSION, TRACE_SCHEMA_VERSION_KEY
from mlflow.tracing.utils import build_otel_context
def _create_test_trace(
inputs: dict[str, str] | None = None,
outputs: dict[str, str] | None = None,
) -> Trace:
current_time_ns = int(time.time() * 1e9)
trace_id = "test_trace_001"
attributes = {"mlflow.traceRequestId": json.dumps(trace_id)}
if inputs is not None:
attributes["mlflow.spanInputs"] = json.dumps(inputs)
if outputs is not None:
attributes["mlflow.spanOutputs"] = json.dumps(outputs)
attributes["mlflow.spanType"] = json.dumps("CHAIN")
otel_span = OTelReadableSpan(
name="root_span",
context=build_otel_context(12345, 111),
parent=None,
start_time=current_time_ns,
end_time=current_time_ns + 1000000,
attributes=attributes,
)
trace_info = TraceInfo(
trace_id=trace_id,
trace_location=TraceLocation.from_experiment_id("0"),
request_time=int(time.time() * 1000),
state=TraceState.OK,
execution_duration=1000,
trace_metadata={TRACE_SCHEMA_VERSION_KEY: str(TRACE_SCHEMA_VERSION)},
tags={},
assessments=[],
request_preview=json.dumps(inputs) if inputs else None,
response_preview=json.dumps(outputs) if outputs else None,
)
trace_data = TraceData(spans=[Span(otel_span)])
return Trace(info=trace_info, data=trace_data)
def test_check_phoenix_installed_raises_without_phoenix():
with patch.dict("sys.modules", {"phoenix": None, "phoenix.evals": None}):
for mod in list(sys.modules.keys()):
if "mlflow.genai.scorers.phoenix" in mod:
del sys.modules[mod]
from mlflow.genai.scorers.phoenix.utils import check_phoenix_installed as check_fn
with pytest.raises(MlflowException, match="arize-phoenix-evals"):
check_fn()
def test_map_scorer_inputs_basic():
record = map_scorer_inputs_to_phoenix_record(
inputs="What is MLflow?",
outputs="MLflow is a platform",
)
assert record["input"] == "What is MLflow?"
assert record["output"] == "MLflow is a platform"
assert "reference" not in record
def test_map_scorer_inputs_with_expected_response():
record = map_scorer_inputs_to_phoenix_record(
inputs="What is MLflow?",
outputs="MLflow is a platform",
expectations={"expected_response": "MLflow is an ML platform."},
)
assert record["input"] == "What is MLflow?"
assert record["output"] == "MLflow is a platform"
assert record["reference"] == "MLflow is an ML platform."
def test_map_scorer_inputs_with_context():
record = map_scorer_inputs_to_phoenix_record(
inputs="What is MLflow?",
outputs="MLflow is a platform",
expectations={"context": "MLflow context here."},
)
assert record["reference"] == "MLflow context here."
def test_map_scorer_inputs_expected_response_priority():
record = map_scorer_inputs_to_phoenix_record(
inputs="test",
outputs="test output",
expectations={
"expected_response": "priority value",
"context": "should be ignored",
"reference": "also ignored",
},
)
assert record["reference"] == "priority value"
def test_map_scorer_inputs_with_trace():
trace = _create_test_trace(
inputs={"question": "What is MLflow?"},
outputs={"answer": "MLflow is a platform for ML lifecycle."},
)
record = map_scorer_inputs_to_phoenix_record(
expectations={"expected_response": "MLflow is an ML platform."},
trace=trace,
)
assert record["input"] == "{'question': 'What is MLflow?'}"
assert record["output"] == '{"answer": "MLflow is a platform for ML lifecycle."}'
assert record["reference"] == "MLflow is an ML platform."
def test_map_scorer_inputs_none_values():
record = map_scorer_inputs_to_phoenix_record()
assert "input" not in record
assert "output" not in record
assert "reference" not in record
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_utils.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/jobs/_periodic_tasks_consumer.py | """
This module is used for launching the periodic tasks Huey consumer.
This is a dedicated consumer that only runs periodic tasks (like the online scoring scheduler).
It is launched by the job runner and runs in a separate process from job execution consumers.
"""
import threading
from mlflow.server.jobs.logging_utils import configure_logging_for_jobs
from mlflow.server.jobs.utils import (
HUEY_PERIODIC_TASKS_INSTANCE_KEY,
_exit_when_orphaned,
_get_or_init_huey_instance,
register_periodic_tasks,
)
# Configure Python logging to suppress noisy job logs
configure_logging_for_jobs()
# Ensure the subprocess is killed when parent process dies.
# The huey consumer's parent process is `_job_runner` process,
# if `_job_runner` process is died, it means the MLflow server exits.
threading.Thread(
target=_exit_when_orphaned,
name="exit_when_orphaned",
daemon=True,
).start()
huey_instance = _get_or_init_huey_instance(HUEY_PERIODIC_TASKS_INSTANCE_KEY).instance
# Register periodic tasks with this dedicated instance
register_periodic_tasks(huey_instance)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/_periodic_tasks_consumer.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/optimizers/memalign/optimizer.py | import copy
import logging
from dataclasses import asdict
from typing import TYPE_CHECKING, Any
import mlflow
from mlflow.entities.assessment import Assessment, AssessmentSource, Feedback
from mlflow.entities.assessment_source import AssessmentSourceType
from mlflow.entities.trace import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.base import AlignmentOptimizer, Judge, JudgeField
from mlflow.genai.judges.optimizers.dspy_utils import (
_check_dspy_installed,
construct_dspy_lm,
create_dspy_signature,
trace_to_dspy_example,
)
from mlflow.genai.judges.optimizers.memalign.utils import (
Guideline,
create_extended_signature,
distill_guidelines,
get_default_embedding_model,
get_query_field,
retrieve_relevant_examples,
truncate_to_token_limit,
value_to_embedding_text,
)
from mlflow.genai.judges.utils import get_default_model
from mlflow.genai.scorers.base import (
_SERIALIZATION_VERSION,
ScorerKind,
SerializedScorer,
)
from mlflow.genai.utils.trace_utils import (
resolve_expectations_from_trace,
resolve_inputs_from_trace,
resolve_outputs_from_trace,
)
from mlflow.metrics.genai.model_utils import convert_mlflow_uri_to_litellm
from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE
from mlflow.utils.annotations import experimental
from mlflow.utils.docstring_utils import format_docstring
if TYPE_CHECKING:
import dspy
_logger = logging.getLogger(__name__)
_CONFIG_FIELDS = ("reflection_lm", "retrieval_k", "embedding_model", "embedding_dim")
# Databricks embedding endpoints have stricter input size limits than other providers.
# Use a smaller batch size to avoid "input embedding size too large" errors.
_DEFAULT_EMBEDDING_BATCH_SIZE = 200
_DATABRICKS_EMBEDDING_BATCH_SIZE = 150
def _get_embedding_batch_size(litellm_model: str) -> int:
if litellm_model.startswith("databricks/"):
return _DATABRICKS_EMBEDDING_BATCH_SIZE
return _DEFAULT_EMBEDDING_BATCH_SIZE
_MODEL_API_DOC = {
"reflection_lm": """Model to use for distilling guidelines from feedback.
Supported formats:
* `"databricks"` for a default Databricks-hosted model designed for GenAI quality assessments.
* `"databricks:/<model-name>"` for other Databricks-hosted models
(e.g., `databricks:/databricks-gpt-5-mini`, `databricks:/databricks-claude-sonnet-4-5`).
For a full list, see https://models.litellm.ai/ and select "databricks" as the provider.)
* `"databricks:/<endpoint-name>"` or `"endpoints:/<endpoint-name>"` for
custom endpoints on Databricks (e.g., `databricks:/my-endpoint`).
* `<provider>:/<model-name>` for other providers (e.g.,
`"openai:/gpt-4o-mini"`, `"anthropic:/claude-3.5-sonnet-20240620"`).
For a full list, see https://models.litellm.ai/.
MLflow natively supports `["openai", "anthropic", "bedrock", "mistral"]`,
and more providers are supported through
`LiteLLM <https://docs.litellm.ai/docs/providers>`_.
Default model depends on the tracking URI setup:
* Databricks: `databricks` (a default Databricks-hosted model designed
for GenAI quality assessments)
* Otherwise: `openai:/gpt-4o-mini`.
""",
"embedding_model": """Model to use for generating embeddings for
example retrieval. Must be a form of `<provider>:/<model-name>`, such as
`"openai:/text-embedding-3-small"`. Supported providers include OpenAI and
others via LiteLLM. Default: `"openai:/text-embedding-3-small"`.
""",
}
@experimental(version="3.9.0")
@format_docstring(_MODEL_API_DOC)
class MemoryAugmentedJudge(Judge):
"""
A judge augmented with dual memory systems.
This judge enhances evaluation with:
- Semantic Memory: Distilled guidelines from past feedback
- Episodic Memory: Retrieved similar examples from past feedback
The judge maintains state across evaluations and provides the exact same
interface as the original judge. Memories are managed internally.
Args:
base_judge: Base judge to augment with memory systems
reflection_lm: {{ reflection_lm }}
retrieval_k: Number of similar examples to retrieve from episodic memory (default: 5)
embedding_model: {{ embedding_model }}
embedding_dim: Dimension of embeddings (default: 512)
"""
def __init__(
self,
base_judge: Judge,
reflection_lm: str | None = None,
retrieval_k: int = 5,
embedding_model: str | None = None,
embedding_dim: int = 512,
*,
_defer_init: bool = False,
):
# Input validation
if not isinstance(retrieval_k, int) or retrieval_k <= 0:
raise MlflowException(
f"retrieval_k must be a positive integer, got {retrieval_k}",
error_code=INVALID_PARAMETER_VALUE,
)
if not isinstance(embedding_dim, int) or embedding_dim <= 0:
raise MlflowException(
f"embedding_dim must be a positive integer, got {embedding_dim}",
error_code=INVALID_PARAMETER_VALUE,
)
effective_base_judge = (
base_judge._base_judge if isinstance(base_judge, MemoryAugmentedJudge) else base_judge
)
super().__init__(
name=effective_base_judge.name,
description=effective_base_judge.description,
aggregations=effective_base_judge.aggregations,
)
self._base_judge = effective_base_judge
self._retrieval_k = retrieval_k
self._reflection_lm = reflection_lm if reflection_lm is not None else get_default_model()
self._embedding_model = (
embedding_model if embedding_model is not None else get_default_embedding_model()
)
self._embedding_dim = embedding_dim
# Always store trace IDs for serialization
self._episodic_trace_ids: list[str] = []
if _defer_init:
# Defer creating heavyweight DSPy objects until first use (_embedder=None signals this)
self._base_signature = None
self._embedder = None
self._retriever = None
self._predict_module = None
self._episodic_memory: list["dspy.Example"] = []
self._semantic_memory: list[Guideline] = []
else:
self._initialize_dspy_components(base_judge)
def _initialize_dspy_components(self, base_judge: Judge | None = None) -> None:
"""Initialize heavyweight DSPy components (embedder, predict module, memory index)."""
import dspy
effective_base_judge = base_judge or self._base_judge
self._base_signature = create_dspy_signature(effective_base_judge)
litellm_embedding_model = convert_mlflow_uri_to_litellm(self._embedding_model)
self._embedder = dspy.Embedder(
litellm_embedding_model,
dimensions=self._embedding_dim,
drop_params=True,
batch_size=_get_embedding_batch_size(litellm_embedding_model),
)
self._retriever = None
# Inherit memory from base_judge if it's a MemoryAugmentedJudge.
# Episodic memory index is not built here — _add_examples_to_memory() handles it.
if isinstance(base_judge, MemoryAugmentedJudge):
self._semantic_memory = copy.deepcopy(base_judge._semantic_memory)
self._episodic_trace_ids = base_judge._episodic_trace_ids.copy()
if base_judge._episodic_memory:
self._episodic_memory = copy.deepcopy(base_judge._episodic_memory)
elif self._episodic_trace_ids:
self._episodic_memory = self._reconstruct_episodic_memory()
else:
self._episodic_memory: list["dspy.Example"] = []
else:
self._episodic_memory: list["dspy.Example"] = []
self._semantic_memory: list[Guideline] = []
extended_signature = create_extended_signature(self._base_signature)
self._predict_module = dspy.Predict(extended_signature)
self._predict_module.set_lm(construct_dspy_lm(effective_base_judge.model))
def __call__(
self,
*,
inputs: Any = None,
outputs: Any = None,
expectations: dict[str, Any] | None = None,
trace: Trace | None = None,
) -> Assessment:
self._lazy_init()
if trace is not None:
inputs = resolve_inputs_from_trace(inputs, trace)
outputs = resolve_outputs_from_trace(outputs, trace)
expectations = resolve_expectations_from_trace(expectations, trace)
guidelines = [g.guideline_text for g in self._semantic_memory]
query_kwargs = {
"inputs": inputs,
"outputs": outputs,
"expectations": expectations,
"trace": trace,
}
retrieved_results = retrieve_relevant_examples(
retriever=self._retriever,
examples=self._episodic_memory,
query_kwargs=query_kwargs,
signature=self._base_signature,
)
relevant_examples = [example for example, _ in retrieved_results]
retrieved_trace_ids = [trace_id for _, trace_id in retrieved_results]
import dspy
from dspy.adapters.json_adapter import JSONAdapter
with dspy.context(adapter=JSONAdapter()):
prediction = self._predict_module(
guidelines=guidelines,
example_judgements=relevant_examples,
inputs=inputs,
outputs=outputs,
expectations=expectations,
trace=value_to_embedding_text(trace) if trace is not None else None,
)
return Feedback(
name=self._base_judge.name,
source=AssessmentSource(
source_type=AssessmentSourceType.LLM_JUDGE,
source_id=self._base_judge.model,
),
value=prediction.result,
rationale=prediction.rationale,
metadata={"retrieved_example_trace_ids": retrieved_trace_ids}
if retrieved_trace_ids
else {},
)
@property
def name(self) -> str:
return self._base_judge.name
@property
def instructions(self) -> str:
instructions = self._base_judge.instructions
if self._semantic_memory:
instructions += f"\n\nDistilled Guidelines ({len(self._semantic_memory)}):\n"
for guideline in self._semantic_memory:
instructions += f" - {guideline.guideline_text}\n"
return instructions
@property
def model(self) -> str:
return self._base_judge.model
@property
def feedback_value_type(self) -> Any:
return self._base_judge.feedback_value_type
def get_input_fields(self) -> list[JudgeField]:
return self._base_judge.get_input_fields()
@property
def kind(self) -> ScorerKind:
return ScorerKind.MEMORY_AUGMENTED
def model_dump(self, **kwargs) -> dict[str, Any]:
base_judge_data = self._base_judge.model_dump(**kwargs)
memory_augmented_data = {
"base_judge": base_judge_data,
"episodic_trace_ids": self._episodic_trace_ids,
"semantic_memory": [g.model_dump() for g in self._semantic_memory],
**{field: getattr(self, f"_{field}") for field in _CONFIG_FIELDS},
}
serialized = SerializedScorer(
name=self.name,
description=self.description,
aggregations=self.aggregations,
mlflow_version=mlflow.__version__,
serialization_version=_SERIALIZATION_VERSION,
memory_augmented_judge_data=memory_augmented_data,
)
return asdict(serialized)
@classmethod
def _from_serialized(
cls,
serialized: SerializedScorer,
) -> "MemoryAugmentedJudge":
# Import here to avoid circular dependency: base.py imports MemoryAugmentedJudge
from mlflow.genai.scorers.base import Scorer
data = serialized.memory_augmented_judge_data
base_judge_serialized = SerializedScorer(**data["base_judge"])
base_judge = Scorer.model_validate(base_judge_serialized)
# Use constructor with _defer_init=True to skip heavyweight DSPy initialization
instance = cls(
base_judge=base_judge,
reflection_lm=data.get("reflection_lm"),
retrieval_k=data.get("retrieval_k", 5),
embedding_model=data.get("embedding_model"),
embedding_dim=data.get("embedding_dim", 512),
_defer_init=True,
)
# Restore semantic memory and episodic trace IDs for lazy loading
instance._semantic_memory = [Guideline(**g) for g in data["semantic_memory"]]
instance._episodic_trace_ids = data.get("episodic_trace_ids") or []
return instance
def _create_copy(self) -> "MemoryAugmentedJudge":
"""
Override base _create_copy for Scorer.register().
The base implementation uses model_copy(deep=True), which fails because
DSPy objects (_embedder, _retriever, _predict_module) contain thread locks
that can't be pickled. We create a new instance with _defer_init=True and
store trace IDs for lazy reconstruction.
"""
judge_copy = MemoryAugmentedJudge(
base_judge=self._base_judge,
reflection_lm=self._reflection_lm,
retrieval_k=self._retrieval_k,
embedding_model=self._embedding_model,
embedding_dim=self._embedding_dim,
_defer_init=True,
)
judge_copy._semantic_memory = copy.deepcopy(self._semantic_memory)
judge_copy._episodic_trace_ids = self._episodic_trace_ids.copy()
return judge_copy
def _reconstruct_episodic_memory(self) -> list["dspy.Example"]:
examples = []
missing_ids = []
for trace_id in self._episodic_trace_ids:
trace = mlflow.get_trace(trace_id, silent=True)
if trace is not None:
if example := trace_to_dspy_example(trace, self._base_judge):
example._trace_id = trace.info.trace_id
examples.append(example)
else:
missing_ids.append(trace_id)
if missing_ids:
_logger.warning(
f"Could not find {len(missing_ids)} traces for episodic memory reconstruction. "
f"Missing trace IDs: {missing_ids[:5]}"
f"{'...' if len(missing_ids) > 5 else ''}. "
f"Judge will operate with partial memory "
f"({len(examples)}/{len(self._episodic_trace_ids)} traces)."
)
return examples
def _lazy_init(self) -> None:
"""
Lazily initialize DSPy components and episodic memory from stored trace IDs.
This method is called on first use (e.g., __call__) when the judge was created
with _defer_init=True. It:
1. Creates DSPy components (embedder, predict module)
2. Fetches traces by ID and reconstructs episodic memory
3. Builds the episodic memory search index
No-op if already initialized (checked via _embedder not being None).
"""
if self._embedder is not None:
return
import dspy
self._base_signature = create_dspy_signature(self._base_judge)
litellm_embedding_model = convert_mlflow_uri_to_litellm(self._embedding_model)
self._embedder = dspy.Embedder(
litellm_embedding_model,
dimensions=self._embedding_dim,
drop_params=True,
batch_size=_get_embedding_batch_size(litellm_embedding_model),
)
extended_signature = create_extended_signature(self._base_signature)
self._predict_module = dspy.Predict(extended_signature)
self._predict_module.set_lm(construct_dspy_lm(self._base_judge.model))
self._episodic_memory = self._reconstruct_episodic_memory()
if self._episodic_memory:
self._build_episodic_memory()
@experimental(version="3.9.0")
def unalign(self, traces: list[Trace]) -> "MemoryAugmentedJudge":
"""
Remove specific traces from memory and return an updated judge.
This method allows you to selectively remove feedback examples from the judge's
memory systems. This is useful when you want to:
- Remove incorrect or low-quality feedback that negatively impacts performance
- Update the judge in case your evaluation criteria change
- Remove feedback from specific users or time periods
The returned judge will have guidelines selectively deleted based on source_trace_ids:
- Guidelines where all source traces were removed are deleted
- Guidelines with at least one remaining source trace are retained
Args:
traces: Traces containing feedback to remove from memory. Only traces with
feedback matching this judge's name will be removed.
Returns:
Updated MemoryAugmentedJudge with specified traces removed from memory.
Example:
.. code-block:: python
import mlflow
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.optimizers import MemAlignOptimizer
# Assuming `all_traces` contains human feedback for the judge
aligned_judge = judge.align(traces=all_traces, optimizer=MemAlignOptimizer())
aligned_judge_v2 = aligned_judge.unalign(traces=bad_traces)
# aligned_judge_v2 now only retains feedback from
# `set(all_traces) - set(bad_traces)`
"""
trace_ids_to_remove = {trace.info.trace_id for trace in traces}
# Filter examples to retain based on trace ids
examples_to_retain = [
example
for example in self._episodic_memory
if not (hasattr(example, "_trace_id") and example._trace_id in trace_ids_to_remove)
]
if len(examples_to_retain) == len(self._episodic_memory):
_logger.warning("No feedback records found for the provided traces")
return self
# Filter guidelines to retain based on source_trace_ids
# - Always retain user-provided guidelines (those without source_trace_ids)
# - Delete guideline only if ALL of its source traces were removed
guidelines_to_retain = [
guideline
for guideline in self._semantic_memory
if guideline.source_trace_ids is None
or any(tid not in trace_ids_to_remove for tid in guideline.source_trace_ids)
]
# Reinitialize new judge
new_judge = MemoryAugmentedJudge(
base_judge=self._base_judge,
reflection_lm=self._reflection_lm,
retrieval_k=self._retrieval_k,
embedding_model=self._embedding_model,
embedding_dim=self._embedding_dim,
)
new_judge._semantic_memory = guidelines_to_retain
new_judge._episodic_memory = examples_to_retain
new_judge._build_episodic_memory()
_logger.debug(
f"Removed {len(traces)} traces from memory. "
f"Episodic memory size: {len(new_judge._episodic_memory)} examples, "
f"Semantic memory size: {len(new_judge._semantic_memory)} guidelines."
)
return new_judge
def _distill_new_guidelines(self, new_examples: list["dspy.Example"]) -> None:
"""
Distill new guidelines from newly added examples and add to semantic memory.
Args:
new_examples: The examples that were just added (not all examples)
"""
existing_guideline_texts = [g.guideline_text for g in self._semantic_memory]
new_guidelines = distill_guidelines(
examples=new_examples,
judge_instructions=self._base_judge.instructions,
reflection_lm=self._reflection_lm,
existing_guidelines=existing_guideline_texts,
)
self._semantic_memory.extend(new_guidelines)
_logger.debug(
f"Distilled {len(new_guidelines)} new guidelines from {len(new_examples)} new "
f"examples. Semantic memory now has {len(self._semantic_memory)} guidelines."
)
def _build_episodic_memory(self) -> None:
"""Build episodic memory search index from examples."""
import dspy.retrievers
query_field = get_query_field(self._base_signature)
if query_field is None:
raise MlflowException(
"Unable to build episodic memory: no suitable input field found in judge "
"instructions. Please ensure the judge instructions reference at least one of "
"the following fields: inputs, outputs, expectations, conversation, trace.",
error_code=INTERNAL_ERROR,
)
# Build corpus and filter examples with empty query field
filtered_memory = []
corpus = []
for example in self._episodic_memory:
if value := getattr(example, query_field, None):
query = truncate_to_token_limit(
value_to_embedding_text(value), self._embedding_model, model_type="embedding"
)
corpus.append(query)
filtered_memory.append(example)
self._episodic_memory = filtered_memory
self._retriever = dspy.retrievers.Embeddings(
embedder=self._embedder, corpus=corpus, k=self._retrieval_k
)
_logger.debug(f"Episodic memory corpus contains {len(corpus)} examples")
def _add_examples_to_memory(self, examples: list["dspy.Example"]) -> None:
"""Add examples by updating both episodic memory and semantic memory.
Args:
examples: Examples to add
"""
# Update episodic memory and trace IDs
self._episodic_memory.extend(examples)
self._episodic_trace_ids.extend(ex._trace_id for ex in examples if hasattr(ex, "_trace_id"))
self._build_episodic_memory()
# Update semantic memory
self._distill_new_guidelines(examples)
@experimental(version="3.9.0")
@format_docstring(_MODEL_API_DOC)
class MemAlignOptimizer(AlignmentOptimizer):
"""
MemAlign alignment optimizer using dual memory systems.
This optimizer creates a memory-augmented judge that learns from feedback
through two complementary mechanisms:
**Semantic Memory** - Distills general guidelines from feedback:
- LLM extracts patterns from feedback records
- Guidelines describe user preferences and expectations
- Applied as context to all future evaluations
**Episodic Memory** - Retrieves similar past examples:
- Stores feedback records with embeddings
- Finds most similar examples during evaluation
- Provides concrete examples as evaluation context
The returned judge is a MemoryAugmentedJudge that maintains memory state.
Args:
reflection_lm: {{ reflection_lm }}
retrieval_k: Number of similar examples to retrieve from episodic memory (default: 5)
embedding_model: {{ embedding_model }}
embedding_dim: Dimension of embeddings (default: 512)
Note:
The number of parallel threads for LLM calls during guideline distillation can be
configured via the ``MLFLOW_GENAI_OPTIMIZE_MAX_WORKERS`` environment variable
(default: 8). Increasing this value can speed up alignment when processing many
feedback examples, but may increase API rate limit errors.
Example:
.. code-block:: python
import mlflow
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.optimizers import MemAlignOptimizer
judge = make_judge(name="quality", instructions="...", model="openai:/gpt-4")
optimizer = MemAlignOptimizer(
reflection_lm="openai:/gpt-4o-mini",
retrieval_k=3,
embedding_model="openai:/text-embedding-3-small",
)
# Assuming `traces` contains human feedback for the judge
optimized_judge = judge.align(traces=traces, optimizer=optimizer)
result = optimized_judge(inputs="...", outputs="...")
"""
def __init__(
self,
reflection_lm: str | None = None,
retrieval_k: int = 5,
embedding_model: str | None = None,
embedding_dim: int = 512,
):
_check_dspy_installed()
self._reflection_lm = reflection_lm if reflection_lm is not None else get_default_model()
self._retrieval_k = retrieval_k
self._embedding_model = (
embedding_model if embedding_model is not None else get_default_embedding_model()
)
self._embedding_dim = embedding_dim
def align(self, judge: Judge, traces: list[Trace]) -> MemoryAugmentedJudge:
"""
Align judge with human feedback from traces.
Args:
judge: Judge to align
traces: Traces containing human feedback
Returns:
Memory-augmented judge aligned with feedback
"""
try:
if not traces:
raise MlflowException(
"No traces provided for alignment", error_code=INVALID_PARAMETER_VALUE
)
_logger.debug(f"Starting MemAlign alignment with {len(traces)} traces")
new_examples = []
for trace in traces:
example = trace_to_dspy_example(trace, judge)
if example is not None:
example._trace_id = trace.info.trace_id
new_examples.append(example)
if not new_examples:
raise MlflowException(
f"No valid feedback records found in traces. "
f"Ensure traces contain human assessments with name '{judge.name}'",
error_code=INVALID_PARAMETER_VALUE,
)
_logger.debug(
f"Created {len(new_examples)} new feedback records from {len(traces)} traces"
)
memory_judge = MemoryAugmentedJudge(
base_judge=judge,
reflection_lm=self._reflection_lm,
retrieval_k=self._retrieval_k,
embedding_model=self._embedding_model,
embedding_dim=self._embedding_dim,
)
memory_judge._add_examples_to_memory(new_examples)
_logger.debug(f"MemAlign alignment completed successfully on {len(traces)} examples.")
return memory_judge
except Exception as e:
_logger.error(f"MemAlign alignment failed: {e}", exc_info=True)
raise MlflowException(
f"Alignment optimization failed: {e!s}", error_code=INTERNAL_ERROR
) from e
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/memalign/optimizer.py",
"license": "Apache License 2.0",
"lines": 572,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/optimizers/memalign/prompts.py | DISTILLATION_PROMPT_TEMPLATE = """You are helping improve an LLM judge with the \
following instructions:
{{ judge_instructions }}
Given a set of examples and a user's judgement of their quality, your task is to \
distill a set of guidelines from the judgements to model this user's perspective, \
which can be used to evaluate future responses.
A guideline can be about:
- The user's preference / expectation for quality
- The user's factual beliefs
- Other knowledge about the user (e.g., their background, expertise, interests, etc.)
The guidelines don't need to be general. Instead, they should be specific to this user.
If any of these conflict with your own understanding of quality, prioritize the user's \
perspective.
Here are the existing guidelines distilled from past judgements already:
{% for guideline in existing_guidelines %}
- {{ guideline }}
{% endfor %}
The new guidelines you distill should be complementary to the existing guidelines. \
Don't repeat what's already there.
If the existing guidelines already cover what's reflected in the judgement examples, \
you can return an empty list `{"guidelines": []}`.
Here are the user judgement examples:
{% for id, feedback_record in zip(ids, feedback_records) %}
{
"id": {{ id }},
{% for field, value in feedback_record.items() %}
"{{ field }}": "{{ value }}",
{% endfor %}
}
{% endfor %}
Now, distill a list of guidelines from the above judgement examples in the following \
format:
{
"guidelines": [
{
"guideline_text": str, # a short sentence describing one aspect of user belief / \
preference / expectation
"source_trace_ids": list[int] # a list of ids of the judgement examples which the \
above guideline is distilled from
},
...
]
}
"""
def create_guidelines_field():
    """Build the DSPy input field that carries distilled guidelines.

    The description tells the judge to apply the guidelines implicitly
    instead of mentioning them in its output fields.
    """
    import dspy

    description = (
        "General guidelines you should always consider when evaluating an input. "
        "IMPORTANT: Your output fields should NEVER directly refer to the presence "
        "of these guidelines. Instead, weave the learned lessons into your reasoning."
    )
    return dspy.InputField(desc=description)
def create_examples_field():
    """Build the DSPy input field that carries retrieved example judgements.

    The description tells the judge to align with the examples without
    referring to them explicitly in its output fields.
    """
    import dspy

    description = (
        "Some example judgements (certain input fields might be omitted for "
        "brevity). When evaluating the new input, try to align your judgements "
        "with these examples. IMPORTANT: Your output fields should NEVER directly "
        "refer to the presence of these examples. Instead, weave the learned "
        "lessons into your reasoning."
    )
    return dspy.InputField(desc=description)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/memalign/prompts.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/optimizers/memalign/utils.py | import json
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import lru_cache
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
# Try to import jinja2 at module level
try:
from jinja2 import Template
_JINJA2_AVAILABLE = True
except ImportError:
_JINJA2_AVAILABLE = False
from mlflow.entities.trace import Trace
from mlflow.environment_variables import MLFLOW_GENAI_OPTIMIZE_MAX_WORKERS
from mlflow.genai.judges.optimizers.dspy_utils import (
construct_dspy_lm,
)
from mlflow.genai.judges.optimizers.memalign.prompts import (
DISTILLATION_PROMPT_TEMPLATE,
create_examples_field,
create_guidelines_field,
)
from mlflow.genai.utils.trace_utils import (
extract_request_from_trace,
extract_response_from_trace,
)
from mlflow.metrics.genai.model_utils import convert_mlflow_uri_to_litellm
# Try to import litellm at module level
try:
from litellm import get_model_info, token_counter
_LITELLM_AVAILABLE = True
except ImportError:
_LITELLM_AVAILABLE = False
if TYPE_CHECKING:
import dspy
# Module-level logger for the memalign utilities.
_logger = logging.getLogger(__name__)
# Maximum input tokens for embedding models (most models have this limit or higher)
_MAX_EMBEDDING_MODEL_TOKENS = 8192
# Maximum input tokens for chat models (most models have this limit or higher)
_MAX_CHAT_MODEL_TOKENS = 128000
# Maximum records per batch for distillation
_MAX_RECORDS_PER_BATCH = 50
# Flexible tokens to reserve for response and variance in prompt length
_FLEX_TOKENS = 5000
# Priority list of fields to use for building corpus and retrieval queries
_QUERY_FIELD_PRIORITY = ["inputs", "outputs", "expectations", "conversation", "trace"]
def get_query_field(signature: "dspy.Signature") -> str | None:
"""Get the field name to use for building corpus and retrieval queries.
Args:
signature: DSPy signature defining input fields
Returns:
The first field from priority list that exists in signature's input_fields,
or None if no matching field is found.
"""
for field_name in _QUERY_FIELD_PRIORITY:
if field_name in signature.input_fields:
return field_name
return None
@lru_cache(maxsize=1)
def _get_model_max_input_tokens(model: str, model_type: str) -> int:
    """Look up the maximum input token budget for a model.

    Asks litellm for the model's advertised limit when litellm is installed;
    otherwise (or when litellm has no limit recorded) falls back to a
    conservative default for the given model type.

    Args:
        model: Model identifier (e.g., "openai:/text-embedding-3-small")
        model_type: Type of model ("embedding" or "chat")

    Returns:
        Maximum token limit for the model

    Raises:
        ValueError: If ``model_type`` is neither "embedding" nor "chat" and no
            litellm-provided limit was found.
    """
    if _LITELLM_AVAILABLE:
        litellm_model = convert_mlflow_uri_to_litellm(model)
        try:
            limit = get_model_info(litellm_model)["max_input_tokens"]
        except Exception as e:
            _logger.debug(f"Error getting max tokens for model {model}: {e}", exc_info=True)
        else:
            if limit is not None:
                return limit
    # Fallback defaults when litellm is missing or reports no limit.
    if model_type == "embedding":
        return _MAX_EMBEDDING_MODEL_TOKENS
    if model_type == "chat":
        return _MAX_CHAT_MODEL_TOKENS
    raise ValueError(f"Unknown model type: {model_type}")
# NOTE(review): maxsize=1000 keeps up to 1000 (text, model, model_type) entries —
# including full text payloads — alive for the process lifetime; confirm memory
# impact is acceptable for large traces.
@lru_cache(maxsize=1000)
def truncate_to_token_limit(text: str, model: str, model_type: str) -> str:
    """Truncate text to fit within the model's token limit.
    Args:
        text: Text to truncate
        model: Model identifier (e.g., "openai:/text-embedding-3-small")
        model_type: Type of model ("embedding" or "chat")
    Returns:
        Truncated text that fits within token limit
    """
    max_tokens = _get_model_max_input_tokens(model, model_type=model_type)
    if not _LITELLM_AVAILABLE:
        # Naive truncation based on character count (1 token ~= 4 characters)
        # if litellm is not available
        _logger.warning(
            f"LiteLLM is required for accurate token counting, using naive truncation to "
            f"{max_tokens * 4} characters. Please install litellm using: `pip install litellm`"
        )
        return text[: max_tokens * 4]
    # Optimization to avoid token counting if number of characters is well below limit
    # (mixes units — chars minus _FLEX_TOKENS tokens — but only as a conservative margin:
    # it can only make the early-exit threshold smaller, never larger)
    if len(text) <= max_tokens * 4 - _FLEX_TOKENS:
        return text
    litellm_model = convert_mlflow_uri_to_litellm(model)
    token_count = token_counter(model=litellm_model, text=text)
    if token_count <= max_tokens:
        return text
    original_token_count = token_count
    # First cut proportionally to the token overshoot, then shave 5% per pass
    # until the tokenizer confirms the text fits.
    ratio = max_tokens / token_count
    truncated = text[: int(len(text) * ratio)]
    while token_counter(model=litellm_model, text=truncated) > max_tokens:
        truncated = truncated[: int(len(truncated) * 0.95)]
    _logger.debug(f"Truncated text from {original_token_count} to ~{max_tokens} tokens")
    return truncated
class Guideline(BaseModel):
    """A single distilled user-preference guideline.

    ``source_trace_ids`` records which judgement examples (by trace ID) the
    guideline was derived from; it defaults to None when no provenance exists.
    """

    guideline_text: str
    source_trace_ids: list[str | int] | None = None
class Guidelines(BaseModel):
    """Structured response schema passed to the distillation LM via ``response_format``."""

    guidelines: list[Guideline]
def get_default_embedding_model() -> str:
    """Return the MLflow URI of the default embedding model for retrieval."""
    return "openai:/text-embedding-3-small"
def _count_tokens(text: str, litellm_model: str | None) -> int:
"""Count tokens in text using litellm or naive whitespace estimation."""
if litellm_model is not None and _LITELLM_AVAILABLE:
return token_counter(model=litellm_model, text=text)
# Fallback: heuristic estimation based on character count
# Approximate 4 characters per token (see https://platform.openai.com/tokenizer)
return len(text) // 4
def _make_json_serializable(value: Any) -> Any:
    """Recursively convert *value* into JSON-encodable primitives.

    Dicts and lists are rebuilt element-wise; every other leaf is converted
    to text via ``value_to_embedding_text`` (which stringifies Trace objects
    and plain values alike).
    """
    if isinstance(value, dict):
        return {key: _make_json_serializable(entry) for key, entry in value.items()}
    if isinstance(value, list):
        return [_make_json_serializable(entry) for entry in value]
    return value_to_embedding_text(value)
def _create_batches(
    examples_data: list[dict[str, Any]],
    indices: list[int],
    judge_instructions: str,
    existing_guidelines: list[str],
    reflection_lm: str,
) -> list[list[int]]:
    """Create batches using greedy bin-packing based on token counts.
    Computes token count for each example and greedily packs them into batches
    that fit within the model's token limit.
    Args:
        examples_data: List of feedback example data dicts
        indices: List of indices corresponding to examples_data
        judge_instructions: Original judge instructions
        existing_guidelines: Previously distilled guidelines
        reflection_lm: Model to use for distillation
    Returns:
        List of batches, where each batch is a list of indices into examples_data
    """
    max_input_tokens = _get_model_max_input_tokens(reflection_lm, model_type="chat")
    # Reserve headroom for the response and render-time variance.
    prompt_tokens_limit = max_input_tokens - _FLEX_TOKENS
    litellm_model = convert_mlflow_uri_to_litellm(reflection_lm) if _LITELLM_AVAILABLE else None
    # Compute base overhead (template + instructions + guidelines, without examples)
    template = Template(DISTILLATION_PROMPT_TEMPLATE)
    base_prompt = template.render(
        judge_instructions=judge_instructions,
        feedback_records=[],
        ids=[],
        existing_guidelines=existing_guidelines,
        zip=zip,
        len=len,
    )
    base_tokens = _count_tokens(base_prompt, litellm_model)
    # Compute token count for each example
    example_tokens = []
    for example in examples_data:
        example_str = json.dumps(_make_json_serializable(example))
        tokens = _count_tokens(example_str, litellm_model)
        example_tokens.append(tokens)
    # Greedy bin-packing
    batches = []
    current_batch = []
    current_tokens = base_tokens
    for idx, tokens in zip(indices, example_tokens):
        # Check if adding this example would exceed limits
        # (token budget or the per-batch record cap)
        if current_batch and (
            current_tokens + tokens > prompt_tokens_limit
            or len(current_batch) >= _MAX_RECORDS_PER_BATCH
        ):
            # Start a new batch
            batches.append(current_batch)
            current_batch = []
            current_tokens = base_tokens
        # NOTE(review): a single example larger than the budget still gets its
        # own (over-limit) batch here — confirm that is the intended behavior.
        current_batch.append(idx)
        current_tokens += tokens
    # Add the last batch
    if current_batch:
        batches.append(current_batch)
    return batches
def _parse_batch_response(
    response: str,
    index_to_trace_id: dict[int, str],
    existing_guideline_texts: set[str],
) -> list[Guideline]:
    """Parse LM response and convert to Guideline objects, filtering duplicates.

    Args:
        response: LM response in JSON format
        index_to_trace_id: Mapping from example indices to trace IDs
        existing_guideline_texts: Set of already existing guideline texts to avoid duplicates

    Returns:
        List of Guideline objects parsed from the response, excluding duplicates
    """
    parsed = json.loads(response)
    known_trace_ids = set(index_to_trace_id.values())

    def _to_trace_id(raw: Any) -> str | None:
        """Map an LLM-provided identifier (index or trace ID) to a known trace ID."""
        if isinstance(raw, int):
            return index_to_trace_id.get(raw)
        if isinstance(raw, str):
            # The LM may echo back a trace ID verbatim, or an index as a string.
            if raw in known_trace_ids:
                return raw
            try:
                return index_to_trace_id.get(int(raw))
            except ValueError:
                return None
        return None

    results = []
    for entry in parsed.get("guidelines", []):
        text = entry.get("guideline_text")
        # Drop empty guidelines and ones we already have.
        if not text or text in existing_guideline_texts:
            continue
        raw_ids = entry.get("source_trace_ids")
        if raw_ids is None:
            continue
        # Keep only identifiers that resolve to a real trace ID.
        resolved_ids = []
        for raw in raw_ids:
            trace_id = _to_trace_id(raw)
            if trace_id is not None:
                resolved_ids.append(trace_id)
        if resolved_ids:
            results.append(Guideline(guideline_text=text, source_trace_ids=resolved_ids))
    return results
def value_to_embedding_text(value: Any) -> str:
    """Convert an arbitrary value to text suitable for embedding.

    Trace objects are reduced to their request and response text (other trace
    attributes are skipped because their size is generally unbounded); any
    other value is rendered with ``str``.
    """
    if not isinstance(value, Trace):
        return str(value)
    fragments = []
    request = extract_request_from_trace(value)
    if request:
        fragments.append(request)
    response = extract_response_from_trace(value)
    if response:
        fragments.append(response)
    # " ".join on an empty list yields "" for traces with no request/response.
    return " ".join(fragments)
def distill_guidelines(
    examples: list["dspy.Example"],
    judge_instructions: str,
    reflection_lm: str,
    existing_guidelines: list[str],
) -> list[Guideline]:
    """Distill general guidelines from feedback examples.
    The number of parallel threads for LLM calls can be configured via the
    ``MLFLOW_GENAI_OPTIMIZE_MAX_WORKERS`` environment variable (default: 8).
    Args:
        examples: List of DSPy examples containing feedback (with _trace_id attribute)
        judge_instructions: Original judge instructions
        reflection_lm: Model to use for distillation
        existing_guidelines: Previously distilled guidelines
    Returns:
        List of newly distilled Guideline objects (not including existing ones)
    Raises:
        ImportError: If jinja2 is not installed.
    """
    if not _JINJA2_AVAILABLE:
        raise ImportError(
            "jinja2 is required for guideline distillation. "
            "Please install it using: `pip install jinja2`"
        )
    if not examples:
        return []
    examples_data = [_make_json_serializable(dict(example)) for example in examples]
    # Create index to trace_id mapping
    indices = list(range(len(examples_data)))
    index_to_trace_id = {
        i: example._trace_id if hasattr(example, "_trace_id") else f"example_{i}"
        for i, example in enumerate(examples)
    }
    distillation_lm = construct_dspy_lm(reflection_lm)
    # Create batches using greedy bin-packing
    batches = _create_batches(
        examples_data=examples_data,
        indices=indices,
        judge_instructions=judge_instructions,
        existing_guidelines=existing_guidelines,
        reflection_lm=reflection_lm,
    )
    # NOTE(review): with non-empty examples, _create_batches always emits at
    # least one batch, so this error path looks unreachable — confirm intent.
    if not batches:
        _logger.error(
            "Inputs to the judge are too large, please reduce the size of inputs for alignment. "
        )
        return []
    # Distill guidelines from each batch of feedback records in parallel
    template = Template(DISTILLATION_PROMPT_TEMPLATE)
    existing_guideline_texts = set(existing_guidelines)
    def process_batch(batch_indices: list[int]) -> list[Guideline]:
        # Render the distillation prompt for this batch and parse the LM's
        # JSON reply; failures are logged and collapse to an empty result so
        # one bad batch does not abort the whole distillation run.
        batch_examples = [examples_data[i] for i in batch_indices]
        prompt = template.render(
            judge_instructions=judge_instructions,
            feedback_records=batch_examples,
            ids=batch_indices,
            existing_guidelines=list(existing_guideline_texts),
            zip=zip,
            len=len,
        )
        try:
            response = distillation_lm(
                messages=[{"role": "user", "content": prompt}],
                response_format=Guidelines,
            )[0]
            return _parse_batch_response(
                response=response,
                index_to_trace_id=index_to_trace_id,
                existing_guideline_texts=existing_guideline_texts,
            )
        except Exception as e:
            _logger.error(
                f"Failed to generate/validate distilled guidelines for batch "
                f"with indices {batch_indices}: {e}"
            )
            return []
    # Process batches in parallel using ThreadPoolExecutor
    all_guidelines = []
    try:
        from tqdm.auto import tqdm
        use_tqdm = True
    except ImportError:
        use_tqdm = False
    with ThreadPoolExecutor(
        max_workers=MLFLOW_GENAI_OPTIMIZE_MAX_WORKERS.get(),
        thread_name_prefix="MLflowMemAlignDistillation",
    ) as executor:
        futures = {executor.submit(process_batch, batch): batch for batch in batches}
        if use_tqdm:
            futures_iter = tqdm(
                as_completed(futures), total=len(futures), desc="Distilling guidelines"
            )
        else:
            futures_iter = as_completed(futures)
        for future in futures_iter:
            batch_guidelines = future.result()
            all_guidelines.extend(batch_guidelines)
    # Deduplicate guidelines (since batches ran in parallel with the same existing_guideline_texts)
    seen_texts = set(existing_guidelines)
    new_guidelines = []
    for guideline in all_guidelines:
        if guideline.guideline_text not in seen_texts:
            seen_texts.add(guideline.guideline_text)
            new_guidelines.append(guideline)
    return new_guidelines
def retrieve_relevant_examples(
    retriever: "dspy.retrievers.Embeddings",
    examples: list["dspy.Example"],
    query_kwargs: dict[str, Any],
    signature: "dspy.Signature",
) -> list[tuple["dspy.Example", str]]:
    """Retrieve relevant examples using semantic search.

    Args:
        retriever: DSPy Embeddings retriever
        examples: List of all examples
        query_kwargs: Query parameters to construct search query
        signature: DSPy signature defining input fields

    Returns:
        List of tuples of (retrieved example, trace ID); empty when there is
        no retriever, no examples, or no usable query text.
    """
    if retriever is None or not examples:
        return []
    # Build the query from the highest-priority input field that has a value.
    field = get_query_field(signature)
    if not field:
        return []
    value = query_kwargs.get(field)
    if not value:
        return []
    query = value_to_embedding_text(value)
    if not query:
        return []
    search_results = retriever(query)
    matched = []
    for raw_index in search_results.indices:
        position = int(raw_index)
        example = examples[position]
        # Fall back to a synthetic ID when the example carries no trace ID.
        trace_id = getattr(example, "_trace_id", f"example_{position}")
        matched.append((example, trace_id))
    return matched
def create_extended_signature(base_signature: "dspy.Signature") -> "dspy.Signature":
    """Create extended DSPy signature with guidelines and example judgements fields.

    Args:
        base_signature: Base DSPy signature to extend

    Returns:
        Extended signature with ``example_judgements`` first, then
        ``guidelines``, prepended ahead of the base fields.
    """
    with_guidelines = base_signature.prepend("guidelines", create_guidelines_field())
    return with_guidelines.prepend("example_judgements", create_examples_field())
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/memalign/utils.py",
"license": "Apache License 2.0",
"lines": 403,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/judges/optimizers/memalign/test_optimizer.py | import json
from contextlib import contextmanager
from unittest.mock import MagicMock, patch
import pytest
import mlflow
from mlflow.entities.assessment import Assessment, AssessmentSource, Feedback
from mlflow.entities.assessment_source import AssessmentSourceType
from mlflow.exceptions import MlflowException
from mlflow.genai.judges import make_judge
from mlflow.genai.judges.optimizers import MemAlignOptimizer
from mlflow.genai.judges.optimizers.memalign.optimizer import (
_DATABRICKS_EMBEDDING_BATCH_SIZE,
_DEFAULT_EMBEDDING_BATCH_SIZE,
MemoryAugmentedJudge,
)
from mlflow.genai.scorers.base import Scorer, ScorerKind, SerializedScorer
@pytest.fixture
def sample_judge():
    """A minimal make_judge judge used as the alignment target in these tests."""
    return make_judge(
        name="test_judge",
        instructions="Evaluate if {{ outputs }} correctly answers {{ inputs }}",
        model="openai:/gpt-4",
    )
@pytest.fixture
def mock_embedder():
    """Patch ``dspy.Embedder`` and yield the mock instance its constructor returns."""
    with patch("dspy.Embedder") as mock_embedder_class:
        mock_embedder = MagicMock()
        mock_embedder_class.return_value = mock_embedder
        yield mock_embedder
@pytest.fixture
def mock_search():
    """Patch ``dspy.retrievers.Embeddings`` and yield the mock retriever instance."""
    with patch("dspy.retrievers.Embeddings") as mock_embeddings_class:
        mock_search = MagicMock()
        mock_embeddings_class.return_value = mock_search
        yield mock_search
@pytest.fixture
def mock_distillation_lm():
    """Patch the memalign ``construct_dspy_lm`` factory and yield the mock LM."""
    with patch(
        "mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"
    ) as mock_construct_lm:
        mock_lm = MagicMock()
        mock_construct_lm.return_value = mock_lm
        yield mock_lm
@contextmanager
def mock_apis(guidelines=None, batch_size=50):
    """Context manager for mocking API calls with optional guideline configuration.

    Patches the DSPy embedder/retriever classes, the distillation LM factory,
    and ``_create_batches``, then yields a dict of all mocks for assertions.
    """
    if guidelines is None:
        guidelines = []
    # _create_batches returns list of batches; mock returns single batch with all indices
    # based on actual input size
    def create_batches_side_effect(examples_data, indices, **kwargs):
        # Return single batch containing all indices
        return [list(indices)]
    with (
        patch("dspy.retrievers.Embeddings") as mock_embeddings_class,
        patch("dspy.Embedder") as mock_embedder_class,
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"
        ) as mock_construct_lm,
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._create_batches",
            side_effect=create_batches_side_effect,
        ) as mock_create_batches,
    ):
        # Mock distillation LM - include source_trace_ids for guidelines to be retained
        mock_lm = MagicMock()
        guidelines_json = {
            "guidelines": [
                {"guideline_text": g, "source_trace_ids": list(range(batch_size))}
                for g in guidelines
            ]
        }
        # NOTE(review): dict-repr + quote replacement is a fragile way to fake
        # JSON — it would break if a guideline text contained an apostrophe.
        mock_lm.return_value = [f"{guidelines_json}".replace("'", '"')]
        mock_construct_lm.return_value = mock_lm
        mock_embedder = MagicMock()
        mock_embedder_class.return_value = mock_embedder
        mock_search = MagicMock()
        mock_embeddings_class.return_value = mock_search
        yield {
            "lm": mock_lm,
            "embedder": mock_embedder,
            "search": mock_search,
            "construct_lm": mock_construct_lm,
            "embedder_class": mock_embedder_class,
            "embeddings_class": mock_embeddings_class,
            "create_batches": mock_create_batches,
        }
@pytest.fixture
def sample_traces():
    """Five real MLflow traces, each tagged with a human "yes" feedback assessment."""
    traces = []
    for i in range(5):
        with mlflow.start_span(name=f"test_span_{i}") as span:
            span.set_inputs({"inputs": f"input_{i}"})
            span.set_outputs({"outputs": f"output_{i}"})
        traces.append(mlflow.get_trace(mlflow.get_last_active_trace_id()))
    for i, trace in enumerate(traces):
        assessment = Assessment(
            name="test_judge",
            source=AssessmentSource(source_type=AssessmentSourceType.HUMAN, source_id="user1"),
            feedback=Feedback(value="yes", rationale=f"Reason {i}"),
        )
        trace.info.assessments = [assessment]
    return traces
def test_init_default_config():
    """Default optimizer config: k=5, small OpenAI embedder, 512 dims."""
    optimizer = MemAlignOptimizer()
    assert optimizer._retrieval_k == 5
    assert optimizer._embedding_model == "openai:/text-embedding-3-small"
    assert optimizer._embedding_dim == 512
def test_init_custom_config():
    """Constructor arguments override the reflection LM, k, and embedding dims."""
    optimizer = MemAlignOptimizer(
        reflection_lm="openai:/gpt-4",
        retrieval_k=3,
        embedding_dim=256,
    )
    assert optimizer._reflection_lm == "openai:/gpt-4"
    assert optimizer._retrieval_k == 3
    assert optimizer._embedding_dim == 256
def test_align_empty_traces_raises_error(sample_judge):
    """align() rejects an empty trace list with an MlflowException."""
    optimizer = MemAlignOptimizer()
    with pytest.raises(MlflowException, match="No traces provided"):
        optimizer.align(sample_judge, [])
def test_align_no_valid_feedback_raises_error(sample_judge):
    """align() fails when no trace carries a usable feedback assessment."""
    # Create a trace without any assessments - trace_to_dspy_example will return None
    with mlflow.start_span(name="test_span") as span:
        span.set_inputs({"inputs": "test input"})
        span.set_outputs({"outputs": "test output"})
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    optimizer = MemAlignOptimizer()
    with pytest.raises(MlflowException, match="No valid feedback records found"):
        optimizer.align(sample_judge, [trace])
def test_align_creates_memory_augmented_judge(sample_judge, sample_traces):
    """align() yields a judge with per-trace episodic memory and distilled guidelines."""
    with mock_apis(guidelines=["Guideline 1", "Guideline 2"]):
        optimizer = MemAlignOptimizer(retrieval_k=3)
        aligned_judge = optimizer.align(sample_judge, sample_traces[:3])
        assert aligned_judge is not None
        assert aligned_judge.name == sample_judge.name
        assert aligned_judge.model == sample_judge.model
        assert len(aligned_judge._episodic_memory) == 3
        assert len(aligned_judge._semantic_memory) == 2
def test_unalign_removes_traces(sample_judge, sample_traces):
    """unalign() drops exactly the episodic examples belonging to the given traces."""
    with mock_apis(guidelines=["Guideline 1"]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces)
        # Verify all examples are present
        num_examples = len(aligned_judge._episodic_memory)
        assert num_examples == len(sample_traces)
        traces_to_remove = [sample_traces[1], sample_traces[3]]
        unaligned_judge = aligned_judge.unalign(traces=traces_to_remove)
        # Verify examples for traces 1 and 3 are removed
        assert len(unaligned_judge._episodic_memory) == num_examples - 2
        remaining_trace_ids = {
            ex._trace_id for ex in unaligned_judge._episodic_memory if hasattr(ex, "_trace_id")
        }
        expected_remaining_trace_ids = {
            sample_traces[i].info.trace_id for i in range(len(sample_traces)) if i not in [1, 3]
        }
        assert remaining_trace_ids == expected_remaining_trace_ids
def test_unalign_no_matching_traces_returns_same_judge(sample_judge, sample_traces):
    """unalign() with unknown trace IDs is a no-op that returns the same judge object."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:3])
        # Create trace with non-existent ID
        mock_trace = MagicMock()
        mock_trace.info.trace_id = "trace_999"
        unaligned_judge = aligned_judge.unalign(traces=[mock_trace])
        assert unaligned_judge is aligned_judge
        assert len(unaligned_judge._episodic_memory) == 3
def test_judge_call_uses_semantic_memory(sample_judge, sample_traces):
    """Distilled guideline texts are stored verbatim in the judge's semantic memory."""
    with mock_apis(guidelines=["Be concise", "Be clear"]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        assert len(aligned_judge._semantic_memory) == 2
        guideline_texts = [g.guideline_text for g in aligned_judge._semantic_memory]
        assert "Be concise" in guideline_texts
        assert "Be clear" in guideline_texts
def test_judge_call_retrieves_relevant_examples(sample_judge, sample_traces):
    """Calling the judge runs retrieval and reports retrieved trace IDs in metadata."""
    with mock_apis(guidelines=[]) as mocks:
        # Configure search to return specific indices
        search_results = MagicMock()
        search_results.indices = [0, 2]
        mocks["search"].return_value = search_results
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:3])
        # Mock the predict module to return a result
        mock_prediction = MagicMock()
        mock_prediction.result = "yes"
        mock_prediction.rationale = "Test rationale"
        aligned_judge._predict_module = MagicMock(return_value=mock_prediction)
        assessment = aligned_judge(inputs="test input", outputs="test output")
        mocks["search"].assert_called_once()
        assert "retrieved_example_trace_ids" in assessment.metadata
        # Should return trace IDs, not indices
        retrieved_trace_ids = assessment.metadata["retrieved_example_trace_ids"]
        assert len(retrieved_trace_ids) == 2
        # Verify they're actual trace IDs from the sample traces
        expected_trace_ids = [sample_traces[0].info.trace_id, sample_traces[2].info.trace_id]
        assert retrieved_trace_ids == expected_trace_ids
def test_memory_augmented_judge_properties(sample_judge, sample_traces):
    """Aligned judge preserves name/model/fields and embeds guidelines in instructions."""
    with mock_apis(guidelines=["Guideline 1"]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        assert aligned_judge.name == sample_judge.name
        assert aligned_judge.model == sample_judge.model
        assert aligned_judge.get_input_fields() == sample_judge.get_input_fields()
        assert sample_judge.instructions in aligned_judge.instructions
        assert "Distilled Guidelines" in aligned_judge.instructions
        assert "Guideline 1" in aligned_judge.instructions
def test_incremental_alignment_preserves_examples(sample_judge, sample_traces):
    """Re-aligning an aligned judge accumulates examples and keeps the original base judge."""
    with mock_apis(guidelines=["Guideline 1"]):
        optimizer = MemAlignOptimizer()
        judge_v2 = optimizer.align(sample_judge, sample_traces[:2])
        assert len(judge_v2._episodic_memory) == 2
        assert judge_v2._base_judge is sample_judge
        judge_v3 = optimizer.align(judge_v2, sample_traces[2:4])
        assert len(judge_v3._episodic_memory) == 4
        assert judge_v3._base_judge is sample_judge
        trace_ids_in_v3 = {
            ex._trace_id for ex in judge_v3._episodic_memory if hasattr(ex, "_trace_id")
        }
        expected_trace_ids = {sample_traces[i].info.trace_id for i in range(4)}
        assert trace_ids_in_v3 == expected_trace_ids
def test_incremental_alignment_preserves_trace_ids(sample_judge, sample_traces):
    """Episodic trace IDs from earlier alignments survive a second align() call."""
    with mock_apis(guidelines=["Guideline 1"]):
        optimizer = MemAlignOptimizer()
        judge_v2 = optimizer.align(sample_judge, sample_traces[:2])
        batch1_ids = {t.info.trace_id for t in sample_traces[:2]}
        assert set(judge_v2._episodic_trace_ids) == batch1_ids
        judge_v3 = optimizer.align(judge_v2, sample_traces[2:4])
        all_ids = batch1_ids | {t.info.trace_id for t in sample_traces[2:4]}
        assert set(judge_v3._episodic_trace_ids) == all_ids
def test_incremental_alignment_with_single_example(sample_judge, sample_traces):
    """Incremental alignment also works starting from a single-example memory."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        judge_v2 = optimizer.align(sample_judge, sample_traces[:1])
        assert len(judge_v2._episodic_memory) == 1
        judge_v3 = optimizer.align(judge_v2, sample_traces[1:3])
        assert len(judge_v3._episodic_memory) == 3
def test_incremental_alignment_after_deserialization(sample_judge, sample_traces):
    """A deserialized judge re-fetches old traces by ID when aligned with new ones."""
    with mock_apis(guidelines=["Guideline 1"]):
        optimizer = MemAlignOptimizer()
        aligned_v1 = optimizer.align(sample_judge, sample_traces[:3])
        assert len(aligned_v1._episodic_memory) == 3
        dumped = aligned_v1.model_dump()
        serialized = SerializedScorer(**dumped)
        deserialized = MemoryAugmentedJudge._from_serialized(serialized)
        assert deserialized._episodic_memory == []
        assert len(deserialized._episodic_trace_ids) == 3
        trace_map = {t.info.trace_id: t for t in sample_traces[:3]}
        with patch(
            "mlflow.genai.judges.optimizers.memalign.optimizer.mlflow.get_trace",
            side_effect=lambda tid, **kwargs: trace_map.get(tid),
        ):
            aligned_v2 = optimizer.align(deserialized, sample_traces[3:5])
        assert len(aligned_v2._episodic_memory) == 5
def test_incremental_alignment_redistills_guidelines(sample_judge, sample_traces):
    """A second align() keeps old guidelines and adds newly distilled ones."""
    # First alignment: distills "Guideline A"
    with mock_apis(guidelines=["Guideline A"]):
        optimizer = MemAlignOptimizer()
        judge_v2 = optimizer.align(sample_judge, sample_traces[:2])
        assert len(judge_v2._semantic_memory) == 1
        guideline_texts = [g.guideline_text for g in judge_v2._semantic_memory]
        assert "Guideline A" in guideline_texts
    # Second alignment: distills "Guideline B" from ALL examples (old + new)
    with mock_apis(guidelines=["Guideline B"]):
        judge_v3 = optimizer.align(judge_v2, sample_traces[2:4])
        # Should have both old + new guidelines (re-distilled from all examples)
        assert len(judge_v3._semantic_memory) == 2
        guideline_texts = [g.guideline_text for g in judge_v3._semantic_memory]
        assert "Guideline A" in guideline_texts
        assert "Guideline B" in guideline_texts
def test_unalign_filters_guidelines_by_source_ids(sample_judge, sample_traces):
    """unalign() keeps guidelines unless every one of their source traces is removed."""
    # Test that unalign() filters guidelines based on source_ids
    with mock_apis(guidelines=["Guideline 1", "Guideline 2"]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces)
        assert len(aligned_judge._semantic_memory) == 2
        # Unalign some traces - should filter guidelines based on source_trace_ids
        traces_to_remove = [sample_traces[1], sample_traces[3]]
        unaligned_judge = aligned_judge.unalign(traces=traces_to_remove)
        # Unalign doesn't redistill, it filters guidelines based on source_trace_ids
        # Guidelines without source_trace_ids are retained
        # Guidelines are deleted only if ALL source traces were removed
        # Since mock_apis doesn't provide source_trace_ids, all guidelines are retained
        assert len(unaligned_judge._episodic_memory) == 3  # 5 - 2 removed
        assert len(unaligned_judge._semantic_memory) == 2
# =============================================================================
# Serialization Tests
# =============================================================================
def test_memory_augmented_judge_kind_property(sample_judge, sample_traces):
    """Aligned judges report the MEMORY_AUGMENTED scorer kind."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        assert aligned_judge.kind == ScorerKind.MEMORY_AUGMENTED
def test_memory_augmented_judge_model_dump(sample_judge, sample_traces):
    """model_dump() serializes config, episodic trace IDs, and semantic memory."""
    with mock_apis(guidelines=["Guideline A", "Guideline B"]):
        optimizer = MemAlignOptimizer(
            reflection_lm="openai:/gpt-4o-mini",
            retrieval_k=3,
            embedding_model="openai:/text-embedding-3-small",
            embedding_dim=256,
        )
        aligned_judge = optimizer.align(sample_judge, sample_traces[:3])
        dumped = aligned_judge.model_dump()
        # Verify top-level structure
        assert "memory_augmented_judge_data" in dumped
        assert dumped["name"] == sample_judge.name
        data = dumped["memory_augmented_judge_data"]
        assert "base_judge" in data
        assert "episodic_trace_ids" in data
        assert "semantic_memory" in data
        # Verify config fields
        assert data["reflection_lm"] == "openai:/gpt-4o-mini"
        assert data["retrieval_k"] == 3
        assert data["embedding_model"] == "openai:/text-embedding-3-small"
        assert data["embedding_dim"] == 256
        # Verify episodic trace IDs are extracted
        expected_trace_ids = [t.info.trace_id for t in sample_traces[:3]]
        assert set(data["episodic_trace_ids"]) == set(expected_trace_ids)
        # Verify semantic memory is serialized
        assert len(data["semantic_memory"]) == 2
        guideline_texts = [g["guideline_text"] for g in data["semantic_memory"]]
        assert "Guideline A" in guideline_texts
        assert "Guideline B" in guideline_texts
def test_memory_augmented_judge_from_serialized(sample_judge, sample_traces):
    """_from_serialized restores config/guidelines and defers heavy components."""
    with mock_apis(guidelines=["Be concise", "Be accurate"]):
        optimizer = MemAlignOptimizer(
            reflection_lm="openai:/gpt-4",
            retrieval_k=7,
            embedding_model="openai:/text-embedding-3-large",
            embedding_dim=1024,
        )
        aligned_judge = optimizer.align(sample_judge, sample_traces[:2])
        dumped = aligned_judge.model_dump()
        serialized = SerializedScorer(**dumped)
        restored = MemoryAugmentedJudge._from_serialized(serialized)
        # Verify config fields are restored
        assert restored._reflection_lm == "openai:/gpt-4"
        assert restored._retrieval_k == 7
        assert restored._embedding_model == "openai:/text-embedding-3-large"
        assert restored._embedding_dim == 1024
        # Verify semantic memory is restored
        assert len(restored._semantic_memory) == 2
        guideline_texts = [g.guideline_text for g in restored._semantic_memory]
        assert "Be concise" in guideline_texts
        assert "Be accurate" in guideline_texts
        # Verify lazy initialization state (_embedder is None means deferred)
        assert restored._embedder is None
        assert restored._episodic_memory == []
        assert len(restored._episodic_trace_ids) == 2
        # Verify deferred components are None
        assert restored._base_signature is None
        assert restored._retriever is None
        assert restored._predict_module is None
def test_scorer_model_validate_routes_to_memory_augmented_judge(sample_judge, sample_traces):
    """Scorer.model_validate dispatches dumped data back to MemoryAugmentedJudge."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        dumped = aligned_judge.model_dump()
        restored = Scorer.model_validate(dumped)
        assert isinstance(restored, MemoryAugmentedJudge)
        assert restored.name == sample_judge.name
def test_scorer_model_validate_json_routes_to_memory_augmented_judge(sample_judge, sample_traces):
    """Scorer.model_validate_json round-trips the judge through a JSON string."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        dumped = aligned_judge.model_dump()
        restored = Scorer.model_validate_json(json.dumps(dumped))
        assert isinstance(restored, MemoryAugmentedJudge)
        assert restored.name == sample_judge.name
def test_memory_augmented_judge_round_trip_serialization(sample_judge, sample_traces):
    """An aligned judge survives model_dump -> SerializedScorer -> _from_serialized intact."""
    with mock_apis(guidelines=["Test guideline"]):
        optimizer = MemAlignOptimizer(
            reflection_lm="openai:/gpt-4o-mini",
            retrieval_k=5,
            embedding_model="openai:/text-embedding-3-small",
            embedding_dim=512,
        )
        original_judge = optimizer.align(sample_judge, sample_traces[:3])
        dumped = original_judge.model_dump()
        serialized = SerializedScorer(**dumped)
        restored_judge = MemoryAugmentedJudge._from_serialized(serialized)
        # Verify config matches
        assert restored_judge.name == original_judge.name
        assert restored_judge._reflection_lm == original_judge._reflection_lm
        assert restored_judge._retrieval_k == original_judge._retrieval_k
        assert restored_judge._embedding_model == original_judge._embedding_model
        assert restored_judge._embedding_dim == original_judge._embedding_dim
        # Verify semantic memory matches
        original_guidelines = [g.guideline_text for g in original_judge._semantic_memory]
        restored_guidelines = [g.guideline_text for g in restored_judge._semantic_memory]
        assert original_guidelines == restored_guidelines
        # Verify episodic trace IDs match
        assert set(restored_judge._episodic_trace_ids) == set(original_judge._episodic_trace_ids)
def test_memory_augmented_judge_lazy_init_triggered_on_call(sample_judge, sample_traces):
    """Calling a deserialized judge triggers deferred init and re-fetches episodic traces."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:2])
        dumped = aligned_judge.model_dump()
        serialized = SerializedScorer(**dumped)
        restored = MemoryAugmentedJudge._from_serialized(serialized)
        # Verify deferred state (_embedder is None means not initialized)
        assert restored._embedder is None
        # Mock mlflow.get_trace and predict module for the call
        trace_map = {t.info.trace_id: t for t in sample_traces[:2]}
        with (
            patch(
                "mlflow.genai.judges.optimizers.memalign.optimizer.mlflow.get_trace",
                side_effect=lambda tid, **kwargs: trace_map.get(tid),
            ) as mock_get_trace,
            patch("dspy.Embedder") as mock_embedder_class,
            patch("dspy.Predict") as mock_predict_class,
            patch("dspy.retrievers.Embeddings"),
        ):
            mock_embedder_class.return_value = MagicMock()
            mock_prediction = MagicMock()
            mock_prediction.result = "yes"
            mock_prediction.rationale = "Test"
            mock_predict_instance = MagicMock(return_value=mock_prediction)
            mock_predict_class.return_value = mock_predict_instance
            restored(inputs="test", outputs="test")
            # Verify initialization happened
            assert restored._embedder is not None
            # One get_trace call per stored episodic trace ID
            assert mock_get_trace.call_count == 2
def test_memory_augmented_judge_lazy_init_logs_warning_for_missing_traces(
    sample_judge, sample_traces
):
    """Missing episodic traces at lazy-init time produce one warning, not a failure."""
    with mock_apis(guidelines=[]):
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:3])
        dumped = aligned_judge.model_dump()
        serialized = SerializedScorer(**dumped)
        restored = MemoryAugmentedJudge._from_serialized(serialized)
        # Mock get_trace to return only 1 of 3 traces (simulating missing traces)
        first_trace = sample_traces[0]

        def mock_get_trace_fn(tid, **kwargs):
            if tid == first_trace.info.trace_id:
                return first_trace
            return None

        with (
            patch(
                "mlflow.genai.judges.optimizers.memalign.optimizer.mlflow.get_trace",
                side_effect=mock_get_trace_fn,
            ),
            patch("dspy.Embedder"),
            patch("dspy.Predict") as mock_predict_class,
            patch("dspy.retrievers.Embeddings"),
            patch("mlflow.genai.judges.optimizers.memalign.optimizer._logger") as mock_logger,
        ):
            mock_prediction = MagicMock()
            mock_prediction.result = "yes"
            mock_prediction.rationale = "Test"
            mock_predict_instance = MagicMock(return_value=mock_prediction)
            mock_predict_class.return_value = mock_predict_instance
            restored(inputs="test", outputs="test")
            # The judge should degrade gracefully and say so exactly once
            mock_logger.warning.assert_called_once()
            warning_msg = mock_logger.warning.call_args[0][0]
            assert "Could not find 2 traces" in warning_msg
            assert "Judge will operate with partial memory" in warning_msg
def test_memory_augmented_judge_create_copy_preserves_trace_ids(sample_judge, sample_traces):
    """_create_copy keeps episodic trace IDs while resetting the lazy-init state."""
    with mock_apis(guidelines=["Test guideline"]):
        judge = MemAlignOptimizer().align(sample_judge, sample_traces[:3])
        assert len(judge._episodic_trace_ids) == 3
        clone = judge._create_copy()
        # The clone starts deferred: no embedder, no loaded episodic memory...
        assert clone._embedder is None
        assert clone._episodic_memory == []
        # ...but it remembers which traces to reload.
        assert set(clone._episodic_trace_ids) == set(judge._episodic_trace_ids)
def test_judge_call_uses_json_adapter(sample_judge, sample_traces):
    """Judge invocation runs its predict module under a dspy context with a JSONAdapter."""
    with mock_apis(guidelines=[]) as mocks:
        mocks["search"].return_value = MagicMock(indices=[0])
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        mock_prediction = MagicMock()
        mock_prediction.result = "yes"
        mock_prediction.rationale = "Test rationale"
        aligned_judge._predict_module = MagicMock(return_value=mock_prediction)
        # dspy.context is a context manager, so stub __enter__/__exit__ by hand
        with patch("dspy.context") as mock_context:
            mock_context.return_value.__enter__ = MagicMock()
            mock_context.return_value.__exit__ = MagicMock(return_value=False)
            aligned_judge(inputs="test input", outputs="test output")
            mock_context.assert_called_once()
            adapter_arg = mock_context.call_args.kwargs["adapter"]
            from dspy.adapters.json_adapter import JSONAdapter
            assert isinstance(adapter_arg, JSONAdapter)
def test_memory_augmented_judge_extracts_inputs_outputs_from_trace(sample_judge, sample_traces):
    """When only a trace is passed, inputs/outputs are extracted from the trace."""
    with mock_apis(guidelines=[]) as mocks:
        mocks["search"].return_value = MagicMock(indices=[])
        optimizer = MemAlignOptimizer()
        aligned_judge = optimizer.align(sample_judge, sample_traces[:1])
        mock_prediction = MagicMock()
        mock_prediction.result = "yes"
        mock_prediction.rationale = "Test rationale"
        aligned_judge._predict_module = MagicMock(return_value=mock_prediction)
        # Call with only trace - inputs/outputs should be extracted from trace
        test_trace = sample_traces[0]
        aligned_judge(trace=test_trace)
        # Verify predict_module was called with extracted inputs/outputs
        call_kwargs = aligned_judge._predict_module.call_args.kwargs
        assert call_kwargs["inputs"] == {"inputs": "input_0"}
        assert call_kwargs["outputs"] == {"outputs": "output_0"}
@pytest.mark.parametrize(
    ("embedding_model", "expected_batch_size"),
    [
        ("endpoints:/databricks-bge-large-en", _DATABRICKS_EMBEDDING_BATCH_SIZE),
        ("databricks:/my-embedding-endpoint", _DATABRICKS_EMBEDDING_BATCH_SIZE),
        ("openai:/text-embedding-3-small", _DEFAULT_EMBEDDING_BATCH_SIZE),
    ],
)
def test_embedder_batch_size(sample_judge, sample_traces, embedding_model, expected_batch_size):
    """Databricks-hosted embedding endpoints use the reduced batch size; others the default."""
    with mock_apis(guidelines=[]) as mocks:
        optimizer = MemAlignOptimizer(embedding_model=embedding_model)
        optimizer.align(sample_judge, sample_traces[:1])
        # Inspect the kwargs used to construct the dspy Embedder
        _, kwargs = mocks["embedder_class"].call_args
        assert kwargs["batch_size"] == expected_batch_size
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/memalign/test_optimizer.py",
"license": "Apache License 2.0",
"lines": 515,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/judges/optimizers/memalign/test_utils.py | from unittest.mock import MagicMock, patch
import dspy
import pytest
import mlflow
from mlflow.genai.judges.optimizers.memalign.utils import (
_count_tokens,
_create_batches,
distill_guidelines,
get_default_embedding_model,
retrieve_relevant_examples,
truncate_to_token_limit,
value_to_embedding_text,
)
def test_get_default_embedding_model():
    """The default embedding model is OpenAI's text-embedding-3-small."""
    expected = "openai:/text-embedding-3-small"
    assert get_default_embedding_model() == expected
def test_distill_guidelines_empty_examples():
    """With no examples, distillation returns [] without ever building an LM."""
    lm_target = "mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"
    with patch(lm_target) as mock_construct_lm:
        guidelines = distill_guidelines(
            examples=[],
            judge_instructions="Test instructions",
            reflection_lm="openai:/gpt-4",
            existing_guidelines=[],
        )
    assert guidelines == []
    mock_construct_lm.assert_not_called()
def test_distill_guidelines_with_examples():
    """Distilled guidelines map the LLM's batch-local indices back to trace IDs."""
    with (
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"
        ) as mock_construct_lm,
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._create_batches",
            return_value=[[0, 1]],
        ),
    ):
        # Examples must be iterable as (field, value) pairs and carry a trace id
        example1 = MagicMock(spec=dspy.Example)
        example1.__iter__ = lambda self: iter([("input", "test input"), ("output", "good")])
        example1._trace_id = "trace_1"
        example2 = MagicMock(spec=dspy.Example)
        example2.__iter__ = lambda self: iter([("input", "test input 2"), ("output", "bad")])
        example2._trace_id = "trace_2"
        mock_lm = MagicMock()
        mock_lm.return_value = [
            '{"guidelines": [{"guideline_text": "Be concise", "source_trace_ids": [0, 1]}]}'
        ]
        mock_construct_lm.return_value = mock_lm
        result = distill_guidelines(
            examples=[example1, example2],
            judge_instructions="Evaluate quality",
            reflection_lm="openai:/gpt-4",
            existing_guidelines=[],
        )
        assert len(result) == 1
        assert result[0].guideline_text == "Be concise"
        # The LLM returns indices [0, 1] which get mapped to trace IDs
        assert result[0].source_trace_ids == ["trace_1", "trace_2"]
        mock_construct_lm.assert_called_once_with("openai:/gpt-4")
def test_distill_guidelines_filters_existing():
    """Guidelines already present in existing_guidelines are dropped from the result."""
    with (
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"
        ) as mock_construct_lm,
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._create_batches",
            return_value=[[0]],
        ),
    ):
        example1 = MagicMock(spec=dspy.Example)
        example1.__iter__ = lambda self: iter([("input", "test"), ("output", "good")])
        example1._trace_id = "trace_1"
        mock_lm = MagicMock()
        # Guidelines need source_trace_ids to be retained
        mock_lm.return_value = [
            '{"guidelines": [{"guideline_text": "Be concise", "source_trace_ids": [0]}, '
            '{"guideline_text": "Be clear", "source_trace_ids": [0]}]}'
        ]
        mock_construct_lm.return_value = mock_lm
        result = distill_guidelines(
            examples=[example1],
            judge_instructions="Evaluate quality",
            reflection_lm="openai:/gpt-4",
            existing_guidelines=["Be concise"],
        )
        # "Be concise" is filtered out; only the novel guideline remains
        assert len(result) == 1
        assert result[0].guideline_text == "Be clear"
def test_distill_guidelines_handles_lm_error():
    """A per-batch LM failure is swallowed; distillation returns what succeeded."""
    # When LM fails for a batch, distill_guidelines logs error and continues
    with (
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"
        ) as mock_construct_lm,
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._create_batches",
            return_value=[[0]],
        ),
    ):
        example1 = MagicMock(spec=dspy.Example)
        example1.__iter__ = lambda self: iter([("input", "test"), ("output", "good")])
        example1._trace_id = "trace_1"
        mock_lm = MagicMock()
        mock_lm.side_effect = Exception("API Error")
        mock_construct_lm.return_value = mock_lm
        # The function catches errors per batch and continues, returning empty list
        result = distill_guidelines(
            examples=[example1],
            judge_instructions="Evaluate quality",
            reflection_lm="openai:/gpt-4",
            existing_guidelines=[],
        )
        assert result == []
def test_retrieve_relevant_examples_empty():
    """No examples means an empty result regardless of the query."""
    out = retrieve_relevant_examples(
        retriever=None,
        examples=[],
        query_kwargs={"inputs": "test"},
        signature=MagicMock(),
    )
    assert out == []
def test_retrieve_relevant_examples_no_search():
    """A missing retriever yields no results even when examples exist."""
    stub_examples = [MagicMock() for _ in range(2)]
    out = retrieve_relevant_examples(
        retriever=None,
        examples=stub_examples,
        query_kwargs={"inputs": "test"},
        signature=MagicMock(),
    )
    assert out == []
def test_retrieve_relevant_examples_success():
    """Retriever indices map back to (example, trace_id) pairs in retriever order."""
    examples = []
    for i in (1, 2, 3):
        ex = MagicMock()
        ex._trace_id = f"trace_{i}"
        examples.append(ex)
    # Retriever ranks example3 first, then example1
    retriever = MagicMock(return_value=MagicMock(indices=[2, 0]))
    signature = MagicMock()
    signature.input_fields = ["inputs", "outputs"]
    out = retrieve_relevant_examples(
        retriever=retriever,
        examples=examples,
        query_kwargs={"inputs": "test query", "outputs": "test output"},
        signature=signature,
    )
    assert out == [(examples[2], "trace_3"), (examples[0], "trace_1")]
    # Now uses only the first matching field from priority list ("inputs")
    retriever.assert_called_once_with("test query")
def test_retrieve_relevant_examples_uses_first_priority_field():
    """Only the first matching priority field feeds the retriever query."""
    retriever = MagicMock(return_value=MagicMock(indices=[0]))
    signature = MagicMock()
    signature.input_fields = ["inputs", "outputs", "context"]
    retrieve_relevant_examples(
        retriever=retriever,
        examples=[MagicMock()],
        query_kwargs={"inputs": "test", "outputs": "output_val", "context": "ctx"},
        signature=signature,
    )
    # "inputs" wins over "outputs" and "context"
    retriever.assert_called_once_with("test")
def test_retrieve_relevant_examples_returns_empty_for_none_value():
    """A None value in the chosen query field short-circuits to no results."""
    retriever = MagicMock()
    signature = MagicMock()
    signature.input_fields = ["inputs", "outputs"]
    out = retrieve_relevant_examples(
        retriever=retriever,
        examples=[MagicMock()],
        query_kwargs={"inputs": None, "outputs": "output_val"},
        signature=signature,
    )
    assert out == []
    retriever.assert_not_called()
def test_retrieve_relevant_examples_out_of_bounds_raises():
    """A retriever index beyond the example list surfaces as IndexError."""
    retriever = MagicMock(return_value=MagicMock(indices=[5]))
    signature = MagicMock()
    signature.input_fields = ["inputs"]
    with pytest.raises(IndexError, match="list index out of range"):
        retrieve_relevant_examples(
            retriever=retriever,
            examples=[MagicMock(), MagicMock()],
            query_kwargs={"inputs": "test"},
            signature=signature,
        )
@pytest.mark.parametrize(
    ("token_count", "text"),
    [
        (50, "This is a short text"),
        (100, "This text is exactly at the limit"),
    ],
)
def test_truncate_to_token_limit_no_truncation_needed(token_count, text):
    """Text at or under the model's token limit is returned unchanged."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.get_model_info",
            return_value={"max_input_tokens": 100},
        ),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.token_counter",
            return_value=token_count,
        ),
    ):
        result = truncate_to_token_limit(text, "openai:/gpt-4", model_type="chat")
        assert result == text
def test_truncate_to_token_limit_happy_path_with_truncation():
    """Over-limit text is shortened and re-counted until it fits."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.get_model_info",
            return_value={"max_input_tokens": 100},
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils.token_counter") as mock_counter,
    ):
        # First count (150) exceeds the 100-token limit; second (90) confirms the cut fits
        mock_counter.side_effect = [150, 90]
        text = "x" * 500
        result = truncate_to_token_limit(text, "openai:/gpt-4", model_type="chat")
        assert len(result) < len(text)
        assert mock_counter.call_count == 2
def test_truncate_to_token_limit_multiple_iterations():
    """Truncation keeps iterating while the token count stays over the limit."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.get_model_info",
            return_value={"max_input_tokens": 100},
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils.token_counter") as mock_counter,
    ):
        # Two over-limit counts (200, 120) before the third (95) finally fits
        mock_counter.side_effect = [200, 120, 95]
        text = "x" * 1000
        result = truncate_to_token_limit(text, "openai:/gpt-4", model_type="chat")
        assert len(result) < len(text)
        assert mock_counter.call_count == 3
def test_truncate_to_token_limit_without_litellm():
    """Without litellm, the 128k fallback limit leaves short text untouched."""
    with patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", False):
        original = "a" * 200
        # Fallback limit is _MAX_CHAT_MODEL_TOKENS (128000) >> 200 chars
        result = truncate_to_token_limit(original, "openai:/gpt-4", model_type="chat")
        assert result == original
@pytest.mark.parametrize(
    "get_model_info_side_effect",
    [
        Exception("API Error"),
        {"max_input_tokens": None},
    ],
)
def test_truncate_to_token_limit_get_model_info_fallback(get_model_info_side_effect):
    """Short text passes through when the model-info lookup fails or lacks a limit.

    Covers two fallback paths of ``get_model_info``: raising an exception,
    and returning a payload whose ``max_input_tokens`` is None.
    """
    # Build the patch kwargs up front instead of constructing an unstarted
    # patcher object in a branch and nesting a second `with` around it.
    if isinstance(get_model_info_side_effect, Exception):
        model_info_kwargs = {"side_effect": get_model_info_side_effect}
    else:
        model_info_kwargs = {"return_value": get_model_info_side_effect}
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.get_model_info",
            **model_info_kwargs,
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils.token_counter", return_value=50),
    ):
        text = "This is a short text"
        result = truncate_to_token_limit(text, "openai:/gpt-4", model_type="chat")
        assert result == text
@pytest.mark.parametrize(
    ("value", "expected"),
    [
        ("hello world", "hello world"),
        (42, "42"),
        ({"key": "value"}, "{'key': 'value'}"),
        ([1, 2, 3], "[1, 2, 3]"),
        (None, "None"),
    ],
)
def test_value_to_embedding_text_non_trace(value, expected):
    """Non-trace values are stringified for embedding."""
    actual = value_to_embedding_text(value)
    assert actual == expected
def test_value_to_embedding_text_trace():
    """A Trace's span inputs and outputs both appear in the embedding text."""
    # NOTE(review): relies on the span being exported when the `with` block
    # exits so get_last_active_trace_id() can resolve it — confirm ordering.
    with mlflow.start_span(name="test_span") as span:
        span.set_inputs({"question": "What is ML?"})
        span.set_outputs({"answer": "ML is machine learning."})
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    result = value_to_embedding_text(trace)
    assert "What is ML?" in result
    assert "ML is machine learning." in result
def test_count_tokens_with_litellm():
    """When litellm is available, _count_tokens delegates to token_counter."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.token_counter", return_value=42
        ) as mock_counter,
    ):
        assert _count_tokens("test text", "gpt-4") == 42
    mock_counter.assert_called_once_with(model="gpt-4", text="test text")
def test_count_tokens_without_litellm():
    """The fallback approximates tokens as len(text) // 4."""
    with patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", False):
        sample = "a" * 100
        assert _count_tokens(sample, None) == len(sample) // 4
def test_count_tokens_with_none_model():
    """A None model forces the char-count fallback even when litellm is present."""
    with patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True):
        assert _count_tokens("a" * 100, None) == 25
def test_create_batches_empty_examples():
    """No examples produces no batches."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", False),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._get_model_max_input_tokens",
            return_value=10000,
        ),
    ):
        batches = _create_batches(
            examples_data=[],
            indices=[],
            judge_instructions="test",
            existing_guidelines=[],
            reflection_lm="openai:/gpt-4",
        )
    assert batches == []
def test_create_batches_single_batch():
    """A generous token budget keeps all examples in a single batch."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", False),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._get_model_max_input_tokens",
            return_value=100000,
        ),
    ):
        data = [{"input": f"test{i}"} for i in (1, 2, 3)]
        batches = _create_batches(
            examples_data=data,
            indices=[0, 1, 2],
            judge_instructions="test",
            existing_guidelines=[],
            reflection_lm="openai:/gpt-4",
        )
    # All examples fit together
    assert batches == [[0, 1, 2]]
def test_create_batches_multiple_batches_by_token_limit():
    """The token budget forces one example per batch when two would exceed it."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._get_model_max_input_tokens",
            return_value=10000,
        ),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.convert_mlflow_uri_to_litellm",
            return_value="gpt-4",
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils.token_counter") as mock_counter,
    ):
        # Base prompt = 1000 tokens, each example = 3000 tokens
        # Limit = 10000 - 5000 (flex) = 5000 tokens
        # Can fit 1 example per batch: 1000 + 3000 = 4000 < 5000
        # But 2 examples: 1000 + 6000 = 7000 > 5000
        mock_counter.side_effect = [1000, 3000, 3000, 3000]
        examples = [{"input": f"test{i}"} for i in range(3)]
        result = _create_batches(
            examples_data=examples,
            indices=[0, 1, 2],
            judge_instructions="test",
            existing_guidelines=[],
            reflection_lm="openai:/gpt-4",
        )
        assert len(result) == 3
        assert result == [[0], [1], [2]]
def test_create_batches_multiple_batches_by_max_records():
    """_MAX_RECORDS_PER_BATCH caps batch size even under a huge token budget."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", False),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._get_model_max_input_tokens",
            return_value=10000000,
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils._MAX_RECORDS_PER_BATCH", 2),
    ):
        data = [{"input": f"test{i}"} for i in range(5)]
        batches = _create_batches(
            examples_data=data,
            indices=list(range(5)),
            judge_instructions="test",
            existing_guidelines=[],
            reflection_lm="openai:/gpt-4",
        )
    # 5 examples with at most 2 per batch -> [0,1], [2,3], [4]
    assert batches == [[0, 1], [2, 3], [4]]
def test_create_batches_variable_length_examples():
    """Batches pack greedily until the next example would exceed the token budget."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._get_model_max_input_tokens",
            return_value=10000,
        ),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.convert_mlflow_uri_to_litellm",
            return_value="gpt-4",
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils.token_counter") as mock_counter,
    ):
        # Base = 1000, limit = 5000
        # Examples: 500, 500, 500, 3500 tokens
        # Batch 1: 1000 + 500 + 500 + 500 = 2500 (fits)
        # Adding 3500 would make 6000 > 5000, so start new batch
        # Batch 2: 1000 + 3500 = 4500 (fits)
        mock_counter.side_effect = [1000, 500, 500, 500, 3500]
        examples = [
            {"input": "short1"},
            {"input": "short2"},
            {"input": "short3"},
            {"input": "very long example " * 100},
        ]
        result = _create_batches(
            examples_data=examples,
            indices=[0, 1, 2, 3],
            judge_instructions="test",
            existing_guidelines=[],
            reflection_lm="openai:/gpt-4",
        )
        assert len(result) == 2
        assert result[0] == [0, 1, 2]
        assert result[1] == [3]
def test_create_batches_single_large_example():
    """An example exceeding the limit on its own still forms its own batch."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils._LITELLM_AVAILABLE", True),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._get_model_max_input_tokens",
            return_value=10000,
        ),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils.convert_mlflow_uri_to_litellm",
            return_value="gpt-4",
        ),
        patch("mlflow.genai.judges.optimizers.memalign.utils.token_counter") as mock_counter,
    ):
        # Base = 1000, limit = 5000
        # Single example = 6000 tokens (exceeds limit even alone)
        # Still gets added to a batch (we don't skip it)
        mock_counter.side_effect = [1000, 6000]
        examples = [{"input": "huge example"}]
        result = _create_batches(
            examples_data=examples,
            indices=[0],
            judge_instructions="test",
            existing_guidelines=[],
            reflection_lm="openai:/gpt-4",
        )
        # Single example still forms a batch even if over limit
        assert len(result) == 1
        assert result[0] == [0]
def test_distill_guidelines_empty_batches():
    """When batching produces nothing, distillation returns an empty list."""
    with (
        patch("mlflow.genai.judges.optimizers.memalign.utils.construct_dspy_lm"),
        patch(
            "mlflow.genai.judges.optimizers.memalign.utils._create_batches",
            return_value=[],
        ),
    ):
        example = MagicMock(spec=dspy.Example)
        example.__iter__ = lambda self: iter([("input", "test")])
        example._trace_id = "trace_1"
        guidelines = distill_guidelines(
            examples=[example],
            judge_instructions="test",
            reflection_lm="openai:/gpt-4",
            existing_guidelines=[],
        )
        assert guidelines == []
# Allow running this test module directly, outside the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/memalign/test_utils.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/session_processor.py | """Session-level online scoring processor for executing scorers on completed sessions."""
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass, field
from mlflow.entities.assessment import Assessment
from mlflow.environment_variables import MLFLOW_ONLINE_SCORING_MAX_WORKER_THREADS
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.online.constants import (
EXCLUDE_EVAL_RUN_TRACES_FILTER,
MAX_SESSIONS_PER_JOB,
)
from mlflow.genai.scorers.online.entities import CompletedSession, OnlineScorer
from mlflow.genai.scorers.online.sampler import OnlineScorerSampler
from mlflow.genai.scorers.online.session_checkpointer import (
OnlineSessionCheckpointManager,
OnlineSessionScoringCheckpoint,
OnlineSessionScoringTimeWindow,
)
from mlflow.genai.scorers.online.trace_loader import OnlineTraceLoader
from mlflow.store.tracking.abstract_store import AbstractStore
from mlflow.tracing.constant import AssessmentMetadataKey
_logger = logging.getLogger(__name__)
@dataclass
class SessionScoringTask:
    """A completed session paired with the scorers selected to run on it."""

    # The completed session awaiting scoring.
    session: CompletedSession
    # Session-level scorers chosen for this session after filter matching
    # and sampling; empty means nothing to run.
    scorers: list[Scorer] = field(default_factory=list)
class OnlineSessionScoringProcessor:
"""
Orchestrates online scoring of completed sessions.
This processor identifies sessions that have been inactive for a completion buffer
period (no new traces added), applies session-level scorers to them, and maintains
a checkpoint to avoid reprocessing. Sessions are processed in parallel with one
thread per session.
The processor:
- Fetches completed sessions within a time window based on checkpoint state
- Applies sampling to determine which scorers should run on each session
- Loads all traces for each session and evaluates session-level scorers
- Logs assessments with session metadata for cleanup tracking
- Removes old assessments when a session is re-scored (e.g., if new traces are
added since the last time it was scored)
- Updates the checkpoint to the last processed session
Sessions are processed in chronological order (sorted by last_trace_timestamp_ms
and session_id) to ensure deterministic, resumable processing.
"""
    def __init__(
        self,
        trace_loader: "OnlineTraceLoader",
        checkpoint_manager: OnlineSessionCheckpointManager,
        sampler: OnlineScorerSampler,
        experiment_id: str,
        tracking_store: AbstractStore,
    ):
        """Store collaborators; use `create` for the standard wiring.

        Args:
            trace_loader: Loads trace infos for sessions in this experiment.
            checkpoint_manager: Reads and persists the session-scoring checkpoint.
            sampler: Groups scorers by filter and applies per-scorer sampling.
            experiment_id: Experiment whose sessions are scored.
            tracking_store: Backing store for sessions, traces, and assessments.
        """
        self._trace_loader = trace_loader
        self._checkpoint_manager = checkpoint_manager
        self._sampler = sampler
        self._experiment_id = experiment_id
        self._tracking_store = tracking_store
@classmethod
def create(
cls,
experiment_id: str,
online_scorers: list[OnlineScorer],
tracking_store: AbstractStore,
) -> "OnlineSessionScoringProcessor":
"""
Factory method to create an OnlineSessionScoringProcessor with dependencies.
Args:
experiment_id: The experiment ID to process sessions from.
online_scorers: List of OnlineScorer instances.
tracking_store: The tracking store instance.
Returns:
Configured OnlineSessionScoringProcessor instance.
"""
return cls(
trace_loader=OnlineTraceLoader(tracking_store),
checkpoint_manager=OnlineSessionCheckpointManager(tracking_store, experiment_id),
sampler=OnlineScorerSampler(online_scorers),
experiment_id=experiment_id,
tracking_store=tracking_store,
)
    def process_sessions(self) -> None:
        """
        Execute online scoring for completed sessions in the experiment.

        Finds sessions that have been inactive for the completion buffer duration,
        applies sampling to select scorers, runs scoring in parallel (one thread per
        session), and updates the checkpoint.
        """
        if not self._sampler._online_scorers:
            _logger.debug("No scorer configs provided, skipping")
            return
        time_window = self._checkpoint_manager.calculate_time_window()
        checkpoint = self._checkpoint_manager.get_checkpoint()
        _logger.debug(
            f"Session scoring for experiment {self._experiment_id}: "
            f"looking for sessions in "
            f"[{time_window.min_last_trace_timestamp_ms}, "
            f"{time_window.max_last_trace_timestamp_ms}]"
        )
        session_tasks = self._fetch_and_filter_completed_sessions(time_window, checkpoint)
        if not session_tasks:
            _logger.debug("No completed sessions found, skipping")
            # Still need to advance checkpoint to avoid reprocessing the same time window
            # session_id=None marks "nothing processed at this timestamp".
            checkpoint = OnlineSessionScoringCheckpoint(
                timestamp_ms=time_window.max_last_trace_timestamp_ms,
                session_id=None,
            )
            self._checkpoint_manager.persist_checkpoint(checkpoint)
            return
        _logger.debug(f"Found {len(session_tasks)} completed sessions for scoring")
        self._execute_session_scoring(session_tasks)
        # Update checkpoint to last processed session
        # Tasks are sorted (timestamp ASC, session_id ASC), so the last task is
        # the correct resume point even if some individual sessions failed.
        latest_task = session_tasks[-1]
        checkpoint = OnlineSessionScoringCheckpoint(
            timestamp_ms=latest_task.session.last_trace_timestamp_ms,
            session_id=latest_task.session.session_id,
        )
        self._checkpoint_manager.persist_checkpoint(checkpoint)
        _logger.debug(f"Online session scoring completed for experiment {self._experiment_id}")
    def _fetch_and_filter_completed_sessions(
        self,
        time_window: OnlineSessionScoringTimeWindow,
        checkpoint: OnlineSessionScoringCheckpoint | None,
    ) -> list[SessionScoringTask]:
        """
        Fetch completed sessions and create scoring tasks with applicable scorers.

        Fetches sessions separately for each unique filter_string used by session-level scorers,
        creating tasks that track which scorers should run on each session based on filter match.

        Sessions at the checkpoint boundary are filtered based on session_id to avoid
        reprocessing sessions that were already scored in a previous run.

        Args:
            time_window: Time window with min/max last trace timestamps
            checkpoint: Current checkpoint with timestamp and session_id

        Returns:
            List of SessionScoringTask objects, each containing a session and applicable scorers,
            sorted by (session.last_trace_timestamp_ms ASC, session.session_id ASC)
        """
        # Group session-level scorers by their filter_string
        session_scorers_by_filter = self._sampler.group_scorers_by_filter(session_level=True)
        # Fetch completed sessions for each filter group and build tasks
        # Keyed by session_id so a session matched by multiple filters gets one task.
        tasks = {}
        for filter_string, scorers in session_scorers_by_filter.items():
            sessions = self._tracking_store.find_completed_sessions(
                experiment_id=self._experiment_id,
                min_last_trace_timestamp_ms=time_window.min_last_trace_timestamp_ms,
                max_last_trace_timestamp_ms=time_window.max_last_trace_timestamp_ms,
                max_results=MAX_SESSIONS_PER_JOB,
                filter_string=filter_string,
            )
            # For each session that matches this filter, add applicable scorers
            for session in sessions:
                # Apply sampling to select which scorers from this filter group should run
                if selected := self._sampler.sample(session.session_id, scorers):
                    if session.session_id not in tasks:
                        tasks[session.session_id] = SessionScoringTask(session=session, scorers=[])
                    # Add scorers, avoiding duplicates (same scorer from different filters)
                    existing_scorer_names = {s.name for s in tasks[session.session_id].scorers}
                    tasks[session.session_id].scorers.extend(
                        s for s in selected if s.name not in existing_scorer_names
                    )
        # Sort tasks by (last_trace_timestamp_ms ASC, session_id ASC) for deterministic ordering
        sorted_tasks = sorted(
            tasks.values(),
            key=lambda t: (t.session.last_trace_timestamp_ms, t.session.session_id),
        )
        # Filter out sessions at checkpoint boundary that have already been processed
        # (`<=` also excludes the checkpointed session itself on timestamp ties).
        if checkpoint is not None and checkpoint.session_id is not None:
            sorted_tasks = [
                task
                for task in sorted_tasks
                if not (
                    task.session.last_trace_timestamp_ms == checkpoint.timestamp_ms
                    and task.session.session_id <= checkpoint.session_id
                )
            ]
        # Respect max_results limit
        return sorted_tasks[:MAX_SESSIONS_PER_JOB]
def _clean_up_old_assessments(
self, trace, session_id: str, new_assessments: list[Assessment]
) -> None:
"""
Remove old online scoring assessments after successfully logging new ones.
Finds and deletes previous assessments from the same session/scorers to avoid
duplicates when a session is re-scored (e.g., when new traces are added).
Args:
trace: The Trace object containing all assessments.
session_id: The session ID to match in assessment metadata.
new_assessments: List of new assessments that were just logged.
"""
if not trace or not trace.info.assessments:
return
new_assessment_names = {a.name for a in new_assessments}
new_assessment_ids = {a.assessment_id for a in new_assessments}
deleted_count = 0
for assessment in trace.info.assessments:
metadata = assessment.metadata or {}
online_session_id = metadata.get(AssessmentMetadataKey.ONLINE_SCORING_SESSION_ID)
if (
online_session_id == session_id
and assessment.name in new_assessment_names
and assessment.assessment_id not in new_assessment_ids
):
self._tracking_store.delete_assessment(
trace_id=trace.info.trace_id, assessment_id=assessment.assessment_id
)
deleted_count += 1
if deleted_count > 0:
_logger.debug(f"Deleted {deleted_count} old assessments for session {session_id}")
def _execute_session_scoring(self, tasks: list[SessionScoringTask]) -> None:
"""
Execute session-level scoring tasks in parallel.
Each thread loads traces for its session independently and runs all applicable
scorers on that session.
Args:
tasks: List of SessionScoringTask objects containing sessions and their scorers.
"""
with ThreadPoolExecutor(
max_workers=MLFLOW_ONLINE_SCORING_MAX_WORKER_THREADS.get(),
thread_name_prefix="SessionScoring",
) as executor:
futures = {}
for task in tasks:
future = executor.submit(self._score_session, task)
futures[future] = task
for future in as_completed(futures):
task = futures[future]
try:
future.result()
except Exception as e:
_logger.warning(
f"Failed to score session {task.session.session_id}: {e}",
exc_info=True,
)
    def _score_session(self, task: SessionScoringTask) -> None:
        """
        Score a single session by loading its traces and applying pre-selected scorers.

        This method runs in a worker thread. It fetches all traces for the session
        and runs the scorers that were already selected during task creation.

        Args:
            task: The SessionScoringTask containing the session and applicable scorers.
        """
        # Import evaluation modules lazily to avoid pulling in pandas at module load
        # time, which would break the skinny client.
        from mlflow.genai.evaluation.entities import EvalItem
        from mlflow.genai.evaluation.harness import _log_assessments
        from mlflow.genai.evaluation.session_utils import evaluate_session_level_scorers
        if not task.scorers:
            return
        session = task.session
        # Restrict the query to this session's traces and exclude traces produced
        # by evaluation runs (EXCLUDE_EVAL_RUN_TRACES_FILTER).
        session_filter = f"metadata.`mlflow.trace.session` = '{session.session_id}'"
        combined_filter = f"{EXCLUDE_EVAL_RUN_TRACES_FILTER} AND {session_filter}"
        trace_infos = self._trace_loader.fetch_trace_infos_in_range(
            experiment_id=self._experiment_id,
            start_time_ms=session.first_trace_timestamp_ms,
            end_time_ms=session.last_trace_timestamp_ms,
            filter_string=combined_filter,
        )
        if not trace_infos:
            # Can happen when every trace in the session was filtered out above.
            _logger.warning(f"No traces found for session {session.session_id}")
            return
        trace_ids = [t.trace_id for t in trace_infos]
        full_traces = self._trace_loader.fetch_traces(trace_ids)
        if not full_traces:
            _logger.warning(f"Failed to fetch full traces for session {session.session_id}")
            return
        # Order traces chronologically before building the session items.
        full_traces.sort(key=lambda t: t.info.timestamp_ms)
        trace_map = {t.info.trace_id: t for t in full_traces}
        session_items = [EvalItem.from_trace(t) for t in full_traces]
        # result maps trace_id -> list of feedback assessments for that trace.
        result = evaluate_session_level_scorers(
            session_id=session.session_id,
            session_items=session_items,
            multi_turn_scorers=task.scorers,
        )
        for trace_id, feedbacks in result.items():
            if feedbacks and (trace := trace_map.get(trace_id)):
                try:
                    # Add session ID metadata to identify these as online scoring assessments
                    for feedback in feedbacks:
                        feedback.metadata = {
                            **(feedback.metadata or {}),
                            AssessmentMetadataKey.ONLINE_SCORING_SESSION_ID: session.session_id,
                        }
                    _log_assessments(run_id=None, trace=trace, assessments=feedbacks)
                    # Clean up old assessments after successfully logging new ones
                    self._clean_up_old_assessments(trace, session.session_id, feedbacks)
                except Exception as e:
                    # Per-trace failure isolation: one trace's logging failure must
                    # not block assessments for the session's other traces.
                    _logger.warning(
                        f"Failed to log assessments for trace {trace_id} "
                        f"in session {session.session_id}: {e}",
                        exc_info=_logger.isEnabledFor(logging.DEBUG),
                    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/session_processor.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/online/test_session_processor.py | import json
import uuid
from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities import Trace, TraceData, TraceInfo
from mlflow.entities.assessment import Assessment
from mlflow.entities.trace_location import (
MlflowExperimentLocation,
TraceLocation,
TraceLocationType,
)
from mlflow.entities.trace_state import TraceState
from mlflow.genai.scorers.builtin_scorers import ConversationCompleteness
from mlflow.genai.scorers.online.entities import CompletedSession, OnlineScorer, OnlineScoringConfig
from mlflow.genai.scorers.online.sampler import OnlineScorerSampler
from mlflow.genai.scorers.online.session_checkpointer import (
OnlineSessionCheckpointManager,
OnlineSessionScoringCheckpoint,
OnlineSessionScoringTimeWindow,
)
from mlflow.genai.scorers.online.session_processor import OnlineSessionScoringProcessor
from mlflow.genai.scorers.online.trace_loader import OnlineTraceLoader
from mlflow.tracing.constant import AssessmentMetadataKey
def make_online_scorer(scorer, sample_rate: float = 1.0, filter_string: str | None = None):
    """Wrap a scorer in an OnlineScorer with a fresh config bound to experiment 'exp1'."""
    serialized = json.dumps(scorer.model_dump())
    return OnlineScorer(
        name=scorer.name,
        serialized_scorer=serialized,
        online_config=OnlineScoringConfig(
            online_scoring_config_id=uuid.uuid4().hex,
            scorer_id=uuid.uuid4().hex,
            sample_rate=sample_rate,
            experiment_id="exp1",
            filter_string=filter_string,
        ),
    )
def make_completed_session(
    session_id: str, first_trace_timestamp_ms: int, last_trace_timestamp_ms: int
):
    """Build a CompletedSession spanning the given first/last trace timestamps."""
    attrs = {
        "session_id": session_id,
        "first_trace_timestamp_ms": first_trace_timestamp_ms,
        "last_trace_timestamp_ms": last_trace_timestamp_ms,
    }
    return CompletedSession(**attrs)
def make_trace_info(trace_id: str, timestamp_ms: int = 1000):
    """Create a minimal OK-state TraceInfo located in experiment 'exp1'."""
    location = TraceLocation(
        type=TraceLocationType.MLFLOW_EXPERIMENT,
        mlflow_experiment=MlflowExperimentLocation(experiment_id="exp1"),
    )
    return TraceInfo(
        trace_id=trace_id,
        trace_location=location,
        request_time=timestamp_ms,
        state=TraceState.OK,
    )
def make_trace(trace_id: str, timestamp_ms: int = 1000, assessments=None):
    """Create a spanless Trace (experiment 'exp1') with optional assessments."""
    info = TraceInfo(
        trace_id=trace_id,
        trace_location=TraceLocation(
            type=TraceLocationType.MLFLOW_EXPERIMENT,
            mlflow_experiment=MlflowExperimentLocation(experiment_id="exp1"),
        ),
        request_time=timestamp_ms,
        state=TraceState.OK,
        assessments=assessments if assessments is not None else [],
    )
    return Trace(info=info, data=TraceData(spans=[]))
def make_processor(loader, checkpoint_mgr, sampler, store):
    """Construct an OnlineSessionScoringProcessor wired to experiment 'exp1'."""
    return OnlineSessionScoringProcessor(
        experiment_id="exp1",
        trace_loader=loader,
        checkpoint_manager=checkpoint_mgr,
        sampler=sampler,
        tracking_store=store,
    )
def make_assessment(assessment_id: str, name: str, session_id: str | None = None):
    """Create a mock Assessment, optionally tagged with an online-scoring session ID."""
    mock_assessment = MagicMock(spec=Assessment)
    mock_assessment.assessment_id = assessment_id
    mock_assessment.name = name
    if session_id:
        mock_assessment.metadata = {
            AssessmentMetadataKey.ONLINE_SCORING_SESSION_ID: session_id
        }
    else:
        mock_assessment.metadata = {}
    return mock_assessment
@pytest.fixture
def mock_trace_loader():
    """Mock OnlineTraceLoader; tests stub its fetch_* return values as needed."""
    return MagicMock(spec=OnlineTraceLoader)
@pytest.fixture
def mock_checkpoint_manager():
    """Mock checkpoint manager: [1000, 2000] time window, no prior checkpoint."""
    window = OnlineSessionScoringTimeWindow(
        min_last_trace_timestamp_ms=1000, max_last_trace_timestamp_ms=2000
    )
    mgr = MagicMock(spec=OnlineSessionCheckpointManager)
    mgr.calculate_time_window.return_value = window
    mgr.get_checkpoint.return_value = None
    return mgr
@pytest.fixture
def mock_tracking_store():
    """Plain MagicMock standing in for the tracking store (no spec)."""
    return MagicMock()
@pytest.fixture
def sampler_with_scorers():
    """Sampler holding one always-selected (sample_rate=1.0) session-level scorer."""
    return OnlineScorerSampler([make_online_scorer(ConversationCompleteness(), sample_rate=1.0)])
@pytest.fixture
def empty_sampler():
    """Sampler configured with no online scorers at all."""
    return OnlineScorerSampler([])
@pytest.fixture
def mock_evaluate():
    """Patch evaluate_session_level_scorers at its source module (imported lazily)."""
    with patch("mlflow.genai.evaluation.session_utils.evaluate_session_level_scorers") as mock:
        yield mock
@pytest.fixture
def mock_log_assessments():
    """Patch _log_assessments at its source module (imported lazily by the processor)."""
    with patch("mlflow.genai.evaluation.harness._log_assessments") as mock:
        yield mock
@pytest.fixture
def mock_score_session():
    """Patch the processor's _score_session so tests can observe task dispatch."""
    with patch(
        "mlflow.genai.scorers.online.session_processor.OnlineSessionScoringProcessor._score_session"
    ) as mock:
        yield mock
def test_process_sessions_skips_when_no_scorers(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store, empty_sampler
):
    """
    Scenario: no online scorers are configured.

    With nothing to score, the processor should return early without computing
    a time window or querying for completed sessions.
    """
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, empty_sampler, mock_tracking_store
    )

    processor.process_sessions()

    mock_tracking_store.find_completed_sessions.assert_not_called()
    mock_checkpoint_manager.calculate_time_window.assert_not_called()
def test_process_sessions_updates_checkpoint_when_no_sessions(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store, sampler_with_scorers
):
    """
    Scenario: scorers exist but the time window contains no completed sessions.

    The checkpoint should still advance to the end of the window
    (max_last_trace_timestamp_ms = 2000) so the next run does not rescan the
    same empty window; no session_id is recorded since nothing was scored.
    """
    mock_tracking_store.find_completed_sessions.return_value = []

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    (persisted,) = mock_checkpoint_manager.persist_checkpoint.call_args[0]
    assert persisted.session_id is None
    assert persisted.timestamp_ms == 2000
def test_process_sessions_filters_checkpoint_boundary(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_score_session,
):
    """
    Scenario: Four sessions exist at the checkpoint boundary (timestamp=1000),
    where sessions sess-001 and sess-002 were already scored in a previous run
    (checkpoint at timestamp=1000, session_id=sess-002).

    The processor should only score sess-003 (at same timestamp but after checkpoint
    session_id) and sess-004 (at later timestamp), filtering out sess-001 and sess-002
    to avoid duplicate scoring.
    """
    # Checkpoint: everything at ts=1000 up through sess-002 was scored last run.
    mock_checkpoint_manager.get_checkpoint.return_value = OnlineSessionScoringCheckpoint(
        timestamp_ms=1000, session_id="sess-002"
    )
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1000),
        make_completed_session("sess-002", 500, 1000),
        make_completed_session("sess-003", 500, 1000),
        make_completed_session("sess-004", 500, 1500),
    ]
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()
    assert mock_score_session.call_count == 2
    # Tasks are sorted by (last_trace_timestamp_ms, session_id), so the scored
    # order is deterministic.
    scored_tasks = [call[0][0] for call in mock_score_session.call_args_list]
    assert scored_tasks[0].session.session_id == "sess-003"
    assert scored_tasks[1].session.session_id == "sess-004"
def test_session_rescored_when_new_trace_added_after_checkpoint(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
):
    """
    Scenario: A session (sess-001) was previously scored when its last trace was at
    timestamp 1000, but a new trace (tr-002) was added at timestamp 2000, making the
    session "complete" again with a new last_trace_timestamp.

    The processor should re-score the session with all traces (including the new one)
    and update old assessments with new scores that incorporate the additional trace.
    """
    # sess-001 was checkpointed when its last trace was at ts=1000...
    mock_checkpoint_manager.get_checkpoint.return_value = OnlineSessionScoringCheckpoint(
        timestamp_ms=1000, session_id="sess-001"
    )
    # ...but a new trace has since extended its last-trace timestamp to 2000.
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 2000)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [
        make_trace_info("tr-001", 500),
        make_trace_info("tr-002", 2000),
    ]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-001", 500),
        make_trace("tr-002", 2000),
    ]
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    mock_evaluate.return_value = {
        "tr-001": [make_assessment("assess-1", "ConversationCompleteness/v1")],
        "tr-002": [make_assessment("assess-2", "ConversationCompleteness/v1")],
    }
    processor.process_sessions()
    # Both the old and the new trace are included in the re-scoring call.
    call_kwargs = mock_evaluate.call_args[1]
    assert call_kwargs["session_id"] == "sess-001"
    assert len(call_kwargs["session_items"]) == 2
    assert len(call_kwargs["multi_turn_scorers"]) == 1
def test_process_sessions_samples_and_scores(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
    mock_log_assessments,
):
    """
    Scenario: one completed session with a single trace, and sampling selects
    the configured scorer.

    Happy path: the processor evaluates the session with the selected scorer
    and logs the resulting assessments to the trace.
    """
    mock_evaluate.return_value = {"tr-001": [MagicMock()]}
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [make_trace_info("tr-001", 1000)]
    mock_trace_loader.fetch_traces.return_value = [make_trace("tr-001", 1000)]

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    kwargs = mock_evaluate.call_args[1]
    assert kwargs["session_id"] == "sess-001"
    assert len(kwargs["multi_turn_scorers"]) == 1
    assert len(kwargs["session_items"]) == 1
    mock_log_assessments.assert_called_once()
def test_process_sessions_updates_checkpoint_on_success(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_score_session,
):
    """
    Scenario: two sessions are scored successfully (sess-001 at ts=1000,
    sess-002 at ts=1500).

    The checkpoint should point at the last scored session (sess-002 / 1500)
    so the next run resumes from there.
    """
    sessions = [
        make_completed_session("sess-001", 500, 1000),
        make_completed_session("sess-002", 500, 1500),
    ]
    mock_tracking_store.find_completed_sessions.return_value = sessions

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    (persisted,) = mock_checkpoint_manager.persist_checkpoint.call_args[0]
    assert (persisted.timestamp_ms, persisted.session_id) == (1500, "sess-002")
def test_execute_session_scoring_handles_failures(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_score_session,
):
    """
    Scenario: Two sessions need scoring, but the first one (sess-001) fails with an error
    while the second one (sess-002) succeeds.

    The processor should log the failure, continue processing sess-002, and still update
    the checkpoint after both attempts. This ensures one failing session doesn't block
    progress on other sessions.
    """
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1000),
        make_completed_session("sess-002", 500, 1500),
    ]
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    # side_effect is consumed in call order: sess-001 raises, sess-002 succeeds.
    mock_score_session.side_effect = [Exception("Session failed"), None]
    processor.process_sessions()
    assert mock_score_session.call_count == 2
    # The checkpoint still advances to the last session despite the failure.
    checkpoint = mock_checkpoint_manager.persist_checkpoint.call_args[0][0]
    assert checkpoint.timestamp_ms == 1500
    assert checkpoint.session_id == "sess-002"
def test_score_session_logs_assessments_individually(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
    mock_log_assessments,
):
    """
    Scenario: A session has multiple traces. The scorer evaluates the session and
    produces assessments for both traces. However, logging fails for one trace
    (tr-001) but succeeds for the other (tr-002).

    The processor should attempt to log assessments for each trace independently,
    allow partial success (tr-002 succeeds), log the failure with trace_id and
    session_id, and still update the checkpoint.
    """
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [
        make_trace_info("tr-001", 1000),
        make_trace_info("tr-002", 1200),
    ]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-001", 1000),
        make_trace("tr-002", 1200),
    ]
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    # One assessment per trace comes back from session-level evaluation.
    mock_evaluate.return_value = {
        "tr-001": [make_assessment("assess-1", "ConversationCompleteness/v1")],
        "tr-002": [make_assessment("assess-2", "ConversationCompleteness/v1")],
    }
    # First call fails (for tr-001), second call succeeds (for tr-002)
    mock_log_assessments.side_effect = [Exception("Failed to log"), None]
    with patch("mlflow.genai.scorers.online.session_processor._logger") as mock_logger:
        processor.process_sessions()
    # Both traces should have attempted logging
    assert mock_log_assessments.call_count == 2
    # Verify warning was logged with trace_id and session_id
    mock_logger.warning.assert_called_once()
    warning_call = mock_logger.warning.call_args
    assert "tr-001" in warning_call[0][0]
    assert "sess-001" in warning_call[0][0]
    # Checkpoint should still be updated
    checkpoint = mock_checkpoint_manager.persist_checkpoint.call_args[0][0]
    assert checkpoint.timestamp_ms == 1500
    assert checkpoint.session_id == "sess-001"
def test_create_factory_method(mock_tracking_store):
    """
    Scenario: build a processor via OnlineSessionScoringProcessor.create.

    The factory should wire up a real trace loader, checkpoint manager, and
    sampler, all bound to the given experiment and tracking store.
    """
    configs = [make_online_scorer(ConversationCompleteness())]

    processor = OnlineSessionScoringProcessor.create(
        experiment_id="exp1", online_scorers=configs, tracking_store=mock_tracking_store
    )

    assert processor._experiment_id == "exp1"
    assert processor._tracking_store == mock_tracking_store
    for attr, expected_type in [
        ("_trace_loader", OnlineTraceLoader),
        ("_checkpoint_manager", OnlineSessionCheckpointManager),
        ("_sampler", OnlineScorerSampler),
    ]:
        assert isinstance(getattr(processor, attr), expected_type)
    assert processor._checkpoint_manager._experiment_id == "exp1"
    assert processor._checkpoint_manager._tracking_store == mock_tracking_store
    assert processor._sampler._online_scorers == configs
def test_score_session_excludes_eval_run_traces(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store, sampler_with_scorers
):
    """
    Scenario: a session is scored; the trace fetch must not include traces
    generated during MLflow evaluation runs (they already carry assessments).

    The processor is expected to combine the "sourceRun IS NULL" exclusion with
    the session filter when fetching trace infos.
    """
    mock_trace_loader.fetch_trace_infos_in_range.return_value = []
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    used_filter = mock_trace_loader.fetch_trace_infos_in_range.call_args[1]["filter_string"]
    expected = (
        "metadata.mlflow.sourceRun IS NULL"
        " AND metadata.`mlflow.trace.session` = 'sess-001'"
    )
    assert used_filter == expected
def test_score_session_adds_session_metadata_to_assessments(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
    mock_log_assessments,
):
    """
    Scenario: session scoring produces a feedback that gets logged.

    Before logging, the processor must stamp each assessment with the
    ONLINE_SCORING_SESSION_ID metadata key so a later re-score of the same
    session can find and clean up superseded assessments.
    """
    feedback = make_assessment("new-id", "ConversationCompleteness/v1")
    mock_evaluate.return_value = {"tr-001": [feedback]}
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [make_trace_info("tr-001", 1000)]
    mock_trace_loader.fetch_traces.return_value = [make_trace("tr-001", 1000)]

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    logged = mock_log_assessments.call_args[1]["assessments"]
    session_meta = logged[0].metadata[AssessmentMetadataKey.ONLINE_SCORING_SESSION_ID]
    assert session_meta == "sess-001"
def test_score_session_skips_when_no_traces_found(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
):
    """
    Scenario: a session is marked completed but every trace was filtered out
    (e.g. all were eval-generated).

    With nothing to evaluate, the processor should skip the session instead of
    invoking the scorers, and move on.
    """
    mock_trace_loader.fetch_trace_infos_in_range.return_value = []
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    mock_evaluate.assert_not_called()
def test_score_session_skips_when_no_applicable_scorers(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store, mock_evaluate
):
    """
    Scenario: the only configured scorer has sample_rate=0.0, so sampling
    selects nothing for the session.

    With no scorers chosen the processor must not evaluate the session at all.
    """
    never_sampled = OnlineScorerSampler(
        [make_online_scorer(ConversationCompleteness(), sample_rate=0.0)]
    )
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [make_trace_info("tr-001", 1000)]
    mock_trace_loader.fetch_traces.return_value = [make_trace("tr-001", 1000)]

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, never_sampled, mock_tracking_store
    )
    processor.process_sessions()

    mock_evaluate.assert_not_called()
def test_checkpoint_advances_when_all_traces_are_from_eval_runs(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store, sampler_with_scorers
):
    """
    Scenario: two sessions complete, but every trace in them is eval-generated
    and thus filtered out, so no actual scoring occurs.

    The checkpoint must still advance past the last session so the processor
    does not repeatedly retry eval-only sessions.
    """
    mock_trace_loader.fetch_trace_infos_in_range.return_value = []
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1000),
        make_completed_session("sess-002", 500, 1500),
    ]

    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    processor.process_sessions()

    (persisted,) = mock_checkpoint_manager.persist_checkpoint.call_args[0]
    assert persisted.timestamp_ms == 1500
    assert persisted.session_id == "sess-002"
def test_clean_up_old_assessments_removes_duplicates(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
    mock_log_assessments,
):
    """
    Scenario: A session is re-scored, and there's an old assessment from a previous
    scoring run for the same session/scorer combination.

    The processor should delete the old assessment (old-id) after logging the new one,
    preventing accumulation of duplicate assessments when sessions are re-scored
    (e.g., when new traces are added).
    """
    # Old assessment from a previous scoring run of the same session and scorer.
    old_assessment = make_assessment("old-id", "ConversationCompleteness/v1", session_id="sess-001")
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [make_trace_info("tr-001", 1000)]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-001", 1000, assessments=[old_assessment])
    ]
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    new_assessment = make_assessment("new-id", "ConversationCompleteness/v1")
    mock_evaluate.return_value = {"tr-001": [new_assessment]}
    processor.process_sessions()
    # Only the superseded assessment is deleted; the freshly logged one remains.
    mock_tracking_store.delete_assessment.assert_called_once_with(
        trace_id="tr-001", assessment_id="old-id"
    )
def test_clean_up_old_assessments_preserves_different_sessions(
    mock_trace_loader,
    mock_checkpoint_manager,
    mock_tracking_store,
    sampler_with_scorers,
    mock_evaluate,
    mock_log_assessments,
):
    """
    Scenario: A trace has an old assessment from a different session (sess-002),
    and we're now scoring sess-001 which produces a new assessment.

    The processor should NOT delete the old assessment since it belongs to a different
    session, preserving assessments from all sessions that the trace participates in.
    """
    # The existing assessment was produced while scoring a *different* session.
    old_assessment = make_assessment("old-id", "ConversationCompleteness/v1", session_id="sess-002")
    mock_tracking_store.find_completed_sessions.return_value = [
        make_completed_session("sess-001", 500, 1500)
    ]
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [make_trace_info("tr-001", 1000)]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-001", 1000, assessments=[old_assessment])
    ]
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers, mock_tracking_store
    )
    new_assessment = make_assessment("new-id", "ConversationCompleteness/v1")
    mock_evaluate.return_value = {"tr-001": [new_assessment]}
    processor.process_sessions()
    # sess-002's assessment must survive sess-001's cleanup pass.
    mock_tracking_store.delete_assessment.assert_not_called()
def test_fetch_sessions_calls_once_per_filter_when_scorers_have_different_filters(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store
):
    """
    Scenario: Two scorers have different filter strings (scorer1 with 'tag.env=prod',
    scorer2 with 'tag.env=dev').

    The processor should call find_completed_sessions once per unique filter to
    efficiently fetch sessions that match each filter, avoiding duplicate work
    and ensuring each session is evaluated by the correct scorers.
    """
    scorer1 = ConversationCompleteness()
    scorer2 = ConversationCompleteness()
    # Distinct names so the two scorers are distinguishable in the sampler.
    scorer1.name = "scorer1"
    scorer2.name = "scorer2"
    sampler = OnlineScorerSampler(
        [
            make_online_scorer(scorer1, filter_string="tag.env = 'prod'"),
            make_online_scorer(scorer2, filter_string="tag.env = 'dev'"),
        ]
    )
    mock_tracking_store.find_completed_sessions.return_value = []
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler, mock_tracking_store
    )
    processor.process_sessions()
    # One fetch per distinct filter_string, in no guaranteed order.
    assert mock_tracking_store.find_completed_sessions.call_count == 2
    filter_strings = [
        call[1]["filter_string"]
        for call in mock_tracking_store.find_completed_sessions.call_args_list
    ]
    assert set(filter_strings) == {"tag.env = 'prod'", "tag.env = 'dev'"}
def test_fetch_sessions_calls_once_per_filter_when_any_scorer_has_no_filter(
    mock_trace_loader, mock_checkpoint_manager, mock_tracking_store
):
    """
    Scenario: Two scorers, one with a filter ('tag.env=prod') and one without (None).

    The processor should call find_completed_sessions twice: once with the specific
    filter for scorer1, and once with no filter for scorer2. This ensures both filtered
    and unfiltered scorers get the appropriate sessions.
    """
    scorer1 = ConversationCompleteness()
    scorer2 = ConversationCompleteness()
    # Distinct names so the two scorers are distinguishable in the sampler.
    scorer1.name = "scorer1"
    scorer2.name = "scorer2"
    sampler = OnlineScorerSampler(
        [
            make_online_scorer(scorer1, filter_string="tag.env = 'prod'"),
            make_online_scorer(scorer2, filter_string=None),
        ]
    )
    mock_tracking_store.find_completed_sessions.return_value = []
    processor = make_processor(
        mock_trace_loader, mock_checkpoint_manager, sampler, mock_tracking_store
    )
    processor.process_sessions()
    # None counts as its own "filter": one unfiltered fetch plus one filtered fetch.
    assert mock_tracking_store.find_completed_sessions.call_count == 2
    filter_strings = [
        call[1]["filter_string"]
        for call in mock_tracking_store.find_completed_sessions.call_args_list
    ]
    assert set(filter_strings) == {"tag.env = 'prod'", None}
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_session_processor.py",
"license": "Apache License 2.0",
"lines": 601,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/session_checkpointer.py | """Checkpoint management for session-level online scoring."""
import json
import logging
import time
from dataclasses import asdict, dataclass
from mlflow.entities.experiment_tag import ExperimentTag
from mlflow.environment_variables import (
MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS,
)
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.store.tracking.abstract_store import AbstractStore
from mlflow.utils.mlflow_tags import MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT
_logger = logging.getLogger(__name__)
@dataclass
class OnlineSessionScoringCheckpoint:
timestamp_ms: int # Last trace timestamp of the last processed session
session_id: str | None = None # Session ID for tiebreaking when timestamps match
def to_json(self) -> str:
return json.dumps(asdict(self))
@classmethod
def from_json(cls, json_str: str) -> "OnlineSessionScoringCheckpoint":
data = json.loads(json_str)
return cls(**data)
@dataclass
class OnlineSessionScoringTimeWindow:
    """Window of session last-trace timestamps (epoch ms) eligible for scoring."""

    # Earliest last-trace timestamp to consider (checkpoint or lookback cutoff).
    min_last_trace_timestamp_ms: int
    # Latest last-trace timestamp to consider (now minus the completion buffer).
    max_last_trace_timestamp_ms: int
class OnlineSessionCheckpointManager:
    """Stores and retrieves the session-scoring checkpoint via an experiment tag."""

    def __init__(self, tracking_store: AbstractStore, experiment_id: str):
        self._tracking_store = tracking_store
        self._experiment_id = experiment_id

    def get_checkpoint(self) -> OnlineSessionScoringCheckpoint | None:
        """
        Load the last processed session checkpoint from the experiment tag.

        Returns:
            The stored OnlineSessionScoringCheckpoint, or None when the tag is
            absent or its value cannot be parsed.
        """
        experiment = self._tracking_store.get_experiment(self._experiment_id)
        raw = experiment.tags.get(MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT)
        if not raw:
            return None
        try:
            return OnlineSessionScoringCheckpoint.from_json(raw)
        except (TypeError, ValueError, json.JSONDecodeError) as e:
            # A corrupt tag is treated the same as no checkpoint at all.
            _logger.debug(
                f"Failed to parse checkpoint for experiment {self._experiment_id}: {e}",
                exc_info=True,
            )
            return None

    def persist_checkpoint(self, checkpoint: OnlineSessionScoringCheckpoint) -> None:
        """
        Persist the checkpoint tag with a new checkpoint.

        Args:
            checkpoint: The checkpoint to store.
        """
        tag = ExperimentTag(
            MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT, checkpoint.to_json()
        )
        self._tracking_store.set_experiment_tag(self._experiment_id, tag)

    def calculate_time_window(self) -> OnlineSessionScoringTimeWindow:
        """
        Calculate the time window for session scoring.

        The lower bound is the checkpoint timestamp, clamped so we never look
        back more than MAX_LOOKBACK_MS — this keeps the processor from getting
        stuck on old, persistently failing sessions. The upper bound is the
        current time minus the session-completion buffer.

        Returns:
            OnlineSessionScoringTimeWindow with min and max last trace timestamps.
        """
        now_ms = int(time.time() * 1000)
        checkpoint = self.get_checkpoint()
        checkpoint_ms = checkpoint.timestamp_ms if checkpoint is not None else 0
        lower_bound = max(checkpoint_ms, now_ms - MAX_LOOKBACK_MS)
        # A negative buffer would move the upper bound into the future; clamp to 0.
        buffer_s = max(
            0, MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS.get()
        )
        return OnlineSessionScoringTimeWindow(
            min_last_trace_timestamp_ms=lower_bound,
            max_last_trace_timestamp_ms=now_ms - buffer_s * 1000,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/session_checkpointer.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/genai/scorers/online/test_session_checkpointer.py | import time
from unittest.mock import MagicMock
import pytest
from mlflow.environment_variables import (
MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS,
)
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.genai.scorers.online.session_checkpointer import (
OnlineSessionCheckpointManager,
OnlineSessionScoringCheckpoint,
)
from mlflow.utils.mlflow_tags import MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT
@pytest.fixture
def mock_store():
    # Stand-in for the tracking store; tests inspect the calls it records
    # (get_experiment / set_experiment_tag).
    return MagicMock()


@pytest.fixture
def checkpoint_manager(mock_store):
    # Manager under test, bound to the mocked store and experiment id "exp1".
    return OnlineSessionCheckpointManager(mock_store, "exp1")
def test_checkpoint_json_roundtrip():
    # Serializing then deserializing must preserve every checkpoint field.
    before = OnlineSessionScoringCheckpoint(timestamp_ms=5000, session_id="sess-abc")
    after = OnlineSessionScoringCheckpoint.from_json(before.to_json())
    assert after.timestamp_ms == before.timestamp_ms
    assert after.session_id == before.session_id


def test_get_checkpoint_returns_none_when_no_tag(checkpoint_manager, mock_store):
    # No checkpoint tag on the experiment -> no checkpoint.
    exp = MagicMock()
    exp.tags = {}
    mock_store.get_experiment.return_value = exp
    assert checkpoint_manager.get_checkpoint() is None


def test_get_checkpoint_deserializes_correctly(checkpoint_manager, mock_store):
    # A well-formed tag value is parsed into a checkpoint object.
    exp = MagicMock()
    exp.tags = {
        MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT: (
            '{"timestamp_ms": 1000, "session_id": "sess-1"}'
        )
    }
    mock_store.get_experiment.return_value = exp
    loaded = checkpoint_manager.get_checkpoint()
    assert loaded.timestamp_ms == 1000
    assert loaded.session_id == "sess-1"


def test_get_checkpoint_handles_invalid_json(checkpoint_manager, mock_store):
    # Corrupt tag contents are treated as "no checkpoint" rather than raising.
    exp = MagicMock()
    exp.tags = {MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT: "invalid json"}
    mock_store.get_experiment.return_value = exp
    assert checkpoint_manager.get_checkpoint() is None


def test_persist_checkpoint_sets_experiment_tag(checkpoint_manager, mock_store):
    # Persisting writes exactly one experiment tag under the checkpoint key.
    checkpoint_manager.persist_checkpoint(
        OnlineSessionScoringCheckpoint(timestamp_ms=2000, session_id="sess-2")
    )
    mock_store.set_experiment_tag.assert_called_once()
    (experiment_id, tag), _kwargs = mock_store.set_experiment_tag.call_args
    assert experiment_id == "exp1"
    assert tag.key == MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT
def test_calculate_time_window_no_checkpoint(checkpoint_manager, mock_store, monkeypatch):
    # Without a stored checkpoint, the window floor is now - MAX_LOOKBACK_MS.
    exp = MagicMock()
    exp.tags = {}
    mock_store.get_experiment.return_value = exp
    frozen_seconds = 1000000
    monkeypatch.setattr(time, "time", lambda: frozen_seconds)

    window = checkpoint_manager.calculate_time_window()

    now_ms = frozen_seconds * 1000
    buffer_ms = MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS.get() * 1000
    assert window.min_last_trace_timestamp_ms == now_ms - MAX_LOOKBACK_MS
    assert window.max_last_trace_timestamp_ms == now_ms - buffer_ms


def test_calculate_time_window_recent_checkpoint(checkpoint_manager, mock_store, monkeypatch):
    # A checkpoint inside the lookback horizon is used as the floor unchanged.
    frozen_seconds = 1000000
    recent_checkpoint_time = (frozen_seconds * 1000) - 60000
    exp = MagicMock()
    exp.tags = {
        MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT: (
            f'{{"timestamp_ms": {recent_checkpoint_time}}}'
        )
    }
    mock_store.get_experiment.return_value = exp
    monkeypatch.setattr(time, "time", lambda: frozen_seconds)

    window = checkpoint_manager.calculate_time_window()

    buffer_ms = MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS.get() * 1000
    assert window.min_last_trace_timestamp_ms == recent_checkpoint_time
    assert window.max_last_trace_timestamp_ms == (frozen_seconds * 1000) - buffer_ms


def test_calculate_time_window_old_checkpoint(checkpoint_manager, mock_store, monkeypatch):
    # A checkpoint older than MAX_LOOKBACK_MS is clamped to the lookback floor.
    frozen_seconds = 1000000
    old_checkpoint_time = (frozen_seconds * 1000) - MAX_LOOKBACK_MS - 1000000
    exp = MagicMock()
    exp.tags = {
        MLFLOW_LATEST_ONLINE_SCORING_SESSION_CHECKPOINT: (
            f'{{"timestamp_ms": {old_checkpoint_time}}}'
        )
    }
    mock_store.get_experiment.return_value = exp
    monkeypatch.setattr(time, "time", lambda: frozen_seconds)

    window = checkpoint_manager.calculate_time_window()

    now_ms = frozen_seconds * 1000
    buffer_ms = MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS.get() * 1000
    assert window.min_last_trace_timestamp_ms == now_ms - MAX_LOOKBACK_MS
    assert window.max_last_trace_timestamp_ms == now_ms - buffer_ms
def test_calculate_time_window_with_custom_buffer(checkpoint_manager, mock_store, monkeypatch):
    # A positive buffer from the environment pulls the window ceiling back.
    exp = MagicMock()
    exp.tags = {}  # no existing checkpoint
    mock_store.get_experiment.return_value = exp
    frozen_seconds = 1000000
    custom_buffer_seconds = 60
    monkeypatch.setattr(time, "time", lambda: frozen_seconds)
    monkeypatch.setenv(
        "MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS",
        str(custom_buffer_seconds),
    )

    window = checkpoint_manager.calculate_time_window()

    now_ms = frozen_seconds * 1000
    assert window.min_last_trace_timestamp_ms == now_ms - MAX_LOOKBACK_MS
    assert window.max_last_trace_timestamp_ms == now_ms - custom_buffer_seconds * 1000


def test_calculate_time_window_with_negative_buffer_defaults_to_zero(
    checkpoint_manager, mock_store, monkeypatch
):
    # A negative buffer is clamped to zero, so the ceiling equals "now".
    exp = MagicMock()
    exp.tags = {}  # no existing checkpoint
    mock_store.get_experiment.return_value = exp
    frozen_seconds = 1000000
    monkeypatch.setattr(time, "time", lambda: frozen_seconds)
    monkeypatch.setenv("MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS", "-100")

    window = checkpoint_manager.calculate_time_window()

    now_ms = frozen_seconds * 1000
    assert window.min_last_trace_timestamp_ms == now_ms - MAX_LOOKBACK_MS
    assert window.max_last_trace_timestamp_ms == now_ms
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_session_checkpointer.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/optimize/optimizers/metaprompt_optimizer.py | import json
import logging
import re
from contextlib import nullcontext
from typing import Any
import mlflow
from mlflow.entities.span import SpanType
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.optimizers.base import BasePromptOptimizer, _EvalFunc
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
from mlflow.utils.annotations import experimental
_logger = logging.getLogger(__name__)
# Compiled regex pattern for extracting template variables. Matches MLflow's
# {{variable}} placeholder syntax; group 1 captures the bare variable name
# (\w+), so `findall` returns names without the braces.
_TEMPLATE_VAR_PATTERN = re.compile(r"\{\{(\w+)\}\}")

# Unified meta-prompt template that supports both zero-shot and few-shot modes.
# Filled via str.format() by the _build_*_meta_prompt helpers; zero-shot passes
# empty strings for `evaluation_examples` and `extra_instructions`. The
# quadruple braces ({{{{...}}}}) survive .format() as literal double braces so
# the reflection model sees MLflow's {{var}} syntax verbatim.
META_PROMPT_TEMPLATE = """\
You are an expert prompt engineer. Your task is to improve
the following prompts to achieve better performance.
CURRENT PROMPTS:
{current_prompts_formatted}
{evaluation_examples}
PROMPT ENGINEERING BEST PRACTICES:
Apply these proven techniques to create effective prompts:
1. **Clarity & Specificity**: Be explicit about the task, expected output format,
and any constraints
2. **Structured Formatting**: Use numbered lists, sections, or delimiters to
organize complex instructions clearly
3. **Few-Shot Examples**: Include concrete examples showing desired input/output
pairs when appropriate
4. **Role/Persona**: Specify expertise level if relevant (e.g., "You are an expert
mathematician...")
5. **Step-by-Step Decomposition**: Break complex reasoning tasks into explicit
steps or phases
6. **Output Format Specification**: Explicitly define the format, structure, and
constraints for outputs
7. **Constraint Specification**: Clearly state what to avoid, exclude, or not do
8. **Verification Instructions**: Add self-checking steps for calculation-heavy
or error-prone tasks
9. **Chain-of-Thought Prompting**: For reasoning tasks, explicitly instruct to
show intermediate steps
CRITICAL REQUIREMENT - TEMPLATE VARIABLES:
The following variables MUST be preserved EXACTLY as shown in the original prompts.
DO NOT modify, remove, or change the formatting of these variables in any way:
{template_variables}
IMPORTANT: Template variables use double curly braces like {{{{variable_name}}}}.
You MUST copy them exactly as they appear in the original prompt into your improved
prompt. If a variable appears as {{{{question}}}} in the original, it must appear as
{{{{question}}}} in your improvement.
{custom_guidelines}
INSTRUCTIONS:
Generate improved versions of the prompts by applying relevant prompt engineering
best practices. Make your prompts specific and actionable.
{extra_instructions}
CRITICAL: Preserve all template variables in their exact original format with
double curly braces.
CRITICAL: You must respond with a valid JSON object using the EXACT prompt names
shown above. The JSON keys must match the "Prompt name" fields exactly. Use this
structure:
{{
{response_format_example}
}}
REMINDER:
1. Use the exact prompt names as JSON keys (e.g., if the prompt is named
"aime_solver", use "aime_solver" as the key)
2. Every template variable from the original prompt must appear unchanged in your
improved version
3. Apply best practices that are most relevant to the task at hand
Do not include any text before or after the JSON object. Do not include
explanations or reasoning.
"""
@experimental(version="3.9.0")
class MetaPromptOptimizer(BasePromptOptimizer):
    """
    A prompt optimizer that uses metaprompting with LLMs to improve prompts in a single pass.

    Automatically detects optimization mode based on training data:

    - Zero-shot: No evaluation data - applies general prompt engineering best practices
    - Few-shot: Has evaluation data - learns from evaluation results

    This optimizer performs a single optimization pass, making it faster than iterative
    approaches like GEPA while requiring less data. The optimized prompt is always
    registered regardless of performance improvement.

    Args:
        reflection_model: Name of the model to use for prompt optimization.
            Format: "<provider>:/<model>" (e.g., "openai:/gpt-5.2",
            "anthropic:/claude-sonnet-4-5-20250929")
        lm_kwargs: Optional dictionary of additional parameters to pass to the reflection
            model (e.g., {"temperature": 1.0, "max_tokens": 4096}). These are passed
            directly to the underlying litellm.completion() call. Default: None
        guidelines: Optional custom guidelines to provide domain-specific or task-specific
            context for prompt optimization (e.g., "This is for a finance advisor to
            project tax situations."). Default: None

    Example with evaluation data (few-shot mode):

    .. code-block:: python

        import mlflow
        import openai
        from mlflow.genai.optimize.optimizers import MetaPromptOptimizer
        from mlflow.genai.scorers import Correctness

        prompt = mlflow.genai.register_prompt(
            name="qa",
            template="Answer the following question: {{question}}",
        )


        def predict_fn(question: str) -> str:
            prompt = mlflow.genai.load_prompt("prompts:/qa@latest")
            completion = openai.OpenAI().chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt.format(question=question)}],
            )
            return completion.choices[0].message.content


        dataset = [
            {"inputs": {"question": "What is the capital of France?"}, "outputs": "Paris"},
            {"inputs": {"question": "What is 2+2?"}, "outputs": "4"},
        ]

        result = mlflow.genai.optimize_prompts(
            predict_fn=predict_fn,
            train_data=dataset,
            prompt_uris=[prompt.uri],
            optimizer=MetaPromptOptimizer(
                reflection_model="openai:/gpt-4o",
                lm_kwargs={"temperature": 1.0, "max_tokens": 4096},
            ),
            scorers=[Correctness(model="openai:/gpt-4o")],
        )
        print(f"Improved prompt: {result.optimized_prompts[0].template}")

    Example without evaluation data (zero-shot mode):

    .. code-block:: python

        import mlflow
        from mlflow.genai.optimize.optimizers import MetaPromptOptimizer

        prompt = mlflow.genai.register_prompt(
            name="qa",
            template="Answer: {{question}}",
        )

        # Zero-shot mode: no evaluation data
        result = mlflow.genai.optimize_prompts(
            predict_fn=lambda question: "",  # Not used in zero-shot
            train_data=[],  # Empty dataset triggers zero-shot mode
            prompt_uris=[prompt.uri],
            optimizer=MetaPromptOptimizer(
                reflection_model="openai:/gpt-4o",
                guidelines="This is for a finance advisor to project tax situations.",
            ),
            scorers=[],  # No scorers needed for zero-shot
        )
        print(f"Improved prompt: {result.optimized_prompts[0].template}")
    """

    def __init__(
        self,
        reflection_model: str,
        lm_kwargs: dict[str, Any] | None = None,
        guidelines: str | None = None,
    ):
        from mlflow.metrics.genai.model_utils import _parse_model_uri

        self.reflection_model = reflection_model
        self.lm_kwargs = lm_kwargs or {}
        self.guidelines = guidelines
        # Fails fast on a malformed "<provider>:/<model>" URI.
        self.provider, self.model = _parse_model_uri(self.reflection_model)
        self._validate_parameters()

    def _validate_parameters(self):
        # Validate constructor arguments not already checked by _parse_model_uri.
        if not isinstance(self.lm_kwargs, dict):
            raise MlflowException("`lm_kwargs` must be a dictionary")

    def optimize(
        self,
        eval_fn: _EvalFunc,
        train_data: list[dict[str, Any]],
        target_prompts: dict[str, str],
        enable_tracking: bool = True,
    ) -> PromptOptimizerOutput:
        """
        Optimize the target prompts using metaprompting in a single pass.

        Automatically detects mode:

        - If train_data is empty: zero-shot mode (no evaluation)
        - If train_data has examples: few-shot mode (with baseline evaluation for feedback)

        The optimized prompt is always returned regardless of performance improvement.

        Args:
            eval_fn: The evaluation function that takes candidate prompts and dataset,
                returns evaluation results. Not used in zero-shot mode.
            train_data: The dataset to use for optimization. Empty list triggers zero-shot
                mode. In few-shot mode, train_data is always used for baseline evaluation
                (to capture feedback) and for showing examples in the meta-prompt.
            target_prompts: The target prompt templates as dict (name -> template).
            enable_tracking: If True (default), automatically log optimization progress.

        Returns:
            The optimized prompts with initial score (final_eval_score is None for
            single-pass).
        """
        # Extract template variables so validation can later verify they survive.
        template_variables = self._extract_template_variables(target_prompts)

        # Auto-detect mode based on training data
        if not train_data:
            _logger.info("No training data provided, using zero-shot metaprompting")
            return self._optimize_zero_shot(target_prompts, template_variables, enable_tracking)
        else:
            _logger.info(
                f"{len(train_data)} training examples provided, using few-shot metaprompting"
            )
            return self._optimize_few_shot(
                eval_fn,
                train_data,
                target_prompts,
                template_variables,
                enable_tracking,
            )

    def _optimize_zero_shot(
        self,
        target_prompts: dict[str, str],
        template_variables: dict[str, set[str]],
        enable_tracking: bool,
    ) -> PromptOptimizerOutput:
        """
        Optimize prompts using zero-shot metaprompting (no evaluation data).

        Applies general prompt engineering best practices in a single pass.
        """
        _logger.info("Applying zero-shot prompt optimization with best practices")
        meta_prompt = self._build_zero_shot_meta_prompt(target_prompts, template_variables)
        try:
            improved_prompts = self._call_reflection_model(meta_prompt, enable_tracking)
            self._validate_prompt_names(target_prompts, improved_prompts)
            self._validate_template_variables(target_prompts, improved_prompts)
            _logger.info("Successfully generated improved prompts")
            return PromptOptimizerOutput(
                optimized_prompts=improved_prompts,
                initial_eval_score=None,  # No evaluation in zero-shot mode
                final_eval_score=None,
            )
        except Exception as e:
            # Best-effort: a failed reflection call or validation falls back to
            # the originals instead of aborting the optimization run.
            _logger.warning(f"Zero-shot optimization failed: {e}. Returning original prompts.")
            return PromptOptimizerOutput(
                optimized_prompts=target_prompts,
                initial_eval_score=None,
                final_eval_score=None,
            )

    def _optimize_few_shot(
        self,
        eval_fn: _EvalFunc,
        train_data: list[dict[str, Any]],
        target_prompts: dict[str, str],
        template_variables: dict[str, set[str]],
        enable_tracking: bool,
    ) -> PromptOptimizerOutput:
        """
        Optimize prompts using few-shot metaprompting (with evaluation feedback).

        Performs a single optimization pass based on evaluation results from training examples.
        The optimized prompt is always returned regardless of performance improvement.

        Args:
            eval_fn: Evaluation function to score prompts
            train_data: Training data used for baseline evaluation to capture feedback
            target_prompts: Initial prompts to optimize
            template_variables: Template variables extracted from prompts
            enable_tracking: Whether to log metrics to MLflow
        """
        # Always evaluate baseline on train_data to capture feedback for metaprompting
        _logger.info("Evaluating baseline prompts on training data...")
        baseline_results = eval_fn(target_prompts, train_data)
        initial_eval_score = self._compute_aggregate_score(baseline_results)
        initial_eval_score_per_scorer = self._compute_per_scorer_scores(baseline_results)
        if initial_eval_score is not None:
            _logger.info(f"Baseline score: {initial_eval_score:.4f}")

        # Build meta-prompt with evaluation feedback
        _logger.info("Generating optimized prompts...")
        meta_prompt = self._build_few_shot_meta_prompt(
            target_prompts,
            template_variables,
            baseline_results,
        )

        # Call LLM to generate improved prompts
        try:
            improved_prompts = self._call_reflection_model(meta_prompt, enable_tracking)
            self._validate_prompt_names(target_prompts, improved_prompts)
            self._validate_template_variables(target_prompts, improved_prompts)
            _logger.info("Successfully generated optimized prompts")
        except Exception as e:
            _logger.warning(f"Few-shot optimization failed: {e}. Returning original prompts.")
            return PromptOptimizerOutput(
                optimized_prompts=target_prompts,
                initial_eval_score=initial_eval_score,
                final_eval_score=None,
                initial_eval_score_per_scorer=initial_eval_score_per_scorer,
                final_eval_score_per_scorer={},
            )

        final_eval_score = None
        final_eval_score_per_scorer: dict[str, float] = {}
        if initial_eval_score is not None:
            _logger.info(
                "Evaluating optimized prompts on training data, please note that this is more of "
                "a sanity check than a final evaluation because the data has already been used "
                "for meta-prompting. To accurately evaluate the optimized prompts, please use a "
                "separate validation dataset and run mlflow.genai.evaluate() on it."
            )
            final_results = eval_fn(improved_prompts, train_data)
            final_eval_score = self._compute_aggregate_score(final_results)
            final_eval_score_per_scorer = self._compute_per_scorer_scores(final_results)
            # _compute_aggregate_score returns None when any record's score is
            # None; formatting None with ":.4f" would raise TypeError, so guard.
            if final_eval_score is not None:
                _logger.info(f"Final score: {final_eval_score:.4f}")

        return PromptOptimizerOutput(
            optimized_prompts=improved_prompts,
            initial_eval_score=initial_eval_score,
            final_eval_score=final_eval_score,
            initial_eval_score_per_scorer=initial_eval_score_per_scorer,
            final_eval_score_per_scorer=final_eval_score_per_scorer,
        )

    def _extract_template_variables(self, prompts: dict[str, str]) -> dict[str, set[str]]:
        """
        Extract template variables ({{var}}) from each prompt.

        Args:
            prompts: Dict mapping prompt_name -> template

        Returns:
            Dict mapping prompt_name -> set of variable names
        """
        variables = {}
        for name, template in prompts.items():
            # Match {{variable}} pattern (MLflow uses double braces)
            matches = _TEMPLATE_VAR_PATTERN.findall(template)
            variables[name] = set(matches)
        return variables

    def _validate_prompt_names(
        self, original_prompts: dict[str, str], new_prompts: dict[str, str]
    ) -> bool:
        """
        Validate that prompt names match between original and new prompts.

        Args:
            original_prompts: Original prompt templates
            new_prompts: New prompt templates to validate

        Returns:
            True if valid

        Raises:
            MlflowException: If prompt names don't match
        """
        # Check for unexpected prompts in the improved prompts
        if unexpected_prompts := set(new_prompts) - set(original_prompts):
            raise MlflowException(
                f"Unexpected prompts found in improved prompts: {sorted(unexpected_prompts)}"
            )
        # Check for missing prompts in the improved prompts
        if missing_prompts := set(original_prompts) - set(new_prompts):
            raise MlflowException(
                f"Prompts missing from improved prompts: {sorted(missing_prompts)}"
            )
        return True

    def _validate_template_variables(
        self, original_prompts: dict[str, str], new_prompts: dict[str, str]
    ) -> bool:
        """Validate that all template variables are preserved in new prompts."""
        original_vars = self._extract_template_variables(original_prompts)
        new_vars = self._extract_template_variables(new_prompts)
        for name in original_prompts:
            if original_vars[name] != new_vars[name]:
                missing = original_vars[name] - new_vars[name]
                extra = new_vars[name] - original_vars[name]
                msg = f"Template variables mismatch in prompt '{name}'."
                if missing:
                    msg += f" Missing: {missing}."
                if extra:
                    msg += f" Extra: {extra}."
                raise MlflowException(msg)
        return True

    def _build_zero_shot_meta_prompt(
        self,
        current_prompts: dict[str, str],
        template_variables: dict[str, set[str]],
    ) -> str:
        """Build the zero-shot meta-prompt (no evaluation examples section)."""
        # Format the current prompts for each module
        prompts_formatted = "\n\n".join(
            [
                f"Prompt name: {name}\nTemplate: {template}"
                for name, template in current_prompts.items()
            ]
        )
        # Format template variables
        vars_formatted = "\n".join(
            [
                f"- Prompt '{name}': {', '.join(sorted(vars)) if vars else 'none'}"
                for name, vars in template_variables.items()
            ]
        )
        # Add custom guidelines to the meta-prompt if provided
        custom_guidelines = f"CUSTOM GUIDELINES:\n{self.guidelines}" if self.guidelines else ""
        # Format example JSON response with actual prompt names
        response_format_example = "\n".join(
            [
                f'    "{name}": "improved prompt text with variables preserved exactly"'
                for name in current_prompts.keys()
            ]
        )
        return META_PROMPT_TEMPLATE.format(
            current_prompts_formatted=prompts_formatted,
            evaluation_examples="",
            extra_instructions="",
            template_variables=vars_formatted,
            custom_guidelines=custom_guidelines,
            response_format_example=response_format_example,
        )

    def _build_few_shot_meta_prompt(
        self,
        current_prompts: dict[str, str],
        template_variables: dict[str, set[str]],
        eval_results: list[EvaluationResultRecord],
    ) -> str:
        """Build few-shot meta-prompt with evaluation feedback."""
        # Format current prompts
        prompts_formatted = "\n\n".join(
            [
                f"Prompt name: {name}\nTemplate: {template}"
                for name, template in current_prompts.items()
            ]
        )
        if not eval_results:
            raise MlflowException(
                "Few-shot metaprompting requires evaluation results. "
                "No evaluation results were provided to _build_few_shot_meta_prompt."
            )
        # Calculate current score from evaluation results (if scores are available)
        current_score = self._compute_aggregate_score(eval_results)
        # Format examples and their evaluation results in the meta-prompt
        examples_formatted = self._format_examples(eval_results)
        # Format template variables
        vars_formatted = "\n".join(
            [
                f"- Prompt '{name}': {', '.join(sorted(vars)) if vars else 'none'}"
                for name, vars in template_variables.items()
            ]
        )
        # Add custom guidelines to the meta-prompt if provided
        custom_guidelines = f"CUSTOM GUIDELINES:\n{self.guidelines}" if self.guidelines else ""
        # Format example JSON response with actual prompt names
        response_format_example = "\n".join(
            [
                f'    "{name}": "improved prompt text with variables preserved exactly"'
                for name in current_prompts.keys()
            ]
        )
        # Build evaluation examples section (with or without score)
        if current_score is not None:
            score_info = f" (Current Score: {current_score:.3f})"
            analysis_instructions = """
Before applying best practices, analyze the examples to identify:
1. **Common Failure Patterns**: What mistakes appear repeatedly? (wrong format,
missing steps, calculation errors, etc.)
2. **Success Patterns**: What made successful examples work? (format, detail level,
reasoning approach)
3. **Key Insights**: What do the rationales tell you about quality criteria and
needed improvements?
4. **Task Requirements**: What output format, explanation level, and edge cases
are expected?"""
        else:
            score_info = ""
            analysis_instructions = """
Before applying best practices, analyze the examples to identify:
1. **Output Patterns**: What are the expected outputs for different inputs?
2. **Task Requirements**: What output format, explanation level, and edge cases
are expected?
3. **Common Themes**: What patterns do you see in the input-output relationships?"""
        evaluation_examples = f"""EVALUATION EXAMPLES{score_info}:
Below are examples showing how the current prompts performed. Study these to identify
patterns in what worked and what failed.
{examples_formatted}
{analysis_instructions}"""
        extra_instructions = """
Focus on applying best practices that directly address the observed patterns.
Add specific instructions, format specifications, or verification steps that would
improve the prompt's effectiveness."""
        return META_PROMPT_TEMPLATE.format(
            current_prompts_formatted=prompts_formatted,
            evaluation_examples=evaluation_examples,
            extra_instructions=extra_instructions,
            template_variables=vars_formatted,
            custom_guidelines=custom_guidelines,
            response_format_example=response_format_example,
        )

    def _format_examples(self, eval_results: list[EvaluationResultRecord]) -> str:
        """Format evaluation results for meta-prompting."""
        formatted = []
        for i, result in enumerate(eval_results, 1):
            rationale_str = (
                "\n".join([f"    - {k}: {v}" for k, v in result.rationales.items()])
                if result.rationales
                else "    None"
            )
            # Build example with optional score
            example_lines = [
                f"Example {i}:",
                f"  Input: {json.dumps(result.inputs)}",
                f"  Output: {result.outputs}",
                f"  Expected: {result.expectations}",
            ]
            if result.score is not None:
                example_lines.append(f"  Score: {result.score:.3f}")
            example_lines.append(f"  Rationales:\n{rationale_str}")
            formatted.append("\n".join(example_lines) + "\n")
        return "\n".join(formatted)

    def _call_reflection_model(
        self, meta_prompt: str, enable_tracking: bool = True
    ) -> dict[str, str]:
        """Call the reflection model to generate improved prompts."""
        try:
            import litellm
        except ImportError as e:
            raise ImportError(
                "litellm is required for metaprompt optimization. "
                "Please install it with: `pip install litellm`"
            ) from e
        litellm_model = f"{self.provider}/{self.model}"
        litellm_params = {
            "model": litellm_model,
            "messages": [{"role": "user", "content": meta_prompt}],
            "response_format": {"type": "json_object"},  # Request JSON output
            "max_retries": 3,
            **self.lm_kwargs,  # Merge user-provided parameters
        }
        content = None  # Initialize to avoid NameError in exception handler
        span_context = (
            mlflow.start_span(name="metaprompt_reflection", span_type=SpanType.LLM)
            if enable_tracking
            else nullcontext()
        )
        with span_context as span:
            if enable_tracking:
                span.set_inputs({"meta_prompt": meta_prompt, "model": litellm_model})
            try:
                response = litellm.completion(**litellm_params)
                # Extract and parse response
                content = response.choices[0].message.content.strip()
                # Strip markdown code blocks if present as some models have the tendency to add them
                if content.startswith("```json"):
                    content = content[7:]
                elif content.startswith("```"):
                    content = content[3:]
                content = content.removesuffix("```").strip()
                # The content should be a valid JSON object with keys being the prompt
                # names and values being the improved prompts.
                improved_prompts = json.loads(content)
                if not isinstance(improved_prompts, dict):
                    raise MlflowException(
                        f"Reflection model returned invalid format. Expected JSON object, "
                        f"got {type(improved_prompts).__name__}"
                    )
                for key, value in improved_prompts.items():
                    if not isinstance(value, str):
                        raise MlflowException(
                            f"Prompt '{key}' must be a string, got {type(value).__name__}"
                        )
                if enable_tracking:
                    span.set_outputs(improved_prompts)
                return improved_prompts
            except json.JSONDecodeError as e:
                response_preview = content[:2000] if content else "No content received"
                raise MlflowException(
                    f"Failed to parse reflection model response as JSON: {e}\n"
                    f"Response: {response_preview}"
                ) from e
            except MlflowException:
                # Re-raise our own validation errors unchanged so they are not
                # re-wrapped into a misleading "Failed to call reflection model"
                # message by the broad handler below.
                raise
            except Exception as e:
                raise MlflowException(
                    f"Failed to call reflection model {litellm_model}: {e}"
                ) from e

    def _compute_aggregate_score(self, results: list[EvaluationResultRecord]) -> float | None:
        """
        Compute aggregate score from evaluation results.

        Args:
            results: List of evaluation results

        Returns:
            Average score across all examples, or None if no results or scores are None
        """
        if not results:
            return None
        # If any score is None, return None (no scorers were provided)
        scores = [r.score for r in results]
        if any(s is None for s in scores):
            return None
        return sum(scores) / len(scores)

    def _compute_per_scorer_scores(self, results: list[EvaluationResultRecord]) -> dict[str, float]:
        """
        Compute per-scorer average scores from evaluation results.

        Args:
            results: List of evaluation results

        Returns:
            Dict mapping scorer name to average score across all examples
        """
        if not results:
            return {}
        scorer_names = results[0].individual_scores.keys()
        if not scorer_names:
            return {}
        # Compute average for each scorer
        per_scorer_avg: dict[str, float] = {}
        for scorer_name in scorer_names:
            scores = [r.individual_scores[scorer_name] for r in results]
            per_scorer_avg[scorer_name] = sum(scores) / len(scores)
        return per_scorer_avg
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/optimizers/metaprompt_optimizer.py",
"license": "Apache License 2.0",
"lines": 584,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/optimize/optimizers/test_metaprompt_optimizer.py | import json
import sys
from typing import Any
from unittest.mock import Mock, patch
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.optimizers.metaprompt_optimizer import MetaPromptOptimizer
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
@pytest.fixture
def sample_train_data():
    # Four simple QA records in the {"inputs": ..., "outputs": ...} shape
    # expected by optimize()/eval_fn.
    return [
        {
            "inputs": {"question": "What is 2+2?"},
            "outputs": "4",
        },
        {
            "inputs": {"question": "What is the capital of France?"},
            "outputs": "Paris",
        },
        {
            "inputs": {"question": "What is 3*3?"},
            "outputs": "9",
        },
        {
            "inputs": {"question": "What color is the sky?"},
            "outputs": "Blue",
        },
    ]


@pytest.fixture
def sample_target_prompts():
    # Single prompt with one {{question}} template variable.
    return {
        "instruction": "Answer the following question: {{question}}",
    }


@pytest.fixture
def sample_target_prompts_multiple():
    # Two prompts; "system_prompt" deliberately has no template variables.
    return {
        "system_prompt": "You are a helpful assistant.",
        "instruction": "Answer the following question: {{question}}",
    }
def mock_eval_fn(candidate_prompts: dict[str, str], dataset: list[dict[str, Any]]):
    """Mock evaluation function that returns varied scores."""
    # Cycle from high to low so sampled results are diverse.
    score_cycle = [0.9, 0.7, 0.4, 0.2]
    records = []
    for idx, row in enumerate(dataset):
        value = score_cycle[idx % len(score_cycle)]
        records.append(
            EvaluationResultRecord(
                inputs=row["inputs"],
                outputs="mock output",
                expectations=row["outputs"],
                score=value,
                trace=Mock(),  # Use Mock for trace
                rationales={"correctness": f"Score {value}"},
            )
        )
    return records
@pytest.fixture
def mock_litellm_response():
    """Mock litellm response with improved prompts."""
    # Shape mirrors litellm: response.choices[0].message.content is a JSON string.
    choice = Mock()
    choice.message = Mock()
    choice.message.content = json.dumps(
        {"instruction": "Improved: Answer this question carefully: {{question}}"}
    )
    response = Mock()
    response.choices = [choice]
    return response
def test_metaprompt_optimizer_initialization():
    # Default construction leaves lm_kwargs empty.
    opt = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
    assert opt.reflection_model == "openai:/gpt-4o"
    assert opt.lm_kwargs == {}


def test_metaprompt_optimizer_initialization_with_custom_params():
    # Custom LM kwargs are stored verbatim.
    opt = MetaPromptOptimizer(
        reflection_model="anthropic:/claude-3-5-sonnet-20241022",
        lm_kwargs={"temperature": 0.9, "max_tokens": 4096},
    )
    assert opt.reflection_model == "anthropic:/claude-3-5-sonnet-20241022"
    assert opt.lm_kwargs == {"temperature": 0.9, "max_tokens": 4096}


def test_metaprompt_optimizer_invalid_lm_kwargs():
    # Non-dict lm_kwargs values are rejected at construction time.
    for bad_value in ("invalid", 123):
        with pytest.raises(MlflowException, match="`lm_kwargs` must be a dictionary"):
            MetaPromptOptimizer(reflection_model="openai:/gpt-4o", lm_kwargs=bad_value)
def test_extract_template_variables():
    # Each prompt maps to the set of its {{var}} placeholder names.
    opt = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
    extracted = opt._extract_template_variables(
        {
            "instruction": "Answer {{question}} about {{topic}}",
            "system": "You are a {{role}}",
        }
    )
    assert extracted["instruction"] == {"question", "topic"}
    assert extracted["system"] == {"role"}
def test_validate_template_variables_success():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
original = {"instruction": "Answer {{question}}"}
new = {"instruction": "Please answer this {{question}} carefully"}
# Should not raise
assert optimizer._validate_template_variables(original, new) is True
def test_validate_template_variables_missing_var():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
original = {"instruction": "Answer {{question}}"}
new = {"instruction": "Answer the question"} # Missing {{question}}
with pytest.raises(MlflowException, match="Missing.*question"):
optimizer._validate_template_variables(original, new)
def test_validate_template_variables_extra_var():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
original = {"instruction": "Answer {{question}}"}
new = {"instruction": "Answer {{question}} about {{topic}}"} # Extra {{topic}}
with pytest.raises(MlflowException, match="Extra.*topic"):
optimizer._validate_template_variables(original, new)
def test_validate_prompt_names_missing():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
original = {"instruction": "Answer {{question}}", "system": "You are helpful"}
new = {"instruction": "Answer {{question}}"}
with pytest.raises(MlflowException, match="Prompts missing.*system"):
optimizer._validate_prompt_names(original, new)
def test_validate_prompt_names_unexpected():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
original = {"instruction": "Answer {{question}}"}
new = {
"instruction": "Answer {{question}}",
"extra_prompt": "This is unexpected",
}
with pytest.raises(MlflowException, match="Unexpected prompts.*extra_prompt"):
optimizer._validate_prompt_names(original, new)
def test_validate_prompt_names_success():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
original = {"instruction": "Answer {{question}}", "system": "You are helpful"}
new = {"instruction": "Answer {{question}}", "system": "You are an expert"}
assert optimizer._validate_prompt_names(original, new) is True
def test_build_zero_shot_meta_prompt(sample_target_prompts):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
template_vars = optimizer._extract_template_variables(sample_target_prompts)
meta_prompt = optimizer._build_zero_shot_meta_prompt(sample_target_prompts, template_vars)
assert "PROMPT ENGINEERING BEST PRACTICES" in meta_prompt
assert "{{question}}" in meta_prompt or "question" in meta_prompt
assert "instruction" in meta_prompt
assert "JSON" in meta_prompt
def test_build_few_shot_meta_prompt(sample_train_data, sample_target_prompts):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
template_vars = optimizer._extract_template_variables(sample_target_prompts)
eval_results = mock_eval_fn(sample_target_prompts, sample_train_data)
meta_prompt = optimizer._build_few_shot_meta_prompt(
sample_target_prompts, template_vars, eval_results
)
assert "EVALUATION EXAMPLES" in meta_prompt
assert "Example 1:" in meta_prompt
assert "Score:" in meta_prompt
assert "JSON" in meta_prompt
def test_build_few_shot_meta_prompt_empty_eval_results(sample_target_prompts):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
template_vars = optimizer._extract_template_variables(sample_target_prompts)
with pytest.raises(MlflowException, match="Few-shot metaprompting requires evaluation results"):
optimizer._build_few_shot_meta_prompt(sample_target_prompts, template_vars, [])
def test_format_examples(sample_train_data):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
eval_results = mock_eval_fn({}, sample_train_data)
formatted = optimizer._format_examples(eval_results[:2])
assert "Example 1:" in formatted
assert "Example 2:" in formatted
assert "Input:" in formatted
assert "Output:" in formatted
assert "Score:" in formatted
def test_call_reflection_model_success(mock_litellm_response):
with patch("litellm.completion", return_value=mock_litellm_response) as mock_completion:
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer._call_reflection_model("test prompt")
assert isinstance(result, dict)
assert "instruction" in result
assert "{{question}}" in result["instruction"]
# Verify litellm.completion was called with correct base parameters
mock_completion.assert_called_once()
call_kwargs = mock_completion.call_args.kwargs
assert call_kwargs["model"] == "openai/gpt-4o"
assert call_kwargs["response_format"] == {"type": "json_object"}
assert call_kwargs["max_retries"] == 3
def test_call_reflection_model_with_markdown():
# Test response with markdown code blocks
mock_response = Mock()
mock_response.choices = [Mock()]
mock_response.choices[0].message = Mock()
mock_response.choices[0].message.content = """```json
{
"instruction": "Improved: Answer {{question}}"
}
```"""
with patch("litellm.completion", return_value=mock_response):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer._call_reflection_model("test prompt")
assert isinstance(result, dict)
assert "instruction" in result
def test_call_reflection_model_litellm_not_installed():
with patch.dict(sys.modules, {"litellm": None}):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
with pytest.raises(ImportError, match="litellm is required"):
optimizer._call_reflection_model("test prompt")
def test_call_reflection_model_llm_failure():
with patch("litellm.completion", side_effect=Exception("API error")):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
with pytest.raises(MlflowException, match="Failed to call reflection model"):
optimizer._call_reflection_model("test prompt")
def test_call_reflection_model_with_lm_kwargs(mock_litellm_response):
custom_lm_kwargs = {"temperature": 0.5, "max_tokens": 2048, "top_p": 0.9}
with patch("litellm.completion", return_value=mock_litellm_response) as mock_completion:
optimizer = MetaPromptOptimizer(
reflection_model="openai:/gpt-4o", lm_kwargs=custom_lm_kwargs
)
result = optimizer._call_reflection_model("test prompt")
assert isinstance(result, dict)
# Verify that custom lm_kwargs were passed through
mock_completion.assert_called_once()
call_kwargs = mock_completion.call_args.kwargs
assert call_kwargs["temperature"] == 0.5
assert call_kwargs["max_tokens"] == 2048
assert call_kwargs["top_p"] == 0.9
# Also verify base parameters are still present
assert call_kwargs["model"] == "openai/gpt-4o"
assert call_kwargs["response_format"] == {"type": "json_object"}
assert call_kwargs["max_retries"] == 3
def test_optimize_zero_shot_mode(sample_target_prompts, mock_litellm_response):
with patch("litellm.completion", return_value=mock_litellm_response) as mock_completion:
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer.optimize(
eval_fn=Mock(), # Not used in zero-shot
train_data=[], # Empty triggers zero-shot
target_prompts=sample_target_prompts,
enable_tracking=False,
)
assert isinstance(result, PromptOptimizerOutput)
assert result.initial_eval_score is None # No evaluation in zero-shot
assert result.final_eval_score is None
assert "instruction" in result.optimized_prompts
assert "{{question}}" in result.optimized_prompts["instruction"]
# Zero-shot uses single pass
assert mock_completion.call_count == 1
def test_optimize_few_shot_mode(sample_train_data, sample_target_prompts, mock_litellm_response):
with patch("litellm.completion", return_value=mock_litellm_response) as mock_completion:
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer.optimize(
eval_fn=mock_eval_fn,
train_data=sample_train_data,
target_prompts=sample_target_prompts,
enable_tracking=False,
)
assert isinstance(result, PromptOptimizerOutput)
assert result.initial_eval_score is not None
assert result.final_eval_score is not None # Sanity check evaluation on train data
assert "instruction" in result.optimized_prompts
assert mock_completion.call_count == 1 # Single pass
def test_optimize_few_shot_with_baseline_eval(sample_train_data, sample_target_prompts):
# Mock litellm to return improved prompts
mock_response = Mock()
mock_response.choices = [Mock()]
mock_response.choices[0].message = Mock()
mock_response.choices[0].message.content = json.dumps(
{"instruction": "Better: Answer {{question}}"}
)
# Mock eval_fn that returns scores
def mock_eval_fn(candidate_prompts, dataset):
return [
EvaluationResultRecord(
inputs=record["inputs"],
outputs="mock output",
expectations=record["outputs"],
score=0.7,
trace=Mock(),
rationales={},
)
for record in dataset
]
with patch("litellm.completion", return_value=mock_response):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer.optimize(
eval_fn=mock_eval_fn,
train_data=sample_train_data,
target_prompts=sample_target_prompts,
enable_tracking=False,
)
# Should have both baseline and final eval scores (sanity check)
assert result.initial_eval_score is not None
assert result.final_eval_score is not None
assert "Better" in result.optimized_prompts["instruction"]
def test_optimize_preserves_template_variables(sample_train_data):
# Mock response that drops the {{question}} variable
mock_response = Mock()
mock_response.choices = [Mock()]
mock_response.choices[0].message = Mock()
mock_response.choices[0].message.content = json.dumps(
{"instruction": "Answer the question"} # Missing {{question}}
)
prompts = {"instruction": "Answer {{question}}"}
with patch("litellm.completion", return_value=mock_response):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer.optimize(
eval_fn=mock_eval_fn,
train_data=sample_train_data,
target_prompts=prompts,
enable_tracking=False,
)
# Should keep original prompts due to validation failure
# (caught as exception and logged as warning)
assert "{{question}}" in result.optimized_prompts["instruction"]
def test_optimize_with_multiple_prompts(sample_train_data, sample_target_prompts_multiple):
mock_response = Mock()
mock_response.choices = [Mock()]
mock_response.choices[0].message = Mock()
mock_response.choices[0].message.content = json.dumps(
{
"system_prompt": "Improved: You are an expert assistant.",
"instruction": "Improved: Answer {{question}}",
}
)
with patch("litellm.completion", return_value=mock_response):
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
result = optimizer.optimize(
eval_fn=mock_eval_fn,
train_data=sample_train_data,
target_prompts=sample_target_prompts_multiple,
enable_tracking=False,
)
assert "system_prompt" in result.optimized_prompts
assert "instruction" in result.optimized_prompts
assert "{{question}}" in result.optimized_prompts["instruction"]
def test_build_zero_shot_meta_prompt_with_guidelines(sample_target_prompts):
custom_guidelines = "Focus on concise, accurate answers for finance domain."
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o", guidelines=custom_guidelines)
template_vars = optimizer._extract_template_variables(sample_target_prompts)
meta_prompt = optimizer._build_zero_shot_meta_prompt(sample_target_prompts, template_vars)
# Verify structure
assert "CUSTOM GUIDELINES:" in meta_prompt
assert custom_guidelines in meta_prompt
assert "TEMPLATE VARIABLES:" in meta_prompt
assert "PROMPT ENGINEERING BEST PRACTICES:" in meta_prompt
def test_build_few_shot_meta_prompt_with_guidelines(sample_target_prompts):
custom_guidelines = "Focus on concise, accurate answers for finance domain."
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o", guidelines=custom_guidelines)
template_vars = optimizer._extract_template_variables(sample_target_prompts)
# Create sample evaluation results
eval_results = [
EvaluationResultRecord(
inputs={"question": "test"},
outputs="answer",
expectations="answer",
score=0.8,
trace=Mock(),
rationales={"correctness": "Good"},
)
]
meta_prompt = optimizer._build_few_shot_meta_prompt(
sample_target_prompts, template_vars, eval_results
)
# Verify structure
assert "CUSTOM GUIDELINES:" in meta_prompt
assert custom_guidelines in meta_prompt
assert "TEMPLATE VARIABLES:" in meta_prompt
assert "EVALUATION EXAMPLES" in meta_prompt # Now includes score in header
assert "Current Score:" in meta_prompt
def test_compute_per_scorer_scores():
optimizer = MetaPromptOptimizer(reflection_model="openai:/gpt-4o")
# Test with multiple results having individual scores
eval_results = [
EvaluationResultRecord(
inputs={"q": "1"},
outputs="a",
expectations="a",
score=0.8,
trace=Mock(),
rationales={},
individual_scores={"Correctness": 0.9, "Safety": 0.7},
),
EvaluationResultRecord(
inputs={"q": "2"},
outputs="b",
expectations="b",
score=0.6,
trace=Mock(),
rationales={},
individual_scores={"Correctness": 0.7, "Safety": 0.5},
),
]
per_scorer = optimizer._compute_per_scorer_scores(eval_results)
assert per_scorer == {"Correctness": 0.8, "Safety": 0.6} # Average of each scorer
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/optimize/optimizers/test_metaprompt_optimizer.py",
"license": "Apache License 2.0",
"lines": 379,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/pyfunc/custom_model/transitive_test/model_with_transitive.py | from custom_model.transitive_test.transitive_dependency import some_function
from mlflow.pyfunc import PythonModel
class ModelWithTransitiveDependency(PythonModel):
def predict(self, context, model_input, params=None):
result = some_function()
return [result] * len(model_input)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/pyfunc/custom_model/transitive_test/model_with_transitive.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:.claude/skills/src/skills/commands/fetch_unresolved_comments.py | # ruff: noqa: T201
"""Fetch unresolved PR review comments using GitHub GraphQL API."""
from __future__ import annotations
import argparse
import asyncio
from typing import Any
from pydantic import BaseModel
from skills.github import GitHubClient, parse_pr_url
from skills.github.types import ReviewComment, ReviewThread
class UnresolvedCommentsResult(BaseModel):
total: int
by_file: dict[str, list[ReviewThread]]
REVIEW_THREADS_QUERY = """
query($owner: String!, $repo: String!, $prNumber: Int!) {
repository(owner: $owner, name: $repo) {
pullRequest(number: $prNumber) {
reviewThreads(first: 100) {
nodes {
id
isResolved
isOutdated
comments(first: 100) {
nodes {
id
databaseId
body
path
line
startLine
diffHunk
author {
login
}
createdAt
updatedAt
}
}
}
}
}
}
}
"""
def format_comments(data: dict[str, Any]) -> UnresolvedCommentsResult:
"""Format unresolved comments grouped by file."""
threads = data["data"]["repository"]["pullRequest"]["reviewThreads"]["nodes"]
by_file: dict[str, list[ReviewThread]] = {}
total_comments = 0
for thread in threads:
if thread["isResolved"] or thread["isOutdated"]:
continue
comments: list[ReviewComment] = []
path = None
line = None
start_line = None
diff_hunk = None
for comment in thread["comments"]["nodes"]:
if path is None:
path = comment["path"]
line = comment["line"]
start_line = comment.get("startLine")
diff_hunk = comment.get("diffHunk")
comments.append(
ReviewComment(
id=comment["databaseId"],
body=comment["body"],
author=comment["author"]["login"] if comment["author"] else "unknown",
createdAt=comment["createdAt"],
)
)
total_comments += 1
if path and comments:
if path not in by_file:
by_file[path] = []
by_file[path].append(
ReviewThread(
thread_id=thread["id"],
line=line,
startLine=start_line,
diffHunk=diff_hunk,
comments=comments,
)
)
return UnresolvedCommentsResult(total=total_comments, by_file=by_file)
async def fetch_unresolved_comments(pr_url: str) -> UnresolvedCommentsResult:
owner, repo, pr_number = parse_pr_url(pr_url)
async with GitHubClient() as client:
data = await client.graphql(
REVIEW_THREADS_QUERY,
{"owner": owner, "repo": repo, "prNumber": pr_number},
)
return format_comments(data)
def register(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
parser = subparsers.add_parser(
"fetch-unresolved-comments",
help="Fetch unresolved PR review comments",
)
parser.add_argument("pr_url", help="GitHub PR URL")
parser.set_defaults(func=run)
def run(args: argparse.Namespace) -> None:
result = asyncio.run(fetch_unresolved_comments(args.pr_url))
print(result.model_dump_json(indent=2))
| {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/commands/fetch_unresolved_comments.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:.claude/skills/src/skills/github/client.py | from collections.abc import AsyncIterator
from typing import Any, cast
import aiohttp
from typing_extensions import Self
from skills.github.types import Job, JobRun, PullRequest
from skills.github.utils import get_github_token
class GitHubClient:
def __init__(self, token: str | None = None) -> None:
self.token = token or get_github_token()
self._session: aiohttp.ClientSession | None = None
async def __aenter__(self) -> Self:
headers = {
"Authorization": f"Bearer {self.token}",
"Accept": "application/vnd.github+json",
}
self._session = aiohttp.ClientSession(
base_url="https://api.github.com",
headers=headers,
)
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: object,
) -> None:
if self._session:
await self._session.close()
async def _get_json(
self, endpoint: str, params: dict[str, Any] | None = None
) -> dict[str, Any]:
if self._session is None:
raise RuntimeError("GitHubClient must be used as async context manager")
async with self._session.get(endpoint, params=params) as resp:
resp.raise_for_status()
return cast(dict[str, Any], await resp.json())
async def _get_text(self, endpoint: str, accept: str) -> str:
if self._session is None:
raise RuntimeError("GitHubClient must be used as async context manager")
headers = {"Accept": accept}
async with self._session.get(endpoint, headers=headers) as resp:
resp.raise_for_status()
return await resp.text()
async def get_pr(self, owner: str, repo: str, pr_number: int) -> PullRequest:
data = await self._get_json(f"/repos/{owner}/{repo}/pulls/{pr_number}")
return PullRequest.model_validate(data)
async def get_pr_diff(self, owner: str, repo: str, pr_number: int) -> str:
return await self._get_text(
f"/repos/{owner}/{repo}/pulls/{pr_number}",
accept="application/vnd.github.v3.diff",
)
async def get_compare_diff(self, owner: str, repo: str, base: str, head: str) -> str:
return await self._get_text(
f"/repos/{owner}/{repo}/compare/{base}...{head}",
accept="application/vnd.github.v3.diff",
)
async def graphql(self, query: str, variables: dict[str, Any]) -> dict[str, Any]:
if self._session is None:
raise RuntimeError("GitHubClient must be used as async context manager")
payload = {"query": query, "variables": variables}
async with self._session.post(
"https://api.github.com/graphql",
json=payload,
) as resp:
resp.raise_for_status()
return cast(dict[str, Any], await resp.json())
async def get_raw(self, endpoint: str) -> aiohttp.ClientResponse:
"""Get raw response for streaming."""
if self._session is None:
raise RuntimeError("GitHubClient must be used as async context manager")
return await self._session.get(endpoint, allow_redirects=True)
async def get_workflow_runs(
self,
owner: str,
repo: str,
head_sha: str | None = None,
status: str | None = None,
) -> AsyncIterator[JobRun]:
"""Get workflow runs for a repository."""
params: dict[str, Any] = {"per_page": 100}
if head_sha:
params["head_sha"] = head_sha
if status:
params["status"] = status
page = 1
while True:
params["page"] = page
data = await self._get_json(f"/repos/{owner}/{repo}/actions/runs", params)
runs = data.get("workflow_runs", [])
if not runs:
break
for run in runs:
yield JobRun.model_validate(run)
if len(runs) < 100:
break
page += 1
async def get_jobs(self, owner: str, repo: str, run_id: int) -> AsyncIterator[Job]:
"""Get jobs for a workflow run."""
page = 1
while True:
data = await self._get_json(
f"/repos/{owner}/{repo}/actions/runs/{run_id}/jobs",
{"per_page": 100, "page": page},
)
jobs = data.get("jobs", [])
if not jobs:
break
for job in jobs:
yield Job.model_validate(job)
if len(jobs) < 100:
break
page += 1
async def get_job(self, owner: str, repo: str, job_id: int) -> Job:
"""Get a specific job."""
data = await self._get_json(f"/repos/{owner}/{repo}/actions/jobs/{job_id}")
return Job.model_validate(data)
async def get_job_run(self, owner: str, repo: str, run_id: int) -> JobRun:
"""Get a specific workflow run."""
data = await self._get_json(f"/repos/{owner}/{repo}/actions/runs/{run_id}")
return JobRun.model_validate(data)
| {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/github/client.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:.claude/skills/src/skills/github/types.py | from pydantic import BaseModel
class GitRef(BaseModel):
sha: str
ref: str
class PullRequest(BaseModel):
title: str
body: str | None
head: GitRef
class ReviewComment(BaseModel):
id: int
body: str
author: str
createdAt: str
class ReviewThread(BaseModel):
thread_id: str
line: int | None
startLine: int | None
diffHunk: str | None
comments: list[ReviewComment]
class JobStep(BaseModel):
name: str
status: str
conclusion: str | None
number: int
started_at: str | None
completed_at: str | None
class Job(BaseModel):
id: int
run_id: int
url: str
name: str
workflow_name: str
status: str
conclusion: str | None
html_url: str
started_at: str | None
completed_at: str | None
steps: list[JobStep] = []
class JobRun(BaseModel):
id: int
name: str
head_sha: str
status: str
conclusion: str | None
html_url: str
created_at: str
updated_at: str
| {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/github/types.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:.claude/skills/src/skills/github/utils.py | # ruff: noqa: T201
import os
import re
import subprocess
import sys
def get_github_token() -> str:
if token := os.environ.get("GH_TOKEN"):
return token
try:
return subprocess.check_output(["gh", "auth", "token"], text=True).strip()
except (subprocess.CalledProcessError, FileNotFoundError):
print("Error: GH_TOKEN not found (set env var or install gh CLI)", file=sys.stderr)
sys.exit(1)
def parse_pr_url(url: str) -> tuple[str, str, int]:
if m := re.match(r"https://github\.com/([^/]+)/([^/]+)/pull/(\d+)", url):
return m.group(1), m.group(2), int(m.group(3))
raise ValueError(f"Invalid PR URL: {url}")
| {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/github/utils.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:.claude/skills/src/skills/cli.py | import argparse
from skills.commands import analyze_ci, fetch_diff, fetch_unresolved_comments
def main() -> None:
parser = argparse.ArgumentParser(prog="skills")
subparsers = parser.add_subparsers(dest="command", required=True)
analyze_ci.register(subparsers)
fetch_diff.register(subparsers)
fetch_unresolved_comments.register(subparsers)
args = parser.parse_args()
args.func(args)
| {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/cli.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/server/jobs/helpers.py | """Shared test helpers for job execution tests."""
import os
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
from mlflow.entities._job_status import JobStatus
from mlflow.server import (
ARTIFACT_ROOT_ENV_VAR,
BACKEND_STORE_URI_ENV_VAR,
HUEY_STORAGE_PATH_ENV_VAR,
handlers,
)
from mlflow.server.jobs import (
_ALLOWED_JOB_NAME_LIST,
_SUPPORTED_JOB_FUNCTION_LIST,
get_job,
)
from mlflow.server.jobs.utils import _launch_job_runner
from mlflow.store.jobs.sqlalchemy_store import SqlAlchemyJobStore
def _get_mlflow_repo_home():
root = str(Path(__file__).resolve().parents[3])
return f"{root}{os.pathsep}{path}" if (path := os.environ.get("PYTHONPATH")) else root
@contextmanager
def _launch_job_runner_for_test():
new_pythonpath = _get_mlflow_repo_home()
with _launch_job_runner(
{"PYTHONPATH": new_pythonpath},
os.getpid(),
) as proc:
try:
yield proc
finally:
proc.kill()
@contextmanager
def _setup_job_runner(
monkeypatch: pytest.MonkeyPatch,
tmp_path: Path,
supported_job_functions: list[str],
allowed_job_names: list[str],
backend_store_uri: str | None = None,
):
backend_store_uri = backend_store_uri or f"sqlite:///{tmp_path / 'mlflow.db'}"
huey_store_path = tmp_path / "huey_store"
huey_store_path.mkdir()
default_artifact_root = str(tmp_path / "artifacts")
try:
monkeypatch.setenv("MLFLOW_SERVER_ENABLE_JOB_EXECUTION", "true")
monkeypatch.setenv(BACKEND_STORE_URI_ENV_VAR, backend_store_uri)
monkeypatch.setenv(ARTIFACT_ROOT_ENV_VAR, default_artifact_root)
monkeypatch.setenv(HUEY_STORAGE_PATH_ENV_VAR, str(huey_store_path))
monkeypatch.setenv("_MLFLOW_SUPPORTED_JOB_FUNCTION_LIST", ",".join(supported_job_functions))
monkeypatch.setenv("_MLFLOW_ALLOWED_JOB_NAME_LIST", ",".join(allowed_job_names))
_SUPPORTED_JOB_FUNCTION_LIST.clear()
_SUPPORTED_JOB_FUNCTION_LIST.extend(supported_job_functions)
_ALLOWED_JOB_NAME_LIST.clear()
_ALLOWED_JOB_NAME_LIST.extend(allowed_job_names)
# Pre-initialize the database before launching the job runner subprocess
# to prevent race conditions during concurrent Alembic migrations
SqlAlchemyJobStore(backend_store_uri)
with _launch_job_runner_for_test() as job_runner_proc:
time.sleep(10)
yield job_runner_proc
finally:
# Clear the huey instance cache AFTER killing the runner to ensure clean state for next test
import mlflow.server.jobs.utils
mlflow.server.jobs.utils._huey_instance_map.clear()
if handlers._job_store is not None:
# close all db connections and drops connection pool
handlers._job_store.engine.dispose()
handlers._job_store = None
def wait_for_process_exit(pid: int, timeout: float = 5) -> None:
"""Poll until a process is no longer alive, or fail the test."""
from mlflow.server.jobs.utils import is_process_alive
deadline = time.time() + timeout
while time.time() < deadline:
if not is_process_alive(pid):
return
time.sleep(0.1)
pytest.fail(f"Process {pid} still alive after {timeout}s")
def wait_job_finalize(job_id, timeout=60):
beg_time = time.time()
while time.time() - beg_time <= timeout:
job = get_job(job_id)
if JobStatus.is_finalized(job.status):
return
time.sleep(0.5)
raise TimeoutError("The job is not finalized within the timeout.")
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/helpers.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/jobs/test_online_scoring_jobs.py | import json
import os
import uuid
from dataclasses import asdict
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities._job_status import JobStatus
from mlflow.genai.judges import make_judge
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.builtin_scorers import Completeness, RelevanceToQuery
from mlflow.genai.scorers.job import (
run_online_scoring_scheduler,
run_online_session_scorer_job,
run_online_trace_scorer_job,
)
from mlflow.genai.scorers.online.entities import OnlineScorer, OnlineScoringConfig
from mlflow.server.jobs import get_job, submit_job
from tests.server.jobs.helpers import _setup_job_runner, wait_job_finalize
pytestmark = pytest.mark.skipif(
os.name == "nt", reason="MLflow job execution is not supported on Windows"
)
def make_online_scorer_dict(scorer: Scorer, sample_rate: float = 1.0) -> dict[str, Any]:
return {
"name": scorer.name,
"serialized_scorer": json.dumps(scorer.model_dump()),
"online_config": {
"online_scoring_config_id": uuid.uuid4().hex,
"scorer_id": uuid.uuid4().hex,
"sample_rate": sample_rate,
"experiment_id": "exp1",
"filter_string": None,
},
}
def test_run_online_trace_scorer_job_calls_processor():
mock_processor = MagicMock()
mock_tracking_store = MagicMock()
with (
patch("mlflow.genai.scorers.job._get_tracking_store", return_value=mock_tracking_store),
patch(
"mlflow.genai.scorers.online.trace_processor.OnlineTraceScoringProcessor.create",
return_value=mock_processor,
) as mock_create,
):
online_scorers = [make_online_scorer_dict(Completeness())]
run_online_trace_scorer_job(experiment_id="exp1", online_scorers=online_scorers)
exp_id, scorers, store = mock_create.call_args[0]
assert exp_id == "exp1"
assert len(scorers) == 1
assert scorers[0].name == "completeness"
assert store is mock_tracking_store
mock_processor.process_traces.assert_called_once()
def test_run_online_trace_scorer_job_runs_exclusively_per_experiment(monkeypatch, tmp_path: Path):
"""
Test that online trace scorer jobs are exclusive per experiment_id.
When two jobs are submitted for the same experiment with different scorers,
only one should run and the other should be canceled due to exclusivity.
"""
with _setup_job_runner(
monkeypatch,
tmp_path,
supported_job_functions=["mlflow.genai.scorers.job.run_online_trace_scorer_job"],
allowed_job_names=["run_online_trace_scorer"],
):
# Create two different scorer lists for the same experiment using asdict()
scorer1 = OnlineScorer(
name="completeness",
serialized_scorer=json.dumps(Completeness().model_dump()),
online_config=OnlineScoringConfig(
online_scoring_config_id="config1",
scorer_id="completeness",
sample_rate=1.0,
experiment_id="exp1",
filter_string=None,
),
)
scorer2 = OnlineScorer(
name="relevance_to_query",
serialized_scorer=json.dumps(RelevanceToQuery().model_dump()),
online_config=OnlineScoringConfig(
online_scoring_config_id="config2",
scorer_id="relevance_to_query",
sample_rate=1.0,
experiment_id="exp1",
filter_string=None,
),
)
params1 = {"experiment_id": "exp1", "online_scorers": [asdict(scorer1)]}
params2 = {"experiment_id": "exp1", "online_scorers": [asdict(scorer2)]}
# Submit two jobs with same experiment_id but different scorers
job1_id = submit_job(run_online_trace_scorer_job, params1).job_id
job2_id = submit_job(run_online_trace_scorer_job, params2).job_id
wait_job_finalize(job1_id)
wait_job_finalize(job2_id)
job1 = get_job(job1_id)
job2 = get_job(job2_id)
# One job is canceled (skipped due to exclusive lock on experiment_id),
# the other either succeeds or fails (we only care about exclusivity, not job success)
statuses = {job1.status, job2.status}
assert JobStatus.CANCELED in statuses
# The non-canceled job should have attempted to run (either SUCCEEDED or FAILED)
non_canceled_statuses = statuses - {JobStatus.CANCELED}
assert len(non_canceled_statuses) == 1
assert non_canceled_statuses.pop() in {JobStatus.SUCCEEDED, JobStatus.FAILED}
def test_run_online_session_scorer_job_calls_processor():
mock_processor = MagicMock()
mock_tracking_store = MagicMock()
with (
patch("mlflow.genai.scorers.job._get_tracking_store", return_value=mock_tracking_store),
patch(
"mlflow.genai.scorers.online.session_processor.OnlineSessionScoringProcessor.create",
return_value=mock_processor,
) as mock_create,
):
online_scorers = [make_online_scorer_dict(Completeness())]
run_online_session_scorer_job(experiment_id="exp1", online_scorers=online_scorers)
exp_id, scorers, store = mock_create.call_args[0]
assert exp_id == "exp1"
assert len(scorers) == 1
assert scorers[0].name == "completeness"
assert store is mock_tracking_store
mock_processor.process_sessions.assert_called_once()
def test_scheduler_submits_jobs_via_submit_job():
    """The scheduler splits active scorers for an experiment into one trace-level
    job and one session-level job, each submitted with its matching scorers."""
    # Create trace-level scorers (2)
    trace_scorer_1 = Completeness()
    trace_scorer_2 = RelevanceToQuery()
    # Create session-level scorer (1) using make_judge with {{ conversation }}
    session_scorer = make_judge(
        name="conversation_judge",
        instructions="Evaluate {{ conversation }} for quality",
        feedback_value_type=str,
        model="openai:/gpt-4",
    )
    config1 = OnlineScoringConfig(
        online_scoring_config_id=uuid.uuid4().hex,
        scorer_id=uuid.uuid4().hex,
        sample_rate=1.0,
        experiment_id="exp1",
        filter_string=None,
    )
    config2 = OnlineScoringConfig(
        online_scoring_config_id=uuid.uuid4().hex,
        scorer_id=uuid.uuid4().hex,
        sample_rate=1.0,
        experiment_id="exp1",
        filter_string=None,
    )
    config3 = OnlineScoringConfig(
        online_scoring_config_id=uuid.uuid4().hex,
        scorer_id=uuid.uuid4().hex,
        sample_rate=1.0,
        experiment_id="exp1",
        filter_string=None,
    )
    mock_scorer1 = OnlineScorer(
        name="completeness",
        serialized_scorer=json.dumps(trace_scorer_1.model_dump()),
        online_config=config1,
    )
    mock_scorer2 = OnlineScorer(
        name="relevance_to_query",
        serialized_scorer=json.dumps(trace_scorer_2.model_dump()),
        online_config=config2,
    )
    mock_scorer3 = OnlineScorer(
        name="conversation_judge",
        serialized_scorer=json.dumps(session_scorer.model_dump()),
        online_config=config3,
    )
    mock_tracking_store = MagicMock()
    mock_tracking_store.get_active_online_scorers.return_value = [
        mock_scorer1,
        mock_scorer2,
        mock_scorer3,
    ]
    with (
        patch("mlflow.genai.scorers.job._get_tracking_store", return_value=mock_tracking_store),
        patch("mlflow.genai.scorers.job.submit_job") as mock_submit_job,
    ):
        run_online_scoring_scheduler()
        # Should submit both trace and session jobs for exp1
        assert mock_submit_job.call_count == 2
        # Verify correct job functions and parameters were passed
        call_args_list = mock_submit_job.call_args_list
        trace_scorer_calls = [
            call for call in call_args_list if call[0][0] == run_online_trace_scorer_job
        ]
        session_scorer_calls = [
            call for call in call_args_list if call[0][0] == run_online_session_scorer_job
        ]
        # Should have 1 trace job and 1 session job
        assert len(trace_scorer_calls) == 1
        assert len(session_scorer_calls) == 1
        # Verify trace job has 2 scorers with correct names
        trace_params = trace_scorer_calls[0].args[1]
        assert len(trace_params["online_scorers"]) == 2
        assert trace_params["experiment_id"] == "exp1"
        trace_scorer_names = {s["name"] for s in trace_params["online_scorers"]}
        assert trace_scorer_names == {"completeness", "relevance_to_query"}
        # Verify session job has 1 scorer with correct name
        session_params = session_scorer_calls[0].args[1]
        assert len(session_params["online_scorers"]) == 1
        assert session_params["experiment_id"] == "exp1"
        assert session_params["online_scorers"][0]["name"] == "conversation_judge"
def test_scheduler_skips_invalid_scorers():
    """Scorers whose serialized payload cannot be parsed are logged and skipped."""

    def _fresh_config():
        # Each scorer needs its own unique config ids.
        return OnlineScoringConfig(
            online_scoring_config_id=uuid.uuid4().hex,
            scorer_id=uuid.uuid4().hex,
            sample_rate=1.0,
            experiment_id="exp1",
            filter_string=None,
        )

    good = OnlineScorer(
        name="completeness",
        serialized_scorer=json.dumps(Completeness().model_dump()),
        online_config=_fresh_config(),
    )
    bad = OnlineScorer(
        name="invalid_scorer",
        serialized_scorer='{"bad": "data"}',
        online_config=_fresh_config(),
    )
    store = MagicMock()
    store.get_active_online_scorers.return_value = [good, bad]
    with (
        patch("mlflow.genai.scorers.job._get_tracking_store", return_value=store),
        patch("mlflow.genai.scorers.job.submit_job") as submit_mock,
        patch("mlflow.genai.scorers.job._logger") as logger_mock,
    ):
        run_online_scoring_scheduler()
        logger_mock.warning.assert_called_once()
        assert "invalid_scorer" in logger_mock.warning.call_args[0][0]
        assert submit_mock.call_count == 1  # Only valid scorer submitted
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/test_online_scoring_jobs.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/trace_processor.py | """Online scoring processor for executing scorers on traces."""
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from mlflow.entities import Trace
from mlflow.environment_variables import MLFLOW_ONLINE_SCORING_MAX_WORKER_THREADS
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.online.constants import EXCLUDE_EVAL_RUN_TRACES_FILTER, MAX_TRACES_PER_JOB
from mlflow.genai.scorers.online.entities import OnlineScorer
from mlflow.genai.scorers.online.sampler import OnlineScorerSampler
from mlflow.genai.scorers.online.trace_checkpointer import (
OnlineTraceCheckpointManager,
OnlineTraceScoringCheckpoint,
)
from mlflow.genai.scorers.online.trace_loader import OnlineTraceLoader
from mlflow.store.tracking.abstract_store import AbstractStore
_logger = logging.getLogger(__name__)
@dataclass
class TraceScoringTask:
    """A task to score a single trace with multiple scorers.

    Tasks are first created with ``trace=None`` (only the trace id and
    timestamp are known from the trace-info query); the full trace is attached
    later, after the batch fetch in ``process_traces``. Tasks whose trace could
    not be fetched keep ``trace=None`` and are skipped during scoring.
    """

    # Full trace payload; None until the batch fetch attaches it (or if the
    # trace could not be fetched). The previous `Trace` annotation was wrong.
    trace: Trace | None
    # Scorers selected for this trace after sampling and deduplication.
    scorers: list[Scorer]
    # Trace creation timestamp (ms), used for chronological ordering.
    timestamp_ms: int
class OnlineTraceScoringProcessor:
    """Orchestrates online scoring of individual traces.

    One call to ``process_traces`` performs a single incremental pass: compute
    the time window since the last checkpoint, sample traces and scorers, score
    the sampled traces in parallel, log the resulting assessments, and advance
    the checkpoint.
    """

    def __init__(
        self,
        trace_loader: OnlineTraceLoader,
        checkpoint_manager: OnlineTraceCheckpointManager,
        sampler: OnlineScorerSampler,
        experiment_id: str,
    ):
        # Collaborators are injected so the processor is unit-testable with mocks.
        self._trace_loader = trace_loader
        self._checkpoint_manager = checkpoint_manager
        self._sampler = sampler
        self._experiment_id = experiment_id

    @classmethod
    def create(
        cls,
        experiment_id: str,
        online_scorers: list[OnlineScorer],
        tracking_store: AbstractStore,
    ) -> "OnlineTraceScoringProcessor":
        """
        Factory method to create an OnlineTraceScoringProcessor with dependencies.

        Args:
            experiment_id: The experiment ID to process traces from.
            online_scorers: List of OnlineScorer instances.
            tracking_store: The tracking store instance.

        Returns:
            Configured OnlineTraceScoringProcessor instance.
        """
        return cls(
            trace_loader=OnlineTraceLoader(tracking_store),
            checkpoint_manager=OnlineTraceCheckpointManager(tracking_store, experiment_id),
            sampler=OnlineScorerSampler(online_scorers),
            experiment_id=experiment_id,
        )

    def process_traces(self) -> None:
        """
        Execute online scoring for the experiment.

        Fetches traces since the last checkpoint, applies sampling to select
        scorers, runs scoring in parallel, and updates the checkpoint.
        """
        time_window = self._checkpoint_manager.calculate_time_window()
        checkpoint = self._checkpoint_manager.get_checkpoint()
        _logger.debug(
            f"Online scoring for experiment {self._experiment_id}: "
            f"time window [{time_window.min_trace_timestamp_ms}, "
            f"{time_window.max_trace_timestamp_ms}]"
        )
        tasks = self._build_scoring_tasks(time_window, checkpoint)
        if not tasks:
            _logger.debug("No traces selected after sampling, skipping")
            # Still need to advance checkpoint to avoid reprocessing the same time window
            checkpoint = OnlineTraceScoringCheckpoint(
                timestamp_ms=time_window.max_trace_timestamp_ms,
                trace_id=None,
            )
            self._checkpoint_manager.persist_checkpoint(checkpoint)
            return
        _logger.debug(f"Running scoring: {len(tasks)} trace tasks")
        # Tasks were built from lightweight trace infos; fetch the full trace
        # payloads in one batch and attach them. A trace missing from the fetch
        # result leaves task.trace as None and is skipped by _execute_scoring.
        sampled_trace_ids = list(tasks.keys())
        full_traces = self._trace_loader.fetch_traces(sampled_trace_ids)
        trace_map = {t.info.trace_id: t for t in full_traces}
        for trace_id, task in tasks.items():
            task.trace = trace_map.get(trace_id)
        self._execute_scoring(tasks)
        # Find the trace with the latest timestamp to use for checkpoint
        if full_traces:
            # trace_id serves as the tie breaker for traces sharing a timestamp.
            latest_trace = max(full_traces, key=lambda t: (t.info.timestamp_ms, t.info.trace_id))
            checkpoint = OnlineTraceScoringCheckpoint(
                timestamp_ms=latest_trace.info.timestamp_ms,
                trace_id=latest_trace.info.trace_id,
            )
        else:
            # If no traces were fetched, use the end of the time window as checkpoint
            checkpoint = OnlineTraceScoringCheckpoint(
                timestamp_ms=time_window.max_trace_timestamp_ms,
                trace_id=None,
            )
        self._checkpoint_manager.persist_checkpoint(checkpoint)
        _logger.debug(f"Online trace scoring completed for experiment {self._experiment_id}")

    def _build_scoring_tasks(
        self,
        time_window: "OnlineTraceScoringTimeWindow",
        checkpoint: OnlineTraceScoringCheckpoint | None,
    ) -> dict[str, TraceScoringTask]:
        """
        Build scoring tasks by fetching trace infos and applying sampling.

        Args:
            time_window: OnlineTraceScoringTimeWindow with timestamp bounds.
            checkpoint: OnlineTraceScoringCheckpoint with last processed trace info.

        Returns:
            Dictionary mapping trace_id to TraceScoringTask.
        """
        tasks: dict[str, TraceScoringTask] = {}
        # Group scorers by filter string to fetch matching traces in a single query per filter
        for filter_string, scorers in self._sampler.group_scorers_by_filter(
            session_level=False
        ).items():
            # Eval-run traces are always excluded, in addition to any user filter.
            combined_filter = (
                f"{EXCLUDE_EVAL_RUN_TRACES_FILTER} AND {filter_string}"
                if filter_string
                else EXCLUDE_EVAL_RUN_TRACES_FILTER
            )
            trace_infos = self._trace_loader.fetch_trace_infos_in_range(
                self._experiment_id,
                time_window.min_trace_timestamp_ms,
                time_window.max_trace_timestamp_ms,
                combined_filter,
                MAX_TRACES_PER_JOB,
            )
            if not trace_infos:
                _logger.debug(f"No trace infos found for filter: {filter_string}")
                continue
            # Filter out traces at checkpoint boundary that have already been processed.
            # Traces are ordered by (timestamp_ms ASC, trace_id ASC), so we filter out
            # any traces with the checkpoint timestamp and trace_id <= checkpoint.trace_id.
            if checkpoint is not None and checkpoint.trace_id is not None:
                trace_infos = [
                    t
                    for t in trace_infos
                    if not (
                        t.timestamp_ms == checkpoint.timestamp_ms
                        and t.trace_id <= checkpoint.trace_id
                    )
                ]
            _logger.debug(f"Found {len(trace_infos)} trace infos for filter: {filter_string}")
            for trace_info in trace_infos:
                trace_id = trace_info.trace_id
                if selected := self._sampler.sample(trace_id, scorers):
                    # Store just the trace_id and scorers - we'll fetch full traces later
                    if trace_id not in tasks:
                        tasks[trace_id] = TraceScoringTask(
                            trace=None, scorers=[], timestamp_ms=trace_info.timestamp_ms
                        )
                    # Add scorers, avoiding duplicates (same scorer from different filters)
                    existing_scorer_names = {s.name for s in tasks[trace_id].scorers}
                    tasks[trace_id].scorers.extend(
                        s for s in selected if s.name not in existing_scorer_names
                    )
        # Sort tasks by timestamp (ascending) to ensure chronological processing
        # and truncate to MAX_TRACES_PER_JOB (list slicing handles len < MAX).
        sorted_trace_ids = sorted(tasks.keys(), key=lambda tid: (tasks[tid].timestamp_ms, tid))
        return {tid: tasks[tid] for tid in sorted_trace_ids[:MAX_TRACES_PER_JOB]}

    def _log_error_assessments(
        self,
        error: Exception,
        scorers: list[Scorer],
        trace: Trace,
    ) -> None:
        """
        Log error assessments for failed scoring operations.

        Creates and logs error Feedback objects for each scorer when scoring fails,
        making failures visible in the trace's assessment history.

        Args:
            error: The exception that occurred during scoring.
            scorers: List of scorers that were being executed.
            trace: The trace being scored.
        """
        from mlflow.entities import AssessmentSource, AssessmentSourceType, Feedback
        from mlflow.genai.evaluation.harness import _log_assessments

        # One error Feedback per scorer, all carrying the same exception.
        error_feedbacks = [
            Feedback(
                name=scorer.name,
                error=error,
                source=AssessmentSource(source_type=AssessmentSourceType.LLM_JUDGE),
                trace_id=trace.info.trace_id,
            )
            for scorer in scorers
        ]
        try:
            _log_assessments(trace=trace, assessments=error_feedbacks, run_id=None)
        except Exception as log_error:
            # Best effort only: failing to record the failure must not crash the job.
            _logger.warning(
                f"Failed to log error assessments for trace {trace.info.trace_id}: {log_error}",
                exc_info=_logger.isEnabledFor(logging.DEBUG),
            )

    def _execute_scoring(
        self,
        tasks: dict[str, TraceScoringTask],
    ) -> None:
        """
        Execute trace scoring tasks in parallel.

        Args:
            tasks: Trace-level scoring tasks.
        """
        # Import evaluation modules lazily to avoid pulling in pandas at module load
        # time, which would break the skinny client.
        from mlflow.genai.evaluation.entities import EvalItem
        from mlflow.genai.evaluation.harness import _compute_eval_scores, _log_assessments

        with ThreadPoolExecutor(
            max_workers=MLFLOW_ONLINE_SCORING_MAX_WORKER_THREADS.get(),
            thread_name_prefix="OnlineScoring",
        ) as executor:
            futures = {}
            for trace_id, task in tasks.items():
                if task.trace is None:
                    # Full trace could not be fetched (e.g. deleted after sampling).
                    _logger.warning(f"Skipping task with no trace for trace_id: {trace_id}")
                    continue
                eval_item = EvalItem.from_trace(task.trace)
                future = executor.submit(
                    _compute_eval_scores, eval_item=eval_item, scorers=task.scorers
                )
                futures[future] = task
            for future in as_completed(futures):
                task = futures[future]
                try:
                    if feedbacks := future.result():
                        _log_assessments(trace=task.trace, assessments=feedbacks, run_id=None)
                except Exception as e:
                    # One failed trace must not abort the rest of the batch.
                    _logger.warning(
                        f"Failed to score trace {task.trace.info.trace_id}: {e}",
                        exc_info=_logger.isEnabledFor(logging.DEBUG),
                    )
                    self._log_error_assessments(e, task.scorers, task.trace)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/trace_processor.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/online/test_trace_processor.py | import json
import uuid
from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities import Trace, TraceData, TraceInfo
from mlflow.genai.scorers.builtin_scorers import Completeness
from mlflow.genai.scorers.online.entities import OnlineScorer, OnlineScoringConfig
from mlflow.genai.scorers.online.sampler import OnlineScorerSampler
from mlflow.genai.scorers.online.trace_checkpointer import (
OnlineTraceCheckpointManager,
OnlineTraceScoringCheckpoint,
OnlineTraceScoringTimeWindow,
)
from mlflow.genai.scorers.online.trace_loader import OnlineTraceLoader
from mlflow.genai.scorers.online.trace_processor import (
OnlineTraceScoringProcessor,
)
def make_online_scorer(scorer, sample_rate: float = 1.0, filter_string: str | None = None):
    """Wrap a scorer instance in an OnlineScorer backed by a fresh random config."""
    return OnlineScorer(
        name=scorer.name,
        serialized_scorer=json.dumps(scorer.model_dump()),
        online_config=OnlineScoringConfig(
            online_scoring_config_id=uuid.uuid4().hex,
            scorer_id=uuid.uuid4().hex,
            sample_rate=sample_rate,
            experiment_id="exp1",
            filter_string=filter_string,
        ),
    )
def make_trace_info(trace_id: str, timestamp_ms: int = 1000):
    """Build a spec'd TraceInfo mock exposing the given id and timestamp."""
    info = MagicMock(spec=TraceInfo)
    info.trace_id = trace_id
    info.timestamp_ms = timestamp_ms
    return info
def make_trace(trace_id: str, timestamp_ms: int = 1000):
    """Build a spec'd Trace mock whose info carries the given id and timestamp."""
    trace = MagicMock(spec=Trace)
    trace.info = MagicMock(spec=TraceInfo, trace_id=trace_id, timestamp_ms=timestamp_ms)
    trace.data = MagicMock(spec=TraceData)
    return trace
@pytest.fixture
def mock_trace_loader():
    """Spec'd OnlineTraceLoader mock; tests configure return values per case."""
    return MagicMock(spec=OnlineTraceLoader)
@pytest.fixture
def mock_checkpoint_manager():
    """Checkpoint manager mock with a fixed [1000, 2000] window and no prior checkpoint."""
    manager = MagicMock(spec=OnlineTraceCheckpointManager)
    manager.calculate_time_window.return_value = OnlineTraceScoringTimeWindow(
        min_trace_timestamp_ms=1000, max_trace_timestamp_ms=2000
    )
    manager.get_checkpoint.return_value = None
    return manager
@pytest.fixture
def sampler_with_scorers():
    """Sampler with one always-selected (sample_rate=1.0) Completeness scorer."""
    configs = [make_online_scorer(Completeness(), sample_rate=1.0)]
    return OnlineScorerSampler(configs)
@pytest.fixture
def empty_sampler():
    """Sampler configured with no scorers at all."""
    return OnlineScorerSampler([])
def test_process_traces_skips_when_no_scorers(
    mock_trace_loader, mock_checkpoint_manager, empty_sampler
):
    """With no scorers configured, nothing is fetched but the checkpoint advances."""
    proc = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=empty_sampler,
        experiment_id="exp1",
    )
    proc.process_traces()
    # Empty task set: checkpoint still moves to the window end so the same
    # range is not re-scanned, and no full traces are loaded.
    mock_checkpoint_manager.persist_checkpoint.assert_called_once()
    (saved,) = mock_checkpoint_manager.persist_checkpoint.call_args[0]
    assert (saved.timestamp_ms, saved.trace_id) == (2000, None)
    mock_trace_loader.fetch_traces.assert_not_called()
def test_process_traces_updates_checkpoint_when_no_traces(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """If the window contains no traces, the checkpoint still moves to its end."""
    mock_trace_loader.fetch_trace_infos_in_range.return_value = []
    proc = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    proc.process_traces()
    mock_checkpoint_manager.persist_checkpoint.assert_called_once()
    (saved,) = mock_checkpoint_manager.persist_checkpoint.call_args[0]
    assert (saved.timestamp_ms, saved.trace_id) == (2000, None)
def test_process_traces_updates_checkpoint_when_full_traces_empty(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """
    Test that when traces are sampled but fetch_traces returns empty
    (e.g., traces deleted between sampling and fetching), we still
    advance the checkpoint using the time window end.
    """
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [
        make_trace_info("tr-001", 1500),
        make_trace_info("tr-002", 1800),
    ]
    mock_trace_loader.fetch_traces.return_value = []
    proc = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    with patch("mlflow.genai.evaluation.harness._compute_eval_scores", return_value=[]):
        proc.process_traces()
    mock_checkpoint_manager.persist_checkpoint.assert_called_once()
    (saved,) = mock_checkpoint_manager.persist_checkpoint.call_args[0]
    assert (saved.timestamp_ms, saved.trace_id) == (2000, None)
def test_process_traces_filters_checkpoint_boundary(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """Traces at the checkpoint timestamp with trace_id <= checkpoint are skipped."""
    mock_checkpoint_manager.get_checkpoint.return_value = OnlineTraceScoringCheckpoint(
        timestamp_ms=1000, trace_id="tr-002"
    )
    # tr-001/tr-002 share the checkpoint timestamp and have ids <= "tr-002",
    # so only tr-003 (same ts, greater id) and tr-004 (later ts) remain.
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [
        make_trace_info("tr-001", 1000),
        make_trace_info("tr-002", 1000),
        make_trace_info("tr-003", 1000),
        make_trace_info("tr-004", 1500),
    ]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-003", 1000),
        make_trace("tr-004", 1500),
    ]
    processor = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    with patch("mlflow.genai.evaluation.harness._compute_eval_scores") as mock_compute:
        mock_compute.return_value = []
        processor.process_traces()
    mock_trace_loader.fetch_traces.assert_called_once_with(["tr-003", "tr-004"])
def test_process_traces_groups_by_filter(mock_trace_loader, mock_checkpoint_manager):
    """Each distinct filter string results in its own trace-info query."""
    sampler = OnlineScorerSampler(
        [
            make_online_scorer(Completeness(), filter_string="tags.env = 'prod'"),
            make_online_scorer(Completeness(name="c2"), filter_string="tags.env = 'staging'"),
        ]
    )
    mock_trace_loader.fetch_trace_infos_in_range.return_value = []
    proc = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler,
        experiment_id="exp1",
    )
    proc.process_traces()
    assert mock_trace_loader.fetch_trace_infos_in_range.call_count == 2
    # The filter string is the fourth positional argument of each query.
    used_filters = [
        c[0][3] for c in mock_trace_loader.fetch_trace_infos_in_range.call_args_list
    ]
    assert any("tags.env = 'prod'" in f for f in used_filters)
    assert any("tags.env = 'staging'" in f for f in used_filters)
def test_process_traces_excludes_eval_run_traces(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """The query filter always excludes traces produced by evaluation runs."""
    mock_trace_loader.fetch_trace_infos_in_range.return_value = []
    proc = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    proc.process_traces()
    used_filter = mock_trace_loader.fetch_trace_infos_in_range.call_args[0][3]
    assert "metadata.mlflow.sourceRun IS NULL" in used_filter
def test_process_traces_samples_and_scores(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """A sampled trace is scored exactly once and its feedback gets logged."""
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [make_trace_info("tr-001", 1500)]
    mock_trace_loader.fetch_traces.return_value = [make_trace("tr-001", 1500)]
    proc = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    with (
        patch(
            "mlflow.genai.evaluation.harness._compute_eval_scores",
            return_value=[MagicMock()],
        ) as compute_mock,
        patch("mlflow.genai.evaluation.harness._log_assessments") as log_mock,
    ):
        proc.process_traces()
        compute_mock.assert_called_once()
        log_mock.assert_called_once()
def test_process_traces_updates_checkpoint_on_success(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """After scoring, the checkpoint records the latest (timestamp, trace_id) pair."""
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [
        make_trace_info("tr-001", 1000),
        make_trace_info("tr-002", 1500),
    ]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-001", 1000),
        make_trace("tr-002", 1500),
    ]
    processor = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    with patch("mlflow.genai.evaluation.harness._compute_eval_scores") as mock_compute:
        mock_compute.return_value = []
        processor.process_traces()
    # tr-002 is the newest fetched trace, so it becomes the checkpoint.
    checkpoint = mock_checkpoint_manager.persist_checkpoint.call_args[0][0]
    assert checkpoint.timestamp_ms == 1500
    assert checkpoint.trace_id == "tr-002"
def test_execute_scoring_handles_failures(
    mock_trace_loader, mock_checkpoint_manager, sampler_with_scorers
):
    """One failing scorer run yields an error assessment and does not stop the
    other trace from being scored or the checkpoint from being persisted."""
    mock_trace_loader.fetch_trace_infos_in_range.return_value = [
        make_trace_info("tr-001", 1000),
        make_trace_info("tr-002", 1500),
    ]
    mock_trace_loader.fetch_traces.return_value = [
        make_trace("tr-001", 1000),
        make_trace("tr-002", 1500),
    ]
    processor = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler_with_scorers,
        experiment_id="exp1",
    )
    with (
        patch("mlflow.genai.evaluation.harness._compute_eval_scores") as mock_compute,
        patch("mlflow.genai.evaluation.harness._log_assessments") as mock_log,
    ):
        # First trace raises; second succeeds with one feedback object.
        mock_compute.side_effect = [Exception("Scorer failed"), [MagicMock()]]
        processor.process_traces()
        assert mock_compute.call_count == 2
        # Now we log error assessments for failures + successful assessments
        assert mock_log.call_count == 2
        # Verify error assessments were logged for the failed trace
        error_log_call = mock_log.call_args_list[0]
        error_assessments = error_log_call[1]["assessments"]
        assert len(error_assessments) == 1  # One error per scorer
        assert error_assessments[0].error is not None
    mock_checkpoint_manager.persist_checkpoint.assert_called_once()
def test_process_traces_truncates_and_sorts_across_filters(
    mock_trace_loader, mock_checkpoint_manager
):
    """
    Test that when multiple filters return MAX_TRACES_PER_JOB traces each,
    we truncate to MAX_TRACES_PER_JOB total while preserving chronological order.
    This ensures we don't skip earlier traces from certain filters.
    Uses overlapping timestamp ranges to verify correct chronological sorting
    across filters when truncating.
    """
    from mlflow.genai.scorers.online.constants import MAX_TRACES_PER_JOB

    # Create two scorers with different filters
    configs = [
        make_online_scorer(Completeness(), filter_string="tags.env = 'prod'"),
        make_online_scorer(Completeness(name="c2"), filter_string="tags.env = 'staging'"),
    ]
    sampler = OnlineScorerSampler(configs)
    # Compute timestamp ranges dynamically based on MAX_TRACES_PER_JOB
    # Staging: starts at 0, covers full MAX_TRACES_PER_JOB range (0 to MAX-1)
    # Prod: starts at 80% through staging range, creating 20% overlap
    staging_start = 0
    prod_start = int(0.8 * MAX_TRACES_PER_JOB)
    # Each filter returns MAX_TRACES_PER_JOB traces
    filter1_traces = [
        make_trace_info(f"tr-prod-{i}", prod_start + i) for i in range(MAX_TRACES_PER_JOB)
    ]
    filter2_traces = [
        make_trace_info(f"tr-staging-{i}", staging_start + i) for i in range(MAX_TRACES_PER_JOB)
    ]

    def mock_fetch_trace_infos(exp_id, min_ts, max_ts, filter_str, limit):
        # Route each filter string to its pre-built trace list.
        if "tags.env = 'prod'" in filter_str:
            return filter1_traces
        elif "tags.env = 'staging'" in filter_str:
            return filter2_traces
        return []

    mock_trace_loader.fetch_trace_infos_in_range.side_effect = mock_fetch_trace_infos

    # Mock fetch_traces to return full traces for sampled IDs
    def mock_fetch_traces(trace_ids):
        result = []
        for tid in trace_ids:
            if tid.startswith("tr-prod-"):
                idx = int(tid.split("-")[-1])
                result.append(make_trace(tid, prod_start + idx))
            elif tid.startswith("tr-staging-"):
                idx = int(tid.split("-")[-1])
                result.append(make_trace(tid, staging_start + idx))
        return result

    mock_trace_loader.fetch_traces.side_effect = mock_fetch_traces
    processor = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler,
        experiment_id="exp1",
    )
    with patch("mlflow.genai.evaluation.harness._compute_eval_scores") as mock_compute:
        mock_compute.return_value = []
        processor.process_traces()
    # Verify we only processed MAX_TRACES_PER_JOB traces total
    fetched_trace_ids = mock_trace_loader.fetch_traces.call_args[0][0]
    assert len(fetched_trace_ids) == MAX_TRACES_PER_JOB
    # Build expected list of traces in chronological order:
    # 1. Staging-only traces (timestamps 0 to prod_start-1)
    # 2. Interleaved traces in overlap region (prod_start onwards)
    staging_only_count = prod_start  # traces before overlap
    overlap_pairs = (MAX_TRACES_PER_JOB - staging_only_count) // 2
    expected_trace_ids = [f"tr-staging-{i}" for i in range(staging_only_count)]
    for i in range(overlap_pairs):
        # For equal timestamps the ids break the tie; "tr-prod-" sorts before
        # "tr-staging-" lexicographically.
        expected_trace_ids.append(f"tr-prod-{i}")
        expected_trace_ids.append(f"tr-staging-{staging_only_count + i}")
    # Verify exact traces processed in exact order
    assert fetched_trace_ids == expected_trace_ids
    # Verify checkpoint was updated to the last processed trace
    checkpoint = mock_checkpoint_manager.persist_checkpoint.call_args[0][0]
    last_timestamp = prod_start + overlap_pairs - 1
    last_trace_id = f"tr-staging-{staging_only_count + overlap_pairs - 1}"
    assert checkpoint.timestamp_ms == last_timestamp
    assert checkpoint.trace_id == last_trace_id
def test_process_traces_deduplicates_scorers_across_filters(
    mock_trace_loader, mock_checkpoint_manager
):
    """
    Test that when a trace matches multiple filters that would select the same scorer,
    the scorer only appears once in the task's scorer list.
    """
    # Create the same scorer with two different filters
    completeness_scorer = Completeness()
    configs = [
        make_online_scorer(completeness_scorer, filter_string="tags.env = 'prod'"),
        make_online_scorer(completeness_scorer, filter_string="tags.priority = 'high'"),
    ]
    sampler = OnlineScorerSampler(configs)
    # Same trace returned by both filters (trace matches both conditions)
    shared_trace = make_trace_info("tr-001", 1500)

    def mock_fetch_trace_infos(exp_id, min_ts, max_ts, filter_str, limit):
        # Both filters return the same trace
        return [shared_trace]

    mock_trace_loader.fetch_trace_infos_in_range.side_effect = mock_fetch_trace_infos
    mock_trace_loader.fetch_traces.return_value = [make_trace("tr-001", 1500)]
    processor = OnlineTraceScoringProcessor(
        trace_loader=mock_trace_loader,
        checkpoint_manager=mock_checkpoint_manager,
        sampler=sampler,
        experiment_id="exp1",
    )
    with patch("mlflow.genai.evaluation.harness._compute_eval_scores") as mock_compute:
        mock_compute.return_value = []
        processor.process_traces()
    # Verify the scorer was called only once per trace (not duplicated)
    assert mock_compute.call_count == 1
    call_args = mock_compute.call_args[1]
    scorers_used = call_args["scorers"]
    # Should only have one instance of Completeness scorer despite matching 2 filters
    assert len(scorers_used) == 1
    assert scorers_used[0].name == completeness_scorer.name
def test_create_factory_method():
    """create() wires up concrete loader/checkpointer/sampler implementations."""
    proc = OnlineTraceScoringProcessor.create(
        experiment_id="exp1",
        online_scorers=[make_online_scorer(Completeness())],
        tracking_store=MagicMock(),
    )
    assert proc._experiment_id == "exp1"
    assert isinstance(proc._trace_loader, OnlineTraceLoader)
    assert isinstance(proc._checkpoint_manager, OnlineTraceCheckpointManager)
    assert isinstance(proc._sampler, OnlineScorerSampler)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_trace_processor.py",
"license": "Apache License 2.0",
"lines": 372,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/constants.py | """Constants for online scoring."""
from mlflow.tracing.constant import TraceMetadataKey
# Maximum lookback period to prevent getting stuck on old failing traces (1 hour)
MAX_LOOKBACK_MS = 60 * 60 * 1000
# Maximum traces to include in a single scoring job
MAX_TRACES_PER_JOB = 500
# Maximum sessions to include in a single scoring job
MAX_SESSIONS_PER_JOB = 100
# Filter to exclude eval run traces (traces generated from MLflow runs);
# such traces carry the source-run metadata key, so IS NULL keeps only the rest.
EXCLUDE_EVAL_RUN_TRACES_FILTER = f"metadata.{TraceMetadataKey.SOURCE_RUN} IS NULL"
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/constants.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/online/sampler.py | """Dense sampling strategy for online scoring."""
import hashlib
import logging
from collections import defaultdict
from typing import TYPE_CHECKING
from mlflow.genai.scorers.base import Scorer
if TYPE_CHECKING:
from mlflow.genai.scorers.online.entities import OnlineScorer
_logger = logging.getLogger(__name__)
class OnlineScorerSampler:
    """
    Samples scorers for traces using dense sampling strategy.

    Dense sampling ensures traces that are selected get thorough coverage:
    - Sort scorers by sample_rate descending
    - Use conditional probability: if a scorer is rejected, skip all lower-rate scorers
    """

    def __init__(self, online_scorers: list["OnlineScorer"]):
        """Eagerly deserialize the configured scorers; unparseable ones are skipped."""
        self._online_scorers = online_scorers
        # Both maps are keyed by the scorer name from the deserialized payload.
        self._sample_rates: dict[str, float] = {}
        self._scorers: dict[str, Scorer] = {}
        for online_scorer in online_scorers:
            try:
                scorer = Scorer.model_validate_json(online_scorer.serialized_scorer)
                self._sample_rates[scorer.name] = online_scorer.online_config.sample_rate
                self._scorers[scorer.name] = scorer
            except Exception as e:
                # One bad serialized scorer must not break the whole sampler.
                _logger.info(
                    f"Failed to load scorer '{online_scorer.name}'; scorer will be skipped: {e}"
                )

    def group_scorers_by_filter(self, session_level: bool) -> dict[str | None, list[Scorer]]:
        """
        Group scorers by their filter string.

        Args:
            session_level: If True, return session-level scorers. If False, return trace-level.

        Returns:
            Dictionary mapping filter_string to list of scorers with that filter.
        """
        result: dict[str | None, list[Scorer]] = defaultdict(list)
        for online_scorer in self._online_scorers:
            # Scorers that failed to deserialize in __init__ are absent here.
            scorer = self._scorers.get(online_scorer.name)
            if scorer and scorer.is_session_level_scorer == session_level:
                filter_str = online_scorer.online_config.filter_string
                result[filter_str].append(scorer)
        return result

    def sample(self, entity_id: str, scorers: list[Scorer]) -> list[Scorer]:
        """
        Apply dense sampling to select scorers for an entity.

        Dense sampling ensures selected entities receive comprehensive evaluation across
        multiple scorers, rather than spreading scorers thinly across all entities.

        For example, with two scorers at 50% and 25% sample rates:
        - 50% of entities get both scorers (dense coverage)
        - 25% get only the first scorer
        - 25% get no scorers

        This enables better comparisons between scorers on the same entities.

        Args:
            entity_id: The trace ID or session ID to sample for.
            scorers: List of scorers to sample from.

        Returns:
            A subset of scorers selected via conditional probability waterfall.
        """
        if not scorers:
            return []
        # Sort by sample rate descending
        sorted_scorers = sorted(
            scorers,
            key=lambda s: self._sample_rates.get(s.name, 0.0),
            reverse=True,
        )
        selected = []
        prev_rate = 1.0
        for scorer in sorted_scorers:
            rate = self._sample_rates.get(scorer.name, 0.0)
            # P(select | previous selected) = rate / prev_rate, which makes the
            # unconditional selection probability for this scorer equal `rate`.
            conditional_rate = rate / prev_rate if prev_rate > 0 else 0
            # Hash entity_id + scorer name to get deterministic value in [0, 1]
            hash_input = f"{entity_id}:{scorer.name}"
            hash_value = int(hashlib.sha256(hash_input.encode()).hexdigest(), 16) / (2**256)
            if hash_value > conditional_rate:
                # Waterfall: rejecting one scorer rejects all lower-rate scorers too.
                break
            selected.append(scorer)
            prev_rate = rate
        _logger.debug(
            f"Sampled {len(selected)}/{len(scorers)} scorers for entity {entity_id[:8]}..."
        )
        return selected
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/sampler.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/scorers/online/trace_checkpointer.py | """Checkpoint management for trace-level online scoring."""
import json
import logging
import time
from dataclasses import asdict, dataclass
from mlflow.entities.experiment_tag import ExperimentTag
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.store.tracking.abstract_store import AbstractStore
from mlflow.utils.mlflow_tags import MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT
_logger = logging.getLogger(__name__)
@dataclass
class OnlineTraceScoringCheckpoint:
timestamp_ms: int # Timestamp of the last processed trace in milliseconds
trace_id: str | None = None # Trace ID used as tie breaker when traces have same timestamp
def to_json(self) -> str:
return json.dumps(asdict(self))
@classmethod
def from_json(cls, json_str: str) -> "OnlineTraceScoringCheckpoint":
data = json.loads(json_str)
return cls(**data)
@dataclass
class OnlineTraceScoringTimeWindow:
    """Inclusive timestamp window bounding which traces online scoring considers."""

    # Lower bound (ms since epoch): checkpoint or lookback floor, per
    # OnlineTraceCheckpointManager.calculate_time_window.
    min_trace_timestamp_ms: int
    # Upper bound (ms since epoch): the current time at window calculation.
    max_trace_timestamp_ms: int
class OnlineTraceCheckpointManager:
    """Persists and restores the online-scoring progress marker for one experiment.

    The checkpoint is stored as a JSON string in an experiment tag, so no storage
    backend beyond the tracking store is required.
    """

    def __init__(self, tracking_store: AbstractStore, experiment_id: str):
        self._tracking_store = tracking_store
        self._experiment_id = experiment_id

    def get_checkpoint(self) -> OnlineTraceScoringCheckpoint | None:
        """
        Get the last processed trace checkpoint from the experiment tag.

        Returns:
            OnlineTraceScoringCheckpoint, or None if no checkpoint exists or the
            stored tag cannot be parsed.
        """
        try:
            experiment = self._tracking_store.get_experiment(self._experiment_id)
            raw_checkpoint = experiment.tags.get(MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT)
            if raw_checkpoint:
                return OnlineTraceScoringCheckpoint.from_json(raw_checkpoint)
        except (TypeError, ValueError, json.JSONDecodeError) as e:
            # A corrupt tag must not break scoring; treat it as "no checkpoint".
            _logger.debug(
                f"Failed to parse checkpoint for experiment {self._experiment_id}: {e}",
                exc_info=True,
            )

    def persist_checkpoint(self, checkpoint: OnlineTraceScoringCheckpoint) -> None:
        """
        Persist the checkpoint tag with a new checkpoint.

        Args:
            checkpoint: The checkpoint to store.
        """
        tag = ExperimentTag(MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT, checkpoint.to_json())
        self._tracking_store.set_experiment_tag(self._experiment_id, tag)

    def calculate_time_window(self) -> OnlineTraceScoringTimeWindow:
        """
        Calculate the time window for trace scoring.

        A maximum lookback period is enforced so scoring cannot get stuck on
        persistently failing traces: a checkpoint older than MAX_LOOKBACK_MS is
        clamped to current_time - MAX_LOOKBACK_MS, skipping old problematic traces.

        Returns:
            OnlineTraceScoringTimeWindow whose lower bound is the checkpoint (if
            present and within the lookback period) or now - MAX_LOOKBACK_MS, and
            whose upper bound is the current time.
        """
        now_ms = int(time.time() * 1000)
        # Never look further back than MAX_LOOKBACK_MS, even when the
        # checkpoint is older.
        lookback_floor_ms = now_ms - MAX_LOOKBACK_MS
        checkpoint = self.get_checkpoint()
        if checkpoint is None:
            window_start_ms = lookback_floor_ms
        else:
            window_start_ms = max(checkpoint.timestamp_ms, lookback_floor_ms)
        return OnlineTraceScoringTimeWindow(
            min_trace_timestamp_ms=window_start_ms,
            max_trace_timestamp_ms=now_ms,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/trace_checkpointer.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/online/trace_loader.py | """Trace loading utilities for online scoring."""
import logging
from mlflow.entities import Trace, TraceInfo
from mlflow.store.tracking.abstract_store import AbstractStore
_logger = logging.getLogger(__name__)
class OnlineTraceLoader:
    """Loads traces and trace metadata from the tracking store for online scoring."""

    def __init__(self, tracking_store: AbstractStore):
        self._tracking_store = tracking_store

    def fetch_traces(self, trace_ids: list[str]) -> list[Trace]:
        """
        Fetch full traces by their IDs.

        Args:
            trace_ids: List of trace IDs to fetch.

        Returns:
            List of Trace objects (in same order as input, skipping any not found).
        """
        if not trace_ids:
            return []
        fetched_by_id = {
            trace.info.trace_id: trace
            for trace in self._tracking_store.batch_get_traces(trace_ids)
        }
        # Preserve the requested order; drop IDs the store did not return.
        return [fetched_by_id[tid] for tid in trace_ids if tid in fetched_by_id]

    def fetch_trace_infos_in_range(
        self,
        experiment_id: str,
        start_time_ms: int,
        end_time_ms: int,
        filter_string: str | None = None,
        max_traces: int = 500,
        page_size: int = 100,
    ) -> list[TraceInfo]:
        """
        Fetch trace infos within a time window, optionally filtered.

        Args:
            experiment_id: The experiment ID to search.
            start_time_ms: Start of time window (inclusive).
            end_time_ms: End of time window (inclusive).
            filter_string: Optional additional filter criteria.
            max_traces: Maximum number of traces to return.
            page_size: Number of traces to fetch per API call.

        Returns:
            List of TraceInfo objects matching the criteria.
        """
        window_filter = (
            f"trace.timestamp_ms >= {start_time_ms} AND trace.timestamp_ms <= {end_time_ms}"
        )
        effective_filter = (
            f"{window_filter} AND {filter_string}" if filter_string else window_filter
        )
        _logger.debug(f"Fetching traces with filter: {effective_filter}")

        collected = []
        next_page_token = None
        while len(collected) < max_traces:
            remaining = max_traces - len(collected)
            batch, token = self._tracking_store.search_traces(
                experiment_ids=[experiment_id],
                filter_string=effective_filter,
                max_results=min(page_size, remaining),
                order_by=[
                    "timestamp_ms ASC",
                    # Order by trace ID to ensure that we have a consistent tie-breaker when
                    # multiple traces have the same timestamp and max_traces is reached in
                    # the middle of such a group
                    "request_id ASC",
                ],
                page_token=next_page_token,
            )
            if not batch:
                break
            collected.extend(batch[:remaining])
            _logger.debug(
                f"Fetched batch of {len(batch)} traces, total: {len(collected)}"
            )
            if not token:
                break
            next_page_token = token
        _logger.debug(
            f"Fetched {len(collected)} trace infos in range [{start_time_ms}, {end_time_ms}]"
        )
        # The store may yield full Trace objects; normalize everything to TraceInfo.
        return [t.info if isinstance(t, Trace) else t for t in collected]
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/trace_loader.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/genai/scorers/online/test_sampler.py | import json
import uuid
import pytest
from mlflow.genai.scorers.builtin_scorers import Completeness, ConversationCompleteness
from mlflow.genai.scorers.online.entities import OnlineScorer, OnlineScoringConfig
from mlflow.genai.scorers.online.sampler import OnlineScorerSampler
def make_online_scorer(
    scorer,
    sample_rate: float = 1.0,
    filter_string: str | None = None,
) -> OnlineScorer:
    """Wrap *scorer* in an OnlineScorer with the given sampling configuration."""
    online_config = OnlineScoringConfig(
        online_scoring_config_id=uuid.uuid4().hex,
        scorer_id=uuid.uuid4().hex,
        sample_rate=sample_rate,
        experiment_id="exp1",
        filter_string=filter_string,
    )
    return OnlineScorer(
        name=scorer.name,
        serialized_scorer=json.dumps(scorer.model_dump()),
        online_config=online_config,
    )
def test_group_scorers_by_filter_empty():
    """A sampler with no scorers produces no filter groups."""
    empty_sampler = OnlineScorerSampler([])
    assert empty_sampler.group_scorers_by_filter(session_level=False) == {}
def test_group_scorers_by_filter_no_filters():
    """Scorers without filters are grouped under the None key, split by session level."""
    configs = [
        make_online_scorer(Completeness()),
        make_online_scorer(ConversationCompleteness()),
    ]
    sampler = OnlineScorerSampler(configs)
    trace_groups = sampler.group_scorers_by_filter(session_level=False)
    session_groups = sampler.group_scorers_by_filter(session_level=True)
    assert set(trace_groups.keys()) == {None}
    assert [s.name for s in trace_groups[None]] == ["completeness"]
    assert set(session_groups.keys()) == {None}
    assert [s.name for s in session_groups[None]] == ["conversation_completeness"]
def test_group_scorers_by_filter_with_filters():
    """Each distinct filter string gets its own group; unfiltered scorers map to None."""
    configs = [
        make_online_scorer(Completeness(), filter_string="tags.env = 'prod'"),
        make_online_scorer(Completeness(name="c2"), filter_string="tags.model = 'gpt-4'"),
        make_online_scorer(Completeness(name="c3")),
    ]
    sampler = OnlineScorerSampler(configs)
    result = sampler.group_scorers_by_filter(session_level=False)
    assert set(result.keys()) == {None, "tags.env = 'prod'", "tags.model = 'gpt-4'"}
    assert [s.name for s in result["tags.env = 'prod'"]] == ["completeness"]
    assert [s.name for s in result["tags.model = 'gpt-4'"]] == ["c2"]
    assert [s.name for s in result[None]] == ["c3"]
def test_group_scorers_by_filter_multiple_scorers_same_filter():
    """Scorers sharing a filter string land in the same group."""
    shared_filter = "tags.env = 'prod'"
    sampler = OnlineScorerSampler(
        [
            make_online_scorer(Completeness(), filter_string=shared_filter),
            make_online_scorer(Completeness(name="c2"), filter_string=shared_filter),
        ]
    )
    groups = sampler.group_scorers_by_filter(session_level=False)
    assert set(groups.keys()) == {shared_filter}
    assert len(groups[shared_filter]) == 2
def test_group_scorers_by_filter_session_level():
    """The session_level flag routes trace-level and session-level scorers separately."""
    configs = [
        make_online_scorer(Completeness()),
        make_online_scorer(ConversationCompleteness()),
    ]
    sampler = OnlineScorerSampler(configs)
    trace_groups = sampler.group_scorers_by_filter(session_level=False)
    session_groups = sampler.group_scorers_by_filter(session_level=True)
    assert [s.name for s in trace_groups[None]] == ["completeness"]
    assert [s.name for s in session_groups[None]] == ["conversation_completeness"]
def test_sample_all_selected_at_100_percent():
    """Every scorer configured with a 100% sample rate is always selected."""
    sampler = OnlineScorerSampler(
        [
            make_online_scorer(Completeness(), sample_rate=1.0),
            make_online_scorer(ConversationCompleteness(), sample_rate=1.0),
        ]
    )
    all_scorers = list(sampler._scorers.values())
    assert len(sampler.sample("entity_123", all_scorers)) == 2
def test_sample_none_selected_at_0_percent():
    """A 0% sample rate never selects anything."""
    sampler = OnlineScorerSampler([make_online_scorer(Completeness(), sample_rate=0.0)])
    candidates = list(sampler._scorers.values())
    assert sampler.sample("entity_123", candidates) == []
def test_sample_deterministic_by_entity_id():
    """Sampling the same entity repeatedly always yields the same selection."""
    sampler = OnlineScorerSampler([make_online_scorer(Completeness(), sample_rate=0.5)])
    candidates = list(sampler._scorers.values())
    first = sampler.sample("same_entity", candidates)
    assert all(sampler.sample("same_entity", candidates) == first for _ in range(9))
@pytest.mark.parametrize("entity_id", [f"entity_{i}" for i in range(20)])
def test_sample_dense_waterfall_behavior(entity_id):
    """Waterfall property: a lower-rate scorer is selected only when every
    higher-rate scorer was also selected for the same entity."""
    high = Completeness(name="high")
    medium = Completeness(name="medium")
    low = Completeness(name="low")
    configs = [
        make_online_scorer(high, sample_rate=0.8),
        make_online_scorer(medium, sample_rate=0.5),
        make_online_scorer(low, sample_rate=0.2),
    ]
    sampler = OnlineScorerSampler(configs)
    scorers = [sampler._scorers["high"], sampler._scorers["medium"], sampler._scorers["low"]]
    result = sampler.sample(entity_id, scorers)
    result_names = [s.name for s in result]
    # If the highest-rate scorer was rejected, nothing below it may be selected.
    if "high" not in result_names:
        assert "medium" not in result_names
        assert "low" not in result_names
    # If only the middle scorer was rejected, the lowest must be rejected too.
    if "medium" not in result_names and "high" in result_names:
        assert "low" not in result_names
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_sampler.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/online/test_trace_checkpointer.py | import time
from unittest.mock import MagicMock
import pytest
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.genai.scorers.online.trace_checkpointer import (
OnlineTraceCheckpointManager,
OnlineTraceScoringCheckpoint,
)
from mlflow.utils.mlflow_tags import MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT
@pytest.fixture
def mock_store():
    """A stand-in tracking store whose calls can be configured and inspected."""
    return MagicMock()
@pytest.fixture
def checkpoint_manager(mock_store):
    """A checkpoint manager bound to the mock store and experiment "exp1"."""
    return OnlineTraceCheckpointManager(mock_store, "exp1")
def test_checkpoint_json_roundtrip():
    """Serializing then deserializing a checkpoint preserves all fields."""
    source = OnlineTraceScoringCheckpoint(timestamp_ms=5000, trace_id="tr-abc")
    rebuilt = OnlineTraceScoringCheckpoint.from_json(source.to_json())
    assert rebuilt.timestamp_ms == source.timestamp_ms
    assert rebuilt.trace_id == source.trace_id
def test_get_checkpoint_returns_none_when_no_tag(checkpoint_manager, mock_store):
experiment = MagicMock()
experiment.tags = {}
mock_store.get_experiment.return_value = experiment
result = checkpoint_manager.get_checkpoint()
assert result is None
def test_get_checkpoint_deserializes_correctly(checkpoint_manager, mock_store):
    """A valid checkpoint tag is parsed into its timestamp and trace ID."""
    experiment = MagicMock()
    experiment.tags = {
        MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT: '{"timestamp_ms": 1000, "trace_id": "tr-1"}'
    }
    mock_store.get_experiment.return_value = experiment
    result = checkpoint_manager.get_checkpoint()
    assert result.timestamp_ms == 1000
    assert result.trace_id == "tr-1"
def test_get_checkpoint_handles_invalid_json(checkpoint_manager, mock_store):
    """A malformed checkpoint tag is treated as no checkpoint rather than raising."""
    corrupted_experiment = MagicMock()
    corrupted_experiment.tags = {MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT: "invalid json"}
    mock_store.get_experiment.return_value = corrupted_experiment
    assert checkpoint_manager.get_checkpoint() is None
def test_persist_checkpoint_sets_experiment_tag(checkpoint_manager, mock_store):
    """Persisting stores the serialized checkpoint under the well-known experiment tag."""
    checkpoint = OnlineTraceScoringCheckpoint(timestamp_ms=2000, trace_id="tr-2")
    checkpoint_manager.persist_checkpoint(checkpoint)
    mock_store.set_experiment_tag.assert_called_once()
    call_args = mock_store.set_experiment_tag.call_args
    assert call_args[0][0] == "exp1"
    assert call_args[0][1].key == MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT
def test_calculate_time_window_no_checkpoint(checkpoint_manager, mock_store, monkeypatch):
    """Without a checkpoint, the window spans the full MAX_LOOKBACK_MS up to now."""
    experiment = MagicMock()
    experiment.tags = {}
    mock_store.get_experiment.return_value = experiment
    fixed_time = 1000000
    monkeypatch.setattr(time, "time", lambda: fixed_time)
    result = checkpoint_manager.calculate_time_window()
    expected_min = (fixed_time * 1000) - MAX_LOOKBACK_MS
    assert result.min_trace_timestamp_ms == expected_min
    assert result.max_trace_timestamp_ms == fixed_time * 1000
def test_calculate_time_window_recent_checkpoint(checkpoint_manager, mock_store, monkeypatch):
    """A checkpoint newer than the lookback limit becomes the window's lower bound."""
    fixed_time = 1000000
    recent_checkpoint_time = (fixed_time * 1000) - 60000  # 1 minute ago
    experiment = MagicMock()
    checkpoint_json = f'{{"timestamp_ms": {recent_checkpoint_time}}}'
    experiment.tags = {MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT: checkpoint_json}
    mock_store.get_experiment.return_value = experiment
    monkeypatch.setattr(time, "time", lambda: fixed_time)
    result = checkpoint_manager.calculate_time_window()
    assert result.min_trace_timestamp_ms == recent_checkpoint_time
    assert result.max_trace_timestamp_ms == fixed_time * 1000
def test_calculate_time_window_old_checkpoint(checkpoint_manager, mock_store, monkeypatch):
    """A checkpoint older than MAX_LOOKBACK_MS is clamped to the lookback floor."""
    fixed_time = 1000000
    old_checkpoint_time = (
        (fixed_time * 1000) - MAX_LOOKBACK_MS - 1000000
    )  # Way older than max lookback
    experiment = MagicMock()
    experiment.tags = {
        MLFLOW_LATEST_ONLINE_SCORING_TRACE_CHECKPOINT: f'{{"timestamp_ms": {old_checkpoint_time}}}'
    }
    mock_store.get_experiment.return_value = experiment
    monkeypatch.setattr(time, "time", lambda: fixed_time)
    result = checkpoint_manager.calculate_time_window()
    expected_min = (fixed_time * 1000) - MAX_LOOKBACK_MS
    assert result.min_trace_timestamp_ms == expected_min
    assert result.max_trace_timestamp_ms == fixed_time * 1000
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_trace_checkpointer.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/online/test_trace_loader.py | from unittest.mock import MagicMock
import pytest
from mlflow.entities import Trace, TraceInfo
from mlflow.genai.scorers.online.trace_loader import OnlineTraceLoader
@pytest.fixture
def mock_store():
    """A stand-in tracking store whose calls can be configured and inspected."""
    return MagicMock()
@pytest.fixture
def trace_loader(mock_store):
    """An OnlineTraceLoader backed by the mock tracking store."""
    return OnlineTraceLoader(mock_store)
@pytest.fixture
def sample_traces():
    """Five mock traces whose trace IDs are trace_0 ... trace_4."""

    def build_trace(trace_id):
        mock_trace = MagicMock(spec=Trace)
        mock_trace.info = MagicMock(spec=TraceInfo)
        mock_trace.info.trace_id = trace_id
        return mock_trace

    return [build_trace(f"trace_{i}") for i in range(5)]
def test_fetch_traces_success(trace_loader, mock_store, sample_traces):
    """All requested traces are returned in request order via one batch call."""
    mock_store.batch_get_traces.return_value = sample_traces
    trace_ids = [t.info.trace_id for t in sample_traces]
    result = trace_loader.fetch_traces(trace_ids)
    assert [t.info.trace_id for t in result] == trace_ids
    mock_store.batch_get_traces.assert_called_once_with(trace_ids)
def test_fetch_traces_some_missing(trace_loader, mock_store, sample_traces):
mock_store.batch_get_traces.return_value = [
sample_traces[0],
sample_traces[2],
sample_traces[4],
]
trace_ids = [f"trace_{i}" for i in range(5)]
result = trace_loader.fetch_traces(trace_ids)
assert [t.info.trace_id for t in result] == ["trace_0", "trace_2", "trace_4"]
def test_fetch_traces_empty_list(trace_loader, mock_store):
result = trace_loader.fetch_traces([])
assert result == []
mock_store.batch_get_traces.assert_not_called()
def test_fetch_trace_infos_in_range_single_page(trace_loader, mock_store, sample_traces):
    """The time-window filter is ANDed with the caller's filter in a single search call."""
    mock_store.search_traces.return_value = (sample_traces, None)
    result = trace_loader.fetch_trace_infos_in_range(
        "exp1", 1000, 2000, filter_string="tags.env = 'prod'"
    )
    assert len(result) == 5
    mock_store.search_traces.assert_called_once_with(
        experiment_ids=["exp1"],
        filter_string=(
            "trace.timestamp_ms >= 1000 AND trace.timestamp_ms <= 2000 AND tags.env = 'prod'"
        ),
        max_results=100,
        order_by=["timestamp_ms ASC", "request_id ASC"],
        page_token=None,
    )
def test_fetch_trace_infos_in_range_multiple_pages(trace_loader, mock_store, sample_traces):
    """Pagination follows returned page tokens until the store signals no more pages."""
    mock_store.search_traces.side_effect = [
        (sample_traces[:2], "token1"),
        (sample_traces[2:4], "token2"),
        (sample_traces[4:], None),
    ]
    result = trace_loader.fetch_trace_infos_in_range("exp1", 1000, 2000, page_size=2)
    assert len(result) == 5
    assert mock_store.search_traces.call_count == 3
    calls = mock_store.search_traces.call_args_list
    assert calls[0][1]["page_token"] is None
    assert calls[1][1]["page_token"] == "token1"
    assert calls[2][1]["page_token"] == "token2"
def test_fetch_trace_infos_in_range_max_traces_limit(trace_loader, mock_store, sample_traces):
    """Results are truncated to max_traces and the requested page size is capped."""
    mock_store.search_traces.return_value = (sample_traces, None)
    limited = trace_loader.fetch_trace_infos_in_range("exp1", 1000, 2000, max_traces=3)
    assert len(limited) == 3
    assert mock_store.search_traces.call_args[1]["max_results"] == 3
def test_fetch_trace_infos_in_range_empty_response(trace_loader, mock_store):
    """An empty first page ends the search immediately with no results."""
    mock_store.search_traces.return_value = ([], None)
    assert trace_loader.fetch_trace_infos_in_range("exp1", 1000, 2000) == []
    mock_store.search_traces.assert_called_once()
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_trace_loader.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/entities.py | """
Online scorer entities and configuration.
This module contains entities for online scorer configuration used by the store layer
and online scoring infrastructure.
"""
from dataclasses import dataclass
@dataclass
class OnlineScoringConfig:
"""
Internal entity representing the online configuration for a scorer.
This configuration controls how a scorer is applied to traces in an online/real-time
manner. It defines sampling rates and optional filters for selecting which traces
should be scored.
"""
online_scoring_config_id: str
scorer_id: str
sample_rate: float
experiment_id: str
filter_string: str | None = None
def to_dict(self) -> dict[str, str | float]:
result: dict[str, str | float] = {
"online_scoring_config_id": self.online_scoring_config_id,
"scorer_id": self.scorer_id,
"sample_rate": self.sample_rate,
"experiment_id": self.experiment_id,
}
if self.filter_string is not None:
result["filter_string"] = self.filter_string
return result
@dataclass
class OnlineScorer:
    """
    Internal entity representing a serialized scorer and its online execution configuration.

    This entity combines the scorer's executable form (name and serialized_scorer) with
    its configuration (OnlineScoringConfig) that specifies how it should be applied to
    traces in an online/real-time manner.
    """

    # Registered name of the scorer.
    name: str
    # Serialized form of the scorer definition (JSON in observed usage).
    serialized_scorer: str
    # Sampling/filtering configuration controlling when the scorer runs.
    online_config: OnlineScoringConfig
@dataclass
class CompletedSession:
    """
    Metadata about a session that has been determined complete and is eligible for online scoring.

    Contains only the session ID and timestamp range, not the actual trace data.
    """

    # Identifier of the completed session.
    session_id: str
    # Timestamp (ms) of the session's earliest trace.
    first_trace_timestamp_ms: int
    # Timestamp (ms) of the session's latest trace.
    last_trace_timestamp_ms: int
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/entities.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/simulators/prompts.py | DEFAULT_PERSONA = "You are an inquisitive user having a natural conversation."
# Prompt for generating the simulated user's opening message. Format placeholders:
# {persona}, {goal}, {guidelines_section}.
INITIAL_USER_PROMPT = """Instructions:
You are role-playing as a real user interacting with an AI assistant.
- Write like a human user, not like an assistant or expert. Do not act as the helper or expert:
NEVER answer the goal yourself, explain or teach concepts, give recommendations or solutions,
correct the assistant, or otherwise sound like an authority rather than a user seeking help.
- Adhere to the given persona and continuously steer the conversation toward achieving the
underlying goal. Do NOT discuss topics that are not relevant to the goal.
- Do not reveal understanding or expertise the persona would not plausibly have.
- Be concise (within 1-3 sentences), conversational, straightforward and not overly formal
or verbose. Avoid structured explanations, lists, or polished phrasing.
- Do NOT reveal all persona details upfront.
- **CRITICAL**: Your first message must NOT directly state, ask about, or summarize your goal.
Real users never open with "I want to [exact goal]." Instead, start with a specific,
concrete question or request that is a natural *first step* toward the goal. If the goal
has multiple components, pursue them gradually across turns — never request everything at
once. The full goal should only become apparent over multiple turns.
- If simulation guidelines are provided, strictly follow them as requirements for how to
conduct the conversation. They take precedence over default behavior. The guidelines
describe how YOU (the user) should behave, not how the assistant should respond.
<persona>
Your role's persona is:
{persona}
</persona>
<goal>
Your underlying goal in this conversation is:
{goal}
</goal>
{guidelines_section}
Begin the conversation with a concise, natural opening message. Remember: do NOT state your
goal directly — start with a concrete, narrow question that naturally leads toward it."""
# Prompt for generating each subsequent simulated-user turn. Format placeholders:
# {conversation_history}, {last_response}, {persona}, {goal}, {guidelines_section}.
# NB: We embed history into the prompt instead of passing a message list directly to reduce
# noise, since the prompt only cares about message content and sender role.
FOLLOWUP_USER_PROMPT = """
You are continuing to role-play as a real user interacting with an AI assistant.
Inputs:
<conversation>
Conversation so far:
{conversation_history}
</conversation>
<last_response>
The assistant just said:
{last_response}
</last_response>
<persona>
Your role's persona is:
{persona}
</persona>
<goal>
Your underlying goal in this conversation is:
{goal}
</goal>
{guidelines_section}
Instructions:
- Write a natural follow-up like a human user, not like an assistant or expert.
Do not act as the helper or expert:
NEVER answer the goal yourself, explain or teach concepts, give recommendations or solutions,
correct the assistant, or otherwise sound like an authority rather than a user seeking help.
- Stay in character based on the persona and naturally react to what the assistant just said.
- Continue steering the conversation toward the underlying goal. Do NOT introduce topics
unrelated to the goal.
- Do not reveal understanding or expertise the persona would not plausibly have.
- Be concise (within 1-3 sentences), conversational, and natural. Avoid structured
explanations, lists, or polished phrasing.
- Do NOT reveal all persona details at once. Reveal information incrementally as the
conversation progresses.
- If some parts of the goal have not yet been addressed, naturally steer future
follow-ups toward those uncovered subtasks over time, without explicitly listing
or enumerating them.
- If simulation guidelines are provided, strictly follow them as requirements for how to
conduct the conversation. They take precedence over default behavior. The guidelines
describe how YOU (the user) should behave, not how the assistant should respond."""
# Prompt asking a judge model whether the simulated user's goal is fully achieved.
# Format placeholders: {goal}, {conversation_history}, {last_response}. The doubled
# braces ({{ and }}) escape literal JSON braces for str.format.
CHECK_GOAL_PROMPT = """A user has the following goal: {goal}
Conversation so far:
{conversation_history}
The assistant just responded with: {last_response}
Has the user's goal been FULLY and COMPLETELY achieved? The goal should only be considered \
achieved if the assistant has provided comprehensive, actionable information that fully \
addresses what the user wanted to learn or accomplish. Simply mentioning the topic or \
providing partial information is NOT enough.
You must output your response as a valid JSON object with the following format:
{{
"rationale": "Reason for the assessment. Explain whether the goal has been achieved and why.
Start each rationale with `Let's think step by step`",
"result": "yes|no"
}}"""
# NB: We include "rationale" to invoke chain-of-thought reasoning for better results.
# Prompt for distilling goal, persona, and simulation guidelines from an existing
# conversation so it can be replayed by the simulator. Format placeholder: {conversation}.
DISTILL_GOAL_AND_PERSONA_PROMPT = """Analyze the following conversation between a user and an \
AI assistant. Extract the user's underlying goal, persona, and simulation guidelines.
<conversation>
{conversation}
</conversation>
Based on this conversation, identify:
1. **Goal**: What is the user trying to accomplish by talking to the assistant? Describe their \
objective in one clear sentence from the user's perspective (e.g., "Learn how to deploy a model", \
"Get help debugging an authentication issue", or "Understand how experiment tracking works").
2. **Persona**: How does the user communicate? Describe their communication style, expertise \
level, and personality in 1-2 sentences. Start with "You are..." (e.g., "You are a data \
scientist who asks detailed technical questions" or "You are a beginner who needs step-by-step \
guidance").
3. **Simulation Guidelines**: The goal is to reproduce the original conversation trajectory as \
closely as possible. Describe specific requirements for how a simulated user should conduct the \
conversation. Guidelines can cover any of the following:
- **Information withholding**: If the user only reveals a detail in a later turn, specify that \
the simulated user must NOT reveal it earlier (e.g., "Do not provide the error message upfront; \
wait until the assistant asks for more details").
- **Implicit expectations**: Things the user deliberately does NOT ask for, expecting the \
assistant to infer them on its own (e.g., "Do not explicitly ask for the percentage change; \
the assistant should provide it without being asked", "Do not tell the assistant how to handle \
edge cases; it should infer the correct behavior").
- **Pacing and ordering**: The order in which topics or sub-tasks are raised (e.g., "Start \
with a broad question and only narrow down after the assistant responds").
Return null if the conversation is straightforward with no notable patterns."""
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/simulators/prompts.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/simulators/simulator.py | from __future__ import annotations
import inspect
import logging
import math
import time
import uuid
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import contextmanager
from dataclasses import dataclass, field
from threading import Lock
from typing import TYPE_CHECKING, Any, Callable
import pydantic
import mlflow
from mlflow.environment_variables import MLFLOW_GENAI_SIMULATOR_MAX_WORKERS
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets import EvaluationDataset
from mlflow.genai.simulators.prompts import (
CHECK_GOAL_PROMPT,
DEFAULT_PERSONA,
FOLLOWUP_USER_PROMPT,
INITIAL_USER_PROMPT,
)
from mlflow.genai.simulators.utils import (
format_history,
get_default_simulation_model,
invoke_model_without_tracing,
)
from mlflow.genai.utils.trace_utils import parse_outputs_to_str
from mlflow.telemetry.events import SimulateConversationEvent
from mlflow.telemetry.track import record_usage_event
from mlflow.tracing.constant import TraceMetadataKey
from mlflow.utils.annotations import experimental
from mlflow.utils.docstring_utils import format_docstring
try:
from tqdm.auto import tqdm
except ImportError:
tqdm = None
if TYPE_CHECKING:
from pandas import DataFrame
from mlflow.entities import Trace
_logger = logging.getLogger(__name__)
# Length cap for metadata values — presumably a truncation limit; usage not
# visible in this chunk, confirm at call sites.
_MAX_METADATA_LENGTH = 250
# Keys recognized in a simulation test case dict; per the names, only "goal"
# is required — confirm against validation logic.
_EXPECTED_TEST_CASE_KEYS = {"goal", "persona", "context", "expectations", "simulation_guidelines"}
_REQUIRED_TEST_CASE_KEYS = {"goal"}
# Context keys the simulator claims for itself; user-supplied context
# presumably must not use them — confirm at validation site.
_RESERVED_CONTEXT_KEYS = {"input", "messages", "mlflow_session_id"}
# tqdm bar_format string; {postfix} is filled by SimulationTimingTracker.format_postfix.
PGBAR_FORMAT = (
    "{l_bar}{bar}| {n_fmt}/{total_fmt} [Elapsed: {elapsed}, Remaining: {remaining}] {postfix}"
)
@dataclass
class SimulationTimingTracker:
    """Thread-safe accumulator for time spent in each phase of a simulation run."""

    # Guards all counter reads/writes; excluded from repr.
    _lock: Lock = field(default_factory=Lock, repr=False)
    predict_fn_seconds: float = 0.0
    generate_message_seconds: float = 0.0
    check_goal_seconds: float = 0.0

    def add(
        self,
        predict_fn_seconds: float = 0,
        generate_message_seconds: float = 0,
        check_goal_seconds: float = 0,
    ):
        """Atomically add elapsed seconds to each phase counter."""
        with self._lock:
            self.predict_fn_seconds = self.predict_fn_seconds + predict_fn_seconds
            self.generate_message_seconds = (
                self.generate_message_seconds + generate_message_seconds
            )
            self.check_goal_seconds = self.check_goal_seconds + check_goal_seconds

    def format_postfix(self) -> str:
        """Render the predict/simulator time split as a progress-bar postfix string."""
        with self._lock:
            spent_in_simulator = self.generate_message_seconds + self.check_goal_seconds
            spent_total = self.predict_fn_seconds + spent_in_simulator
            if spent_total == 0:
                return "(predict: 0%, simulator: 0%)"
            predict_share = 100 * self.predict_fn_seconds / spent_total
            simulator_share = 100 * spent_in_simulator / spent_total
            return f"(predict: {predict_share:.1f}%, simulator: {simulator_share:.1f}%)"
# Reusable docstring fragment for the `model` parameter — presumably injected into
# public docstrings via the imported @format_docstring decorator; confirm at call sites.
_MODEL_API_DOC = {
    "model": """Model to use for generating user messages. Must be one of:
* `"databricks"` - Uses the Databricks managed LLM endpoint
* `"databricks:/<endpoint-name>"` - Uses a Databricks model serving endpoint \
(e.g., `"databricks:/databricks-claude-sonnet-4-5"`)
* `"gateway:/<endpoint-name>"` - Uses an MLflow AI Gateway endpoint \
(e.g., `"gateway:/my-chat-endpoint"`)
* `"<provider>:/<model-name>"` - Uses LiteLLM (e.g., `"openai:/gpt-4.1-mini"`, \
`"anthropic:/claude-3.5-sonnet-20240620"`)
MLflow natively supports `["openai", "anthropic", "bedrock", "mistral"]`, and more \
providers are supported through `LiteLLM <https://docs.litellm.ai/docs/providers>`_.
Default model depends on the tracking URI setup:
* Databricks: `"databricks"`
* Otherwise: `"openai:/gpt-4.1-mini"`
""",
}
class GoalCheckResult(pydantic.BaseModel):
    """Structured output for goal achievement check.

    Mirrors the JSON schema requested by CHECK_GOAL_PROMPT ("rationale" plus a
    yes/no "result").
    """

    # Free-text explanation backing the yes/no verdict.
    rationale: str = pydantic.Field(
        description="Reason for the assessment explaining whether the goal has been achieved"
    )
    # Expected values are the strings "yes" or "no".
    result: str = pydantic.Field(description="'yes' if goal achieved, 'no' otherwise")
@contextmanager
def _suppress_tracing_logging():
    """Temporarily raise the levels of noisy tracing loggers, restoring them on exit.

    - The async export queue logger is raised to WARNING to hide INFO logs emitted
      when flushing traces from the async trace export queue.
    - The fluent tracing logger is raised to ERROR to hide WARNING logs emitted when
      an auto-traced provider is used in a tracing-disabled context (e.g., while
      generating user messages).
    """
    quieted = [
        (logging.getLogger("mlflow.tracing.export.async_export_queue"), logging.WARNING),
        (logging.getLogger("mlflow.tracing.fluent"), logging.ERROR),
    ]
    saved_levels = [target.level for target, _ in quieted]
    for target, temporary_level in quieted:
        target.setLevel(temporary_level)
    try:
        yield
    finally:
        for (target, _), original_level in zip(quieted, saved_levels):
            target.setLevel(original_level)
def _get_last_response(conversation_history: list[dict[str, Any]]) -> str | None:
if not conversation_history:
return None
last_msg = conversation_history[-1]
content = last_msg.get("content")
if isinstance(content, str) and content:
return content
result = parse_outputs_to_str(last_msg)
if result and result.strip():
return result
return str(last_msg)
def _fetch_traces(all_trace_ids: list[list[str]]) -> list[list["Trace"]]:
    """Fetch traces by ID in parallel and regroup them per conversation.

    Args:
        all_trace_ids: One list of trace IDs per test case, in simulation order.

    Returns:
        Trace objects grouped exactly like ``all_trace_ids``.

    Raises:
        MlflowException: If no trace IDs were produced at all.
    """
    from mlflow.tracing.client import TracingClient

    flat_ids = [trace_id for per_case_ids in all_trace_ids for trace_id in per_case_ids]
    if not flat_ids:
        raise MlflowException(
            "Simulation produced no traces. This may indicate that all conversations failed during "
            "simulation. Check the logs above for error details."
        )
    # Make sure asynchronously exported traces are persisted before we query them.
    mlflow.flush_trace_async_logging()
    client = TracingClient()
    worker_count = min(len(flat_ids), MLFLOW_GENAI_SIMULATOR_MAX_WORKERS.get())
    with ThreadPoolExecutor(
        max_workers=worker_count, thread_name_prefix="ConversationSimulatorTraceFetcher"
    ) as pool:
        fetched = list(pool.map(client.get_trace, flat_ids))
    # Re-slice the flat results back into the original per-test-case grouping.
    grouped: list[list["Trace"]] = []
    cursor = 0
    for per_case_ids in all_trace_ids:
        grouped.append(fetched[cursor : cursor + len(per_case_ids)])
        cursor += len(per_case_ids)
    return grouped
@experimental(version="3.10.0")
@dataclass(frozen=True)
class SimulatorContext:
    """
    Context information passed to simulated user agents for message generation.

    Bundles everything a simulated user needs to produce its next message in a
    conversation.

    Args:
        goal: The objective the simulated user is trying to achieve.
        persona: Description of the user's personality and background.
        conversation_history: The full conversation history as a list of message dicts.
        turn: The current turn number (0-indexed).
        simulation_guidelines: Optional instructions for how the simulated user should
            conduct the conversation. Can be a string or a list of strings.
    """

    goal: str
    persona: str
    conversation_history: list[dict[str, Any]]
    turn: int
    simulation_guidelines: str | list[str] | None = None

    @property
    def is_first_turn(self) -> bool:
        """True on the opening turn of the conversation."""
        return self.turn == 0

    @property
    def formatted_history(self) -> str | None:
        """The conversation history rendered as text via ``format_history``."""
        return format_history(self.conversation_history)

    @property
    def last_assistant_response(self) -> str | None:
        """Content of the most recent message, or None when the history is empty."""
        if not self.conversation_history:
            return None
        return _get_last_response(self.conversation_history)
@format_docstring(_MODEL_API_DOC)
@experimental(version="3.10.0")
class BaseSimulatedUserAgent(ABC):
    """
    Abstract base class for simulated user agents.

    Subclass this to build custom simulated users with specialized behavior. The
    base class supplies the common plumbing: model selection and LLM invocation.
    Per-conversation inputs (goal, persona, history) arrive through the
    :py:class:`SimulatorContext` passed to :py:meth:`generate_message`.

    Args:
        model: {{ model }}
        **inference_params: Additional parameters passed to the LLM (e.g., temperature).

    Example:
        .. code-block:: python

            from mlflow.genai.simulators import BaseSimulatedUserAgent, SimulatorContext


            class ImpatientUserAgent(BaseSimulatedUserAgent):
                def generate_message(self, context: SimulatorContext) -> str:
                    if context.is_first_turn:
                        return f"I need help NOW with: {context.goal}"
                    return self.invoke_llm(
                        f"Respond impatiently. Goal: {context.goal}. "
                        f"Last response: {context.last_assistant_response}"
                    )
    """

    def __init__(
        self,
        model: str | None = None,
        **inference_params,
    ):
        # Fall back to the environment-appropriate default model when none is given.
        self.model = model or get_default_simulation_model()
        self.inference_params = inference_params

    @abstractmethod
    def generate_message(self, context: SimulatorContext) -> str:
        """
        Generate a user message based on the provided context.

        Args:
            context: A SimulatorContext containing information like goal, persona,
                conversation history, and turn.

        Returns:
            The generated user message string.
        """

    def invoke_llm(self, prompt: str, system_prompt: str | None = None) -> str:
        """Send *prompt* (optionally preceded by a system prompt) to the configured LLM."""
        from mlflow.types.llm import ChatMessage

        chat_messages = (
            [ChatMessage(role="system", content=system_prompt)] if system_prompt else []
        )
        chat_messages.append(ChatMessage(role="user", content=prompt))
        return invoke_model_without_tracing(
            model_uri=self.model,
            messages=chat_messages,
            num_retries=3,
            inference_params=self.inference_params,
        )
@format_docstring(_MODEL_API_DOC)
@experimental(version="3.10.0")
class SimulatedUserAgent(BaseSimulatedUserAgent):
    """
    An LLM-powered agent that simulates user behavior in conversations.

    Generates realistic user messages from the goal, persona, and optional
    guidelines carried in the :py:class:`SimulatorContext`, enabling automated
    testing of conversational AI systems.

    Args:
        model: {{ model }}
        **inference_params: Additional parameters passed to the LLM (e.g., temperature).
    """

    @staticmethod
    def _render_guidelines(guidelines: str | list[str] | None) -> str:
        # Render the <simulation_guidelines> prompt section; "" when no guidelines given.
        if not guidelines:
            return ""
        if isinstance(guidelines, list):
            body = "\n".join(f"- {g}" for g in guidelines)
        else:
            body = guidelines
        return (
            "\n<simulation_guidelines>\n"
            "Follow these requirements for how YOU (the user) should conduct the "
            "conversation. Remember, you are the USER seeking help, not the assistant "
            "providing answers:\n"
            f"{body}\n"
            "</simulation_guidelines>"
        )

    def generate_message(self, context: SimulatorContext) -> str:
        """Build the turn-appropriate prompt and query the LLM for the next user message."""
        guidelines_section = self._render_guidelines(context.simulation_guidelines)
        if context.is_first_turn:
            prompt = INITIAL_USER_PROMPT.format(
                persona=context.persona, goal=context.goal, guidelines_section=guidelines_section
            )
        else:
            # Show the model everything except the assistant's latest reply, which is
            # passed separately as `last_response`.
            prior_history = format_history(context.conversation_history[:-1])
            prompt = FOLLOWUP_USER_PROMPT.format(
                persona=context.persona,
                goal=context.goal,
                guidelines_section=guidelines_section,
                conversation_history=prior_history if prior_history is not None else "",
                last_response=context.last_assistant_response or "",
            )
        return self.invoke_llm(prompt)
def _is_missing_context_value(value: Any) -> bool:
return value is None or (isinstance(value, float) and math.isnan(value))
def _validate_simulator_predict_fn_signature(
predict_fn: Callable[..., dict[str, Any]],
) -> None:
parameters = inspect.signature(predict_fn).parameters
if "messages" in parameters and "input" in parameters:
raise MlflowException(
"predict_fn cannot have both 'messages' and 'input' parameters. "
"Use 'messages' for Chat Completions API format or 'input' for Responses "
"API format."
)
if "messages" not in parameters and "input" not in parameters:
raise MlflowException(
"predict_fn must accept either 'messages' or 'input' parameter for the "
"conversation history. Use 'messages' for Chat Completions API format or "
"'input' for Responses API format."
)
@format_docstring(_MODEL_API_DOC)
@experimental(version="3.9.0")
class ConversationSimulator:
    """
    Generates multi-turn conversations by simulating user interactions with a target agent.

    The simulator creates a simulated user agent that interacts with your agent's predict function.
    Each conversation is traced in MLflow, allowing you to evaluate how your agent handles
    various user goals and personas.

    The predict function passed to the simulator must accept the conversation history
    as a list of message dictionaries (e.g., ``[{"role": "user", "content": "..."}]``).
    Two formats are supported:

    - **Responses API format**: Use an ``input`` parameter
      (https://platform.openai.com/docs/api-reference/responses)
    - **Chat Completions API format**: Use a ``messages`` parameter
      (https://platform.openai.com/docs/api-reference/chat)

    The predict function:

    - Must accept either ``input`` or ``messages`` (not both) for the conversation history
    - May accept additional keyword arguments from the test case's ``context`` field
    - Receives an ``mlflow_session_id`` parameter that uniquely identifies the conversation
      session. This ID is consistent across all turns in the same conversation, allowing you
      to associate related traces or maintain stateful context (e.g., for thread-based agents).
    - Should return a response (the assistant's message content will be extracted)

    Args:
        test_cases: List of test case dicts, a DataFrame, or an EvaluationDataset,
            with the following fields:

            - "goal": Describing what the simulated user wants to achieve.
            - "persona" (optional): Custom persona for the simulated user.
            - "context" (optional): Dict of additional kwargs to pass to predict_fn.
              Keys ``input``, ``messages``, and ``mlflow_session_id`` are reserved
              by the simulator and cannot be used.
            - "expectations" (optional): Dict of expected values (ground truth) for
              session-level evaluation. These are logged to the first trace of the
              session with the session ID in metadata, allowing session-level scorers
              to retrieve them.
            - "simulation_guidelines" (optional): Instructions for how the simulated user
              should conduct the conversation. Can be a string or a list of strings.
        max_turns: Maximum number of conversation turns before stopping. Default is 10.
        user_model: {{ model }}
        user_agent_class: Optional custom simulated user agent class. Must be a subclass
            of :py:class:`BaseSimulatedUserAgent`. If not provided, uses the default
            :py:class:`SimulatedUserAgent`.
        **user_llm_params: Additional parameters passed to the simulated user's LLM calls.

    Example:
        .. code-block:: python

            import mlflow
            from mlflow.genai.simulators import ConversationSimulator
            from mlflow.genai.scorers import ConversationalSafety, Safety

            # Dummy cache to store conversation threads by session ID
            conversation_threads = {}


            def predict_fn(input: list[dict], **kwargs) -> dict:
                # The mlflow_session_id uniquely identifies this conversation session.
                # All turns in the same conversation share the same session ID.
                session_id = kwargs.get("mlflow_session_id")

                # Use the session ID to maintain state across turns - for example,
                # storing conversation context, user preferences, or agent memory
                if session_id not in conversation_threads:
                    conversation_threads[session_id] = {"turn_count": 0}
                conversation_threads[session_id]["turn_count"] += 1

                response = client.chat.completions.create(
                    model="gpt-4o-mini",
                    messages=input,
                )
                return response


            # Each test case requires a "goal". "persona", "context", "expectations",
            # and "simulation_guidelines" are optional.
            simulator = ConversationSimulator(
                test_cases=[
                    {"goal": "Learn about MLflow tracking"},
                    {"goal": "Debug deployment issue", "persona": "A data scientist"},
                    {
                        "goal": "Set up model registry",
                        "persona": "A beginner",
                        "context": {"user_id": "123"},
                        "expectations": {"expected_topic": "model registry"},
                        "simulation_guidelines": [
                            "Ask clarifying questions before proceeding",
                            "Do not mention deployment until the assistant brings it up",
                        ],
                    },
                ],
                max_turns=5,
            )

            mlflow.genai.evaluate(
                data=simulator,
                predict_fn=predict_fn,
                scorers=[ConversationalSafety(), Safety()],
            )
    """

    def __init__(
        self,
        test_cases: list[dict[str, Any]] | "DataFrame" | EvaluationDataset,
        max_turns: int = 10,
        user_model: str | None = None,
        user_agent_class: type[BaseSimulatedUserAgent] | None = None,
        **user_llm_params,
    ):
        if user_agent_class is not None and not issubclass(
            user_agent_class, BaseSimulatedUserAgent
        ):
            raise TypeError(
                f"user_agent_class must be a subclass of BaseSimulatedUserAgent, "
                f"got {user_agent_class.__name__}"
            )
        # Store original dataset reference if test_cases is an EvaluationDataset, so we can
        # preserve the dataset name when creating the evaluation dataset.
        self._source_dataset = test_cases if isinstance(test_cases, EvaluationDataset) else None
        # NB: this assignment is intercepted by __setattr__, which normalizes the
        # container to a list of dicts and validates it before storing.
        self.test_cases = test_cases
        self.max_turns = max_turns
        self.user_model = user_model or get_default_simulation_model()
        self.user_agent_class = user_agent_class or SimulatedUserAgent
        self.user_llm_params = user_llm_params

    def __setattr__(self, name: str, value: Any) -> None:
        # Intercept every assignment to `test_cases` (including re-assignment after
        # construction) so the stored value is always normalized and validated.
        if name == "test_cases":
            value = self._normalize_test_cases(value)
            self._validate_test_cases(value)
        super().__setattr__(name, value)

    def _normalize_test_cases(
        self, test_cases: list[dict[str, Any]] | "DataFrame" | EvaluationDataset
    ) -> list[dict[str, Any]]:
        """Convert any supported test-case container into a plain list of dicts."""
        from pandas import DataFrame

        if isinstance(test_cases, EvaluationDataset):
            records = test_cases.to_df()["inputs"].to_list()
            # Reject datasets whose records clearly are not conversational test cases
            # (i.e. the first record has none of the required keys such as "goal").
            if not records or not (records[0].keys() & _REQUIRED_TEST_CASE_KEYS):
                raise ValueError(
                    "EvaluationDataset passed to ConversationSimulator must contain "
                    "conversational test cases with a 'goal' field in the 'inputs' column"
                )
            return records
        if isinstance(test_cases, DataFrame):
            return test_cases.to_dict("records")
        return test_cases

    def _validate_test_cases(self, test_cases: list[dict[str, Any]]) -> None:
        """Validate normalized test cases, raising ValueError with offending indices."""
        if not test_cases:
            raise ValueError("test_cases cannot be empty")
        missing_goal_indices = [
            i for i, test_case in enumerate(test_cases) if not test_case.get("goal")
        ]
        if missing_goal_indices:
            raise ValueError(f"Test cases at indices {missing_goal_indices} must have 'goal' field")
        # "context" must either be a dict or absent; NaN counts as absent because rows
        # coming from a DataFrame fill missing cells with NaN.
        indices_with_invalid_context = [
            i
            for i, test_case in enumerate(test_cases)
            if not (
                isinstance(test_case.get("context"), dict)
                or _is_missing_context_value(test_case.get("context"))
            )
        ]
        if indices_with_invalid_context:
            raise ValueError(
                f"Test cases at indices {indices_with_invalid_context} must have 'context' as "
                "a dict when provided."
            )
        # Context keys that the simulator itself injects into predict_fn are reserved.
        indices_with_reserved_context_keys = [
            i
            for i, test_case in enumerate(test_cases)
            if isinstance(test_case.get("context"), dict)
            and set(test_case["context"]) & _RESERVED_CONTEXT_KEYS
        ]
        if indices_with_reserved_context_keys:
            raise ValueError(
                f"Test cases at indices {indices_with_reserved_context_keys} have context keys "
                f"that conflict with keys reserved by ConversationSimulator "
                f"({_RESERVED_CONTEXT_KEYS}). These keys are used to inject conversation "
                "history ('input', 'messages') or session ID ('mlflow_session_id'). "
                "Rename the conflicting keys in the test case context."
            )
        # Unknown top-level keys are tolerated (warn only) to stay forward-compatible.
        indices_with_extra_keys = [
            i
            for i, test_case in enumerate(test_cases)
            if set(test_case.keys()) - _EXPECTED_TEST_CASE_KEYS
        ]
        if indices_with_extra_keys:
            _logger.warning(
                f"Test cases at indices {indices_with_extra_keys} contain unexpected keys "
                f"which will be ignored. Expected keys: {_EXPECTED_TEST_CASE_KEYS}."
            )

    def _compute_test_case_digest(self) -> str:
        """Compute a digest based on the test cases for consistent dataset identification.

        This ensures the same test cases produce the same digest regardless of
        simulation output variations caused by LLM non-determinism.
        """
        import pandas as pd

        from mlflow.data.digest_utils import compute_pandas_digest

        test_case_df = pd.DataFrame(self.test_cases)
        return compute_pandas_digest(test_case_df)

    def _get_dataset_name(self) -> str:
        """Get the dataset name to use for the evaluation dataset.

        If test_cases was an EvaluationDataset, use its name. Otherwise, use the
        default name for conversational datasets.
        """
        if self._source_dataset is not None:
            return self._source_dataset.name
        return "conversational_dataset"

    @experimental(version="3.10.0")
    @record_usage_event(SimulateConversationEvent)
    def simulate(self, predict_fn: Callable[..., dict[str, Any]]) -> list[list["Trace"]]:
        """
        Run conversation simulations for all test cases.

        Executes the simulated user agent against the provided predict function
        for each test case, generating multi-turn conversations. Each conversation
        is traced in MLflow.

        Args:
            predict_fn: The target function to evaluate. Must accept either an ``input``
                parameter (Responses API format) or a ``messages`` parameter (Chat
                Completions API format) containing the conversation history as a list of
                message dicts. May also accept additional kwargs from the test case's
                context. Cannot have both ``input`` and ``messages`` parameters.

        Returns:
            A list of lists containing Trace objects. Each inner list corresponds to
            a test case and contains the traces for each turn in that conversation.
        """
        _validate_simulator_predict_fn_signature(predict_fn)
        # Reuse the caller's active run when one exists; otherwise open a fresh run.
        # The generator-lambda yields exactly once, giving a no-op context manager.
        run_context = (
            contextmanager(lambda: (yield))()
            if mlflow.active_run()
            else mlflow.start_run(run_name=f"simulation-{uuid.uuid4().hex[:8]}")
        )
        with run_context:
            return self._execute_simulation(predict_fn)

    def _execute_simulation(self, predict_fn: Callable[..., dict[str, Any]]) -> list[list["Trace"]]:
        """Run all conversations concurrently and return their traces grouped per test case."""
        num_test_cases = len(self.test_cases)
        # Pre-sized so each future writes into its own slot; failed cases stay [].
        all_trace_ids: list[list[str]] = [[] for _ in range(num_test_cases)]
        max_workers = min(num_test_cases, MLFLOW_GENAI_SIMULATOR_MAX_WORKERS.get())
        timings = SimulationTimingTracker()
        # tqdm is optional; fall back to no progress bar when unavailable.
        progress_bar = (
            tqdm(
                total=num_test_cases,
                desc="Simulating conversations",
                bar_format=PGBAR_FORMAT,
                postfix=timings.format_postfix(),
            )
            if tqdm
            else None
        )
        with (
            _suppress_tracing_logging(),
            ThreadPoolExecutor(
                max_workers=max_workers,
                thread_name_prefix="MlflowConversationSimulator",
            ) as executor,
        ):
            futures = {
                executor.submit(self._run_conversation, test_case, predict_fn, timings): i
                for i, test_case in enumerate(self.test_cases)
            }
            try:
                for future in as_completed(futures):
                    idx = futures[future]
                    try:
                        all_trace_ids[idx] = future.result()
                    except Exception as e:
                        # A single failed conversation should not abort the whole run.
                        _logger.error(
                            f"Failed to run conversation for test case "
                            f"{self.test_cases[idx].get('goal')}: {e}"
                        )
                    if progress_bar:
                        progress_bar.set_postfix_str(timings.format_postfix(), refresh=False)
                        progress_bar.update(1)
            finally:
                if progress_bar:
                    progress_bar.close()
        return _fetch_traces(all_trace_ids)

    def _run_conversation(
        self,
        test_case: dict[str, Any],
        predict_fn: Callable[..., dict[str, Any]],
        timings: SimulationTimingTracker,
    ) -> list[str]:
        """Simulate one conversation for *test_case* and return the trace ID of each turn.

        Stops early when the agent returns an empty response, when the goal-check judge
        says the goal is achieved, or when any turn raises.
        """
        goal = test_case["goal"]
        persona = test_case.get("persona") or DEFAULT_PERSONA
        simulation_guidelines = test_case.get("simulation_guidelines")
        context = test_case.get("context")
        # Non-dict context (e.g. NaN from a DataFrame row) is treated as empty.
        context = context if isinstance(context, dict) else {}
        expectations = test_case.get("expectations", {})
        # One session ID per conversation; shared across every turn's trace.
        trace_session_id = f"sim-{uuid.uuid4().hex[:16]}"
        user_agent = self.user_agent_class(
            model=self.user_model,
            **self.user_llm_params,
        )
        conversation_history: list[dict[str, Any]] = []
        trace_ids: list[str] = []
        for turn in range(self.max_turns):
            try:
                start_time = time.perf_counter()
                simulator_context = SimulatorContext(
                    goal=goal,
                    persona=persona,
                    conversation_history=conversation_history,
                    turn=turn,
                    simulation_guidelines=simulation_guidelines,
                )
                user_message_content = user_agent.generate_message(simulator_context)
                timings.add(generate_message_seconds=time.perf_counter() - start_time)
                user_message = {"role": "user", "content": user_message_content}
                conversation_history.append(user_message)
                start_time = time.perf_counter()
                response, trace_id = self._invoke_predict_fn(
                    predict_fn=predict_fn,
                    input_messages=conversation_history,
                    trace_session_id=trace_session_id,
                    goal=goal,
                    persona=persona,
                    simulation_guidelines=simulation_guidelines,
                    context=context,
                    # Expectations are only logged once, to the session's first trace.
                    expectations=expectations if turn == 0 else None,
                    turn=turn,
                )
                timings.add(predict_fn_seconds=time.perf_counter() - start_time)
                if trace_id:
                    trace_ids.append(trace_id)
                assistant_content = parse_outputs_to_str(response)
                if not assistant_content or not assistant_content.strip():
                    _logger.debug(f"Stopping conversation: empty response at turn {turn}")
                    break
                conversation_history.append({"role": "assistant", "content": assistant_content})
                start_time = time.perf_counter()
                goal_achieved = self._check_goal_achieved(
                    conversation_history, assistant_content, goal
                )
                timings.add(check_goal_seconds=time.perf_counter() - start_time)
                if goal_achieved:
                    _logger.debug(f"Stopping conversation: goal achieved at turn {turn}")
                    break
            except Exception as e:
                # Keep whatever turns completed; the partial conversation is still useful.
                _logger.error(f"Error during turn {turn}: {e}", exc_info=True)
                break
        return trace_ids

    def _invoke_predict_fn(
        self,
        predict_fn: Callable[..., dict[str, Any]],
        input_messages: list[dict[str, Any]],
        trace_session_id: str,
        goal: str,
        persona: str | None,
        simulation_guidelines: str | list[str] | None,
        context: dict[str, Any],
        expectations: dict[str, Any] | None,
        turn: int,
    ) -> tuple[dict[str, Any], str | None]:
        """Call predict_fn inside a traced wrapper and return (response, trace_id)."""

        # NB: We trace the predict_fn call to add session and simulation metadata to the trace.
        # This adds a new root span to the trace, with the same inputs and outputs as the
        # predict_fn call. The goal/persona/turn metadata is used for trace comparison UI
        # since message content may differ between simulation runs.
        @mlflow.trace(name=f"simulation_turn_{turn}", span_type="CHAIN")
        def traced_predict(**kwargs):
            # Metadata values are truncated to the backend's maximum length.
            metadata = {
                TraceMetadataKey.TRACE_SESSION: trace_session_id,
                "mlflow.simulation.goal": goal[:_MAX_METADATA_LENGTH],
                "mlflow.simulation.persona": (persona or DEFAULT_PERSONA)[:_MAX_METADATA_LENGTH],
                "mlflow.simulation.turn": str(turn),
            }
            if simulation_guidelines:
                guidelines_str = (
                    "\n".join(simulation_guidelines)
                    if isinstance(simulation_guidelines, list)
                    else simulation_guidelines
                )
                metadata["mlflow.simulation.simulation_guidelines"] = guidelines_str[
                    :_MAX_METADATA_LENGTH
                ]
            mlflow.update_current_trace(metadata=metadata)
            # Also attach untruncated values as span attributes on the root span.
            if span := mlflow.get_current_active_span():
                span.set_attributes(
                    {
                        "mlflow.simulation.goal": goal,
                        "mlflow.simulation.persona": persona or DEFAULT_PERSONA,
                        "mlflow.simulation.context": context,
                    }
                )
            return predict_fn(**kwargs)

        # Route the history through whichever parameter name predict_fn declares.
        sig = inspect.signature(predict_fn)
        input_key = "messages" if "messages" in sig.parameters else "input"
        predict_kwargs = {
            input_key: input_messages,
            "mlflow_session_id": trace_session_id,
            **context,
        }
        response = traced_predict(**predict_kwargs)
        # thread_local=True: conversations run on worker threads, so fetch the trace
        # produced by *this* thread, not whichever finished last globally.
        trace_id = mlflow.get_last_active_trace_id(thread_local=True)
        # Log expectations to the first trace of the session
        if expectations and trace_id:
            for name, value in expectations.items():
                mlflow.log_expectation(
                    trace_id=trace_id,
                    name=name,
                    value=value,
                    metadata={TraceMetadataKey.TRACE_SESSION: trace_session_id},
                )
        return response, trace_id

    def _check_goal_achieved(
        self,
        conversation_history: list[dict[str, Any]],
        last_response: str,
        goal: str,
    ) -> bool:
        """Ask the judge LLM whether *goal* is achieved; False on any failure to decide."""
        from mlflow.types.llm import ChatMessage

        history_str = format_history(conversation_history)
        eval_prompt = CHECK_GOAL_PROMPT.format(
            goal=goal,
            conversation_history=history_str if history_str is not None else "",
            last_response=last_response,
        )
        messages = [ChatMessage(role="user", content=eval_prompt)]
        try:
            # temperature 0 + structured response_format for a deterministic verdict.
            text_result = invoke_model_without_tracing(
                model_uri=self.user_model,
                messages=messages,
                num_retries=3,
                inference_params={"temperature": 0.0, "response_format": GoalCheckResult},
            )
            result = GoalCheckResult.model_validate_json(text_result)
            return result.result.strip().lower() == "yes"
        except pydantic.ValidationError:
            # NB: ValidationError can only originate from model_validate_json, so
            # text_result is guaranteed to be bound here.
            _logger.warning(f"Could not parse response for goal achievement check: {text_result}")
            return False
        except Exception as e:
            _logger.warning(f"Goal achievement check failed: {e}")
            return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/simulators/simulator.py",
"license": "Apache License 2.0",
"lines": 713,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/simulators/test_simulator.py | import re
from unittest.mock import Mock, patch
import pandas as pd
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets.evaluation_dataset import EvaluationDataset
from mlflow.genai.simulators import (
BaseSimulatedUserAgent,
ConversationSimulator,
SimulatedUserAgent,
SimulatorContext,
)
from mlflow.genai.simulators.prompts import DEFAULT_PERSONA
from mlflow.genai.simulators.simulator import _MAX_METADATA_LENGTH
from mlflow.tracing.constant import TraceMetadataKey
def create_mock_evaluation_dataset(inputs: list[dict[str, object]]) -> Mock:
    """Build a Mock EvaluationDataset whose to_df() yields the given 'inputs' column."""
    dataset = Mock(spec=EvaluationDataset)
    dataset.to_df.return_value = pd.DataFrame({"inputs": inputs})
    return dataset
def test_simulated_user_agent_generate_initial_message():
    # First-turn prompt should embed both the goal and the persona.
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "Hello, I have a question about ML."
        agent = SimulatedUserAgent()
        ctx = SimulatorContext(
            goal="Learn about MLflow",
            persona="You are a beginner who asks curious questions.",
            conversation_history=[],
            turn=0,
        )

        generated = agent.generate_message(ctx)

        assert generated == "Hello, I have a question about ML."
        mock_invoke.assert_called_once()
        sent_prompt = mock_invoke.call_args.kwargs["messages"][0].content
        assert "Learn about MLflow" in sent_prompt
        assert "beginner" in sent_prompt
def test_simulated_user_agent_generate_followup_message():
    # Follow-up prompts should include the assistant's previous reply.
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "Can you tell me more?"
        agent = SimulatedUserAgent()
        ctx = SimulatorContext(
            goal="Learn about MLflow",
            persona="A helpful user",
            conversation_history=[
                {"role": "user", "content": "Hello"},
                {"role": "assistant", "content": "Hi there!"},
            ],
            turn=1,
        )

        generated = agent.generate_message(ctx)

        assert generated == "Can you tell me more?"
        mock_invoke.assert_called_once()
        sent_prompt = mock_invoke.call_args.kwargs["messages"][0].content
        assert "Hi there!" in sent_prompt
def test_simulated_user_agent_default_persona():
    # The default persona text should be woven into the generated prompt.
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "Test message"
        agent = SimulatedUserAgent()
        ctx = SimulatorContext(
            goal="Learn about ML",
            persona=DEFAULT_PERSONA,
            conversation_history=[],
            turn=0,
        )

        generated = agent.generate_message(ctx)

        assert generated == "Test message"
        sent_prompt = mock_invoke.call_args.kwargs["messages"][0].content
        assert "inquisitive user" in sent_prompt.lower()
def test_conversation_simulator_basic_simulation(
    simple_test_case, mock_predict_fn, simulation_mocks
):
    # Each turn: generate_message + _check_goal_achieved
    simulation_mocks["invoke"].side_effect = [
        "What is MLflow?",  # turn 0 generate_message
        '{"rationale": "Goal not achieved yet", "result": "no"}',  # turn 0 goal check
        "Can you explain more?",  # turn 1 generate_message
        '{"rationale": "Goal not achieved yet", "result": "no"}',  # turn 1 goal check
    ]
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=2)

    conversations = simulator.simulate(mock_predict_fn)

    assert len(conversations) == 1  # 1 test case
    assert len(conversations[0]) == 2  # 2 traces
    assert all(trace is simulation_mocks["trace"] for trace in conversations[0])
    assert simulation_mocks["invoke"].call_count == 4  # 2 turns * 2 calls each
    assert simulation_mocks["update_trace"].call_count == 2
def test_conversation_simulator_max_turns_stopping(
    simple_test_case, mock_predict_fn, simulation_mocks
):
    # Three turns, goal never achieved: generate_message + goal check per turn.
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Not yet", "result": "no"}',
    ] * 3
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=3)

    conversations = simulator.simulate(mock_predict_fn)

    assert len(conversations) == 1  # 1 test case
    assert len(conversations[0]) == 3  # 3 traces
    assert simulation_mocks["invoke"].call_count == 6  # 3 turns * 2 calls each
def test_conversation_simulator_empty_response_stopping(simple_test_case, simulation_mocks):
    simulation_mocks["invoke"].return_value = "Test message"

    def empty_predict_fn(input=None, **kwargs):
        # Responses-API-shaped payload whose assistant text is empty.
        return {
            "output": [
                {
                    "id": "msg_123",
                    "type": "message",
                    "role": "assistant",
                    "content": [{"type": "output_text", "text": ""}],
                }
            ]
        }

    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=5)

    conversations = simulator.simulate(empty_predict_fn)

    assert len(conversations) == 1
    assert len(conversations[0]) == 1  # Only 1 trace before stopping
    # Only generate_message called, goal check not called due to empty response
    assert simulation_mocks["invoke"].call_count == 1
def test_conversation_simulator_goal_achieved_stopping(
    simple_test_case, mock_predict_fn, simulation_mocks
):
    # Judge reports success on the very first turn, so the loop exits early.
    simulation_mocks["invoke"].side_effect = [
        "Test message",  # turn 0 generate_message
        '{"rationale": "Goal achieved!", "result": "yes"}',  # turn 0 goal check -> stop
    ]
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=5)

    conversations = simulator.simulate(mock_predict_fn)

    assert len(conversations) == 1
    # Only 1 trace before goal was achieved
    assert len(conversations[0]) == 1
    # 2 calls: generate_message + goal check
    assert simulation_mocks["invoke"].call_count == 2
    # Verify goal check was the second call with goal check prompt
    second_call_prompt = simulation_mocks["invoke"].call_args_list[1].kwargs["messages"][0].content
    assert "achieved" in second_call_prompt.lower()
def test_conversation_simulator_context_passing(test_case_with_context, simulation_mocks):
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Not achieved", "result": "no"}',
    ]
    seen_kwargs = {}

    def capturing_predict_fn(input=None, **kwargs):
        seen_kwargs.update(kwargs)
        return {
            "output": [
                {
                    "id": "msg_123",
                    "type": "message",
                    "role": "assistant",
                    "content": [{"type": "output_text", "text": "Response"}],
                }
            ]
        }

    simulator = ConversationSimulator(test_cases=[test_case_with_context], max_turns=1)

    conversations = simulator.simulate(capturing_predict_fn)

    assert len(conversations) == 1
    assert len(conversations[0]) == 1
    # Verify context was passed to predict_fn
    assert seen_kwargs.get("user_id") == "U001"
    assert seen_kwargs.get("session_id") == "S001"
def test_conversation_simulator_mlflow_session_id_passed_to_predict_fn(
    simple_test_case, simulation_mocks
):
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Not yet", "result": "no"}',
        "Test message 2",
        '{"rationale": "Not yet", "result": "no"}',
    ]
    session_ids = []

    def capturing_predict_fn(input=None, **kwargs):
        session_ids.append(kwargs.get("mlflow_session_id"))
        return {
            "output": [
                {
                    "id": "msg_123",
                    "type": "message",
                    "role": "assistant",
                    "content": [{"type": "output_text", "text": "Response"}],
                }
            ]
        }

    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=2)

    conversations = simulator.simulate(capturing_predict_fn)

    assert len(conversations) == 1
    assert len(conversations[0]) == 2
    # Verify mlflow_session_id was passed to predict_fn
    assert len(session_ids) == 2
    assert all(sid is not None for sid in session_ids)
    assert all(sid.startswith("sim-") for sid in session_ids)
    # Verify session ID is consistent across all turns in the same conversation
    assert session_ids[0] == session_ids[1]
def test_conversation_simulator_multiple_test_cases(
    simple_test_case, test_case_with_persona, mock_predict_fn, simulation_mocks
):
    # 2 test cases * 2 turns each * 2 calls per turn = 8 calls
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Not yet", "result": "no"}',
    ] * 4
    simulator = ConversationSimulator(
        test_cases=[simple_test_case, test_case_with_persona],
        max_turns=2,
    )

    conversations = simulator.simulate(mock_predict_fn)

    assert len(conversations) == 2  # 2 test cases
    # 2 traces for each test case
    assert [len(traces) for traces in conversations] == [2, 2]
@pytest.mark.parametrize(
    ("test_cases", "expected_error"),
    [
        ([], "test_cases cannot be empty"),
        ([{"persona": "test"}], r"indices \[0\].*'goal' field"),
        (
            [{"goal": "valid"}, {"persona": "missing goal"}],
            r"indices \[1\].*'goal' field",
        ),
        (
            [{"persona": "a"}, {"goal": "valid"}, {"persona": "b"}],
            r"indices \[0, 2\].*'goal' field",
        ),
    ],
    ids=[
        "empty_test_cases",
        "missing_goal",
        "second_case_missing_goal",
        "multiple_missing_goals",
    ],
)
def test_conversation_simulator_validation(test_cases, expected_error):
    """Constructor rejects empty test-case lists and cases missing a 'goal',
    reporting the offending indices in the error message."""
    with pytest.raises(ValueError, match=expected_error):
        ConversationSimulator(
            test_cases=test_cases,
            max_turns=2,
        )
@pytest.mark.parametrize(
    ("test_cases", "expected_error"),
    [
        (
            [{"goal": "test", "context": {"input": "foo"}}],
            r"indices \[0\].*reserved",
        ),
        (
            [{"goal": "test", "context": {"messages": []}}],
            r"indices \[0\].*reserved",
        ),
        (
            [{"goal": "test", "context": {"mlflow_session_id": "abc"}}],
            r"indices \[0\].*reserved",
        ),
        (
            [{"goal": "ok"}, {"goal": "test", "context": {"input": "bar"}}],
            r"indices \[1\].*reserved",
        ),
    ],
    ids=[
        "context_has_input",
        "context_has_messages",
        "context_has_mlflow_session_id",
        "second_case_has_reserved_key",
    ],
)
def test_conversation_simulator_rejects_reserved_context_keys(test_cases, expected_error):
    """Context dicts may not use the reserved keys 'input', 'messages', or
    'mlflow_session_id'; the error identifies the offending case index."""
    with pytest.raises(ValueError, match=expected_error):
        ConversationSimulator(
            test_cases=test_cases,
            max_turns=2,
        )
@pytest.mark.parametrize(
    ("test_cases", "expected_error"),
    [
        ([{"goal": "test", "context": "foo"}], r"indices \[0\].*'context' as a dict"),
        ([{"goal": "test", "context": ["foo"]}], r"indices \[0\].*'context' as a dict"),
        ([{"goal": "ok"}, {"goal": "test", "context": 1}], r"indices \[1\].*'context' as a dict"),
    ],
    ids=["context_string", "context_list", "second_case_context_int"],
)
def test_conversation_simulator_rejects_invalid_context_types(test_cases, expected_error):
    """A test case's 'context' must be a dict; other types raise with the case index."""
    with pytest.raises(ValueError, match=expected_error):
        ConversationSimulator(
            test_cases=test_cases,
            max_turns=2,
        )
def test_conversation_simulator_accepts_dataframe_with_missing_context_values():
    """A DataFrame whose rows only partially define 'context' (NaN for missing
    cells) is still accepted, and present context values survive conversion."""
    test_cases_df = pd.DataFrame(
        [
            {"goal": "Debug an error", "context": {"user_id": "U001"}},
            {"goal": "Learn about MLflow"},
        ]
    )
    simulator = ConversationSimulator(
        test_cases=test_cases_df,
        max_turns=2,
    )
    assert len(simulator.test_cases) == 2
    assert simulator.test_cases[0]["context"] == {"user_id": "U001"}
@pytest.mark.parametrize(
    "inputs",
    [
        [{"goal": "Learn about MLflow"}],
        [{"goal": "Debug issue", "persona": "Engineer"}],
        [{"goal": "Ask questions", "persona": "Student", "context": {"id": "1"}}],
        [{"goal": "Learn ML", "simulation_guidelines": "Be concise"}],
        [{"goal": "Learn ML", "simulation_guidelines": ["Be concise", "Ask follow-ups"]}],
        [
            {
                "goal": "Debug deployment",
                "persona": "Engineer",
                "context": {"env": "prod"},
                "simulation_guidelines": "Focus on logs",
            }
        ],
    ],
)
def test_conversation_simulator_evaluation_dataset_valid(inputs):
    """EvaluationDataset inputs with a 'goal' (plus optional persona/context/
    simulation_guidelines) are accepted and surfaced verbatim as test_cases."""
    mock_dataset = create_mock_evaluation_dataset(inputs)
    simulator = ConversationSimulator(test_cases=mock_dataset, max_turns=2)
    assert len(simulator.test_cases) == len(inputs)
    assert simulator.test_cases == inputs


@pytest.mark.parametrize(
    "inputs",
    [
        [{"request": "What is MLflow?"}],
        [{"inputs": {"query": "Help me"}, "expected_response": "Sure!"}],
        [{"inputs": {"question": "How to log?", "answer": "Use mlflow.log"}}],
        [],
    ],
)
def test_conversation_simulator_evaluation_dataset_invalid(inputs):
    """EvaluationDataset inputs lacking a 'goal' field (or empty) are rejected."""
    mock_dataset = create_mock_evaluation_dataset(inputs)
    with pytest.raises(ValueError, match="conversational test cases with a 'goal' field"):
        ConversationSimulator(test_cases=mock_dataset, max_turns=2)
def test_reassignment_with_valid_test_cases(simple_test_case):
    """Assigning a new valid list to simulator.test_cases replaces the old one."""
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=2)
    new_test_cases = [
        {"goal": "New goal"},
    ]
    simulator.test_cases = new_test_cases
    assert simulator.test_cases == new_test_cases
    assert len(simulator.test_cases) == 1


def test_reassignment_with_dataframe(simple_test_case):
    """Reassigning test_cases with a DataFrame converts rows to dicts."""
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=2)
    df = pd.DataFrame([{"goal": "Goal from DataFrame", "persona": "Analyst"}])
    simulator.test_cases = df
    assert simulator.test_cases == [{"goal": "Goal from DataFrame", "persona": "Analyst"}]


@pytest.mark.parametrize(
    ("invalid_test_cases", "expected_error"),
    [
        ([], "test_cases cannot be empty"),
        ([{"persona": "no goal here"}], r"indices \[0\].*'goal' field"),
    ],
)
def test_reassignment_with_invalid_test_cases_raises_error(
    simple_test_case, invalid_test_cases, expected_error
):
    """Invalid reassignment raises and leaves the previous test_cases intact."""
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=2)
    original_test_cases = simulator.test_cases
    with pytest.raises(ValueError, match=expected_error):
        simulator.test_cases = invalid_test_cases
    assert simulator.test_cases == original_test_cases
def test_simulator_context_is_first_turn():
    """is_first_turn is True only for turn 0 with no history."""
    context_first = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[],
        turn=0,
    )
    assert context_first.is_first_turn is True
    context_later = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[{"role": "user", "content": "Hello"}],
        turn=1,
    )
    assert context_later.is_first_turn is False


def test_simulator_context_formatted_history():
    """formatted_history is None when empty, else 'role: content' lines."""
    context_empty = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[],
        turn=0,
    )
    assert context_empty.formatted_history is None
    context_with_history = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ],
        turn=1,
    )
    assert context_with_history.formatted_history == "user: Hello\nassistant: Hi there!"


def test_simulator_context_last_assistant_response():
    """last_assistant_response is None when empty, else the latest assistant message."""
    context_empty = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[],
        turn=0,
    )
    assert context_empty.last_assistant_response is None
    context_with_history = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there!"},
        ],
        turn=1,
    )
    assert context_with_history.last_assistant_response == "Hi there!"


def test_simulator_context_is_frozen():
    """SimulatorContext is a frozen dataclass; attribute assignment raises."""
    context = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[],
        turn=0,
    )
    with pytest.raises(AttributeError, match="cannot assign to field"):
        context.goal = "New goal"


def test_simulator_context_with_simulation_guidelines():
    """simulation_guidelines passed at construction is stored verbatim."""
    context = SimulatorContext(
        goal="Test goal",
        persona="Test persona",
        conversation_history=[],
        turn=0,
        simulation_guidelines="Be concise and ask clarifying questions",
    )
    assert context.simulation_guidelines == "Be concise and ask clarifying questions"
def test_custom_user_agent_class(simple_test_case, mock_predict_fn, simulation_mocks):
    """A BaseSimulatedUserAgent subclass can replace the default agent."""
    class CustomUserAgent(BaseSimulatedUserAgent):
        def generate_message(self, context: SimulatorContext) -> str:
            return f"Custom message for: {context.goal}"
    # Only the goal-completion judge hits the mocked model; "yes" ends turn 1.
    simulation_mocks["invoke"].return_value = '{"rationale": "Goal achieved!", "result": "yes"}'
    simulator = ConversationSimulator(
        test_cases=[simple_test_case],
        max_turns=2,
        user_agent_class=CustomUserAgent,
    )
    all_traces = simulator.simulate(mock_predict_fn)
    assert len(all_traces) == 1
    assert len(all_traces[0]) == 1


def test_user_agent_class_default(simple_test_case):
    """When unspecified, the simulator uses SimulatedUserAgent."""
    simulator = ConversationSimulator(
        test_cases=[simple_test_case],
        max_turns=2,
    )
    assert simulator.user_agent_class is SimulatedUserAgent


def test_user_agent_class_receives_context(simple_test_case, mock_predict_fn, simulation_mocks):
    """The agent receives a SimulatorContext with the goal and correct turn index."""
    captured_contexts = []
    class ContextCapturingAgent(BaseSimulatedUserAgent):
        def generate_message(self, context: SimulatorContext) -> str:
            captured_contexts.append(context)
            return f"Message for turn {context.turn}"
    simulation_mocks["invoke"].return_value = '{"rationale": "Not yet", "result": "no"}'
    simulator = ConversationSimulator(
        test_cases=[simple_test_case],
        max_turns=2,
        user_agent_class=ContextCapturingAgent,
    )
    simulator.simulate(mock_predict_fn)
    assert len(captured_contexts) == 2
    assert captured_contexts[0].turn == 0
    assert captured_contexts[0].is_first_turn is True
    assert captured_contexts[0].goal == simple_test_case["goal"]
    assert captured_contexts[1].turn == 1
    assert captured_contexts[1].is_first_turn is False
def test_conversation_simulator_sets_span_attributes(mock_predict_fn_with_context):
    """Full goal/persona/context go on root-span attributes; the trace metadata
    copies are truncated to _MAX_METADATA_LENGTH."""
    long_goal = "A" * 500
    long_persona = "B" * 500
    context = {"user_id": "U001", "session_id": "S001"}
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        # Turn 1: message + "no" judge; turn 2: message + "yes" judge ends the run.
        mock_invoke.side_effect = [
            "Test message",
            '{"rationale": "Not achieved", "result": "no"}',
            "Follow up message",
            '{"rationale": "Goal achieved!", "result": "yes"}',
        ]
        simulator = ConversationSimulator(
            test_cases=[{"goal": long_goal, "persona": long_persona, "context": context}],
            max_turns=2,
        )
        all_traces = simulator.simulate(mock_predict_fn_with_context)
    first_test_case_traces = all_traces[0]
    assert len(first_test_case_traces) == 2
    for trace in first_test_case_traces:
        root_span = trace.data.spans[0]
        metadata = trace.info.request_metadata
        assert root_span.attributes["mlflow.simulation.goal"] == long_goal
        assert root_span.attributes["mlflow.simulation.persona"] == long_persona
        assert root_span.attributes["mlflow.simulation.context"] == context
        assert metadata["mlflow.simulation.goal"] == long_goal[:_MAX_METADATA_LENGTH]
        assert metadata["mlflow.simulation.persona"] == long_persona[:_MAX_METADATA_LENGTH]


def test_conversation_simulator_uses_default_persona_and_empty_context(mock_predict_fn):
    """Omitting persona/context falls back to DEFAULT_PERSONA and an empty dict."""
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.side_effect = [
            "Test message",
            '{"rationale": "Goal achieved!", "result": "yes"}',
        ]
        simulator = ConversationSimulator(
            test_cases=[{"goal": "Test goal"}],
            max_turns=1,
        )
        all_traces = simulator.simulate(mock_predict_fn)
    trace = all_traces[0][0]
    root_span = trace.data.spans[0]
    assert root_span.attributes["mlflow.simulation.goal"] == "Test goal"
    assert root_span.attributes["mlflow.simulation.persona"] == DEFAULT_PERSONA
    assert root_span.attributes["mlflow.simulation.context"] == {}
def test_conversation_simulator_logs_expectations_to_first_trace(mock_predict_fn):
    """Test-case expectations become assessments on the FIRST trace only,
    tagged with the session in their metadata; later traces get none."""
    expectations = {"expected_topic": "MLflow", "expected_sentiment": "positive"}
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.side_effect = [
            "Test message",
            '{"rationale": "Not achieved", "result": "no"}',
            "Follow up message",
            '{"rationale": "Goal achieved!", "result": "yes"}',
        ]
        simulator = ConversationSimulator(
            test_cases=[{"goal": "Test goal", "expectations": expectations}],
            max_turns=2,
        )
        all_traces = simulator.simulate(mock_predict_fn)
    assert len(all_traces[0]) == 2
    first_trace = all_traces[0][0]
    expectation_assessments = [
        a for a in first_trace.info.assessments if a.expectation is not None
    ]
    assert len(expectation_assessments) == 2
    for assessment in expectation_assessments:
        assert assessment.name in expectations
        assert assessment.expectation.value == expectations[assessment.name]
        assert TraceMetadataKey.TRACE_SESSION in assessment.metadata
    second_trace = all_traces[0][1]
    second_trace_assessments = second_trace.info.assessments
    second_expectation_assessments = [
        a for a in second_trace_assessments if a.expectation is not None
    ]
    assert len(second_expectation_assessments) == 0
def test_invoke_llm_with_prompt_only():
    """invoke_llm with just a prompt sends a single user-role message."""
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "LLM response"
        agent = SimulatedUserAgent()
        result = agent.invoke_llm("Test prompt")
        assert result == "LLM response"
        mock_invoke.assert_called_once()
        call_args = mock_invoke.call_args
        messages = call_args.kwargs["messages"]
        assert len(messages) == 1
        assert messages[0].role == "user"
        assert messages[0].content == "Test prompt"


def test_invoke_llm_with_system_prompt():
    """With system_prompt set, a system message precedes the user message."""
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "LLM response with system"
        agent = SimulatedUserAgent()
        result = agent.invoke_llm("Test prompt", system_prompt="System instructions")
        assert result == "LLM response with system"
        mock_invoke.assert_called_once()
        call_args = mock_invoke.call_args
        messages = call_args.kwargs["messages"]
        assert len(messages) == 2
        assert messages[0].role == "system"
        assert messages[0].content == "System instructions"
        assert messages[1].role == "user"
        assert messages[1].content == "Test prompt"
def test_invalid_user_agent_class_raises_type_error(simple_test_case):
    """user_agent_class must subclass BaseSimulatedUserAgent; otherwise TypeError."""
    class NotAUserAgent:
        pass
    with pytest.raises(TypeError, match="must be a subclass of BaseSimulatedUserAgent"):
        ConversationSimulator(
            test_cases=[simple_test_case],
            max_turns=2,
            user_agent_class=NotAUserAgent,
        )


def test_conversation_simulator_digest_is_deterministic():
    """Identical test cases produce the same 8-character string digest."""
    test_cases = [
        {"goal": "Learn about MLflow"},
        {"goal": "Debug deployment", "persona": "Data scientist"},
        {"goal": "Setup", "context": {"env": "prod"}},
    ]
    simulator1 = ConversationSimulator(test_cases=test_cases, max_turns=2)
    simulator2 = ConversationSimulator(test_cases=test_cases, max_turns=2)
    digest1 = simulator1._compute_test_case_digest()
    digest2 = simulator2._compute_test_case_digest()
    assert digest1 == digest2
    assert isinstance(digest1, str)
    assert len(digest1) == 8
@pytest.mark.parametrize(
    ("test_cases_1", "test_cases_2"),
    [
        # Different goals
        ([{"goal": "Goal A"}], [{"goal": "Goal B"}]),
        # Adding persona changes digest
        ([{"goal": "Goal"}], [{"goal": "Goal", "persona": "Engineer"}]),
        # Different order
        ([{"goal": "A"}, {"goal": "B"}], [{"goal": "B"}, {"goal": "A"}]),
    ],
    ids=["different_goals", "added_persona", "different_order"],
)
def test_conversation_simulator_digest_differs_for_different_test_cases(test_cases_1, test_cases_2):
    """The digest is sensitive to content AND ordering of test cases."""
    simulator1 = ConversationSimulator(test_cases=test_cases_1, max_turns=2)
    simulator2 = ConversationSimulator(test_cases=test_cases_2, max_turns=2)
    assert simulator1._compute_test_case_digest() != simulator2._compute_test_case_digest()


def test_conversation_simulator_get_dataset_name_default():
    """Plain-list test cases get the default dataset name."""
    test_cases = [{"goal": "Learn about MLflow"}]
    simulator = ConversationSimulator(test_cases=test_cases, max_turns=2)
    assert simulator._get_dataset_name() == "conversational_dataset"


def test_conversation_simulator_get_dataset_name_from_evaluation_dataset():
    """An EvaluationDataset's own name is used as the dataset name."""
    inputs = [{"goal": "Learn about MLflow"}]
    mock_dataset = create_mock_evaluation_dataset(inputs)
    mock_dataset.name = "my_custom_dataset"
    simulator = ConversationSimulator(test_cases=mock_dataset, max_turns=2)
    assert simulator._get_dataset_name() == "my_custom_dataset"
def test_simulate_creates_run_when_no_parent_run(
    tmp_path, simple_test_case, mock_predict_fn, simulation_mocks
):
    """With no active run, simulate() creates one named simulation-<8 hex chars>."""
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Goal achieved!", "result": "yes"}',
    ]
    mlflow.set_tracking_uri(f"sqlite:///{tmp_path}/mlflow.db")
    mlflow.set_experiment("test-experiment")
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=1)
    simulator.simulate(mock_predict_fn)
    runs = mlflow.search_runs()
    assert len(runs) == 1
    run_name = runs.iloc[0]["tags.mlflow.runName"]
    assert re.match(r"^simulation-[0-9a-f]{8}$", run_name)


def test_simulate_uses_parent_run_when_exists(
    tmp_path, simple_test_case, mock_predict_fn, simulation_mocks
):
    """With an active run, simulate() reuses it instead of creating a new one."""
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Goal achieved!", "result": "yes"}',
    ]
    mlflow.set_tracking_uri(f"sqlite:///{tmp_path}/mlflow.db")
    mlflow.set_experiment("test-experiment")
    with mlflow.start_run(run_name="parent-run") as parent_run:
        parent_run_id = parent_run.info.run_id
        simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=1)
        simulator.simulate(mock_predict_fn)
        # The parent run must still be the active run after simulation.
        assert mlflow.active_run().info.run_id == parent_run_id
    runs = mlflow.search_runs()
    assert len(runs) == 1
    assert runs.iloc[0]["tags.mlflow.runName"] == "parent-run"


def test_simulate_run_name_format(tmp_path, simple_test_case, mock_predict_fn, simulation_mocks):
    """Auto-created run names are 'simulation-' followed by exactly 8 hex digits."""
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Goal achieved!", "result": "yes"}',
    ]
    mlflow.set_tracking_uri(f"sqlite:///{tmp_path}/mlflow.db")
    mlflow.set_experiment("test-experiment")
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=1)
    simulator.simulate(mock_predict_fn)
    runs = mlflow.search_runs()
    run_name = runs.iloc[0]["tags.mlflow.runName"]
    assert run_name.startswith("simulation-")
    hex_part = run_name[len("simulation-") :]
    assert len(hex_part) == 8
    assert re.match(r"^[0-9a-f]+$", hex_part)
def test_conversation_simulator_completions_messages_format(simple_test_case, simulation_mocks):
    """A predict_fn taking 'messages' receives chat-style user dicts."""
    simulation_mocks["invoke"].side_effect = [
        "Test message",
        '{"rationale": "Goal achieved!", "result": "yes"}',
    ]
    captured_messages_snapshots = []
    def capturing_predict_fn(messages: list[dict[str, str]] | None = None, **kwargs):
        # Snapshot a copy: the simulator may mutate the list between turns.
        captured_messages_snapshots.append(list(messages) if messages else None)
        return {
            "output": [
                {
                    "id": "msg_123",
                    "type": "message",
                    "role": "assistant",
                    "content": [{"type": "output_text", "text": "Response"}],
                }
            ]
        }
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=1)
    simulator.simulate(capturing_predict_fn)
    assert len(captured_messages_snapshots) == 1
    assert captured_messages_snapshots[0][0]["role"] == "user"
    assert captured_messages_snapshots[0][0]["content"] == "Test message"


def test_conversation_simulator_rejects_both_input_and_messages(simple_test_case, simulation_mocks):
    """predict_fn declaring BOTH 'messages' and 'input' parameters is rejected."""
    simulation_mocks["invoke"].return_value = "Test message"
    def invalid_predict_fn(input: list[dict[str, str]], messages: list[dict[str, str]], **kwargs):
        return {
            "output": [
                {"role": "assistant", "content": [{"type": "output_text", "text": "Response"}]}
            ]
        }
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=1)
    with pytest.raises(MlflowException, match="cannot have both 'messages' and 'input' parameters"):
        simulator.simulate(invalid_predict_fn)


def test_conversation_simulator_rejects_neither_input_nor_messages(
    simple_test_case, simulation_mocks
):
    """predict_fn declaring NEITHER 'messages' nor 'input' is rejected."""
    simulation_mocks["invoke"].return_value = "Test message"
    def invalid_predict_fn(**kwargs):
        return None
    simulator = ConversationSimulator(test_cases=[simple_test_case], max_turns=1)
    with pytest.raises(MlflowException, match="must accept either 'messages' or 'input'"):
        simulator.simulate(invalid_predict_fn)
def test_simulated_user_agent_with_simulation_guidelines():
    """On the first turn, simulation_guidelines are embedded in the generation prompt."""
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "I have a question about ML pipelines."
        agent = SimulatedUserAgent()
        context = SimulatorContext(
            goal="Learn about ML pipelines",
            persona=DEFAULT_PERSONA,
            conversation_history=[],
            turn=0,
            simulation_guidelines="Ask clarifying questions before proceeding",
        )
        message = agent.generate_message(context)
        assert message == "I have a question about ML pipelines."
        mock_invoke.assert_called_once()
        call_args = mock_invoke.call_args
        messages = call_args.kwargs["messages"]
        prompt = messages[0].content
        assert "Learn about ML pipelines" in prompt
        assert "Ask clarifying questions before proceeding" in prompt
        assert "simulation_guidelines" in prompt


def test_simulated_user_agent_followup_with_simulation_guidelines():
    """On follow-up turns (non-empty history), guidelines still appear in the prompt."""
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.return_value = "Let me clarify something first."
        agent = SimulatedUserAgent()
        context = SimulatorContext(
            goal="Learn about ML",
            persona=DEFAULT_PERSONA,
            conversation_history=[
                {"role": "user", "content": "Hello"},
                {"role": "assistant", "content": "Hi there!"},
            ],
            turn=1,
            simulation_guidelines="Be thorough and ask follow-up questions",
        )
        message = agent.generate_message(context)
        assert message == "Let me clarify something first."
        mock_invoke.assert_called_once()
        call_args = mock_invoke.call_args
        messages = call_args.kwargs["messages"]
        prompt = messages[0].content
        assert "Be thorough and ask follow-up questions" in prompt
        assert "simulation_guidelines" in prompt
def test_conversation_simulator_with_simulation_guidelines(mock_predict_fn):
    """End-to-end: guidelines flow into the generation prompt and trace metadata."""
    test_case = {
        "goal": "Learn about ML pipelines",
        "simulation_guidelines": "Ask clarifying questions before proceeding",
    }
    with patch("mlflow.genai.simulators.simulator.invoke_model_without_tracing") as mock_invoke:
        mock_invoke.side_effect = [
            "Test message with simulation_guidelines",
            '{"rationale": "Goal achieved!", "result": "yes"}',
        ]
        simulator = ConversationSimulator(
            test_cases=[test_case],
            max_turns=2,
        )
        all_traces = simulator.simulate(mock_predict_fn)
    assert len(all_traces) == 1
    assert len(all_traces[0]) == 1
    # Verify simulation_guidelines are in the generate_message prompt
    generate_call = mock_invoke.call_args_list[0]
    prompt = generate_call.kwargs["messages"][0].content
    assert "Ask clarifying questions before proceeding" in prompt
    # Verify simulation_guidelines are in trace metadata
    trace = all_traces[0][0]
    metadata = trace.info.request_metadata
    assert "mlflow.simulation.simulation_guidelines" in metadata
    assert (
        metadata["mlflow.simulation.simulation_guidelines"]
        == "Ask clarifying questions before proceeding"
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/simulators/test_simulator.py",
"license": "Apache License 2.0",
"lines": 807,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/conversational_guidelines.py | CONVERSATIONAL_GUIDELINES_ASSESSMENT_NAME = "conversational_guidelines"
CONVERSATIONAL_GUIDELINES_PROMPT = """\
Consider the following conversation history between a user and an assistant.
Your task is to evaluate whether the assistant's responses throughout the conversation comply with
the provided guidelines and output exactly one label: "yes" or "no".
<guidelines>
{{ guidelines }}
</guidelines>
Evaluation criteria:
- Assess whether EVERY assistant response in the conversation follows ALL the provided guidelines.
- Focus on judging only the assistant's responses, not the user's messages.
- Only focus on the provided guidelines and not the correctness, relevance, or effectiveness of the responses.
- A guideline violation at ANY point in the conversation means the entire conversation fails.
- If none of the guidelines apply to the given conversation, the result must be "yes".
Output "yes" if all assistant responses comply with all guidelines throughout the entire conversation.
Output "no" if any assistant response violates any guideline at any point in the conversation.
<conversation>{{ conversation }}</conversation>
""" # noqa: E501
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/conversational_guidelines.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:dev/clint/src/clint/rules/prefer_dict_union.py | import ast
from clint.rules.base import Rule
def _is_simple_name_or_attribute(node: ast.expr) -> bool:
"""
Check if a node is a simple name (e.g., `a`) or a chain of attribute
accesses on a simple name (e.g., `obj.attr` or `a.b.c`).
"""
if isinstance(node, ast.Name):
return True
if isinstance(node, ast.Attribute):
return _is_simple_name_or_attribute(node.value)
return False
class PreferDictUnion(Rule):
    def _message(self) -> str:
        return (
            "Use `|` operator for dictionary merging (e.g., `a | b`) "
            "instead of `{**a, **b}` for better readability."
        )

    @staticmethod
    def check(node: ast.Dict) -> bool:
        """
        Return True when the dict literal is a pure merge of 2+ ``**`` unpacks
        of simple names / attribute chains, i.e. it could be rewritten with `|`.

        Flagged:
            - {**a, **b}
            - {**a, **b, **c}
            - {**obj.attr, **b}
            - {**a.b.c, **d}

        Not flagged:
            - {**a}                               # single unpack, nothing to merge
            - {**a, "key": value}                 # mixed with literal keys
            - {**data[0], **b}, {**func(), **b}   # complex expressions
            - {**a,\\n**b}                         # multi-line dicts
        """
        # A merge needs at least two entries.
        if len(node.keys) < 2:
            return False
        # Leave multi-line literals alone.
        if node.end_lineno and node.end_lineno > node.lineno:
            return False
        # A key of None marks a `**` unpacking entry; any real key disqualifies.
        if any(key is not None for key in node.keys):
            return False
        # Every unpacked value must be a plain name or dotted attribute chain.
        return all(_is_simple_name_or_attribute(value) for value in node.values)
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/prefer_dict_union.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/tests/rules/test_prefer_dict_union.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import lint_file
from clint.rules import PreferDictUnion
@pytest.mark.parametrize(
    "code",
    [
        pytest.param("{**dict1, **dict2}", id="two_dict_unpacks"),
        pytest.param("{**dict1, **dict2, **dict3}", id="three_dict_unpacks"),
        pytest.param("{**a, **b, **c, **d}", id="four_dict_unpacks"),
        pytest.param("{**obj.attr, **other}", id="attribute_and_name"),
        pytest.param("{**a.x, **b.y}", id="two_attributes"),
        pytest.param("{**a.b.c, **d}", id="chained_attribute"),
    ],
)
def test_flag(index_path: Path, code: str) -> None:
    """Pure merges of 2+ `**` unpacks of names/attributes trigger PreferDictUnion."""
    config = Config(select={PreferDictUnion.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 1
    assert isinstance(results[0].rule, PreferDictUnion)
@pytest.mark.parametrize(
    "code",
    [
        pytest.param("{**dict1}", id="single_dict_unpack"),
        pytest.param('{**dict1, "extra_key": "value"}', id="unpack_then_literal"),
        pytest.param('{"key": "value", **dict1}', id="literal_then_unpack"),
        pytest.param('{"key1": "value1", "key2": "value2"}', id="only_literals"),
        pytest.param("{}", id="empty_dict"),
        pytest.param('{**dict1, "k1": "v1", **dict2, "k2": "v2"}', id="mixed_unpacks_and_literals"),
        pytest.param('{**dict1, **dict2, "override": "value"}', id="unpacks_with_trailing_literal"),
        pytest.param("{**data[0], **other}", id="subscript_access"),
        pytest.param('{**configs, **{"key": "value"}}', id="dict_literal_unpack"),
        pytest.param("{**func(), **other}", id="function_call"),
        pytest.param("{**a,\n**b}", id="multi_line"),
    ],
)
def test_no_flag(index_path: Path, code: str) -> None:
    """Single unpacks, literal keys, complex expressions, and multi-line dicts
    must NOT be flagged by PreferDictUnion."""
    config = Config(select={PreferDictUnion.name})
    results = lint_file(Path("test.py"), code, config, index_path)
    assert len(results) == 0
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_prefer_dict_union.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/gateway/providers/test_fallback.py | from typing import Any
from unittest import mock
import pytest
from fastapi import HTTPException
from mlflow.entities.gateway_endpoint import FallbackStrategy
from mlflow.gateway.config import EndpointConfig
from mlflow.gateway.exceptions import AIGatewayException
from mlflow.gateway.providers.base import FallbackProvider
from mlflow.gateway.schemas import chat, embeddings
from tests.gateway.providers.test_openai import (
_run_test_chat,
_run_test_chat_stream,
_run_test_completions,
_run_test_completions_stream,
_run_test_embeddings,
chat_config,
chat_response,
chat_stream_response,
chat_stream_response_incomplete,
completions_config,
completions_response,
completions_stream_response,
completions_stream_response_incomplete,
embedding_config,
)
from tests.gateway.tools import MockAsyncResponse, MockAsyncStreamingResponse, mock_http_client
def _get_fallback_provider(
    endpoint_configs: list[dict[str, Any]], max_attempts: int | None = None
) -> FallbackProvider:
    """Build a sequential FallbackProvider from raw endpoint config dicts.

    Args:
        endpoint_configs: Raw endpoint config dicts; each must carry a
            ``model.provider`` key used to resolve the provider class.
        max_attempts: Optional cap on how many providers are tried before
            giving up; None means try them all.
    """
    # Imported here to avoid a circular import at module load time.
    from mlflow.gateway.providers import get_provider

    providers = [
        get_provider(config["model"]["provider"])(EndpointConfig(**config))
        for config in endpoint_configs
    ]
    return FallbackProvider(
        providers=providers,
        strategy=FallbackStrategy.SEQUENTIAL,
        max_attempts=max_attempts,
    )
@pytest.mark.asyncio
async def test_fallback_chat_first_provider_succeeds():
    """A single-provider fallback behaves like a plain chat provider."""
    config = chat_config()
    provider = _get_fallback_provider([config])
    await _run_test_chat(provider)


@pytest.mark.asyncio
async def test_fallback_chat_second_provider_succeeds():
    """When the first provider raises, the second one serves the chat response."""
    config1 = chat_config()
    config2 = chat_config()
    config2["name"] = "chat-fallback"
    provider = _get_fallback_provider([config1, config2])
    resp = chat_response()
    mock_client = mock_http_client(MockAsyncResponse(resp))
    with mock.patch("aiohttp.ClientSession") as mock_session:
        mock_session.return_value = mock_client
        # First POST fails, second POST succeeds.
        mock_client.post.side_effect = [
            Exception("First provider failed"),
            MockAsyncResponse(resp),
        ]
        payload = chat.RequestPayload(
            messages=[{"role": "user", "content": "Tell me a joke"}],
            temperature=0.5,
        )
        response = await provider.chat(payload)
        assert response.choices[0].message.content == "\n\nThis is a test!"
        # Verify we tried twice (first failed, second succeeded)
        assert mock_client.post.call_count == 2


@pytest.mark.asyncio
async def test_fallback_chat_all_providers_fail():
    """When every provider raises, the aggregate failure is surfaced."""
    config1 = chat_config()
    config2 = chat_config()
    config2["name"] = "chat-fallback"
    provider = _get_fallback_provider([config1, config2])
    # Mock both providers to fail
    with mock.patch("aiohttp.ClientSession") as mock_session:
        mock_client = mock_http_client(MockAsyncResponse({}))
        mock_session.return_value = mock_client
        mock_client.post.side_effect = [
            Exception("First provider failed"),
            Exception("Second provider failed"),
        ]
        payload = chat.RequestPayload(
            messages=[{"role": "user", "content": "Tell me a joke"}],
            temperature=0.5,
        )
        with pytest.raises(Exception, match="All 2 fallback attempts failed"):
            await provider.chat(payload)
        # Verify we tried both providers
        assert mock_client.post.call_count == 2
@pytest.mark.asyncio
async def test_fallback_chat_max_attempts():
    """max_attempts caps provider attempts even when more providers exist."""
    config1 = chat_config()
    config2 = chat_config()
    config2["name"] = "chat-fallback2"
    config3 = chat_config()
    config3["name"] = "chat-fallback3"
    # Set max_attempts to 2, even though we have 3 providers
    provider = _get_fallback_provider([config1, config2, config3], max_attempts=2)
    # Mock all providers to fail
    with mock.patch("aiohttp.ClientSession") as mock_session:
        mock_client = mock_http_client(MockAsyncResponse({}))
        mock_session.return_value = mock_client
        mock_client.post.side_effect = [
            Exception("First provider failed"),
            Exception("Second provider failed"),
            Exception("Third provider failed"),
        ]
        payload = chat.RequestPayload(
            messages=[{"role": "user", "content": "Tell me a joke"}],
            temperature=0.5,
        )
        with pytest.raises(Exception, match="All 2 fallback attempts failed"):
            await provider.chat(payload)
        # Verify we only tried 2 providers (max_attempts=2)
        assert mock_client.post.call_count == 2


@pytest.mark.parametrize("resp", [chat_stream_response(), chat_stream_response_incomplete()])
@pytest.mark.asyncio
async def test_fallback_chat_stream(resp):
    """Single-provider fallback streams chat chunks like a plain provider."""
    config = chat_config()
    provider = _get_fallback_provider([config])
    await _run_test_chat_stream(resp, provider)
@pytest.mark.asyncio
async def test_fallback_chat_stream_with_fallback():
    """Streaming chat falls back to the second provider when the first fails."""
    config1 = chat_config()
    config2 = chat_config()
    config2["name"] = "chat-fallback"
    provider = _get_fallback_provider([config1, config2])
    resp = chat_stream_response()
    mock_client = mock_http_client(MockAsyncStreamingResponse(resp))
    # Mock the first provider to fail and second to succeed
    with mock.patch("aiohttp.ClientSession") as mock_session:
        mock_session.return_value = mock_client
        mock_client.post.side_effect = [
            Exception("First provider failed"),
            MockAsyncStreamingResponse(resp),
        ]
        payload = chat.RequestPayload(
            messages=[{"role": "user", "content": "Tell me a joke"}],
            temperature=0.5,
            stream=True,
        )
        chunks = [chunk async for chunk in provider.chat_stream(payload)]
        assert len(chunks) > 0
        assert mock_client.post.call_count == 2


@pytest.mark.parametrize("resp", [completions_response(), chat_response()])
@pytest.mark.asyncio
async def test_fallback_completions(resp):
    """Single-provider fallback supports the completions route."""
    config = completions_config()
    provider = _get_fallback_provider([config])
    await _run_test_completions(resp, provider)


@pytest.mark.parametrize(
    "resp", [completions_stream_response(), completions_stream_response_incomplete()]
)
@pytest.mark.asyncio
async def test_fallback_completions_stream(resp):
    """Single-provider fallback supports streaming completions."""
    config = completions_config()
    provider = _get_fallback_provider([config])
    await _run_test_completions_stream(resp, provider)


@pytest.mark.asyncio
async def test_fallback_embeddings():
    """Single-provider fallback supports the embeddings route."""
    config = embedding_config()
    provider = _get_fallback_provider([config])
    await _run_test_embeddings(provider)
@pytest.mark.asyncio
async def test_fallback_embeddings_with_fallback():
    """The second provider should serve embeddings when the first one errors."""
    primary_cfg = embedding_config()
    secondary_cfg = embedding_config()
    secondary_cfg["name"] = "embeddings-fallback"
    provider = _get_fallback_provider([primary_cfg, secondary_cfg])
    embedding_response = {
        "object": "list",
        "data": [
            {
                "object": "embedding",
                "embedding": [0.1, 0.2, 0.3],
                "index": 0,
            }
        ],
        "model": "text-embedding-ada-002",
        "usage": {
            "prompt_tokens": 8,
            "total_tokens": 8,
        },
    }
    mock_client = mock_http_client(MockAsyncResponse(embedding_response))
    with mock.patch("aiohttp.ClientSession", return_value=mock_client):
        # First POST raises; the retry against the fallback provider succeeds.
        mock_client.post.side_effect = [
            Exception("First provider failed"),
            MockAsyncResponse(embedding_response),
        ]
        result = await provider.embeddings(embeddings.RequestPayload(input="Test input"))
        assert result.data[0].embedding == [0.1, 0.2, 0.3]
        # Two attempts total: the failure plus the successful fallback.
        assert mock_client.post.call_count == 2
@pytest.mark.asyncio
async def test_fallback_provider_empty_providers():
    # Constructing a fallback provider with no providers is rejected outright.
    with pytest.raises(Exception, match="must contain at least one provider"):
        FallbackProvider(providers=[], max_attempts=None, strategy=FallbackStrategy.SEQUENTIAL)
@pytest.mark.asyncio
async def test_fallback_provider_passthrough():
    """Passthrough requests should be forwarded and their JSON body returned."""
    provider = _get_fallback_provider([chat_config()])
    mock_client = mock_http_client(MockAsyncResponse({"id": "test-id", "result": "success"}))
    with mock.patch("aiohttp.ClientSession", return_value=mock_client):
        from mlflow.gateway.providers.base import PassthroughAction

        result = await provider.passthrough(
            PassthroughAction.OPENAI_CHAT,
            {"messages": [{"role": "user", "content": "Hello"}]},
            headers={
                "X-Custom-Header": "custom-value",
                "X-Request-ID": "req-123",
                "host": "example.com",
                "content-length": "100",
            },
        )
        assert result["result"] == "success"
@pytest.mark.parametrize(
    ("exception", "expected_status"),
    [
        pytest.param(
            AIGatewayException(status_code=503, detail="Service unavailable"),
            503,
        ),
        pytest.param(
            HTTPException(status_code=403, detail="Forbidden"),
            403,
        ),
    ],
)
@pytest.mark.asyncio
async def test_fallback_provider_propagates_http_status(exception, expected_status):
    """When every provider fails, the final error keeps the last HTTP status."""
    primary_cfg = chat_config()
    secondary_cfg = chat_config()
    secondary_cfg["name"] = "chat-fallback"
    provider = _get_fallback_provider([primary_cfg, secondary_cfg])
    with mock.patch("aiohttp.ClientSession") as session_patch:
        mock_client = mock_http_client(MockAsyncResponse({}))
        session_patch.return_value = mock_client
        # First attempt raises a generic error, second raises the HTTP-style one.
        mock_client.post.side_effect = [
            Exception("First provider failed"),
            exception,
        ]
        payload = chat.RequestPayload(
            messages=[{"role": "user", "content": "Tell me a joke"}],
            temperature=0.5,
        )
        with pytest.raises(Exception, match="All 2 fallback attempts failed") as exc_info:
            await provider.chat(payload)
        assert exc_info.value.status_code == expected_status
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/gateway/providers/test_fallback.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/job.py | """Huey job functions for async scorer invocation."""
import logging
import os
import random
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import nullcontext
from dataclasses import asdict, dataclass, field
from typing import Any
from mlflow.entities import Trace
from mlflow.environment_variables import (
_MLFLOW_INTERNAL_GATEWAY_AUTH_TOKEN,
MLFLOW_ENABLE_WORKSPACES,
MLFLOW_GENAI_EVAL_MAX_WORKERS,
MLFLOW_SERVER_JUDGE_INVOKE_MAX_WORKERS,
MLFLOW_SERVER_ONLINE_SCORING_MAX_WORKERS,
MLFLOW_SERVER_SCORER_INVOKE_BATCH_SIZE,
)
from mlflow.exceptions import MlflowException
from mlflow.genai.evaluation.entities import EvalItem
from mlflow.genai.evaluation.harness import _compute_eval_scores, _log_assessments
from mlflow.genai.evaluation.session_utils import (
evaluate_session_level_scorers,
get_first_trace_in_session,
)
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.online import (
OnlineScorer,
OnlineScoringConfig,
OnlineSessionScoringProcessor,
OnlineTraceScoringProcessor,
)
from mlflow.server.handlers import _get_tracking_store
from mlflow.server.jobs import job, submit_job
from mlflow.store.tracking.abstract_store import AbstractStore
from mlflow.tracing.constant import TraceMetadataKey
from mlflow.utils.workspace_context import WorkspaceContext
_logger = logging.getLogger(__name__)
# Constants for job names that are referenced in multiple locations
ONLINE_TRACE_SCORER_JOB_NAME = "run_online_trace_scorer"
ONLINE_SESSION_SCORER_JOB_NAME = "run_online_session_scorer"
@dataclass
class ScorerFailure:
    """Record of a single scorer error, serialized into job results."""

    # Identifier for the failure; callers in this module use the exception class name.
    error_code: str
    # Human-readable description of what went wrong.
    error_message: str
@dataclass
class TraceResult:
    """Per-trace outcome of a scorer invocation."""

    # Assessment dictionaries produced by the scorer (may be empty).
    assessments: list[Any] = field(default_factory=list)
    # Failures encountered while scoring this trace (may be empty).
    failures: list[ScorerFailure] = field(default_factory=list)
def _extract_failures_from_feedbacks(feedbacks: list[Any]) -> list[ScorerFailure]:
    """Collect a ScorerFailure for every feedback that carries an error."""
    failures: list[ScorerFailure] = []
    for fb in feedbacks:
        if not fb.error:
            continue
        failures.append(
            ScorerFailure(
                error_code=fb.error.error_code,
                error_message=fb.error.error_message,
            )
        )
    return failures
@job(
    name=ONLINE_TRACE_SCORER_JOB_NAME,
    max_workers=MLFLOW_SERVER_ONLINE_SCORING_MAX_WORKERS.get(),
    exclusive=["experiment_id"],
)
def run_online_trace_scorer_job(
    experiment_id: str,
    online_scorers: list[dict[str, Any]],
) -> None:
    """
    Job that fetches samples of individual traces and runs scorers on them.

    The job is exclusive per ``experiment_id``: two jobs targeting the same
    experiment never run concurrently, even with different scorer sets, which
    keeps checkpoint management consistent.

    Args:
        experiment_id: The experiment ID to fetch traces from.
        online_scorers: List of OnlineScorer dicts specifying which scorers to run.
    """
    # Rehydrate OnlineScorer objects from their dict form.
    scorers = []
    for raw in online_scorers:
        scorers.append(
            OnlineScorer(
                name=raw["name"],
                serialized_scorer=raw["serialized_scorer"],
                online_config=OnlineScoringConfig(**raw["online_config"]),
            )
        )
    store = _get_tracking_store()
    OnlineTraceScoringProcessor.create(experiment_id, scorers, store).process_traces()
@job(
    name=ONLINE_SESSION_SCORER_JOB_NAME,
    max_workers=MLFLOW_SERVER_ONLINE_SCORING_MAX_WORKERS.get(),
    exclusive=["experiment_id"],
)
def run_online_session_scorer_job(
    experiment_id: str,
    online_scorers: list[dict[str, Any]],
) -> None:
    """
    Job that finds completed sessions and runs session-level scorers on them.

    The job is exclusive per ``experiment_id``: two jobs targeting the same
    experiment never run concurrently, even with different scorer sets, which
    keeps checkpoint management consistent.

    Args:
        experiment_id: The experiment ID to fetch sessions from.
        online_scorers: List of OnlineScorer dicts specifying which scorers to run.
    """
    # Rehydrate OnlineScorer objects from their dict form.
    scorers = []
    for raw in online_scorers:
        scorers.append(
            OnlineScorer(
                name=raw["name"],
                serialized_scorer=raw["serialized_scorer"],
                online_config=OnlineScoringConfig(**raw["online_config"]),
            )
        )
    store = _get_tracking_store()
    OnlineSessionScoringProcessor.create(experiment_id, scorers, store).process_sessions()
@job(name="invoke_scorer", max_workers=MLFLOW_SERVER_JUDGE_INVOKE_MAX_WORKERS.get())
def invoke_scorer_job(
    experiment_id: str,
    serialized_scorer: str,
    trace_ids: list[str],
    log_assessments: bool = True,
    username: str | None = None,
) -> dict[str, Any]:
    """
    Huey job function for async scorer invocation.

    Delegates to the evaluation-harness helpers so async results match
    synchronous evaluation feature-for-feature.

    Args:
        experiment_id: The experiment ID for the traces.
        serialized_scorer: JSON string of the serialized scorer.
        trace_ids: List of trace IDs to evaluate.
        log_assessments: Whether to log assessments to the traces.
        username: The authenticated user who triggered the job, propagated to
            gateway requests so they are authorised as this user.

    Returns:
        Dict mapping trace_id to TraceResult (assessments and failures).
    """
    # Propagate the original user identity to gateway requests. These env vars
    # are read by get_gateway_litellm_config() and encoded into a Basic auth
    # header so the auth middleware can authenticate as the correct user.
    if username is not None:
        os.environ["MLFLOW_TRACKING_USERNAME"] = username
        if internal_token := _MLFLOW_INTERNAL_GATEWAY_AUTH_TOKEN.get():
            os.environ["MLFLOW_TRACKING_PASSWORD"] = internal_token

    scorer = Scorer.model_validate_json(serialized_scorer)
    store = _get_tracking_store()
    # Session-level scorers evaluate the traces as one conversation;
    # single-turn scorers evaluate each trace independently.
    if scorer.is_session_level_scorer:
        per_trace = _run_session_scorer(scorer, trace_ids, store, log_assessments)
    else:
        per_trace = _run_single_turn_scorer_batch(scorer, trace_ids, store, log_assessments)
    return {tid: asdict(res) for tid, res in per_trace.items()}
def _fetch_traces_batch(
    trace_ids: list[str],
    tracking_store: AbstractStore,
) -> dict[str, Trace]:
    """
    Fetch traces in batch and return a mapping.

    Args:
        trace_ids: List of trace IDs to fetch.
        tracking_store: The tracking store instance.

    Returns:
        Dict mapping trace_id to Trace.

    Raises:
        MlflowException: If any trace IDs are not found.
    """
    fetched = {trace.info.trace_id: trace for trace in tracking_store.batch_get_traces(trace_ids)}
    missing_ids = [tid for tid in trace_ids if tid not in fetched]
    if missing_ids:
        raise MlflowException(f"Traces not found: {missing_ids}")
    return fetched
def _run_session_scorer(
    scorer: Any,
    trace_ids: list[str],
    tracking_store: AbstractStore,
    log_assessments: bool,
) -> dict[str, TraceResult]:
    """
    Run a session-level scorer on all traces as a conversation.

    Delegates to evaluate_session_level_scorers from the evaluation harness;
    resulting feedbacks are attached to the session's first trace only.

    Args:
        scorer: The scorer instance.
        trace_ids: List of trace IDs (representing a session).
        tracking_store: The tracking store instance.
        log_assessments: Whether to log assessments to the traces.

    Returns:
        Dict mapping trace_id to TraceResult.
    """
    trace_map = _fetch_traces_batch(trace_ids, tracking_store)
    # Keep the caller-specified trace ordering.
    ordered_traces = [trace_map[tid] for tid in trace_ids]
    session_items = [EvalItem.from_trace(trace) for trace in ordered_traces]

    # The session id is read from the metadata of the session's first trace.
    first_item = get_first_trace_in_session(session_items)
    metadata = first_item.trace.info.trace_metadata or {}
    session_id = metadata.get(TraceMetadataKey.TRACE_SESSION)
    if not session_id:
        raise MlflowException(
            "Session-level scorer requires traces with session metadata. "
            f"Trace {first_item.trace.info.trace_id} is missing "
            f"'{TraceMetadataKey.TRACE_SESSION}' in its metadata."
        )

    first_trace = first_item.trace
    first_trace_id = first_trace.info.trace_id
    try:
        scores = evaluate_session_level_scorers(
            session_id=session_id,
            session_items=session_items,
            multi_turn_scorers=[scorer],
        )
        # scores is {first_trace_id: [feedbacks]}
        feedbacks = scores[first_trace_id]
        failures = _extract_failures_from_feedbacks(feedbacks)
        if log_assessments and feedbacks:
            _log_assessments(
                run_id=None,  # No MLflow run context in API path
                trace=first_trace,
                assessments=feedbacks,
            )
        return {
            first_trace_id: TraceResult(
                assessments=[fb.to_dictionary() for fb in feedbacks],
                failures=failures,
            )
        }
    except Exception as e:
        return {
            first_trace_id: TraceResult(
                failures=[ScorerFailure(error_code=type(e).__name__, error_message=str(e))]
            )
        }
def _run_single_turn_scorer_batch(
    scorer: Any,
    trace_ids: list[str],
    tracking_store: AbstractStore,
    log_assessments: bool,
) -> dict[str, TraceResult]:
    """
    Run a single-turn scorer on each trace in parallel.

    Args:
        scorer: The scorer instance.
        trace_ids: List of trace IDs to evaluate.
        tracking_store: The tracking store instance.
        log_assessments: Whether to log assessments to the traces.

    Returns:
        Dict mapping trace_id to TraceResult.
    """
    trace_map = _fetch_traces_batch(trace_ids, tracking_store)

    def score_one(trace_id: str, trace: Trace) -> tuple[str, TraceResult]:
        # NOTE: from_trace is intentionally outside the try so conversion
        # errors propagate and fail the job, matching synchronous behavior.
        eval_item = EvalItem.from_trace(trace)
        try:
            # _compute_eval_scores comes from the harness: it supports scorer
            # tracing and captures stack traces on errors.
            feedbacks = _compute_eval_scores(
                eval_item=eval_item,
                scorers=[scorer],
            )
            failures = _extract_failures_from_feedbacks(feedbacks)
            if log_assessments and feedbacks:
                _log_assessments(
                    run_id=None,  # No MLflow run context in API path
                    trace=trace,
                    assessments=feedbacks,
                )
            return trace_id, TraceResult(
                assessments=[fb.to_dictionary() for fb in feedbacks],
                failures=failures,
            )
        except Exception as e:
            return trace_id, TraceResult(
                failures=[ScorerFailure(error_code=type(e).__name__, error_message=str(e))]
            )

    results: dict[str, TraceResult] = {}
    pool_size = min(len(trace_map), MLFLOW_GENAI_EVAL_MAX_WORKERS.get())
    with ThreadPoolExecutor(
        max_workers=pool_size,
        thread_name_prefix="MlflowScorerInvoke",
    ) as pool:
        pending = [pool.submit(score_one, tid, trace) for tid, trace in trace_map.items()]
        for fut in as_completed(pending):
            trace_id, trace_result = fut.result()
            results[trace_id] = trace_result
    return results
def _group_traces_by_session_id(
    trace_ids: list[str],
    tracking_store: AbstractStore,
) -> dict[str, list[str]]:
    """
    Group trace_ids by their session_id metadata.

    Fetches trace info from the tracking store and groups them by session_id.
    Traces without a session_id are skipped.

    Args:
        trace_ids: List of trace IDs to group.
        tracking_store: The tracking store instance.

    Returns:
        Dictionary mapping session_id to list of trace_ids, sorted by timestamp.
    """
    # defaultdict (already used elsewhere in this module) replaces the manual
    # "if key not in dict" initialization dance.
    session_groups: dict[str, list[str]] = defaultdict(list)
    # trace_id -> timestamp_ms, used to order traces within each session.
    timestamps: dict[str, int | None] = {}
    for trace_info in tracking_store.batch_get_trace_infos(trace_ids):
        trace_metadata = trace_info.trace_metadata or {}
        if session_id := trace_metadata.get(TraceMetadataKey.TRACE_SESSION):
            session_groups[session_id].append(trace_info.trace_id)
            timestamps[trace_info.trace_id] = trace_info.timestamp_ms
    # Sort trace_ids within each session by trace timestamp (None timestamps sort last).
    for tids in session_groups.values():
        tids.sort(key=lambda tid: timestamps.get(tid) or float("inf"))
    # Return a plain dict so callers don't accidentally rely on default insertion.
    return dict(session_groups)
def get_trace_batches_for_scorer(
    trace_ids: list[str],
    scorer: Scorer,
    tracking_store: AbstractStore,
) -> list[list[str]]:
    """
    Get trace ID batches for scorer invocation.

    Handles batching logic for both session-level and single-turn scorers:
    - Session-level scorers: Groups traces by session_id, returns one batch per session.
    - Single-turn scorers: Batches traces based on MLFLOW_SERVER_SCORER_INVOKE_BATCH_SIZE.

    Args:
        trace_ids: List of trace IDs to evaluate.
        scorer: The validated Scorer instance.
        tracking_store: The tracking store instance.

    Returns:
        List of trace ID batches, where each batch should be submitted as a separate job.
    """
    if scorer.is_session_level_scorer:
        # One batch per conversation session.
        return list(_group_traces_by_session_id(trace_ids, tracking_store).values())
    # Fixed-size batches for single-turn judges.
    size = MLFLOW_SERVER_SCORER_INVOKE_BATCH_SIZE.get()
    return [trace_ids[start : start + size] for start in range(0, len(trace_ids), size)]
def run_online_scoring_scheduler() -> None:
    """
    Periodic task that fetches active online scorers and submits scoring jobs.

    Groups scorers by experiment_id and submits two jobs per experiment:
    1. Trace-level scoring job for single-turn scorers
    2. Session-level scoring job for session scorers

    Groups are shuffled to prevent starvation when there are limited job runners available.
    """
    tracking_store = _get_tracking_store()
    # Iterate on each workspaces in the tracking store to run all registered scorers
    for workspace_ctx in _get_online_scoring_workspace_contexts():
        with workspace_ctx as workspace:
            online_scorers = tracking_store.get_active_online_scorers()
            workspace_label = f" in workspace '{workspace}'" if workspace else ""
            _logger.debug(
                f"Online scoring scheduler found {len(online_scorers)} active scorers"
                f"{workspace_label}"
            )
            scorers_by_experiment: dict[str, list[OnlineScorer]] = defaultdict(list)
            for scorer in online_scorers:
                scorers_by_experiment[scorer.online_config.experiment_id].append(scorer)
            # Shuffle configs randomly to prevent scorer starvation when there are
            # limited job runners available
            experiment_groups = list(scorers_by_experiment.items())
            random.shuffle(experiment_groups)
            _logger.debug(
                f"Grouped into {len(experiment_groups)} experiments, submitting jobs per experiment"
            )
            for experiment_id, scorers in experiment_groups:
                # Separate scorers by type
                session_level_scorers = []
                trace_level_scorers = []
                for scorer in scorers:
                    try:
                        # Deserialize only to decide trace-level vs session-level routing.
                        scorer_obj = Scorer.model_validate_json(scorer.serialized_scorer)
                        if scorer_obj.is_session_level_scorer:
                            session_level_scorers.append(scorer)
                        else:
                            trace_level_scorers.append(scorer)
                    except Exception as e:
                        # A scorer that fails to deserialize is skipped rather than
                        # blocking scoring for the rest of the experiment.
                        _logger.warning(
                            f"Failed to load scorer '{scorer.name}'; scorer will be skipped: {e}"
                        )
                # Only submit jobs for scorer types that exist
                if trace_level_scorers:
                    _logger.debug(
                        f"Submitting trace scoring job for experiment {experiment_id} "
                        f"with {len(trace_level_scorers)} scorers"
                    )
                    trace_scorer_dicts = [asdict(scorer) for scorer in trace_level_scorers]
                    submit_job(
                        run_online_trace_scorer_job,
                        {"experiment_id": experiment_id, "online_scorers": trace_scorer_dicts},
                    )
                if session_level_scorers:
                    _logger.debug(
                        f"Submitting session scoring job for experiment {experiment_id} "
                        f"with {len(session_level_scorers)} scorers"
                    )
                    session_scorer_dicts = [asdict(scorer) for scorer in session_level_scorers]
                    submit_job(
                        run_online_session_scorer_job,
                        {"experiment_id": experiment_id, "online_scorers": session_scorer_dicts},
                    )
def _get_online_scoring_workspace_contexts():
    """Return one context manager per workspace to score (a nullcontext when
    workspaces are disabled, an empty list when none exist)."""
    if not MLFLOW_ENABLE_WORKSPACES.get():
        return [nullcontext()]
    from mlflow.server.workspace_helpers import _get_workspace_store  # avoid circular import

    workspaces = list(_get_workspace_store().list_workspaces())
    if not workspaces:
        _logger.info("Online scoring scheduler found no workspaces; skipping.")
        return []
    return [WorkspaceContext(ws.name) for ws in workspaces]
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/job.py",
"license": "Apache License 2.0",
"lines": 417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/server/jobs/test_scorer_invocation.py | """
E2E integration tests for async scorer invocation via the MLflow server.
These tests spin up a real MLflow server with job execution enabled and test
the full flow of invoking scorers on traces asynchronously.
The MLflow AI Gateway is mocked to avoid real LLM calls during testing.
"""
import json
import os
import signal
import subprocess
import sys
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Literal
import pytest
import requests
import mlflow
from mlflow.genai.judges import make_judge
from mlflow.tracing.assessment import log_expectation
pytestmark = pytest.mark.skipif(
os.name == "nt", reason="MLflow job execution is not supported on Windows"
)
class MockGatewayHandler(BaseHTTPRequestHandler):
    """Mock handler for the MLflow gateway chat-completions endpoint.

    The ``model`` name in the request body (e.g. ``mock-single-turn``,
    ``mock-conversation``) selects which canned response behavior to apply.
    """

    def do_POST(self):
        # Parse the JSON chat-completions request body.
        content_length = int(self.headers.get("Content-Length", 0))
        body = json.loads(self.rfile.read(content_length))
        model = body.get("model", "")
        messages = body.get("messages", [])
        # Handlers below match substrings against the stringified message list.
        prompt_text = str(messages)
        tools = body.get("tools", [])
        # Route based on model/endpoint name for explicit behavior selection
        if model == "mock-agentic":
            # Agentic scorers ({{trace}} template) must use tools to fetch trace data
            response = self._handle_agentic_request(tools)
            if response is None:
                return  # Error already sent
        elif model == "mock-conversation":
            response = self._handle_conversation_request(prompt_text)
            if response is None:
                return  # Error already sent
        elif model == "mock-safety":
            response = self._make_response("yes", "Content is safe")
        elif model == "mock-single-turn":
            response = self._handle_single_turn_request(prompt_text)
            if response is None:
                return  # Error already sent
        else:
            self._send_error(f"Unknown model: {model}")
            return
        response_body = json.dumps(response).encode()
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", str(len(response_body)))
        self.end_headers()
        self.wfile.write(response_body)

    def _handle_conversation_request(self, prompt_text: str) -> dict[str, Any] | None:
        """Handle conversation scorer requests, returning different responses per session."""
        prompt_lower = prompt_text.lower()
        # Session 1: "Hello, how are you?" / "What's your name?"
        # Session 2: "What's the weather?" / "Thanks!"
        has_session_1 = "hello" in prompt_lower and (
            "name" in prompt_lower or "assistant" in prompt_lower
        )
        has_session_2 = "weather" in prompt_lower and "thanks" in prompt_lower
        if has_session_1:
            return self._make_response("Good", "Session 1: Good conversation")
        elif has_session_2:
            return self._make_response("Average", "Session 2: Average conversation")
        else:
            self._send_error(
                "Conversation content not found. Expected session 1 (hello/name) "
                f"or session 2 (weather/thanks). Got: {prompt_text[:500]}"
            )
            return None

    def _handle_agentic_request(self, tools: list[dict[str, Any]]) -> dict[str, Any] | None:
        """Handle agentic scorer requests with validation that tools are present."""
        if not tools:
            self._send_error("Agentic scorer requests must include tools for trace data fetching")
            return None
        return self._make_response("3", "Counted 3 spans")

    def _handle_single_turn_request(self, prompt_text: str) -> dict[str, Any] | None:
        """Handle single-turn scorer requests with validation."""
        prompt_lower = prompt_text.lower()
        # The judge prompt must embed the trace's inputs/outputs and expectations.
        if "what is" not in prompt_lower or "the answer is" not in prompt_lower:
            self._send_error(f"Trace inputs/outputs not found in prompt: {prompt_text[:500]}")
            return None
        if "expected_answer" not in prompt_lower:
            self._send_error(f"Expectations not found in prompt: {prompt_text[:500]}")
            return None
        return self._make_response("Yes", "Mock response")

    def _make_response(self, result: str, rationale: str) -> dict[str, Any]:
        """Build an OpenAI-style chat completion whose content is the judge JSON."""
        return {
            "id": "chatcmpl-mock",
            "object": "chat.completion",
            "created": 1234567890,
            "model": "gpt-4o-mini",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": json.dumps({"result": result, "rationale": rationale}),
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
        }

    def _send_error(self, message: str):
        """Send a 400 response with an OpenAI-style error payload."""
        error_response = {"error": {"message": message, "type": "invalid_request_error"}}
        response_body = json.dumps(error_response).encode()
        self.send_response(400)
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", str(len(response_body)))
        self.end_headers()
        self.wfile.write(response_body)

    def log_message(self, format, *args):
        pass  # Suppress logging
class Client:
    """Thin HTTP wrapper around the MLflow server's scorer/job endpoints."""

    def __init__(self, server_url: str):
        self.server_url = server_url

    def invoke_scorer(
        self,
        experiment_id: str,
        serialized_scorer: str,
        trace_ids: list[str],
        log_assessments: bool = False,
    ) -> dict[str, Any]:
        """POST a scorer-invocation request and return the parsed response."""
        response = requests.post(
            f"{self.server_url}/ajax-api/3.0/mlflow/scorer/invoke",
            json={
                "experiment_id": experiment_id,
                "serialized_scorer": serialized_scorer,
                "trace_ids": trace_ids,
                "log_assessments": log_assessments,
            },
        )
        if not response.ok:
            raise AssertionError(
                f"invoke_scorer failed with status {response.status_code}: {response.text}"
            )
        return response.json()

    def get_job(self, job_id: str) -> dict[str, Any]:
        """Fetch the current state of a job."""
        response = requests.get(f"{self.server_url}/ajax-api/3.0/jobs/{job_id}")
        response.raise_for_status()
        return response.json()

    def wait_job(self, job_id: str, timeout: float = 30) -> dict[str, Any]:
        """Poll until the job reaches a terminal state or the timeout expires."""
        deadline = time.time() + timeout
        while time.time() <= deadline:
            job_json = self.get_job(job_id)
            if job_json["status"] in ("SUCCEEDED", "FAILED", "TIMEOUT"):
                return job_json
            time.sleep(0.5)
        raise TimeoutError("The job did not complete within the timeout.")

    def wait_job_succeeded(self, job_id: str) -> dict[str, Any]:
        """Wait for the job and assert it finished successfully."""
        final_state = self.wait_job(job_id)
        assert final_state["status"] == "SUCCEEDED", f"Job failed: {final_state}"
        return final_state
@pytest.fixture(scope="module")
def client(tmp_path_factory: pytest.TempPathFactory, mock_gateway_server: str) -> Client:
    """Start an MLflow server with job execution enabled for scorer invocation."""
    from tests.helper_functions import get_safe_port

    tmp_path = tmp_path_factory.mktemp("scorer_job_server")
    backend_store_uri = f"sqlite:///{tmp_path / 'mlflow.db'}"
    port = get_safe_port()
    # start_new_session=True puts the server in its own process group so the
    # whole group (server + workers) can be killed on teardown.
    with subprocess.Popen(
        [
            sys.executable,
            "-m",
            "mlflow",
            "server",
            "-h",
            "127.0.0.1",
            "-p",
            str(port),
            "--backend-store-uri",
            backend_store_uri,
        ],
        env={
            **os.environ,
            "MLFLOW_SERVER_ENABLE_JOB_EXECUTION": "true",
            # Register the scorer invoke job function
            "_MLFLOW_SUPPORTED_JOB_FUNCTION_LIST": ("mlflow.genai.scorers.job.invoke_scorer_job"),
            "_MLFLOW_ALLOWED_JOB_NAME_LIST": "invoke_scorer",
            # Point gateway calls to our mock server
            "MLFLOW_GATEWAY_URI": mock_gateway_server,
            # Set batch size to 2 for testing job batching behavior
            "MLFLOW_SERVER_SCORER_INVOKE_BATCH_SIZE": "2",
        },
        start_new_session=True,
    ) as server_proc:
        try:
            # Wait for job runner to start
            time.sleep(10)
            # Wait for server to be healthy
            deadline = time.time() + 15
            while time.time() < deadline:
                time.sleep(1)
                try:
                    resp = requests.get(f"http://127.0.0.1:{port}/health")
                except requests.ConnectionError:
                    # Server not accepting connections yet; keep polling.
                    continue
                if resp.status_code == 200:
                    break
            else:
                raise TimeoutError("Server did not report healthy within 15 seconds")
            yield Client(f"http://127.0.0.1:{port}")
        finally:
            # Kill the entire process group to take down worker processes too.
            os.killpg(server_proc.pid, signal.SIGKILL)
@pytest.fixture
def experiment_with_traces(client: Client):
    """Create an experiment with traces for testing, including expectations."""
    mlflow.set_tracking_uri(client.server_url)
    experiment_id = mlflow.create_experiment(f"test_scorer_job_{time.time()}")
    mlflow.set_experiment(experiment_id=experiment_id)

    trace_ids = []
    for i in range(3):
        with mlflow.start_span(name=f"test_span_{i}") as span:
            span.set_inputs({"question": f"What is {i} + {i}?"})
            span.set_outputs(f"The answer is {i + i}")
            trace_ids.append(span.trace_id)
        # Attach the ground-truth expectation consumed by the judge prompt.
        log_expectation(
            trace_id=trace_ids[-1],
            name="expected_answer",
            value=str(i + i),
        )
    return experiment_id, trace_ids
@pytest.fixture(scope="module")
def mock_gateway_server():
    """Start a mock server that handles gateway chat completion requests."""
    from tests.helper_functions import get_safe_port

    port = get_safe_port()
    httpd = HTTPServer(("127.0.0.1", port), MockGatewayHandler)
    serve_thread = threading.Thread(target=httpd.serve_forever)
    serve_thread.daemon = True
    serve_thread.start()
    yield f"http://127.0.0.1:{port}"
    httpd.shutdown()
def test_invoke_scorer_basic(client: Client, experiment_with_traces):
    experiment_id, trace_ids = experiment_with_traces
    judge = make_judge(
        name="answer_quality",
        instructions="Input: {{ inputs }}\nOutput: {{ outputs }}\nExpected: {{ expectations }}",
        model="gateway:/mock-single-turn",
        feedback_value_type=Literal["Yes", "No"],
    )
    response = client.invoke_scorer(
        experiment_id=experiment_id,
        serialized_scorer=json.dumps(judge.model_dump()),
        trace_ids=trace_ids,
    )

    # 3 traces with batch size 2 -> 2 jobs (sizes [2, 1])
    jobs = response["jobs"]
    assert len(jobs) == 2
    assert sorted(len(job_info["trace_ids"]) for job_info in jobs) == [1, 2]
    # Every requested trace must appear in exactly one job.
    assert {tid for job_info in jobs for tid in job_info["trace_ids"]} == set(trace_ids)

    # Each job should succeed with a "Yes" assessment per trace and no failures.
    for job_info in jobs:
        job_result = client.wait_job_succeeded(job_info["job_id"])["result"]
        for trace_id in job_info["trace_ids"]:
            assert job_result[trace_id]["failures"] == []
            assessment = job_result[trace_id]["assessments"][0]
            assert assessment["assessment_name"] == "answer_quality"
            assert assessment["feedback"]["value"] == "Yes"
def test_invoke_scorer_missing_trace(client: Client, experiment_with_traces):
    experiment_id, _ = experiment_with_traces
    judge = make_judge(
        name="answer_quality",
        instructions="Input: {{ inputs }}\nOutput: {{ outputs }}",
        model="gateway:/mock-single-turn",
        feedback_value_type=Literal["Yes", "No"],
    )
    response = client.invoke_scorer(
        experiment_id=experiment_id,
        serialized_scorer=json.dumps(judge.model_dump()),
        trace_ids=["tr-does-not-exist-00000000000000"],
    )
    # The job fails outright because the trace cannot be fetched.
    job_json = client.wait_job(response["jobs"][0]["job_id"])
    assert job_json["status"] == "FAILED"
    assert "Traces not found" in job_json["result"]
@pytest.fixture
def experiment_with_agentic_trace(client: Client):
    """Create an experiment with a multi-span trace for agentic scorer testing."""
    mlflow.set_tracking_uri(client.server_url)
    experiment_id = mlflow.create_experiment(f"test_agentic_scorer_{time.time()}")
    mlflow.set_experiment(experiment_id=experiment_id)

    # Simulate an agentic workflow: one root span with two tool-call children.
    with mlflow.start_span(name="agent_main") as parent_span:
        parent_span.set_inputs({"query": "What is the weather?"})
        with mlflow.start_span(name="tool_call_1") as first_tool:
            first_tool.set_inputs({"tool": "get_weather"})
            first_tool.set_outputs({"temperature": 72})
        with mlflow.start_span(name="tool_call_2") as second_tool:
            second_tool.set_inputs({"tool": "format_response"})
            second_tool.set_outputs({"message": "It's 72 degrees"})
        parent_span.set_outputs("The weather is 72 degrees")
        trace_id = parent_span.trace_id
    return experiment_id, trace_id
@pytest.fixture
def experiment_with_conversation_traces(client: Client):
    """Create an experiment with conversation traces from two different sessions."""
    mlflow.set_tracking_uri(client.server_url)
    experiment_id = mlflow.create_experiment(f"test_conversation_scorer_{time.time()}")
    mlflow.set_experiment(experiment_id=experiment_id)

    def record_session(session_id: str, span_prefix: str, turns) -> list[str]:
        # Log one trace per conversation turn, all tagged with the session id.
        trace_ids = []
        for turn_idx, (user_msg, assistant_msg) in enumerate(turns):
            with mlflow.start_span(name=f"{span_prefix}_turn_{turn_idx}") as span:
                mlflow.update_current_trace(metadata={"mlflow.trace.session": session_id})
                span.set_inputs({"messages": [{"role": "user", "content": user_msg}]})
                span.set_outputs({"choices": [{"message": {"content": assistant_msg}}]})
                trace_ids.append(span.trace_id)
        return trace_ids

    session_1_id = f"session_1_{time.time()}"
    session_2_id = f"session_2_{time.time()}"
    session_1_trace_ids = record_session(
        session_1_id,
        "session1",
        [
            ("Hello, how are you?", "I'm doing well, thank you!"),
            ("What's your name?", "I'm an AI assistant."),
        ],
    )
    session_2_trace_ids = record_session(
        session_2_id,
        "session2",
        [
            ("What's the weather?", "It's sunny today."),
            ("Thanks!", "You're welcome!"),
        ],
    )
    return {
        "experiment_id": experiment_id,
        "session_1_id": session_1_id,
        "session_1_trace_ids": session_1_trace_ids,
        "session_2_id": session_2_id,
        "session_2_trace_ids": session_2_trace_ids,
    }
def test_invoke_agentic_scorer(client: Client, experiment_with_agentic_trace):
    """An agentic ({{ trace }}) judge is invoked over a single trace end-to-end."""
    experiment_id, trace_id = experiment_with_agentic_trace
    # A {{ trace }} template variable triggers the tool-based (agentic) flow.
    span_counter_judge = make_judge(
        name="span_counter",
        instructions="Count spans in: {{ trace }}",
        model="gateway:/mock-agentic",
        feedback_value_type=Literal["1", "2", "3", "4", "5"],
    )
    invoke_response = client.invoke_scorer(
        experiment_id=experiment_id,
        serialized_scorer=json.dumps(span_counter_judge.model_dump()),
        trace_ids=[trace_id],
    )
    job_id = invoke_response["jobs"][0]["job_id"]
    result = client.wait_job_succeeded(job_id)["result"]
    trace_result = result[trace_id]
    assert trace_result["failures"] == []
    assessment = trace_result["assessments"][0]
    assert assessment["assessment_name"] == "span_counter"
    assert assessment["feedback"]["value"] == "3"
def test_invoke_conversation_scorer(client: Client, experiment_with_conversation_traces):
    """A {{ conversation }} judge groups traces by session into one job per session."""
    fixture = experiment_with_conversation_traces
    session_1_ids = set(fixture["session_1_trace_ids"])
    session_2_ids = set(fixture["session_2_trace_ids"])
    conversation_judge = make_judge(
        name="conversation_quality",
        instructions="Evaluate: {{ conversation }}",
        model="gateway:/mock-conversation",
        feedback_value_type=Literal["Good", "Average", "Poor"],
    )
    response = client.invoke_scorer(
        experiment_id=fixture["experiment_id"],
        serialized_scorer=json.dumps(conversation_judge.model_dump()),
        trace_ids=fixture["session_1_trace_ids"] + fixture["session_2_trace_ids"],
    )
    jobs = response["jobs"]
    # Two sessions -> two jobs, each covering exactly one session's traces.
    assert len(jobs) == 2
    job_trace_sets = [set(job["trace_ids"]) for job in jobs]
    assert session_1_ids in job_trace_sets
    assert session_2_ids in job_trace_sets
    results_by_session = {}
    for job_info in jobs:
        job_result = client.wait_job_succeeded(job_info["job_id"])["result"]
        # Session-level scorers log their single assessment on the first trace only.
        first_trace_id = job_info["trace_ids"][0]
        assert len(job_result) == 1
        assert job_result[first_trace_id]["failures"] == []
        feedback_value = job_result[first_trace_id]["assessments"][0]["feedback"]["value"]
        session_key = "session_1" if set(job_info["trace_ids"]) == session_1_ids else "session_2"
        results_by_session[session_key] = feedback_value
    assert results_by_session == {"session_1": "Good", "session_2": "Average"}
def test_invoke_builtin_safety_scorer(client: Client, experiment_with_traces):
    """A builtin scorer (Safety) round-trips through serialization and invocation."""
    experiment_id, trace_ids = experiment_with_traces
    target_trace = trace_ids[0]
    # Builtin scorers are addressed via `builtin_scorer_class` rather than instructions.
    scorer_payload = {
        "name": "safety",
        "aggregations": [],
        "description": None,
        "mlflow_version": "3.6.0rc0",
        "serialization_version": 1,
        "builtin_scorer_class": "Safety",
        "builtin_scorer_pydantic_data": {"name": "safety", "model": "gateway:/mock-safety"},
        "call_source": None,
        "call_signature": None,
        "original_func_name": None,
        "instructions_judge_pydantic_data": None,
    }
    response = client.invoke_scorer(
        experiment_id=experiment_id,
        serialized_scorer=json.dumps(scorer_payload),
        trace_ids=[target_trace],
    )
    result = client.wait_job_succeeded(response["jobs"][0]["job_id"])["result"]
    assert result[target_trace]["failures"] == []
    assessment = result[target_trace]["assessments"][0]
    assert assessment["assessment_name"] == "safety"
    assert assessment["feedback"]["value"].lower() in ("yes", "no")
def test_invoke_scorer_with_log_assessments(client: Client, experiment_with_traces):
    """With log_assessments=True the produced feedback is persisted on the trace."""
    experiment_id, trace_ids = experiment_with_traces
    target_trace = trace_ids[0]
    quality_judge = make_judge(
        name="answer_quality",
        instructions="Input: {{ inputs }}\nOutput: {{ outputs }}\nExpected: {{ expectations }}",
        model="gateway:/mock-single-turn",
        feedback_value_type=Literal["Yes", "No"],
    )
    response = client.invoke_scorer(
        experiment_id=experiment_id,
        serialized_scorer=json.dumps(quality_judge.model_dump()),
        trace_ids=[target_trace],
        log_assessments=True,
    )
    job_result = client.wait_job(response["jobs"][0]["job_id"])
    assert job_result["status"] == "SUCCEEDED"
    assessment_id = job_result["result"][target_trace]["assessments"][0]["assessment_id"]
    # The assessment reported by the job must also exist on the stored trace.
    stored_trace = mlflow.get_trace(target_trace)
    persisted = next(a for a in stored_trace.info.assessments if a.assessment_id == assessment_id)
    assert persisted.name == "answer_quality"
    assert persisted.value == "Yes"
def test_invoke_scorer_fails_if_any_trace_missing(client: Client, experiment_with_traces):
    """The whole job fails when any requested trace id does not exist."""
    experiment_id, trace_ids = experiment_with_traces
    existing_trace = trace_ids[0]
    missing_trace = "tr-does-not-exist-00000000000000"
    quality_judge = make_judge(
        name="answer_quality",
        instructions="Input: {{ inputs }}\nOutput: {{ outputs }}\nExpected: {{ expectations }}",
        model="gateway:/mock-single-turn",
        feedback_value_type=Literal["Yes", "No"],
    )
    response = client.invoke_scorer(
        experiment_id=experiment_id,
        serialized_scorer=json.dumps(quality_judge.model_dump()),
        trace_ids=[existing_trace, missing_trace],
    )
    # Even though one trace is valid, a single missing trace fails the job.
    job_result = client.wait_job(response["jobs"][0]["job_id"])
    assert job_result["status"] == "FAILED"
    assert "Traces not found" in job_result["result"]
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/test_scorer_invocation.py",
"license": "Apache License 2.0",
"lines": 479,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/models/test_container.py | """
Tests for mlflow.models.container module.
Includes security tests for command injection prevention.
"""
import os
from unittest import mock
import pytest
import yaml
from mlflow.models.container import _install_model_dependencies_to_env
from mlflow.utils import env_manager as em
def _create_model_artifact(model_path, dependencies, build_dependencies=None):
    """Write a minimal MLmodel + python_env.yaml + requirements.txt artifact for tests."""
    mlmodel_content = {
        "flavors": {
            "python_function": {
                "env": {"virtualenv": "python_env.yaml"},
                "loader_module": "mlflow.pyfunc.model",
            }
        }
    }
    python_env_content = {
        "python": "3.12",
        "build_dependencies": build_dependencies or [],
        "dependencies": dependencies,
    }
    with open(os.path.join(model_path, "MLmodel"), "w") as f:
        yaml.dump(mlmodel_content, f)
    # requirements.txt must exist (even empty) for the container install path.
    with open(os.path.join(model_path, "requirements.txt"), "w") as f:
        f.write("")
    with open(os.path.join(model_path, "python_env.yaml"), "w") as f:
        yaml.dump(python_env_content, f)
def test_command_injection_via_semicolon_blocked(tmp_path):
    """A `;`-separated shell command in a dependency must never execute."""
    evidence_file = "/tmp/test_injection_semicolon.txt"
    # Remove stale evidence from any previous failed run so the assert is meaningful.
    if os.path.exists(evidence_file):
        os.remove(evidence_file)
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path,
        dependencies=["numpy; echo INJECTED > /tmp/test_injection_semicolon.txt; #"],
    )
    with pytest.raises(Exception, match="Failed to install model dependencies"):
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    assert not os.path.exists(evidence_file), "Command injection via semicolon succeeded!"
def test_command_injection_via_pipe_blocked(tmp_path):
    """A `|`-piped shell command in a dependency must never execute."""
    evidence_file = "/tmp/test_injection_pipe.txt"
    # Remove stale evidence from any previous failed run so the assert is meaningful.
    if os.path.exists(evidence_file):
        os.remove(evidence_file)
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path,
        dependencies=["numpy | echo INJECTED > /tmp/test_injection_pipe.txt"],
    )
    with pytest.raises(Exception, match="Failed to install model dependencies"):
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    assert not os.path.exists(evidence_file), "Command injection via pipe succeeded!"
def test_command_injection_via_backticks_blocked(tmp_path):
    """A backtick command substitution in a dependency must never execute."""
    evidence_file = "/tmp/test_injection_backtick.txt"
    # Remove stale evidence from any previous failed run so the assert is meaningful.
    if os.path.exists(evidence_file):
        os.remove(evidence_file)
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path,
        dependencies=["`echo INJECTED > /tmp/test_injection_backtick.txt`"],
    )
    with pytest.raises(Exception, match="Failed to install model dependencies"):
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    assert not os.path.exists(evidence_file), "Command injection via backticks succeeded!"
def test_command_injection_via_dollar_parens_blocked(tmp_path):
    """A `$(...)` command substitution in a dependency must never execute."""
    evidence_file = "/tmp/test_injection_dollar.txt"
    # Remove stale evidence from any previous failed run so the assert is meaningful.
    if os.path.exists(evidence_file):
        os.remove(evidence_file)
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path,
        dependencies=["$(echo INJECTED > /tmp/test_injection_dollar.txt)"],
    )
    with pytest.raises(Exception, match="Failed to install model dependencies"):
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    assert not os.path.exists(evidence_file), "Command injection via $() succeeded!"
def test_command_injection_via_ampersand_blocked(tmp_path):
    """A `&&`-chained shell command in a dependency must never execute."""
    evidence_file = "/tmp/test_injection_ampersand.txt"
    # Remove stale evidence from any previous failed run so the assert is meaningful.
    if os.path.exists(evidence_file):
        os.remove(evidence_file)
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path,
        dependencies=["numpy && echo INJECTED > /tmp/test_injection_ampersand.txt"],
    )
    with pytest.raises(Exception, match="Failed to install model dependencies"):
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    assert not os.path.exists(evidence_file), "Command injection via && succeeded!"
def test_legitimate_package_install(tmp_path):
    """A benign dependency installs successfully and reports no failures."""
    model_path = str(tmp_path)
    _create_model_artifact(model_path, dependencies=["pip"], build_dependencies=[])
    assert _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL) == []
def test_requirements_file_reference(tmp_path):
    """A `-r requirements.txt` dependency resolves against the model directory."""
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path, dependencies=["-r requirements.txt"], build_dependencies=["pip"]
    )
    with open(os.path.join(model_path, "requirements.txt"), "w") as f:
        f.write("# empty requirements\n")
    assert _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL) == []
def test_requirements_path_replacement(tmp_path):
    """The relative `-r requirements.txt` is rewritten to an absolute path in list args."""
    model_path = str(tmp_path)
    _create_model_artifact(model_path, dependencies=["-r requirements.txt"])
    with open(os.path.join(model_path, "requirements.txt"), "w") as f:
        f.write("six\n")
    with mock.patch("mlflow.models.container.Popen") as mock_popen:
        mock_popen.return_value.wait.return_value = 0
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    argv = mock_popen.call_args[0][0]
    # Args must be a list (no shell string) with `-r` followed by the absolute path.
    assert isinstance(argv, list), "Should use list args, not shell string"
    assert "-r" in argv
    assert argv[argv.index("-r") + 1] == os.path.join(model_path, "requirements.txt")
def test_no_shell_execution(tmp_path):
    """Dependency installation must not go through a shell."""
    model_path = str(tmp_path)
    _create_model_artifact(model_path, dependencies=["pip"])
    with mock.patch("mlflow.models.container.Popen") as mock_popen:
        mock_popen.return_value.wait.return_value = 0
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    args, kwargs = mock_popen.call_args
    assert isinstance(args[0], list)
    assert kwargs.get("shell") is not True
def test_build_dependencies_processed(tmp_path):
    """Build dependencies are installed alongside regular dependencies."""
    model_path = str(tmp_path)
    _create_model_artifact(
        model_path, dependencies=["pip"], build_dependencies=["setuptools", "wheel"]
    )
    with mock.patch("mlflow.models.container.Popen") as mock_popen:
        mock_popen.return_value.wait.return_value = 0
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    argv = mock_popen.call_args[0][0]
    for expected_package in ("setuptools", "wheel", "pip"):
        assert expected_package in argv
def test_package_name_with_requirements_substring_not_modified(tmp_path):
    """Package names merely containing 'requirements.txt' must not be path-rewritten."""
    model_path = str(tmp_path)
    packages = ["my-requirements.txt-parser", "requirements.txt-tools"]
    _create_model_artifact(model_path, dependencies=packages)
    with mock.patch("mlflow.models.container.Popen") as mock_popen:
        mock_popen.return_value.wait.return_value = 0
        _install_model_dependencies_to_env(model_path, env_manager=em.LOCAL)
    argv = mock_popen.call_args[0][0]
    assert "my-requirements.txt-parser" in argv
    assert "requirements.txt-tools" in argv
    # Only a literal `-r requirements.txt` should receive the model-path prefix.
    assert not any(model_path in arg for arg in argv if "parser" in arg or "tools" in arg)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/models/test_container.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracking/context/jupyter_notebook_context.py | import json
import os
from collections.abc import Generator
from functools import lru_cache
from pathlib import Path
from typing import Any
from urllib.request import urlopen
from mlflow.entities import SourceType
from mlflow.tracking.context.abstract_context import RunContextProvider
from mlflow.utils.databricks_utils import is_running_in_ipython_environment
from mlflow.utils.mlflow_tags import MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE
@lru_cache(maxsize=1)
def _get_notebook_name() -> str | None:
    """
    Attempt to get the current Jupyter notebook name using multiple methods.

    Resolution order: VS Code IPython user namespace, VS Code / generic
    environment variables, then the Jupyter server sessions API. Resolvers
    are invoked lazily so cheaper checks short-circuit the server query.

    Returns:
        The notebook filename if found, None otherwise.
    """
    resolvers = (
        _get_vscode_notebook_path,
        lambda: os.environ.get("__vsc_ipynb_file__"),
        lambda: os.environ.get("IPYNB_FILE"),
        _get_notebook_path_from_sessions,
    )
    for resolve in resolvers:
        if path := resolve():
            return os.path.basename(path)
    return None
def _get_vscode_notebook_path() -> str | None:
    """
    Get notebook path from VS Code's IPython user namespace.

    VS Code's Jupyter extension stores the notebook file path in the
    IPython user namespace under '__vsc_ipynb_file__'.

    Returns:
        The notebook path if found, None otherwise.
    """
    if not is_running_in_ipython_environment():
        return None
    try:
        from IPython import get_ipython

        shell = get_ipython()
        if not shell:
            return None
        return shell.user_ns.get("__vsc_ipynb_file__")
    except Exception:
        return None
def _get_notebook_path_from_sessions() -> str | None:
    """
    Get notebook path by querying Jupyter server sessions.

    This queries running Jupyter servers to find the notebook
    associated with the current kernel.

    Returns:
        The notebook path if found, None otherwise.
    """
    try:
        kernel_id = _get_kernel_id()
        if not kernel_id:
            return None
        for server_info in _get_running_servers():
            # A server that errors out should not stop us trying the others.
            try:
                notebook_path = _get_sessions_notebook(server_info, kernel_id)
            except Exception:
                continue
            if notebook_path:
                return notebook_path
    except Exception:
        pass
    return None
def _get_kernel_id() -> str | None:
"""
Get the current kernel ID from the connection file.
Returns:
The kernel ID string, or None if not found.
"""
try:
import ipykernel
connection_file = Path(ipykernel.get_connection_file()).stem
# Connection file is like: kernel-<uuid>
return connection_file.split("-", 1)[1]
except Exception:
pass
return None
def _get_running_servers() -> Generator[dict[str, Any], None, None]:
    """
    Get list of running Jupyter servers by scanning the runtime directory.

    Yields:
        Server info dictionaries with 'url' and 'token' keys.
    """
    try:
        from jupyter_core.paths import jupyter_runtime_dir

        runtime_dir = Path(jupyter_runtime_dir())
        if not runtime_dir.is_dir():
            return
        candidates = [
            *runtime_dir.glob("nbserver-*.json"),  # jupyter notebook (or lab 2)
            *runtime_dir.glob("jpserver-*.json"),  # jupyterlab 3
        ]
        # Most recently modified server files first.
        for info_file in sorted(candidates, key=os.path.getmtime, reverse=True):
            try:
                with open(info_file) as fh:
                    yield json.load(fh)
            except (json.JSONDecodeError, OSError):
                continue
    except ImportError:
        pass
def _get_sessions_notebook(server: dict[str, Any], kernel_id: str) -> str | None:
    """
    Query a server's sessions API to find the notebook for a kernel.

    Args:
        server: Server info dict with 'url' and optionally 'token'.
        kernel_id: The kernel ID to search for.

    Returns:
        The notebook path if found, None otherwise.
    """
    base_url = server.get("url", "").rstrip("/")
    # Fall back to the JupyterHub-provided token when the server record has none.
    token = server.get("token") or os.environ.get("JUPYTERHUB_API_TOKEN", "")
    sessions_url = f"{base_url}/api/sessions"
    if token:
        sessions_url = f"{sessions_url}?token={token}"
    try:
        with urlopen(sessions_url, timeout=0.5) as response:
            sessions = json.load(response)
        for session in sessions:
            if session.get("kernel", {}).get("id") == kernel_id:
                return session.get("path")
    except Exception:
        pass
    return None
def _is_in_jupyter_notebook() -> bool:
    """
    Check if we're running inside a Jupyter notebook (not just IPython).

    Returns:
        True if we're in a Jupyter notebook environment, False otherwise.
    """
    if not is_running_in_ipython_environment():
        return False
    try:
        from IPython import get_ipython

        shell = get_ipython()
        # ZMQInteractiveShell is the kernel-backed shell Jupyter uses; a
        # `kernel` attribute on any other shell class also indicates a notebook.
        return shell.__class__.__name__ == "ZMQInteractiveShell" or hasattr(shell, "kernel")
    except (ImportError, ModuleNotFoundError):
        return False
class JupyterNotebookRunContext(RunContextProvider):
    """
    Context provider for local Jupyter notebooks.

    This provider sets the source name to the notebook filename and source type
    to NOTEBOOK when running inside a Jupyter notebook environment.
    """

    def in_context(self):
        # Active only when the current process is a Jupyter notebook kernel.
        return _is_in_jupyter_notebook()

    def tags(self):
        run_tags = {MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK)}
        # The source-name tag is added only when the notebook name can be resolved.
        if notebook_name := _get_notebook_name():
            run_tags[MLFLOW_SOURCE_NAME] = notebook_name
        return run_tags
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracking/context/jupyter_notebook_context.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/tracking/context/test_jupyter_notebook_context.py | import json
from unittest import mock
import pytest
from mlflow.entities import SourceType
from mlflow.tracking.context.jupyter_notebook_context import (
JupyterNotebookRunContext,
_get_kernel_id,
_get_notebook_name,
_get_notebook_path_from_sessions,
_get_running_servers,
_get_sessions_notebook,
_get_vscode_notebook_path,
_is_in_jupyter_notebook,
)
from mlflow.utils.mlflow_tags import MLFLOW_SOURCE_NAME, MLFLOW_SOURCE_TYPE
MOCK_NOTEBOOK_NAME = "test_notebook.ipynb"
MOCK_NOTEBOOK_PATH = f"/path/to/{MOCK_NOTEBOOK_NAME}"
MOCK_KERNEL_ID = "abc123-def456"
@pytest.mark.parametrize(
    ("shell_name", "has_kernel", "is_in_ipython", "expected"),
    [
        # ZMQInteractiveShell is the Jupyter kernel shell -> notebook.
        ("ZMQInteractiveShell", False, True, True),
        # Any shell exposing a `kernel` attribute also counts as a notebook.
        ("SomeOtherShell", True, True, True),
        # Plain terminal IPython is not a notebook.
        ("TerminalInteractiveShell", False, True, False),
        # Outside IPython entirely, never a notebook.
        ("AnyShell", False, False, False),
    ],
)
def test_is_in_jupyter_notebook(shell_name, has_kernel, is_in_ipython, expected):
    # Build a fake shell whose class name / kernel attribute drive the detection.
    mock_shell = mock.Mock(spec=["kernel"] if has_kernel else [])
    mock_shell.__class__.__name__ = shell_name
    if has_kernel:
        mock_shell.kernel = mock.Mock()
    mock_ipython = mock.Mock()
    mock_ipython.get_ipython.return_value = mock_shell
    with (
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context.is_running_in_ipython_environment",
            return_value=is_in_ipython,
        ),
        # Make `from IPython import get_ipython` resolve to the mock module.
        mock.patch.dict("sys.modules", {"IPython": mock_ipython}),
    ):
        assert _is_in_jupyter_notebook() is expected
@pytest.mark.parametrize(
    ("user_ns", "is_in_ipython", "expected"),
    [
        # Key present in the user namespace -> path returned.
        ({"__vsc_ipynb_file__": MOCK_NOTEBOOK_PATH}, True, MOCK_NOTEBOOK_PATH),
        # Key absent -> None.
        ({}, True, None),
        # Outside IPython the namespace is never consulted.
        ({"__vsc_ipynb_file__": MOCK_NOTEBOOK_PATH}, False, None),
    ],
)
def test_get_vscode_notebook_path(user_ns, is_in_ipython, expected):
    # Fake shell exposing the given user namespace.
    mock_shell = mock.Mock()
    mock_shell.user_ns = user_ns
    mock_ipython = mock.Mock()
    mock_ipython.get_ipython.return_value = mock_shell
    with (
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context.is_running_in_ipython_environment",
            return_value=is_in_ipython,
        ),
        # Make `from IPython import get_ipython` resolve to the mock module.
        mock.patch.dict("sys.modules", {"IPython": mock_ipython}),
    ):
        assert _get_vscode_notebook_path() == expected
def test_get_kernel_id_success():
    """The kernel id is parsed out of the `kernel-<id>` connection-file stem."""
    fake_ipykernel = mock.Mock()
    fake_ipykernel.get_connection_file.return_value = f"/path/to/kernel-{MOCK_KERNEL_ID}.json"
    with (
        mock.patch.dict("sys.modules", {"ipykernel": fake_ipykernel}),
        mock.patch("mlflow.tracking.context.jupyter_notebook_context.Path") as mock_path,
    ):
        mock_path.return_value.stem = f"kernel-{MOCK_KERNEL_ID}"
        assert _get_kernel_id() == MOCK_KERNEL_ID
def test_get_kernel_id_import_error():
    """Missing ipykernel degrades to None instead of raising."""
    with mock.patch.dict("sys.modules", {"ipykernel": None}):
        assert _get_kernel_id() is None
def test_get_running_servers_finds_servers(tmp_path):
    """A server info file in the runtime dir is discovered and yielded.

    Fix: the original discarded the result of ``list(_get_running_servers())``
    and asserted nothing, so the test could not fail on bad output. The yielded
    server records are now verified against the file contents.
    """
    server_info = {"url": "http://localhost:8888/", "token": "test_token"}
    server_file = tmp_path / "nbserver-12345.json"
    server_file.write_text(json.dumps(server_info))
    mock_jupyter_core = mock.Mock()
    mock_jupyter_core.paths.jupyter_runtime_dir.return_value = str(tmp_path)
    with (
        mock.patch.dict(
            "sys.modules",
            {"jupyter_core": mock_jupyter_core, "jupyter_core.paths": mock_jupyter_core.paths},
        ),
        # Path(...) returns the real tmp_path so glob() finds the server file.
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context.Path",
            return_value=tmp_path,
        ),
    ):
        servers = list(_get_running_servers())
    assert servers == [server_info]
def test_get_running_servers_no_servers(tmp_path):
    # Runtime dir exists but contains no server files -> generator yields nothing.
    mock_jupyter_core = mock.Mock()
    mock_jupyter_core.paths.jupyter_runtime_dir.return_value = str(tmp_path)
    with (
        mock.patch.dict(
            "sys.modules",
            {"jupyter_core": mock_jupyter_core, "jupyter_core.paths": mock_jupyter_core.paths},
        ),
        mock.patch("mlflow.tracking.context.jupyter_notebook_context.Path") as mock_path,
    ):
        # Path(...) is replaced so is_dir()/glob() can be controlled directly.
        mock_path_instance = mock.Mock()
        mock_path_instance.is_dir.return_value = True
        mock_path_instance.glob.return_value = []
        mock_path.return_value = mock_path_instance
        servers = list(_get_running_servers())
        assert servers == []
def test_get_running_servers_import_error():
    """Missing jupyter_core yields no servers rather than raising."""
    with mock.patch.dict("sys.modules", {"jupyter_core": None, "jupyter_core.paths": None}):
        assert list(_get_running_servers()) == []
def test_get_sessions_notebook_finds_notebook():
    """The session whose kernel id matches yields its notebook path."""
    server = {"url": "http://localhost:8888/", "token": "test_token"}
    sessions = [{"kernel": {"id": MOCK_KERNEL_ID}, "path": MOCK_NOTEBOOK_PATH}]
    # Fake a context-manager HTTP response carrying the sessions payload.
    fake_response = mock.Mock()
    fake_response.__enter__ = mock.Mock(return_value=fake_response)
    fake_response.__exit__ = mock.Mock(return_value=False)
    fake_response.read.return_value = json.dumps(sessions).encode()
    with (
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context.urlopen",
            return_value=fake_response,
        ),
        mock.patch("json.load", return_value=sessions),
    ):
        assert _get_sessions_notebook(server, MOCK_KERNEL_ID) == MOCK_NOTEBOOK_PATH
def test_get_sessions_notebook_no_matching_kernel():
    """No session matches the kernel id -> None."""
    server = {"url": "http://localhost:8888/", "token": "test_token"}
    sessions = [{"kernel": {"id": "different_kernel"}, "path": "other_notebook.ipynb"}]
    fake_response = mock.Mock()
    fake_response.__enter__ = mock.Mock(return_value=fake_response)
    fake_response.__exit__ = mock.Mock(return_value=False)
    with (
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context.urlopen",
            return_value=fake_response,
        ),
        mock.patch("json.load", return_value=sessions),
    ):
        assert _get_sessions_notebook(server, MOCK_KERNEL_ID) is None
def test_get_sessions_notebook_connection_error():
    """Network failures are swallowed and reported as None."""
    server = {"url": "http://localhost:8888/", "token": "test_token"}
    with mock.patch(
        "mlflow.tracking.context.jupyter_notebook_context.urlopen",
        side_effect=Exception("Connection refused"),
    ):
        assert _get_sessions_notebook(server, MOCK_KERNEL_ID) is None
def test_get_sessions_notebook_with_jupyterhub_token(monkeypatch):
    """When the server record has no token, JUPYTERHUB_API_TOKEN is used in the URL."""
    monkeypatch.setenv("JUPYTERHUB_API_TOKEN", "hub_token")
    server = {"url": "http://localhost:8888/", "token": ""}
    sessions = [{"kernel": {"id": MOCK_KERNEL_ID}, "path": MOCK_NOTEBOOK_PATH}]
    fake_response = mock.Mock()
    fake_response.__enter__ = mock.Mock(return_value=fake_response)
    fake_response.__exit__ = mock.Mock(return_value=False)
    with (
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context.urlopen",
            return_value=fake_response,
        ) as mock_urlopen,
        mock.patch("json.load", return_value=sessions),
    ):
        _get_sessions_notebook(server, MOCK_KERNEL_ID)
    requested_url = mock_urlopen.call_args[0][0]
    assert "hub_token" in requested_url
@pytest.mark.parametrize(
    ("vscode_path", "env_vars", "sessions_path", "expected"),
    [
        # VS Code user-namespace path wins first.
        (MOCK_NOTEBOOK_PATH, {}, None, MOCK_NOTEBOOK_NAME),
        # Then the VS Code environment variable.
        (None, {"__vsc_ipynb_file__": MOCK_NOTEBOOK_PATH}, None, MOCK_NOTEBOOK_NAME),
        # Then the generic IPYNB_FILE environment variable.
        (None, {"IPYNB_FILE": MOCK_NOTEBOOK_PATH}, None, MOCK_NOTEBOOK_NAME),
        # Finally the Jupyter server sessions API.
        (None, {}, MOCK_NOTEBOOK_PATH, MOCK_NOTEBOOK_NAME),
        # No source available -> None.
        (None, {}, None, None),
    ],
)
def test_get_notebook_name(vscode_path, env_vars, sessions_path, expected, monkeypatch):
    # lru_cache would otherwise leak state between parametrized cases.
    _get_notebook_name.cache_clear()
    # Clear relevant env vars that the code checks
    monkeypatch.delenv("__vsc_ipynb_file__", raising=False)
    monkeypatch.delenv("IPYNB_FILE", raising=False)
    # Set the test env vars
    for key, value in env_vars.items():
        monkeypatch.setenv(key, value)
    with (
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context._get_vscode_notebook_path",
            return_value=vscode_path,
        ),
        mock.patch(
            "mlflow.tracking.context.jupyter_notebook_context._get_notebook_path_from_sessions",
            return_value=sessions_path,
        ),
    ):
        assert _get_notebook_name() == expected
def test_get_notebook_name_is_cached():
    """Repeated calls hit the lru_cache; the underlying resolver runs once."""
    _get_notebook_name.cache_clear()
    call_count = 0

    def counting_resolver():
        nonlocal call_count
        call_count += 1
        return MOCK_NOTEBOOK_PATH

    with mock.patch(
        "mlflow.tracking.context.jupyter_notebook_context._get_vscode_notebook_path",
        side_effect=counting_resolver,
    ):
        results = [_get_notebook_name() for _ in range(3)]
    assert results == [MOCK_NOTEBOOK_NAME] * 3
    assert call_count == 1
def test_get_notebook_path_from_sessions_success():
    """Kernel id + running server + matching session -> notebook path."""
    server = {"url": "http://localhost:8888/", "token": "test_token"}
    module = "mlflow.tracking.context.jupyter_notebook_context"
    with (
        mock.patch(f"{module}._get_kernel_id", return_value=MOCK_KERNEL_ID),
        mock.patch(f"{module}._get_running_servers", return_value=[server]),
        mock.patch(f"{module}._get_sessions_notebook", return_value=MOCK_NOTEBOOK_PATH),
    ):
        assert _get_notebook_path_from_sessions() == MOCK_NOTEBOOK_PATH
def test_get_notebook_path_from_sessions_no_kernel_id():
    """Without a kernel id there is nothing to match -> None."""
    with mock.patch(
        "mlflow.tracking.context.jupyter_notebook_context._get_kernel_id",
        return_value=None,
    ):
        assert _get_notebook_path_from_sessions() is None
def test_get_notebook_path_from_sessions_no_servers():
    """A kernel id but no running servers -> None."""
    module = "mlflow.tracking.context.jupyter_notebook_context"
    with (
        mock.patch(f"{module}._get_kernel_id", return_value=MOCK_KERNEL_ID),
        mock.patch(f"{module}._get_running_servers", return_value=[]),
    ):
        assert _get_notebook_path_from_sessions() is None
@pytest.mark.parametrize(("is_in_jupyter", "expected"), [(True, True), (False, False)])
def test_jupyter_notebook_run_context_in_context(is_in_jupyter, expected):
    """in_context() simply mirrors the notebook-detection helper."""
    with mock.patch(
        "mlflow.tracking.context.jupyter_notebook_context._is_in_jupyter_notebook",
        return_value=is_in_jupyter,
    ):
        assert JupyterNotebookRunContext().in_context() is expected
@pytest.mark.parametrize(
    ("notebook_name", "expected_tags"),
    [
        # Notebook name resolved -> both source-name and source-type tags.
        (
            MOCK_NOTEBOOK_NAME,
            {
                MLFLOW_SOURCE_NAME: MOCK_NOTEBOOK_NAME,
                MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK),
            },
        ),
        # Name unresolved -> only the source-type tag is emitted.
        (
            None,
            {
                MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK),
            },
        ),
    ],
)
def test_jupyter_notebook_run_context_tags(notebook_name, expected_tags):
    # Drop any cached name from earlier tests before mocking the resolver.
    _get_notebook_name.cache_clear()
    with mock.patch(
        "mlflow.tracking.context.jupyter_notebook_context._get_notebook_name",
        return_value=notebook_name,
    ):
        tags = JupyterNotebookRunContext().tags()
        assert tags == expected_tags
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracking/context/test_jupyter_notebook_context.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/test_scorer_telemetry.py | """Tests for scorer telemetry behavior, specifically testing that nested scorer calls
skip telemetry recording while top-level calls record telemetry correctly.
"""
import asyncio
import json
import threading
from typing import Callable
from unittest import mock
import pytest
from pydantic import PrivateAttr
from mlflow.entities import Feedback
from mlflow.genai.scorers import scorer
from mlflow.genai.scorers.base import Scorer
from mlflow.telemetry.client import TelemetryClient
from mlflow.telemetry.events import ScorerCallEvent
@scorer
def child_scorer_func(outputs) -> int:
    """Simple child scorer that can be called by parent scorers.

    Returns the length of ``outputs``; used as the nested (non-telemetry) callee.
    """
    return len(outputs)
def parent_scorer_func(child_scorer: Callable[..., int]) -> Scorer:
    """Get a parent scorer that calls into a child scorer and returns the result *2"""

    @scorer
    def scorer_func(outputs) -> Feedback:
        # Nested call into the child scorer; should not record its own telemetry.
        child_result = child_scorer(outputs=outputs)
        return Feedback(name="parent_scorer_func", value=2 * child_result)

    return scorer_func
class ParentScorer(Scorer):
    """Class-based scorer that delegates to a child scorer and doubles its result."""

    # PrivateAttr keeps the callable out of pydantic validation/serialization.
    _child_scorer: Callable[..., int] = PrivateAttr()

    def __init__(self, child_scorer: Callable[..., int], **kwargs):
        super().__init__(**kwargs)
        self._child_scorer = child_scorer

    def __call__(self, outputs) -> Feedback:
        # Call child scorer - this should NOT generate telemetry
        child_result = self._child_scorer(outputs=outputs)
        # child_result is now an int (from decorator scorer), not Feedback
        return Feedback(name=self.name, value=child_result * 2)
class RecursiveScorer(Scorer):
    """Scorer that calls itself up to ``max_depth`` times before producing a value."""

    # PrivateAttr keeps the depth limit out of pydantic validation/serialization.
    _max_depth: int = PrivateAttr()

    def __init__(self, max_depth=3, **kwargs):
        super().__init__(**kwargs)
        self._max_depth = max_depth

    def __call__(self, outputs, depth=0) -> Feedback:
        if depth >= self._max_depth:
            return Feedback(name=self.name, value=len(outputs))
        # Recursive call - only first call should generate telemetry
        return self(outputs=outputs, depth=depth + 1)
class GrandparentScorer(Scorer):
    """Scorer that wraps another scorer and adds 1 to its feedback value."""

    # PrivateAttr keeps the wrapped scorer out of pydantic validation/serialization.
    _parent_scorer: Scorer = PrivateAttr()

    def __init__(self, parent_scorer: Scorer, **kwargs):
        super().__init__(**kwargs)
        self._parent_scorer = parent_scorer

    def __call__(self, outputs) -> Feedback:
        # Nested scorer call; only this outer call should emit telemetry.
        parent_result = self._parent_scorer(outputs=outputs)
        return Feedback(name=self.name, value=parent_result.value + 1)
class ErrorScorer(Scorer):
    """Scorer that always raises, for exercising error paths in parent scorers."""

    def __call__(self, outputs) -> Feedback:
        raise ValueError("Test error")
class ParentWithErrorChild(Scorer):
    """Scorer whose child always raises; the parent swallows the error and scores 10."""

    # PrivateAttr keeps the child scorer out of pydantic validation/serialization.
    _child_scorer: Scorer = PrivateAttr()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._child_scorer = ErrorScorer(name="error_scorer")

    def __call__(self, outputs) -> Feedback:
        try:
            self._child_scorer(outputs=outputs)
        except ValueError:
            pass  # Handle error
        return Feedback(name=self.name, value=10)
def get_scorer_call_events(mock_requests):
    """Extract ScorerCallEvent records from captured telemetry."""
    matching = []
    for record in mock_requests:
        if record["data"]["event_name"] == ScorerCallEvent.name:
            matching.append(record)
    return matching
def get_event_params(mock_requests):
    """Get parsed params from ScorerCallEvent records."""
    return [
        json.loads(event["data"]["params"])
        for event in get_scorer_call_events(mock_requests)
    ]
def test_nested_scorer_skips_telemetry(mock_requests, mock_telemetry_client: TelemetryClient):
    """Only the top-level (class-based) scorer call emits a telemetry event."""
    parent = ParentScorer(name="parent_scorer", child_scorer=child_scorer_func)
    outcome = parent(outputs="test output")
    assert outcome.value == 22  # len("test output") * 2
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 1
    params = get_event_params(mock_requests)
    assert len(params) == 1
    # The single event belongs to the parent (kind="class"), not the child decorator.
    assert params[0]["scorer_class"] == "UserDefinedScorer"
    assert params[0]["scorer_kind"] == "class"
def test_multi_level_nesting_skips_telemetry(mock_requests, mock_telemetry_client: TelemetryClient):
    """Three levels of scorer nesting still produce exactly one telemetry event."""
    middle = parent_scorer_func(child_scorer=child_scorer_func)
    grandparent = GrandparentScorer(name="grandparent_scorer", parent_scorer=middle)
    feedback = grandparent(outputs="test")
    # (len("test") * 2) + 1 == 9
    assert feedback.value == 9
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 1
    params = get_event_params(mock_requests)
    assert len(params) == 1
    assert params[0]["scorer_class"] == "UserDefinedScorer"
    # The single event comes from the outermost (class-based) scorer.
    assert params[0]["scorer_kind"] == "class"
def test_recursive_scorer_skips_nested_telemetry(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """A self-recursive scorer emits telemetry only for its first invocation."""
    scorer_under_test = RecursiveScorer(name="recursive_scorer", max_depth=5)
    feedback = scorer_under_test(outputs="test", depth=0)
    # After recursing to max_depth=5 the value is len("test") == 4.
    assert feedback.value == 4
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 1
    assert len(get_event_params(mock_requests)) == 1
def test_thread_safety_concurrent_scorers(mock_requests, mock_telemetry_client: TelemetryClient):
    """Each concurrently running top-level scorer records its own telemetry event."""
    outcomes = []
    failures = []

    def run_scorer(scorer, outputs):
        try:
            outcomes.append(scorer(outputs=outputs))
        except Exception as exc:
            failures.append(exc)

    workers = [
        threading.Thread(
            target=run_scorer,
            args=(ParentScorer(name=f"parent{i}", child_scorer=child_scorer_func), f"test{i}"),
        )
        for i in range(10)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    assert len(failures) == 0
    assert len(outcomes) == 10
    mock_telemetry_client.flush()
    # One event per top-level scorer call, despite concurrency.
    assert len(get_scorer_call_events(mock_requests)) == 10
    assert len(get_event_params(mock_requests)) == 10
def test_error_in_nested_scorer_still_records_parent_telemetry(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """A failing child scorer does not suppress the parent's telemetry event."""
    parent = ParentWithErrorChild(name="parent_scorer")
    feedback = parent(outputs="test")
    # The parent swallows the child's error and returns its hardcoded value.
    assert feedback.value == 10
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 1
    assert len(get_event_params(mock_requests)) == 1
def test_direct_child_call_records_telemetry(mock_requests, mock_telemetry_client: TelemetryClient):
    """Calling a decorator scorer directly (not nested) records telemetry."""
    outcome = child_scorer_func(outputs="test")
    # A decorator scorer returns its raw value: len("test") == 4.
    assert outcome == 4
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 1
    params = get_event_params(mock_requests)
    assert len(params) == 1
    # The recorded scorer is the decorator-based one.
    assert params[0]["scorer_kind"] == "decorator"
def test_sequential_parent_calls_each_record_telemetry(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """Back-to-back top-level calls each emit their own telemetry event."""
    parent = ParentScorer(name="parent_scorer", child_scorer=child_scorer_func)
    first = parent(outputs="test1")
    second = parent(outputs="test2")
    # len("test1") * 2 == len("test2") * 2 == 10
    assert first.value == 10
    assert second.value == 10
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 2
    assert len(get_event_params(mock_requests)) == 2
def test_async_scorer_raises_error():
    # Declaring a Scorer subclass with an async __call__ must be rejected
    # eagerly, at class-definition time — not when the scorer is first called.
    with pytest.raises(TypeError, match="Async scorer '__call__' methods are not supported"):

        class AsyncScorer(Scorer):
            async def __call__(self, outputs) -> Feedback:
                await asyncio.sleep(0.001)
                return Feedback(name=self.name, value=len(outputs))
def test_telemetry_disabled_nested_scorers_work(
    mock_requests, mock_telemetry_client: TelemetryClient
):
    """Nested scorers still function correctly when telemetry is turned off."""
    with mock.patch("mlflow.telemetry.track.is_telemetry_disabled", return_value=True):
        parent = ParentScorer(name="parent_scorer", child_scorer=child_scorer_func)
        feedback = parent(outputs="test")
        # len("test") * 2 == 8
        assert feedback.value == 8
        mock_telemetry_client.flush()
        # With telemetry disabled, no scorer events are captured at all.
        assert len(get_scorer_call_events(mock_requests)) == 0
def test_decorator_scorer_with_nested_call(mock_requests, mock_telemetry_client: TelemetryClient):
    """A decorator scorer calling another decorator scorer emits one event."""

    @scorer
    def nested_checker(outputs) -> int:
        return len(outputs)

    @scorer
    def parent_checker(outputs) -> int:
        return nested_checker(outputs=outputs) * 2

    assert parent_checker(outputs="test") == 8
    mock_telemetry_client.flush()
    assert len(get_scorer_call_events(mock_requests)) == 1
    params = get_event_params(mock_requests)
    assert len(params) == 1
    assert params[0]["scorer_kind"] == "decorator"
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/test_scorer_telemetry.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/entities/trace_metrics.py | from dataclasses import dataclass
from enum import Enum
from mlflow.entities._mlflow_object import _MlflowObject
from mlflow.protos import service_pb2 as pb
class MetricViewType(str, Enum):
    """Entity level at which trace metrics are computed."""

    TRACES = "TRACES"
    SPANS = "SPANS"
    ASSESSMENTS = "ASSESSMENTS"

    def __str__(self) -> str:
        return self.value

    def to_proto(self):
        # str-subclass enum: `self` compares equal to its value, so it can be
        # passed directly as the proto enum name.
        return pb.MetricViewType.Value(self)

    @classmethod
    def from_proto(cls, proto: int) -> "MetricViewType":
        """Build a MetricViewType from its protobuf integer value."""
        return cls(pb.MetricViewType.Name(proto))
class AggregationType(str, Enum):
    """Aggregation functions supported by trace metrics queries."""

    COUNT = "COUNT"
    SUM = "SUM"
    AVG = "AVG"
    PERCENTILE = "PERCENTILE"
    MIN = "MIN"
    MAX = "MAX"

    def __str__(self) -> str:
        return self.value

    def to_proto(self):
        # str-subclass enum: `self` compares equal to its value, so it can be
        # passed directly as the proto enum name.
        return pb.AggregationType.Value(self)
@dataclass
class MetricAggregation(_MlflowObject):
    """An aggregation to apply to a metric (e.g. AVG, or P95 via PERCENTILE).

    Attributes:
        aggregation_type: The aggregation function to perform.
        percentile_value: Percentile in [0, 100]; required if and only if
            ``aggregation_type`` is PERCENTILE.
    """

    aggregation_type: AggregationType
    percentile_value: float | None = None

    def __post_init__(self):
        is_percentile = self.aggregation_type == AggregationType.PERCENTILE
        if is_percentile:
            if self.percentile_value is None:
                raise ValueError("Percentile value is required for PERCENTILE aggregation")
            if self.percentile_value > 100 or self.percentile_value < 0:
                raise ValueError(
                    f"Percentile value must be between 0 and 100, got {self.percentile_value}"
                )
        elif self.percentile_value is not None:
            raise ValueError(
                "Percentile value is only allowed for PERCENTILE aggregation type, "
                f"got {self.aggregation_type}"
            )

    def __str__(self) -> str:
        # Percentiles render as e.g. "P95"; other aggregations as their name.
        if self.aggregation_type == AggregationType.PERCENTILE:
            return f"P{self.percentile_value}"
        return str(self.aggregation_type)

    def to_proto(self) -> pb.MetricAggregation:
        """Serialize this aggregation to its protobuf representation."""
        result = pb.MetricAggregation()
        result.aggregation_type = self.aggregation_type.to_proto()
        if self.percentile_value is not None:
            result.percentile_value = self.percentile_value
        return result

    @classmethod
    def from_proto(cls, proto: pb.MetricAggregation) -> "MetricAggregation":
        """Construct a MetricAggregation from its protobuf representation."""
        percentile = proto.percentile_value if proto.HasField("percentile_value") else None
        agg_type = AggregationType(pb.AggregationType.Name(proto.aggregation_type))
        return cls(aggregation_type=agg_type, percentile_value=percentile)
@dataclass
class MetricDataPoint(_MlflowObject):
    """A single aggregated metric result with its grouping dimensions.

    Attributes mirror pb.MetricDataPoint:
    - metric_name: name of the metric this point belongs to.
    - dimensions: dimension-name -> dimension-value for this group.
    - values: aggregation-label -> aggregated numeric value.
    """

    metric_name: str
    dimensions: dict[str, str]
    values: dict[str, float]

    @classmethod
    def from_proto(cls, proto: pb.MetricDataPoint) -> "MetricDataPoint":
        """Construct a data point from its protobuf representation."""
        return cls(
            metric_name=proto.metric_name,
            # Copy proto map fields into plain dicts.
            dimensions=dict(proto.dimensions),
            values=dict(proto.values),
        )

    def to_proto(self) -> pb.MetricDataPoint:
        """Serialize this data point to its protobuf representation."""
        return pb.MetricDataPoint(
            metric_name=self.metric_name,
            dimensions=self.dimensions,
            values=self.values,
        )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/trace_metrics.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/store/tracking/utils/sql_trace_metrics_utils.py | import json
from dataclasses import dataclass
from datetime import datetime, timezone
import sqlalchemy
from sqlalchemy import Column, and_, case, exists, func, literal_column
from sqlalchemy.orm.query import Query
from mlflow.entities.trace_metrics import (
AggregationType,
MetricAggregation,
MetricDataPoint,
MetricViewType,
)
from mlflow.exceptions import MlflowException
from mlflow.store.db import db_types
from mlflow.store.tracking.dbmodels.models import (
SqlAssessments,
SqlSpan,
SqlSpanMetrics,
SqlTraceInfo,
SqlTraceMetadata,
SqlTraceMetrics,
SqlTraceTag,
)
from mlflow.tracing.constant import (
AssessmentMetricDimensionKey,
AssessmentMetricKey,
AssessmentMetricSearchKey,
SpanAttributeKey,
SpanMetricDimensionKey,
SpanMetricKey,
SpanMetricSearchKey,
TraceMetricDimensionKey,
TraceMetricKey,
TraceMetricSearchKey,
TraceTagKey,
)
from mlflow.utils.search_utils import SearchTraceMetricsUtils
@dataclass
class TraceMetricsConfig:
    """
    Configuration for traces metrics.

    Args:
        aggregation_types: Supported aggregation types to apply to the metrics.
        dimensions: Supported dimensions to group metrics by.
    """

    # Aggregations (COUNT, AVG, ...) that are valid for this metric.
    aggregation_types: set[AggregationType]
    # Dimension keys this metric may be grouped by.
    dimensions: set[str]
# TraceMetricKey -> TraceMetricsConfig mapping for traces
TRACES_METRICS_CONFIGS: dict[TraceMetricKey, TraceMetricsConfig] = {
    # Counts only support COUNT; groupable by trace name and/or status.
    TraceMetricKey.TRACE_COUNT: TraceMetricsConfig(
        aggregation_types={AggregationType.COUNT},
        dimensions={TraceMetricDimensionKey.TRACE_NAME, TraceMetricDimensionKey.TRACE_STATUS},
    ),
    TraceMetricKey.LATENCY: TraceMetricsConfig(
        aggregation_types={AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={TraceMetricDimensionKey.TRACE_NAME},
    ),
    # Token-usage metrics additionally support SUM (totals across traces).
    TraceMetricKey.INPUT_TOKENS: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={TraceMetricDimensionKey.TRACE_NAME},
    ),
    TraceMetricKey.OUTPUT_TOKENS: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={TraceMetricDimensionKey.TRACE_NAME},
    ),
    TraceMetricKey.TOTAL_TOKENS: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={TraceMetricDimensionKey.TRACE_NAME},
    ),
    TraceMetricKey.CACHE_READ_INPUT_TOKENS: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={TraceMetricDimensionKey.TRACE_NAME},
    ),
    TraceMetricKey.CACHE_CREATION_INPUT_TOKENS: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={TraceMetricDimensionKey.TRACE_NAME},
    ),
}
# SpanMetricKey -> TraceMetricsConfig mapping for spans
SPANS_METRICS_CONFIGS: dict[SpanMetricKey, TraceMetricsConfig] = {
    SpanMetricKey.SPAN_COUNT: TraceMetricsConfig(
        aggregation_types={AggregationType.COUNT},
        dimensions={
            SpanMetricDimensionKey.SPAN_NAME,
            SpanMetricDimensionKey.SPAN_TYPE,
            SpanMetricDimensionKey.SPAN_STATUS,
            SpanMetricDimensionKey.SPAN_MODEL_NAME,
            SpanMetricDimensionKey.SPAN_MODEL_PROVIDER,
        },
    ),
    SpanMetricKey.LATENCY: TraceMetricsConfig(
        aggregation_types={AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={SpanMetricDimensionKey.SPAN_NAME, SpanMetricDimensionKey.SPAN_STATUS},
    ),
    # Cost metrics are groupable by the model that incurred the cost.
    SpanMetricKey.INPUT_COST: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={
            SpanMetricDimensionKey.SPAN_MODEL_NAME,
            SpanMetricDimensionKey.SPAN_MODEL_PROVIDER,
        },
    ),
    SpanMetricKey.OUTPUT_COST: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={
            SpanMetricDimensionKey.SPAN_MODEL_NAME,
            SpanMetricDimensionKey.SPAN_MODEL_PROVIDER,
        },
    ),
    SpanMetricKey.TOTAL_COST: TraceMetricsConfig(
        aggregation_types={AggregationType.SUM, AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={
            SpanMetricDimensionKey.SPAN_MODEL_NAME,
            SpanMetricDimensionKey.SPAN_MODEL_PROVIDER,
        },
    ),
}
# AssessmentMetricKey -> TraceMetricsConfig mapping for assessments
ASSESSMENTS_METRICS_CONFIGS: dict[str, TraceMetricsConfig] = {
    AssessmentMetricKey.ASSESSMENT_COUNT: TraceMetricsConfig(
        aggregation_types={AggregationType.COUNT},
        dimensions={
            AssessmentMetricDimensionKey.ASSESSMENT_NAME,
            AssessmentMetricDimensionKey.ASSESSMENT_VALUE,
        },
    ),
    AssessmentMetricKey.ASSESSMENT_VALUE: TraceMetricsConfig(
        aggregation_types={AggregationType.AVG, AggregationType.PERCENTILE},
        dimensions={AssessmentMetricDimensionKey.ASSESSMENT_NAME},
    ),
}
# Dispatch table: view type -> (metric name -> config). Used by
# validate_query_trace_metrics_params to validate requests.
VIEW_TYPE_CONFIGS: dict[MetricViewType, dict[str, TraceMetricsConfig]] = {
    MetricViewType.TRACES: TRACES_METRICS_CONFIGS,
    MetricViewType.SPANS: SPANS_METRICS_CONFIGS,
    MetricViewType.ASSESSMENTS: ASSESSMENTS_METRICS_CONFIGS,
}
# Label of the synthetic time-bucket dimension column in query results.
TIME_BUCKET_LABEL = "time_bucket"
def get_percentile_aggregation(
db_type: str, percentile_value: float, column, partition_by_columns: list[Column] | None = None
):
"""
Get percentile aggregation function based on database type.
PostgreSQL, MSSQL, and SQLite use linear interpolation via PERCENTILE_CONT (or custom
aggregate for SQLite), equivalent to numpy.quantile's default method='linear' (H&F
method 7). The formula is: (1-g)*y[j] + g*y[j+1], where j and g are integral and
fractional parts of q*(n-1).
See: https://numpy.org/doc/stable/reference/generated/numpy.quantile.html
MySQL uses PERCENT_RANK() which calculates relative rank rather than interpolated values.
Args:
db_type: Database type (e.g., "postgresql", "mssql", "mysql", "sqlite")
percentile_value: Percentile value between 0 and 100 (e.g., 50 for median)
column: SQLAlchemy column to compute percentile on
partition_by_columns: For MSSQL and MySQL, columns to partition by in the OVER clause.
MSSQL and MySQL require PERCENTILE_CONT to have an OVER clause since it's a window
function, not a true aggregate. Pass the GROUP BY columns here.
Returns:
SQLAlchemy aggregation function for percentile
"""
percentile_fraction = percentile_value / 100 # Convert to 0-1 range
match db_type:
case db_types.POSTGRES:
# PostgreSQL PERCENTILE_CONT: ordered-set aggregate for exact percentile
return func.percentile_cont(percentile_fraction).within_group(column)
case db_types.MSSQL:
# MSSQL PERCENTILE_CONT: window function that REQUIRES an OVER clause.
# Unlike PostgreSQL, MSSQL's PERCENTILE_CONT is not a true aggregate function.
# We use OVER (PARTITION BY group_columns) to compute percentile per group.
# The result is a value for each row; the caller must handle deduplication
# (typically by wrapping in MAX/MIN in a subquery approach).
partition_by = partition_by_columns or []
return (
func.percentile_cont(percentile_fraction)
.within_group(column)
.over(partition_by=partition_by)
)
case db_types.SQLITE:
# Custom percentile aggregate function registered in mlflow/store/db/utils.py
# Expects percentile as 0-100
return func.percentile(column, percentile_value)
case db_types.MYSQL:
# MySQL 8.0+ supports PERCENT_RANK() function.
# We use PERCENT_RANK() OVER (PARTITION BY ... ORDER BY column) to get
# each row's percentile rank, then find values at the target percentile.
partition_by = partition_by_columns or []
return func.percent_rank().over(partition_by=partition_by, order_by=column)
def get_time_bucket_expression(
    view_type: MetricViewType, time_interval_seconds: int, db_type: str
) -> Column:
    """Get time bucket expression for grouping timestamps.

    The bucket is computed as floor(timestamp_ms / bucket_ms) * bucket_ms, i.e.
    the timestamp aligned down to the nearest bucket boundary in milliseconds.

    Args:
        view_type: Type of metrics view (e.g., TRACES, SPANS)
        time_interval_seconds: Time interval in seconds for bucketing
        db_type: Database type (e.g., "postgresql", "mssql", "mysql", "sqlite")

    Returns:
        SQLAlchemy column expression for time bucket
    """
    # Convert time_interval_seconds to milliseconds
    bucket_size_ms = time_interval_seconds * 1000
    if db_type == db_types.MSSQL:
        # MSSQL requires the exact same SQL text in SELECT, GROUP BY, and ORDER BY clauses.
        # We use literal_column to generate identical SQL text across all clauses.
        match view_type:
            case MetricViewType.TRACES:
                column_name = "timestamp_ms"
            case MetricViewType.SPANS:
                # For spans, timestamp is an expression (start_time_unix_nano / 1000000)
                # rather than a simple column. Build the complete expression inline.
                column_name = "start_time_unix_nano / 1000000"
            case MetricViewType.ASSESSMENTS:
                column_name = "created_timestamp"
        expr_str = f"floor({column_name} / {bucket_size_ms}) * {bucket_size_ms}"
        return literal_column(expr_str)
    else:
        # For non-MSSQL databases, use SQLAlchemy expressions
        match view_type:
            case MetricViewType.TRACES:
                timestamp_column = SqlTraceInfo.timestamp_ms
            case MetricViewType.SPANS:
                # Convert nanoseconds to milliseconds
                timestamp_column = SqlSpan.start_time_unix_nano / 1000000
            case MetricViewType.ASSESSMENTS:
                timestamp_column = SqlAssessments.created_timestamp
        # This floors the timestamp to the nearest bucket boundary
        return func.floor(timestamp_column / bucket_size_ms) * bucket_size_ms
def _get_aggregation_expression(
aggregation: MetricAggregation,
db_type: str,
column,
partition_by_columns: list[Column] | None = None,
) -> Column:
"""
Get the SQL aggregation expression for the given aggregation type and column.
Args:
aggregation: The aggregation of the metric
db_type: Database type (for percentile calculations)
column: The column to aggregate
partition_by_columns: For MSSQL and MySQL percentile, columns to partition by in OVER clause
Returns:
SQLAlchemy column expression for the aggregation
"""
match aggregation.aggregation_type:
case AggregationType.COUNT:
return func.count(column)
case AggregationType.SUM:
return func.sum(column)
case AggregationType.AVG:
return func.avg(column)
case AggregationType.PERCENTILE:
return get_percentile_aggregation(
db_type, aggregation.percentile_value, column, partition_by_columns
)
case _:
raise MlflowException.invalid_parameter_value(
f"Unsupported aggregation type: {aggregation.aggregation_type}",
)
def _get_assessment_numeric_value_column(json_column: Column) -> Column:
    """
    Extract numeric value from JSON-encoded assessment value.

    Handles conversion of JSON primitives to numeric values:
    - JSON true/false -> 1/0
    - JSON numbers -> numeric value
    - other JSON-encoded values -> NULL

    Args:
        json_column: Column containing JSON-encoded value

    Returns:
        Column expression that extracts numeric value or NULL for non-numeric values
    """
    # NOTE: CASE branch order matters — the first matching WHEN wins, so the
    # boolean/"yes"/"no" and null/string/container checks must precede the
    # numeric cast fallback.
    return case(
        # yes / no -> 1.0 / 0.0 to support mlflow.genai.judges.CategoricalRating
        # that is used by builtin judges
        (json_column.in_([json.dumps(True), json.dumps("yes")]), 1.0),
        (json_column.in_([json.dumps(False), json.dumps("no")]), 0.0),
        # Skip null, strings, lists, and dicts (JSON null/objects/arrays)
        (json_column == "null", None),
        # JSON-encoded strings/arrays/objects start with ", [ or { respectively.
        (func.substring(json_column, 1, 1).in_(['"', "[", "{"]), None),
        # For numbers, cast to float
        else_=func.cast(json_column, sqlalchemy.Float),
    )
def _get_column_to_aggregate(view_type: MetricViewType, metric_name: str) -> Column:
"""
Get the SQL column for the given metric name and view type.
Args:
metric_name: Name of the metric to query
view_type: Type of metrics view (e.g., TRACES, SPANS, ASSESSMENTS)
Returns:
SQLAlchemy column to aggregate
"""
match view_type:
case MetricViewType.TRACES:
match metric_name:
case TraceMetricKey.TRACE_COUNT:
return SqlTraceInfo.request_id
case TraceMetricKey.LATENCY:
return SqlTraceInfo.execution_time_ms
case metric_name if metric_name in TraceMetricKey.token_usage_keys():
return SqlTraceMetrics.value
case MetricViewType.SPANS:
match metric_name:
case SpanMetricKey.SPAN_COUNT:
return SqlSpan.span_id
case SpanMetricKey.LATENCY:
# Span latency in milliseconds (nanoseconds converted to ms)
return (SqlSpan.end_time_unix_nano - SqlSpan.start_time_unix_nano) // 1000000
case metric_name if metric_name in SpanMetricKey.cost_keys():
return SqlSpanMetrics.value
case MetricViewType.ASSESSMENTS:
match metric_name:
case AssessmentMetricKey.ASSESSMENT_COUNT:
return SqlAssessments.assessment_id
case "assessment_value":
return _get_assessment_numeric_value_column(SqlAssessments.value)
raise MlflowException.invalid_parameter_value(
f"Unsupported metric name: {metric_name} for view type {view_type}",
)
def _get_json_dimension_column(db_type: str, json_key: str, label: str) -> Column:
    """
    Extract JSON dimension column with database-specific handling.

    NOTE(review): ``json_key`` is interpolated into raw SQL for MSSQL/Postgres.
    At the call sites visible in this module it is always an internal
    SpanAttributeKey constant, never user input — keep it that way.

    Args:
        db_type: Database type
        json_key: JSON key to extract from dimension_attributes
        label: Label for the dimension column
    Returns:
        Column expression for the JSON dimension
    """
    match db_type:
        case db_types.MSSQL:
            # Use CASE with ISJSON to handle JSON null values stored as 'null' string
            # SQLAlchemy stores Python None as JSON 'null', which JSON_VALUE can't handle
            # ISJSON returns 1 for valid JSON objects, 0 for 'null' string
            return literal_column(
                f"CASE WHEN ISJSON(spans.dimension_attributes) = 1 "
                f"AND spans.dimension_attributes != 'null' "
                f"THEN JSON_VALUE(spans.dimension_attributes, '$.\"{json_key}\"') "
                f"ELSE NULL END"
            ).label(label)
        case db_types.POSTGRES:
            # Use ->> operator to extract as text without JSON quotes
            # Use literal_column to ensure identical SQL for consistent GROUP BY
            return literal_column(f"spans.dimension_attributes ->> '{json_key}'").label(label)
        case _:
            # SQLite/MySQL: SQLAlchemy's JSON subscript handles extraction natively.
            return SqlSpan.dimension_attributes[json_key].label(label)
def _apply_dimension_to_query(
    query: Query, dimension: str, view_type: MetricViewType, db_type: str
) -> tuple[Query, Column]:
    """
    Apply dimension-specific logic to query and return the dimension column.

    Some dimensions only need a labeled column; TRACE_NAME additionally joins
    the trace-tag table, and span model dimensions need db-specific JSON
    extraction.

    Args:
        query: SQLAlchemy query to modify
        dimension: Dimension name to apply
        view_type: Type of metrics view (e.g., TRACES, SPANS, ASSESSMENTS)
        db_type: Database type (for MSSQL-specific JSON extraction handling)

    Returns:
        Tuple of (modified query, labeled dimension column)

    Raises:
        MlflowException: If the dimension is not supported for the view type.
    """
    match view_type:
        case MetricViewType.TRACES:
            match dimension:
                case TraceMetricDimensionKey.TRACE_NAME:
                    # Join with SqlTraceTag to get trace name
                    query = query.join(
                        SqlTraceTag,
                        and_(
                            SqlTraceInfo.request_id == SqlTraceTag.request_id,
                            SqlTraceTag.key == TraceTagKey.TRACE_NAME,
                        ),
                    )
                    return query, SqlTraceTag.value.label(TraceMetricDimensionKey.TRACE_NAME)
                case TraceMetricDimensionKey.TRACE_STATUS:
                    return query, SqlTraceInfo.status.label(TraceMetricDimensionKey.TRACE_STATUS)
        case MetricViewType.SPANS:
            match dimension:
                case SpanMetricDimensionKey.SPAN_NAME:
                    return query, SqlSpan.name.label(SpanMetricDimensionKey.SPAN_NAME)
                case SpanMetricDimensionKey.SPAN_TYPE:
                    return query, SqlSpan.type.label(SpanMetricDimensionKey.SPAN_TYPE)
                case SpanMetricDimensionKey.SPAN_STATUS:
                    return query, SqlSpan.status.label(SpanMetricDimensionKey.SPAN_STATUS)
                case SpanMetricDimensionKey.SPAN_MODEL_NAME:
                    # Model name is stored inside the span's JSON attributes.
                    return query, _get_json_dimension_column(
                        db_type, SpanAttributeKey.MODEL, SpanMetricDimensionKey.SPAN_MODEL_NAME
                    )
                case SpanMetricDimensionKey.SPAN_MODEL_PROVIDER:
                    return query, _get_json_dimension_column(
                        db_type,
                        SpanAttributeKey.MODEL_PROVIDER,
                        SpanMetricDimensionKey.SPAN_MODEL_PROVIDER,
                    )
        case MetricViewType.ASSESSMENTS:
            match dimension:
                case AssessmentMetricDimensionKey.ASSESSMENT_NAME:
                    return query, SqlAssessments.name.label(
                        AssessmentMetricDimensionKey.ASSESSMENT_NAME
                    )
                case AssessmentMetricDimensionKey.ASSESSMENT_VALUE:
                    return query, SqlAssessments.value.label(
                        AssessmentMetricDimensionKey.ASSESSMENT_VALUE
                    )
    # Reached only when no case above returned: dimension/view combination invalid.
    raise MlflowException.invalid_parameter_value(
        f"Unsupported dimension `{dimension}` with view type {view_type}"
    )
def _apply_view_initial_join(query: Query, view_type: MetricViewType) -> Query:
    """
    Apply initial join required for the view type.

    TRACES needs no extra join; SPANS and ASSESSMENTS join their entity table
    onto SqlTraceInfo via the trace id.

    Args:
        query: SQLAlchemy query (starting from SqlTraceInfo)
        view_type: Type of metrics view (e.g., TRACES, SPANS, ASSESSMENTS)

    Returns:
        Modified query with view-specific joins
    """
    if view_type == MetricViewType.SPANS:
        return query.join(SqlSpan, SqlSpan.trace_id == SqlTraceInfo.request_id)
    if view_type == MetricViewType.ASSESSMENTS:
        return query.join(SqlAssessments, SqlAssessments.trace_id == SqlTraceInfo.request_id)
    return query
def _apply_metric_specific_joins(
    query: Query, metric_name: str, view_type: MetricViewType
) -> Query:
    """
    Apply metric-specific joins to the query.

    Token-usage trace metrics and span cost metrics are stored in side tables
    (SqlTraceMetrics / SqlSpanMetrics) keyed by the metric name, so those
    metrics require an extra join; everything else passes through unchanged.

    Args:
        query: SQLAlchemy query to modify
        metric_name: Name of the metric being queried
        view_type: Type of metrics view (e.g., TRACES, SPANS)

    Returns:
        Modified query with necessary joins
    """
    if view_type == MetricViewType.TRACES and metric_name in TraceMetricKey.token_usage_keys():
        # Token usage values live in SqlTraceMetrics, one row per (trace, key).
        return query.join(
            SqlTraceMetrics,
            and_(
                SqlTraceInfo.request_id == SqlTraceMetrics.request_id,
                SqlTraceMetrics.key == metric_name,
            ),
        )
    if view_type == MetricViewType.SPANS and metric_name in SpanMetricKey.cost_keys():
        # Cost values live in SqlSpanMetrics, one row per (trace, span, key).
        return query.join(
            SqlSpanMetrics,
            and_(
                SqlSpan.trace_id == SqlSpanMetrics.trace_id,
                SqlSpan.span_id == SqlSpanMetrics.span_id,
                SqlSpanMetrics.key == metric_name,
            ),
        )
    return query
def _apply_filters(query: Query, filters: list[str], view_type: MetricViewType) -> Query:
    """
    Apply filters to the query.

    Trace-level filters are accepted for every view type (all views start from
    SqlTraceInfo); span and assessment filters are only valid for their own
    view type and raise otherwise. Multiple filters are ANDed together.

    Args:
        query: SQLAlchemy query to filter
        filters: List of filter strings
        view_type: Type of metrics view

    Returns:
        Filtered query

    Raises:
        MlflowException: If a span/assessment filter is used with the wrong view type.
    """
    if not filters:
        return query
    for filter_string in filters:
        parsed_filter = SearchTraceMetricsUtils.parse_search_filter(filter_string)
        match parsed_filter.view_type:
            case TraceMetricSearchKey.VIEW_TYPE:
                match parsed_filter.entity:
                    case TraceMetricSearchKey.STATUS:
                        query = query.filter(SqlTraceInfo.status == parsed_filter.value)
                    case TraceMetricSearchKey.METADATA:
                        # EXISTS subquery: trace has a metadata row with this key/value.
                        metadata_filter = exists().where(
                            and_(
                                SqlTraceMetadata.request_id == SqlTraceInfo.request_id,
                                SqlTraceMetadata.key == parsed_filter.key,
                                SqlTraceMetadata.value == parsed_filter.value,
                            )
                        )
                        query = query.filter(metadata_filter)
                    case TraceMetricSearchKey.TAG:
                        # EXISTS subquery: trace has a tag row with this key/value.
                        tag_filter = exists().where(
                            and_(
                                SqlTraceTag.request_id == SqlTraceInfo.request_id,
                                SqlTraceTag.key == parsed_filter.key,
                                SqlTraceTag.value == parsed_filter.value,
                            )
                        )
                        query = query.filter(tag_filter)
            case SpanMetricSearchKey.VIEW_TYPE:
                if view_type != MetricViewType.SPANS:
                    raise MlflowException.invalid_parameter_value(
                        f"Filtering by span is only supported for {MetricViewType.SPANS} view "
                        f"type, got {view_type}",
                    )
                match parsed_filter.entity:
                    case SpanMetricSearchKey.NAME:
                        query = query.filter(SqlSpan.name == parsed_filter.value)
                    case SpanMetricSearchKey.STATUS:
                        query = query.filter(SqlSpan.status == parsed_filter.value)
                    case SpanMetricSearchKey.TYPE:
                        query = query.filter(SqlSpan.type == parsed_filter.value)
            case AssessmentMetricSearchKey.VIEW_TYPE:
                if view_type != MetricViewType.ASSESSMENTS:
                    raise MlflowException.invalid_parameter_value(
                        "Filtering by assessment is only supported for "
                        f"{MetricViewType.ASSESSMENTS} view type, got {view_type}",
                    )
                match parsed_filter.entity:
                    case AssessmentMetricSearchKey.NAME:
                        query = query.filter(SqlAssessments.name == parsed_filter.value)
                    case AssessmentMetricSearchKey.TYPE:
                        query = query.filter(SqlAssessments.assessment_type == parsed_filter.value)
    # NOTE(review): filters with an unrecognized view type or entity fall
    # through silently (no case matches) — presumably parse_search_filter
    # rejects those earlier; confirm against SearchTraceMetricsUtils.
    return query
def _has_percentile_aggregation(aggregations: list[MetricAggregation]) -> bool:
    """Return True when any requested aggregation is a percentile."""
    for aggregation in aggregations:
        if aggregation.aggregation_type == AggregationType.PERCENTILE:
            return True
    return False
def _build_query_with_percentile_subquery(
    db_type: str,
    query: Query,
    aggregations: list[MetricAggregation],
    dimension_columns: list[Column],
    agg_column: Column,
) -> tuple[Query, list[Column]]:
    """
    Build query with percentile window functions using a subquery approach.

    Both MSSQL and MySQL require window functions for percentile calculations, which don't
    work directly with GROUP BY. This function uses a two-level query pattern:
    - Inner: compute window function values (percentile or percent_rank)
    - Outer: GROUP BY dimensions and aggregate the window function results

    MSSQL uses PERCENTILE_CONT(...) OVER (PARTITION BY ...) directly.
    MySQL uses PERCENT_RANK() with linear interpolation to emulate PERCENTILE_CONT.

    Args:
        db_type: Database type ("mssql" or "mysql")
        query: Base SQLAlchemy query with joins and filters applied
        aggregations: List of aggregations to compute
        dimension_columns: Labeled dimension columns for grouping
        agg_column: Column to aggregate on

    Returns:
        Tuple of (outer_query, select_columns)

    Raises:
        ValueError: If ``db_type`` is neither MSSQL nor MySQL.
    """
    # .element unwraps the Label to get the raw column expression for PARTITION BY.
    partition_by_columns = [col.element for col in dimension_columns] if dimension_columns else []
    # Build inner subquery columns: dimensions + value + window function columns
    inner_columns = list(dimension_columns)
    inner_columns.append(agg_column.label("_agg_value"))
    # Add db-specific window function columns
    percentile_labels = {}
    match db_type:
        case db_types.MSSQL:
            # add PERCENTILE_CONT window function for each percentile aggregation
            for agg in aggregations:
                if agg.aggregation_type == AggregationType.PERCENTILE:
                    label = f"_p{int(agg.percentile_value)}"
                    expr = get_percentile_aggregation(
                        db_type, agg.percentile_value, agg_column, partition_by_columns
                    )
                    inner_columns.append(expr.label(label))
                    # Remember which inner column holds this aggregation's result.
                    percentile_labels[str(agg)] = label
        case db_types.MYSQL:
            # add single PERCENT_RANK column for interpolation
            inner_columns.append(
                func.percent_rank()
                .over(partition_by=partition_by_columns, order_by=agg_column)
                .label("_pct_rank")
            )
        case _:
            raise ValueError(
                f"Unsupported database type: {db_type}",
            )
    subquery = query.with_entities(*inner_columns).subquery()
    # Build outer query percentile expression based on db type
    def _build_outer_percentile_expr(agg):
        match db_type:
            case db_types.MSSQL:
                # MAX picks the pre-computed percentile (same value for all rows in partition)
                return func.max(subquery.c[percentile_labels[str(agg)]])
            case db_types.MYSQL:
                # linear interpolation
                pct_fraction = agg.percentile_value / 100
                val_col = subquery.c["_agg_value"]
                rank_col = subquery.c["_pct_rank"]
                # Boundary values and ranks for interpolation
                low_val = func.max(case((rank_col <= pct_fraction, val_col)))
                hi_val = func.min(case((rank_col >= pct_fraction, val_col)))
                low_rank = func.max(case((rank_col <= pct_fraction, rank_col)))
                hi_rank = func.min(case((rank_col >= pct_fraction, rank_col)))
                # Interpolate: low + (hi - low) * (target - low_rank) / (hi_rank - low_rank)
                # NULLIF guards against division by zero when both ranks coincide.
                rank_diff = func.nullif(hi_rank - low_rank, 0)
                interpolation = low_val + (hi_val - low_val) * (pct_fraction - low_rank) / rank_diff
                # Fall back to the lower boundary when interpolation is NULL.
                return func.coalesce(interpolation, low_val)
    # Map each requested aggregation onto an outer SELECT column.
    def _outer_agg_column(agg: MetricAggregation) -> Column:
        agg_label = str(agg)
        match agg.aggregation_type:
            case AggregationType.PERCENTILE:
                return _build_outer_percentile_expr(agg).label(agg_label)
            case _:
                # Non-percentile aggregations apply directly to the inner value column.
                return _get_aggregation_expression(agg, db_type, subquery.c["_agg_value"]).label(
                    agg_label
                )
    select_columns = [subquery.c[col.name].label(col.name) for col in dimension_columns]
    select_columns.extend(_outer_agg_column(agg) for agg in aggregations)
    outer_query = query.session.query(*select_columns).select_from(subquery)
    if dimension_columns:
        group_by_cols = [subquery.c[col.name] for col in dimension_columns]
        outer_query = outer_query.group_by(*group_by_cols).order_by(*group_by_cols)
    return outer_query, select_columns
def query_metrics(
    view_type: MetricViewType,
    db_type: str,
    query: Query,
    metric_name: str,
    aggregations: list[MetricAggregation],
    dimensions: list[str] | None,
    filters: list[str] | None,
    time_interval_seconds: int | None,
    max_results: int,
) -> list[MetricDataPoint]:
    """Unified query metrics function for all view types.

    Pipeline: view join -> filters -> metric joins -> dimension columns ->
    aggregation SELECT (with a db-specific subquery for MSSQL/MySQL
    percentiles) -> GROUP BY/ORDER BY -> LIMIT -> result conversion.

    Args:
        view_type: Type of metrics view (e.g., TRACES, SPANS)
        db_type: Database type (e.g., "postgresql", "mssql", "mysql")
        query: Base SQLAlchemy query
        metric_name: Name of the metric to query
        aggregations: List of aggregations to compute
        dimensions: List of dimensions to group by
        filters: List of filter strings (each parsed by SearchTraceUtils), combined with AND
        time_interval_seconds: Time interval in seconds for time bucketing
        max_results: Maximum number of results to return

    Returns:
        List of MetricDataPoint objects
    """
    # Apply view-specific initial join
    query = _apply_view_initial_join(query, view_type)
    query = _apply_filters(query, filters, view_type)
    # Apply metric-specific joins first, before dimensions
    # This ensures tables like SqlSpanMetrics are available for dimension extraction
    query = _apply_metric_specific_joins(query, metric_name, view_type)
    agg_column = _get_column_to_aggregate(view_type, metric_name)
    # Group by dimension columns, labeled for SELECT
    dimension_columns = []
    if time_interval_seconds:
        # The time bucket acts as the leading synthetic dimension.
        time_bucket_expr = get_time_bucket_expression(view_type, time_interval_seconds, db_type)
        dimension_columns.append(time_bucket_expr.label(TIME_BUCKET_LABEL))
    for dimension in dimensions or []:
        query, dimension_column = _apply_dimension_to_query(query, dimension, view_type, db_type)
        dimension_columns.append(dimension_column)
    # MSSQL and MySQL with percentile need special handling (window function requires subquery)
    if db_type in (db_types.MSSQL, db_types.MYSQL) and _has_percentile_aggregation(aggregations):
        query, select_columns = _build_query_with_percentile_subquery(
            db_type, query, aggregations, dimension_columns, agg_column
        )
    else:
        # Standard path for PostgreSQL, SQLite
        select_columns = list(dimension_columns)
        for agg in aggregations:
            expr = _get_aggregation_expression(agg, db_type, agg_column)
            # Each aggregation column is labeled with its str() form (e.g. "AVG", "P95").
            select_columns.append(expr.label(str(agg)))
        query = query.with_entities(*select_columns)
        # Extract underlying column expressions from labeled columns for GROUP BY/ORDER BY
        if dimension_columns:
            group_by_columns = [col.element for col in dimension_columns]
            query = query.group_by(*group_by_columns)
            # order by time bucket first, then by other dimensions
            query = query.order_by(*group_by_columns)
    results = query.limit(max_results).all()
    return convert_results_to_metric_data_points(
        results, select_columns, len(dimension_columns), metric_name
    )
def validate_query_trace_metrics_params(
    view_type: MetricViewType,
    metric_name: str,
    aggregations: list[MetricAggregation],
    dimensions: list[str] | None,
):
    """Validate the parameters passed to ``query_trace_metrics``.

    Checks, in order: the view type is known, the metric exists for that
    view, every requested aggregation type is supported by the metric, and
    every requested dimension is supported by the metric.

    Args:
        view_type: Type of metrics view (e.g., TRACES, SPANS, ASSESSMENTS).
        metric_name: Name of the metric to query.
        aggregations: List of aggregations to compute.
        dimensions: List of dimensions to group by.

    Raises:
        MlflowException: If any parameter is invalid.
    """
    config_for_view = VIEW_TYPE_CONFIGS.get(view_type)
    if config_for_view is None:
        supported_view_types = [vt.value for vt in VIEW_TYPE_CONFIGS.keys()]
        raise MlflowException.invalid_parameter_value(
            f"view_type must be one of {supported_view_types}, got '{view_type.value}'",
        )
    if metric_name not in config_for_view:
        raise MlflowException.invalid_parameter_value(
            f"metric_name must be one of {list(config_for_view.keys())}, got '{metric_name}'",
        )
    metric_cfg = config_for_view[metric_name]
    requested_agg_types = {agg.aggregation_type for agg in aggregations}
    unsupported_agg_types = requested_agg_types - metric_cfg.aggregation_types
    if unsupported_agg_types:
        supported_aggs = sorted([a.value for a in metric_cfg.aggregation_types])
        invalid_aggs = sorted([a.value for a in unsupported_agg_types])
        raise MlflowException.invalid_parameter_value(
            f"Found invalid aggregation_type(s): {invalid_aggs}. "
            f"Supported aggregation types: {supported_aggs}",
        )
    unsupported_dims = set(dimensions or []) - metric_cfg.dimensions
    if unsupported_dims:
        supported_dims = sorted([d for d in metric_cfg.dimensions if d is not None])
        raise MlflowException.invalid_parameter_value(
            f"Found invalid dimension(s): {sorted(unsupported_dims)}. "
            f"Supported dimensions: {supported_dims}",
        )
def convert_results_to_metric_data_points(
    results: list[tuple],
    select_columns: list[Column],
    num_dimensions: int,
    metric_name: str,
) -> list[MetricDataPoint]:
    """Convert raw SQL query result rows into MetricDataPoint objects.

    Each row follows the layout of ``select_columns``: the first
    ``num_dimensions`` entries are dimension values (including the optional
    time bucket) and the remaining entries are aggregation results.

    Note: the annotation was ``list[tuple[...]]``, which is malformed typing
    syntax (a bare ``...`` is only valid as the *second* argument of
    ``tuple``); ``list[tuple]`` expresses "rows of arbitrary width".

    Args:
        results: List of result tuples returned by the metrics query.
        select_columns: Labeled column objects (dimensions followed by
            aggregations) matching the row layout.
        num_dimensions: Number of leading dimension columns in each row.
        metric_name: Name of the metric being queried.

    Returns:
        List of MetricDataPoint objects. Rows with a NULL dimension value,
        or with no non-NULL aggregation values, are dropped.
    """
    data_points = []
    for row in results:
        # Split row values into dimensions and aggregations based on select_columns.
        dims = {col.name: row[i] for i, col in enumerate(select_columns[:num_dimensions])}
        # Skip data points with None dimension values.
        if any(value is None for value in dims.values()):
            continue
        # Convert time_bucket from epoch milliseconds to an ISO 8601 datetime string.
        if TIME_BUCKET_LABEL in dims:
            timestamp_ms = float(dims[TIME_BUCKET_LABEL])
            dt = datetime.fromtimestamp(timestamp_ms / 1000.0, tz=timezone.utc)
            dims[TIME_BUCKET_LABEL] = dt.isoformat()
        values = {
            col.name: row[i + num_dimensions]
            for i, col in enumerate(select_columns[num_dimensions:])
            if row[i + num_dimensions] is not None
        }
        # Skip data points with no values (all aggregations returned None).
        if not values:
            continue
        data_points.append(
            MetricDataPoint(
                dimensions=dims,
                metric_name=metric_name,
                values=values,
            )
        )
    return data_points
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/store/tracking/utils/sql_trace_metrics_utils.py",
"license": "Apache License 2.0",
"lines": 749,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/entities/test_trace_metrics.py | import pytest
from mlflow.entities.trace_metrics import (
AggregationType,
MetricAggregation,
MetricDataPoint,
MetricViewType,
)
from mlflow.protos import service_pb2 as pb
@pytest.mark.parametrize(
    ("view_type", "expected_proto"),
    zip(MetricViewType, pb.MetricViewType.values(), strict=True),
)
def test_trace_metrics_view_type(view_type: MetricViewType, expected_proto: pb.MetricViewType):
    """Each MetricViewType member serializes to its positionally matching proto value."""
    converted = view_type.to_proto()
    assert converted == expected_proto
@pytest.mark.parametrize(
    ("aggregation_type", "expected_proto"),
    zip(AggregationType, pb.AggregationType.values(), strict=True),
)
def test_trace_metrics_aggregation_type_to_proto(
    aggregation_type: AggregationType, expected_proto: pb.AggregationType
):
    """Each AggregationType member serializes to its positionally matching proto value."""
    converted = aggregation_type.to_proto()
    assert converted == expected_proto
def test_metrics_aggregation_to_proto_without_percentile():
    """A non-percentile aggregation serializes without a percentile_value field."""
    avg_proto = MetricAggregation(aggregation_type=AggregationType.AVG).to_proto()
    assert avg_proto.aggregation_type == pb.AggregationType.AVG
    assert not avg_proto.HasField("percentile_value")
@pytest.mark.parametrize(
    ("percentile_value"),
    [50.0, 75.0, 90.0, 95.0, 99.0, 99.9],
)
def test_metrics_aggregation_percentile_values(percentile_value: float):
    """The percentile value survives a round trip into the serialized proto."""
    agg = MetricAggregation(
        aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
    )
    assert agg.to_proto().percentile_value == percentile_value
def test_metrics_aggregation_percentile_requires_value():
    """Constructing a PERCENTILE aggregation without a percentile value is rejected."""
    with pytest.raises(ValueError, match="Percentile value is required for PERCENTILE aggregation"):
        MetricAggregation(aggregation_type=AggregationType.PERCENTILE)
@pytest.mark.parametrize("percentile_value", [-1.0, -0.1, 100.1, 101.0, 1000.0])
def test_metrics_aggregation_percentile_value_out_of_range(percentile_value: float):
    """Percentile values outside the closed interval [0, 100] are rejected."""
    with pytest.raises(ValueError, match="Percentile value must be between 0 and 100"):
        MetricAggregation(
            aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
        )
@pytest.mark.parametrize("percentile_value", [0.0, 0.1, 50.0, 99.9, 100.0])
def test_metrics_aggregation_percentile_value_valid_range(percentile_value: float):
    """Boundary and interior percentile values within [0, 100] are accepted."""
    aggregation = MetricAggregation(
        aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
    )
    assert aggregation.percentile_value == percentile_value
@pytest.mark.parametrize(
    "agg_type",
    [t for t in AggregationType if t is not AggregationType.PERCENTILE],
)
def test_metrics_aggregation_non_percentile_with_value_raises(agg_type: AggregationType):
    """Supplying a percentile value with any non-PERCENTILE aggregation type is rejected."""
    with pytest.raises(
        ValueError, match="Percentile value is only allowed for PERCENTILE aggregation"
    ):
        MetricAggregation(aggregation_type=agg_type, percentile_value=50.0)
def test_trace_metrics_metric_data_point_from_proto():
    """MetricDataPoint.from_proto reconstructs an entity equal to one built directly."""
    source_proto = pb.MetricDataPoint(
        metric_name="latency",
        dimensions={"status": "OK"},
        values={"avg": 150.5, "p99": 200},
    )
    expected = MetricDataPoint(
        metric_name="latency",
        dimensions={"status": "OK"},
        values={"avg": 150.5, "p99": 200},
    )
    assert MetricDataPoint.from_proto(source_proto) == expected
def test_trace_metrics_metric_data_point_to_proto():
    """MetricDataPoint.to_proto preserves the metric name, dimensions, and values."""
    point = MetricDataPoint(
        metric_name="latency",
        dimensions={"status": "OK", "model": "gpt-4"},
        values={"avg": 150.5, "p99": 200.0},
    )
    serialized = point.to_proto()
    assert serialized.metric_name == "latency"
    assert dict(serialized.dimensions) == {"status": "OK", "model": "gpt-4"}
    assert dict(serialized.values) == {"avg": 150.5, "p99": 200.0}
@pytest.mark.parametrize(
    ("view_type", "expected_proto"),
    zip(MetricViewType, pb.MetricViewType.values(), strict=True),
)
def test_trace_metrics_view_type_from_proto(view_type: MetricViewType, expected_proto: int):
    """Each proto view-type value deserializes back to the matching enum member."""
    assert MetricViewType.from_proto(expected_proto) == view_type
@pytest.mark.parametrize(
    "agg_type",
    [t for t in AggregationType if t is not AggregationType.PERCENTILE],
)
def test_metrics_aggregation_from_proto_without_percentile(agg_type: AggregationType):
    """Deserializing a proto without percentile_value yields a None percentile."""
    round_tripped = MetricAggregation.from_proto(
        pb.MetricAggregation(aggregation_type=agg_type.to_proto())
    )
    assert round_tripped.aggregation_type == agg_type
    assert round_tripped.percentile_value is None
@pytest.mark.parametrize("percentile_value", [50.0, 75.0, 90.0, 95.0, 99.0, 99.9])
def test_metrics_aggregation_from_proto_with_percentile(percentile_value: float):
    """Deserializing a PERCENTILE proto restores both the type and the value."""
    source = pb.MetricAggregation(
        aggregation_type=pb.AggregationType.PERCENTILE,
        percentile_value=percentile_value,
    )
    restored = MetricAggregation.from_proto(source)
    assert restored.aggregation_type == AggregationType.PERCENTILE
    assert restored.percentile_value == percentile_value
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/entities/test_trace_metrics.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/store/tracking/test_sqlalchemy_store_query_trace_metrics.py | import json
import uuid
from dataclasses import asdict
from datetime import datetime, timezone
import numpy as np
import pytest
from opentelemetry import trace as trace_api
from mlflow.entities import (
Assessment,
AssessmentSource,
AssessmentSourceType,
Expectation,
Feedback,
trace_location,
)
from mlflow.entities.assessment import AssessmentError
from mlflow.entities.trace_info import TraceInfo
from mlflow.entities.trace_metrics import (
AggregationType,
MetricAggregation,
MetricViewType,
)
from mlflow.entities.trace_status import TraceStatus
from mlflow.exceptions import MlflowException
from mlflow.genai.judges import CategoricalRating
from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore
from mlflow.tracing.constant import (
AssessmentMetricDimensionKey,
AssessmentMetricKey,
SpanAttributeKey,
SpanMetricDimensionKey,
SpanMetricKey,
TraceMetadataKey,
TraceMetricDimensionKey,
TraceMetricKey,
TraceTagKey,
)
from mlflow.utils.time import get_current_time_millis
from tests.store.tracking.test_sqlalchemy_store import create_test_span
pytestmark = pytest.mark.notrackingurimock
def test_query_trace_metrics_count_no_dimensions(store: SqlAlchemyStore):
    """COUNT over all traces with no grouping returns a single data point."""
    exp_id = store.create_experiment("test_count_no_dimensions")
    for i in range(5):
        trace_info = TraceInfo(
            trace_id=f"trace{i}",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100 + i * 10,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 5},
    }
def test_query_trace_metrics_count_by_status(store: SqlAlchemyStore):
    """COUNT grouped by trace status returns one data point per distinct status."""
    exp_id = store.create_experiment("test_count_by_status")
    traces_data = [
        ("trace1", TraceStatus.OK),
        ("trace2", TraceStatus.OK),
        ("trace3", TraceStatus.OK),
        ("trace4", TraceStatus.ERROR),
        ("trace5", TraceStatus.ERROR),
    ]
    for trace_id, status in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=status,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[TraceMetricDimensionKey.TRACE_STATUS],
    )
    assert len(result) == 2
    # Results are ordered by the dimension value, so ERROR precedes OK.
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {TraceMetricDimensionKey.TRACE_STATUS: "ERROR"},
        "values": {"COUNT": 2},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {TraceMetricDimensionKey.TRACE_STATUS: "OK"},
        "values": {"COUNT": 3},
    }
def test_query_trace_metrics_count_by_name(store: SqlAlchemyStore):
    """COUNT grouped by trace name returns one data point per distinct name."""
    exp_id = store.create_experiment("test_count_by_name")
    traces_data = [
        ("trace1", "workflow_a"),
        ("trace2", "workflow_a"),
        ("trace3", "workflow_a"),
        ("trace4", "workflow_b"),
        ("trace5", "workflow_b"),
    ]
    for trace_id, name in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: name},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    assert len(result) == 2
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
        "values": {"COUNT": 3},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
        "values": {"COUNT": 2},
    }
def test_query_trace_metrics_count_by_multiple_dimensions(store: SqlAlchemyStore):
    """COUNT grouped by status AND name returns one data point per combination."""
    exp_id = store.create_experiment("test_count_by_multiple_dimensions")
    traces_data = [
        ("trace1", TraceStatus.OK, "workflow_a"),
        ("trace2", TraceStatus.OK, "workflow_a"),
        ("trace3", TraceStatus.ERROR, "workflow_a"),
        ("trace4", TraceStatus.OK, "workflow_b"),
        ("trace5", TraceStatus.ERROR, "workflow_b"),
    ]
    for trace_id, status, name in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=status,
            tags={TraceTagKey.TRACE_NAME: name},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[TraceMetricDimensionKey.TRACE_STATUS, TraceMetricDimensionKey.TRACE_NAME],
    )
    # 2 statuses x 2 names are all present, ordered by status then name.
    assert len(result) == 4
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            TraceMetricDimensionKey.TRACE_STATUS: "ERROR",
            TraceMetricDimensionKey.TRACE_NAME: "workflow_a",
        },
        "values": {"COUNT": 1},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            TraceMetricDimensionKey.TRACE_STATUS: "ERROR",
            TraceMetricDimensionKey.TRACE_NAME: "workflow_b",
        },
        "values": {"COUNT": 1},
    }
    assert asdict(result[2]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            TraceMetricDimensionKey.TRACE_STATUS: "OK",
            TraceMetricDimensionKey.TRACE_NAME: "workflow_a",
        },
        "values": {"COUNT": 2},
    }
    assert asdict(result[3]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            TraceMetricDimensionKey.TRACE_STATUS: "OK",
            TraceMetricDimensionKey.TRACE_NAME: "workflow_b",
        },
        "values": {"COUNT": 1},
    }
def test_query_trace_metrics_latency_avg(store: SqlAlchemyStore):
    """AVG latency grouped by trace name averages execution_duration per group."""
    exp_id = store.create_experiment("test_latency_avg")
    traces_data = [
        ("trace1", "workflow_a", 100),
        ("trace2", "workflow_a", 200),
        ("trace3", "workflow_a", 300),
        ("trace4", "workflow_b", 150),
        ("trace5", "workflow_b", 250),
    ]
    for trace_id, name, duration in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=duration,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: name},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.LATENCY,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    assert len(result) == 2
    # Both groups happen to average to 200.0.
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.LATENCY,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
        "values": {"AVG": 200.0},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.LATENCY,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
        "values": {"AVG": 200.0},
    }
@pytest.mark.parametrize(
    "percentile_value",
    [50.0, 75.0, 90.0, 95.0, 99.0],
)
def test_query_trace_metrics_latency_percentiles(
    store: SqlAlchemyStore,
    percentile_value: float,
):
    """Latency percentiles per trace name match numpy's linear-interpolation percentile."""
    exp_id = store.create_experiment(f"test_latency_percentile_{percentile_value}")
    traces_data = [
        ("trace1", "workflow_a", 100),
        ("trace2", "workflow_a", 200),
        ("trace3", "workflow_a", 300),
        ("trace4", "workflow_b", 100),
        ("trace5", "workflow_b", 200),
    ]
    for trace_id, name, duration in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=duration,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: name},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.LATENCY,
        aggregations=[
            MetricAggregation(
                aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
            )
        ],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # Calculate expected values based on database type
    expected_workflow_a = pytest.approx(
        np.percentile([100.0, 200.0, 300.0], percentile_value),
        abs=0.01,
    )
    expected_workflow_b = pytest.approx(
        np.percentile([100.0, 200.0], percentile_value),
        abs=0.01,
    )
    assert len(result) == 2
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.LATENCY,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
        "values": {f"P{percentile_value}": expected_workflow_a},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.LATENCY,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
        "values": {f"P{percentile_value}": expected_workflow_b},
    }
def test_query_trace_metrics_latency_percentile_identical_values(store: SqlAlchemyStore):
    """Any percentile of an all-identical latency set returns that single value."""
    exp_id = store.create_experiment("test_latency_percentile_identical")
    # All traces have the same duration - tests edge case where lower and upper
    # interpolation points are the same
    traces_data = [
        ("trace1", "workflow_a", 50),
        ("trace2", "workflow_a", 50),
    ]
    for trace_id, name, duration in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=duration,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: name},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.LATENCY,
        aggregations=[
            MetricAggregation(aggregation_type=AggregationType.AVG),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=50),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=95),
        ],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # When all values are identical, any percentile should return that value
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.LATENCY,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
        "values": {
            "AVG": 50.0,
            "P50": pytest.approx(50.0, abs=0.01),
            "P95": pytest.approx(50.0, abs=0.01),
        },
    }
def test_query_trace_metrics_latency_multiple_aggregations(store: SqlAlchemyStore):
    """AVG, P95, and P99 can be requested together and all appear in one data point."""
    exp_id = store.create_experiment("test_latency_multiple_aggregations")
    traces_data = [
        ("trace1", "workflow_a", 100),
        ("trace2", "workflow_a", 200),
        ("trace3", "workflow_a", 300),
        ("trace4", "workflow_a", 400),
        ("trace5", "workflow_a", 500),
    ]
    for trace_id, name, duration in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=duration,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: name},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.LATENCY,
        aggregations=[
            MetricAggregation(aggregation_type=AggregationType.AVG),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=95),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=99),
        ],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # Calculate expected percentile value based on database type
    values = [100.0, 200.0, 300.0, 400.0, 500.0]
    expected_p95 = pytest.approx(np.percentile(values, 95.0), abs=0.01)
    expected_p99 = pytest.approx(np.percentile(values, 99.0), abs=0.01)
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.LATENCY,
        "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
        "values": {
            "AVG": pytest.approx(300.0, abs=0.01),
            "P95": expected_p95,
            "P99": expected_p99,
        },
    }
def test_query_trace_metrics_with_time_interval(store: SqlAlchemyStore):
    """Counts are bucketed into hourly time windows; bucket keys are ISO timestamps."""
    exp_id = store.create_experiment("test_with_time_interval")
    base_time = 1577836800000  # 2020-01-01 00:00:00 UTC in milliseconds
    hour_ms = 60 * 60 * 1000
    # Two traces in hour 0, two in hour 1, one in hour 2.
    traces_data = [
        ("trace1", base_time, 100),
        ("trace2", base_time + 10 * 60 * 1000, 200),
        ("trace3", base_time + hour_ms, 150),
        ("trace4", base_time + hour_ms + 30 * 60 * 1000, 250),
        ("trace5", base_time + 2 * hour_ms, 300),
    ]
    for trace_id, timestamp, duration in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=timestamp,
            execution_duration=duration,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        time_interval_seconds=3600,  # 1 hour
        start_time_ms=base_time,
        end_time_ms=base_time + 3 * hour_ms,
    )
    assert len(result) == 3
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": datetime.fromtimestamp(base_time / 1000, tz=timezone.utc).isoformat()
        },
        "values": {"COUNT": 2},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": datetime.fromtimestamp(
                (base_time + hour_ms) / 1000, tz=timezone.utc
            ).isoformat()
        },
        "values": {"COUNT": 2},
    }
    assert asdict(result[2]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": datetime.fromtimestamp(
                (base_time + 2 * hour_ms) / 1000, tz=timezone.utc
            ).isoformat()
        },
        "values": {"COUNT": 1},
    }
def test_query_trace_metrics_with_time_interval_and_dimensions(store: SqlAlchemyStore):
    """Time bucketing composes with a status dimension: one point per (bucket, status)."""
    exp_id = store.create_experiment("test_with_time_interval_and_dimensions")
    base_time = 1577836800000  # 2020-01-01 00:00:00 UTC in milliseconds
    hour_ms = 60 * 60 * 1000
    traces_data = [
        ("trace1", base_time, TraceStatus.OK, 100),
        ("trace2", base_time + 10 * 60 * 1000, TraceStatus.ERROR, 200),
        ("trace3", base_time + hour_ms, TraceStatus.OK, 150),
        ("trace4", base_time + hour_ms, TraceStatus.ERROR, 250),
    ]
    for trace_id, timestamp, status, duration in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=timestamp,
            execution_duration=duration,
            state=status,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[TraceMetricDimensionKey.TRACE_STATUS],
        time_interval_seconds=3600,  # 1 hour
        start_time_ms=base_time,
        end_time_ms=base_time + 2 * hour_ms,
    )
    assert len(result) == 4
    time_bucket_1 = datetime.fromtimestamp(base_time / 1000, tz=timezone.utc).isoformat()
    time_bucket_2 = datetime.fromtimestamp(
        (base_time + hour_ms) / 1000, tz=timezone.utc
    ).isoformat()
    # Ordered by time bucket first, then by status within each bucket.
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": time_bucket_1,
            TraceMetricDimensionKey.TRACE_STATUS: "ERROR",
        },
        "values": {"COUNT": 1},
    }
    assert asdict(result[1]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": time_bucket_1,
            TraceMetricDimensionKey.TRACE_STATUS: "OK",
        },
        "values": {"COUNT": 1},
    }
    assert asdict(result[2]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": time_bucket_2,
            TraceMetricDimensionKey.TRACE_STATUS: "ERROR",
        },
        "values": {"COUNT": 1},
    }
    assert asdict(result[3]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {
            "time_bucket": time_bucket_2,
            TraceMetricDimensionKey.TRACE_STATUS: "OK",
        },
        "values": {"COUNT": 1},
    }
def test_query_trace_metrics_with_status_filter(store: SqlAlchemyStore):
    """A trace.status filter restricts the count to traces with that status."""
    exp_id = store.create_experiment("test_with_status_filter")
    traces_data = [
        ("trace1", TraceStatus.OK),
        ("trace2", TraceStatus.OK),
        ("trace3", TraceStatus.OK),
        ("trace4", TraceStatus.ERROR),
        ("trace5", TraceStatus.ERROR),
    ]
    for trace_id, status in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=status,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["trace.status = 'OK'"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 3},
    }
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["trace.status = 'ERROR'"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
def test_query_trace_metrics_with_source_run_filter(store: SqlAlchemyStore):
    """Filtering on the SOURCE_RUN metadata key counts only traces from that run."""
    exp_id = store.create_experiment("test_with_source_run_filter")
    traces_data = [
        ("trace1", "run_123"),
        ("trace2", "run_123"),
        ("trace3", "run_456"),
        ("trace4", "run_456"),
        ("trace5", None),  # No source run
    ]
    for trace_id, source_run in traces_data:
        metadata = {TraceMetadataKey.SOURCE_RUN: source_run} if source_run else {}
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
            trace_metadata=metadata,
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=[f"trace.metadata.`{TraceMetadataKey.SOURCE_RUN}` = 'run_123'"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=[f"trace.metadata.`{TraceMetadataKey.SOURCE_RUN}` = 'run_456'"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
def test_query_trace_metrics_with_multiple_filters(store: SqlAlchemyStore):
    """Multiple filters are combined conjunctively (status AND source run)."""
    exp_id = store.create_experiment("test_with_multiple_filters")
    traces_data = [
        ("trace1", TraceStatus.OK, "run_123"),
        ("trace2", TraceStatus.OK, "run_123"),
        ("trace3", TraceStatus.ERROR, "run_123"),
        ("trace4", TraceStatus.OK, "run_456"),
        ("trace5", TraceStatus.ERROR, "run_456"),
    ]
    for trace_id, status, source_run in traces_data:
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=status,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
            trace_metadata={TraceMetadataKey.SOURCE_RUN: source_run},
        )
        store.start_trace(trace_info)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=[
            "trace.status = 'OK'",
            f"trace.metadata.`{TraceMetadataKey.SOURCE_RUN}` = 'run_123'",
        ],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
def test_query_trace_metrics_with_tag_filter(store: SqlAlchemyStore):
    """Tag filters match exact values; dotted tag keys are quoted with backticks."""
    exp_id = store.create_experiment("test_with_tag_filter")
    traces_data = [
        ("trace1", "model_v1"),
        ("trace2", "model_v1"),
        ("trace3", "model_v2"),
        ("trace4", "model_v2"),
        ("trace5", None),  # No model tag
    ]
    for trace_id, model_version in traces_data:
        tags = {TraceTagKey.TRACE_NAME: "test_trace", "tag1": "value1"}
        if model_version:
            tags["model.version"] = model_version
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags=tags,
        )
        store.start_trace(trace_info)
    # Every trace carries tag1=value1, so all five match.
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["trace.tag.tag1 = 'value1'"],
    )
    assert len(result) == 1
    assert asdict(result[0])["values"] == {"COUNT": 5}
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["trace.tag.`model.version` = 'model_v1'"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TRACE_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["trace.tag.`model.version` = 'model_v2'"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": TraceMetricKey.TRACE_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
@pytest.mark.parametrize(
    ("filter_string", "error_match"),
    [
        ("status = 'OK'", r"Invalid identifier 'status'"),
        ("trace.status != 'OK'", r"Invalid comparator: '!=', only '=' operator is supported"),
        ("trace.unsupported_field = 'value'", r"Invalid entity 'unsupported_field' specified"),
        ("span.status = 'OK'", r"Filtering by span is only supported for SPANS view type"),
        (
            "assessment.type = 'feedback'",
            r"Filtering by assessment is only supported for ASSESSMENTS view type",
        ),
    ],
)
def test_query_trace_metrics_with_invalid_filter(
    store: SqlAlchemyStore, filter_string: str, error_match: str
):
    """Malformed or out-of-scope filter strings raise MlflowException with a clear message."""
    exp_id = store.create_experiment("test_with_invalid_filter")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    with pytest.raises(MlflowException, match=error_match):
        store.query_trace_metrics(
            experiment_ids=[exp_id],
            view_type=MetricViewType.TRACES,
            metric_name=TraceMetricKey.TRACE_COUNT,
            aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
            filters=[filter_string],
        )
@pytest.fixture
def traces_with_token_usage_setup(store: SqlAlchemyStore):
    """Create an experiment with five traces carrying TOKEN_USAGE metadata.

    Returns a (experiment_id, store) tuple for the token-metric tests below.
    """
    exp_id = store.create_experiment("test_traces_with_token_usage")
    # (trace_id, trace name, input tokens, output tokens, total tokens)
    traces_data = [
        ("trace1", "workflow_a", 100, 50, 150),
        ("trace2", "workflow_a", 200, 100, 300),
        ("trace3", "workflow_a", 150, 75, 225),
        ("trace4", "workflow_b", 300, 150, 450),
        ("trace5", "workflow_b", 250, 125, 375),
    ]
    for trace_id, name, input_tokens, output_tokens, total_tokens in traces_data:
        token_usage = {
            TraceMetricKey.INPUT_TOKENS: input_tokens,
            TraceMetricKey.OUTPUT_TOKENS: output_tokens,
            TraceMetricKey.TOTAL_TOKENS: total_tokens,
        }
        trace_info = TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: name},
            trace_metadata={TraceMetadataKey.TOKEN_USAGE: json.dumps(token_usage)},
        )
        store.start_trace(trace_info)
    return exp_id, store
def test_query_trace_metrics_total_tokens_sum(traces_with_token_usage_setup):
    """SUM of total_tokens grouped by trace name yields one row per workflow."""
    exp_id, store = traces_with_token_usage_setup
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TOTAL_TOKENS,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # workflow_a: 150 + 300 + 225 = 675; workflow_b: 450 + 375 = 825
    assert [asdict(row) for row in results] == [
        {
            "metric_name": TraceMetricKey.TOTAL_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
            "values": {"SUM": 675},
        },
        {
            "metric_name": TraceMetricKey.TOTAL_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
            "values": {"SUM": 825},
        },
    ]
def test_query_trace_metrics_total_tokens_avg(traces_with_token_usage_setup):
    """AVG of total_tokens grouped by trace name yields one row per workflow."""
    exp_id, store = traces_with_token_usage_setup
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TOTAL_TOKENS,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # workflow_a: 675 / 3 = 225.0; workflow_b: 825 / 2 = 412.5
    assert [asdict(row) for row in results] == [
        {
            "metric_name": TraceMetricKey.TOTAL_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
            "values": {"AVG": 225.0},
        },
        {
            "metric_name": TraceMetricKey.TOTAL_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
            "values": {"AVG": 412.5},
        },
    ]
def test_query_trace_metrics_total_tokens_percentiles(traces_with_token_usage_setup):
    """Percentile aggregations of total_tokens match numpy's linear interpolation."""
    exp_id, store = traces_with_token_usage_setup
    percentiles = [50, 75, 90, 95, 99]
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TOTAL_TOKENS,
        aggregations=[
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=p)
            for p in percentiles
        ],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # Per-workflow total_tokens samples, sorted ascending.
    samples_by_workflow = [
        ("workflow_a", [150, 225, 300]),
        ("workflow_b", [375, 450]),
    ]
    assert len(results) == 2
    for row, (workflow, samples) in zip(results, samples_by_workflow):
        assert asdict(row) == {
            "metric_name": TraceMetricKey.TOTAL_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: workflow},
            "values": {f"P{p}": np.percentile(samples, p) for p in percentiles},
        }
def test_query_trace_metrics_total_tokens_no_dimensions(traces_with_token_usage_setup):
    """Without dimensions, SUM and AVG collapse to a single aggregate row."""
    exp_id, store = traces_with_token_usage_setup
    requested_aggregations = [
        MetricAggregation(aggregation_type=AggregationType.SUM),
        MetricAggregation(aggregation_type=AggregationType.AVG),
    ]
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TOTAL_TOKENS,
        aggregations=requested_aggregations,
    )
    # All five traces combined: sum 1500, mean 300.0.
    assert [asdict(row) for row in results] == [
        {
            "metric_name": TraceMetricKey.TOTAL_TOKENS,
            "dimensions": {},
            "values": {"SUM": 1500, "AVG": 300.0},
        }
    ]
def test_query_trace_metrics_total_tokens_without_token_usage(store: SqlAlchemyStore):
    """Traces lacking token-usage metadata produce no rows for a token metric."""
    exp_id = store.create_experiment("test_total_tokens_without_token_usage")
    for index in range(3):
        store.start_trace(
            TraceInfo(
                trace_id=f"trace{index}",
                trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
                request_time=get_current_time_millis(),
                execution_duration=100,
                state=TraceStatus.OK,
                tags={TraceTagKey.TRACE_NAME: "test_trace"},
            )
        )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.TOTAL_TOKENS,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
    )
    # Every aggregation value is None, so no data points are returned at all.
    assert not results
@pytest.fixture
def traces_with_cached_token_usage_setup(store: SqlAlchemyStore):
    """Create five traces whose token usage includes cache read/creation counts."""
    exp_id = store.create_experiment("test_traces_with_cached_token_usage")
    usage_keys = (
        TraceMetricKey.INPUT_TOKENS,
        TraceMetricKey.OUTPUT_TOKENS,
        TraceMetricKey.TOTAL_TOKENS,
        TraceMetricKey.CACHE_READ_INPUT_TOKENS,
        TraceMetricKey.CACHE_CREATION_INPUT_TOKENS,
    )
    # (trace_id, name, input, output, total, cache_read, cache_creation)
    specs = [
        ("trace1", "workflow_a", 100, 50, 150, 30, 10),
        ("trace2", "workflow_a", 200, 100, 300, 60, 20),
        ("trace3", "workflow_a", 150, 75, 225, 0, 0),
        ("trace4", "workflow_b", 300, 150, 450, 100, 50),
        ("trace5", "workflow_b", 250, 125, 375, 80, 40),
    ]
    for trace_id, workflow_name, *token_counts in specs:
        usage = dict(zip(usage_keys, token_counts))
        store.start_trace(
            TraceInfo(
                trace_id=trace_id,
                trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
                request_time=get_current_time_millis(),
                execution_duration=100,
                state=TraceStatus.OK,
                tags={TraceTagKey.TRACE_NAME: workflow_name},
                trace_metadata={TraceMetadataKey.TOKEN_USAGE: json.dumps(usage)},
            )
        )
    return exp_id, store
def test_query_trace_metrics_cache_read_input_tokens_sum(traces_with_cached_token_usage_setup):
    """SUM of cache-read input tokens grouped by trace name."""
    exp_id, store = traces_with_cached_token_usage_setup
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.CACHE_READ_INPUT_TOKENS,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # workflow_a: 30 + 60 + 0 = 90; workflow_b: 100 + 80 = 180
    assert [asdict(row) for row in results] == [
        {
            "metric_name": TraceMetricKey.CACHE_READ_INPUT_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
            "values": {"SUM": 90},
        },
        {
            "metric_name": TraceMetricKey.CACHE_READ_INPUT_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
            "values": {"SUM": 180},
        },
    ]
def test_query_trace_metrics_cache_creation_input_tokens_sum(traces_with_cached_token_usage_setup):
    """SUM of cache-creation input tokens grouped by trace name."""
    exp_id, store = traces_with_cached_token_usage_setup
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.CACHE_CREATION_INPUT_TOKENS,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[TraceMetricDimensionKey.TRACE_NAME],
    )
    # workflow_a: 10 + 20 + 0 = 30; workflow_b: 50 + 40 = 90
    assert [asdict(row) for row in results] == [
        {
            "metric_name": TraceMetricKey.CACHE_CREATION_INPUT_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_a"},
            "values": {"SUM": 30},
        },
        {
            "metric_name": TraceMetricKey.CACHE_CREATION_INPUT_TOKENS,
            "dimensions": {TraceMetricDimensionKey.TRACE_NAME: "workflow_b"},
            "values": {"SUM": 90},
        },
    ]
def test_query_trace_metrics_cache_read_input_tokens_no_dimensions(
    traces_with_cached_token_usage_setup,
):
    """Without dimensions, cache-read token SUM and AVG collapse to one row."""
    exp_id, store = traces_with_cached_token_usage_setup
    requested_aggregations = [
        MetricAggregation(aggregation_type=AggregationType.SUM),
        MetricAggregation(aggregation_type=AggregationType.AVG),
    ]
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.TRACES,
        metric_name=TraceMetricKey.CACHE_READ_INPUT_TOKENS,
        aggregations=requested_aggregations,
    )
    # All five traces combined: sum 270, mean 54.0.
    assert [asdict(row) for row in results] == [
        {
            "metric_name": TraceMetricKey.CACHE_READ_INPUT_TOKENS,
            "dimensions": {},
            "values": {"SUM": 270, "AVG": 54.0},
        }
    ]
def test_query_span_metrics_count_no_dimensions(store: SqlAlchemyStore):
    """COUNT over all spans with no grouping returns a single aggregate row."""
    exp_id = store.create_experiment("test_span_count_no_dimensions")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    span_types = ["LLM", "CHAIN", "LLM", "TOOL", "LLM"]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{idx}",
                span_id=idx,
                span_type=span_type,
                start_ns=1000000000 + (idx - 1) * 100000000,
            )
            for idx, span_type in enumerate(span_types, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {},
            "values": {"COUNT": 5},
        }
    ]
def test_query_span_metrics_count_by_span_type(store: SqlAlchemyStore):
    """Span counts grouped by span_type come back in dimension-sorted order."""
    exp_id = store.create_experiment("test_span_count_by_span_type")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Three LLM spans, two CHAIN spans, one TOOL span.
    span_types = ["LLM", "LLM", "LLM", "CHAIN", "CHAIN", "TOOL"]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{idx}",
                span_id=idx,
                span_type=span_type,
                start_ns=1000000000 + (idx - 1) * 100000000,
            )
            for idx, span_type in enumerate(span_types, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_TYPE],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_TYPE: span_type},
            "values": {"COUNT": count},
        }
        for span_type, count in [("CHAIN", 2), ("LLM", 3), ("TOOL", 1)]
    ]
def test_query_span_metrics_count_by_span_status(store: SqlAlchemyStore):
    """Span counts grouped by span_status separate OK and ERROR spans."""
    exp_id = store.create_experiment("test_span_count_by_span_status")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (name, span_id, span_type, start_ns, status): three OK LLM spans and
    # two ERROR CHAIN spans.
    span_specs = [
        ("span1", 1, "LLM", 1000000000, trace_api.StatusCode.OK),
        ("span2", 2, "LLM", 1100000000, trace_api.StatusCode.OK),
        ("span3", 3, "LLM", 1200000000, trace_api.StatusCode.OK),
        ("span4", 4, "CHAIN", 1300000000, trace_api.StatusCode.ERROR),
        ("span5", 5, "CHAIN", 1400000000, trace_api.StatusCode.ERROR),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                name,
                span_id=sid,
                span_type=span_type,
                start_ns=start_ns,
                status=status,
            )
            for name, sid, span_type, start_ns, status in span_specs
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_STATUS],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_STATUS: "ERROR"},
            "values": {"COUNT": 2},
        },
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_STATUS: "OK"},
            "values": {"COUNT": 3},
        },
    ]
def test_query_span_metrics_with_time_interval(store: SqlAlchemyStore):
    """Span counts bucketed into hourly intervals report per-bucket totals."""
    exp_id = store.create_experiment("test_span_with_time_interval")
    # Base time in nanoseconds (2020-01-01 00:00:00 UTC).
    base_time_ns = 1577836800000000000
    minute_ns = 60 * 1_000_000_000
    hour_ns = 60 * minute_ns
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=base_time_ns // 1_000_000,  # nanoseconds -> milliseconds
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two spans land in hour 0, two in hour 1, one in hour 2.
    start_offsets = [0, 10 * minute_ns, hour_ns, hour_ns + 30 * minute_ns, 2 * hour_ns]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{idx}",
                span_id=idx,
                span_type="LLM",
                start_ns=base_time_ns + offset,
            )
            for idx, offset in enumerate(start_offsets, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        time_interval_seconds=3600,  # 1 hour
        start_time_ms=base_time_ns // 1_000_000,
        end_time_ms=(base_time_ns + 3 * hour_ns) // 1_000_000,
    )
    base_time_ms = base_time_ns // 1_000_000
    hour_ms = 60 * 60 * 1000
    expected_counts = [2, 2, 1]
    assert len(results) == len(expected_counts)
    for bucket_index, (row, count) in enumerate(zip(results, expected_counts)):
        bucket_start = datetime.fromtimestamp(
            (base_time_ms + bucket_index * hour_ms) / 1000, tz=timezone.utc
        ).isoformat()
        assert asdict(row) == {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {"time_bucket": bucket_start},
            "values": {"COUNT": count},
        }
def test_query_span_metrics_with_time_interval_and_dimensions(store: SqlAlchemyStore):
    """Hourly bucketing combined with span_type grouping yields one row per pair."""
    exp_id = store.create_experiment("test_span_with_time_interval_and_dimensions")
    # Base time in nanoseconds (2020-01-01 00:00:00 UTC).
    base_time_ns = 1577836800000000000
    minute_ns = 60 * 1_000_000_000
    hour_ns = 60 * minute_ns
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=base_time_ns // 1_000_000,
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # One LLM span and one CHAIN span in each of the first two hours.
    span_specs = [
        ("LLM", 0),
        ("CHAIN", 10 * minute_ns),
        ("LLM", hour_ns),
        ("CHAIN", hour_ns),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{idx}",
                span_id=idx,
                span_type=span_type,
                start_ns=base_time_ns + offset,
            )
            for idx, (span_type, offset) in enumerate(span_specs, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_TYPE],
        time_interval_seconds=3600,  # 1 hour
        start_time_ms=base_time_ns // 1_000_000,
        end_time_ms=(base_time_ns + 2 * hour_ns) // 1_000_000,
    )
    base_time_ms = base_time_ns // 1_000_000
    hour_ms = 60 * 60 * 1000
    bucket_starts = [
        datetime.fromtimestamp(
            (base_time_ms + i * hour_ms) / 1000, tz=timezone.utc
        ).isoformat()
        for i in range(2)
    ]
    # Rows are ordered by (time bucket, span type); every combination counts 1.
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {
                "time_bucket": bucket,
                SpanMetricDimensionKey.SPAN_TYPE: span_type,
            },
            "values": {"COUNT": 1},
        }
        for bucket in bucket_starts
        for span_type in ("CHAIN", "LLM")
    ]
def test_query_span_metrics_with_filters(store: SqlAlchemyStore):
    """Trace-level filters restrict which spans are counted in the SPANS view."""
    exp_id = store.create_experiment("test_span_with_filters")
    # One OK trace and one ERROR trace.
    for trace_id, trace_state in [("trace1", TraceStatus.OK), ("trace2", TraceStatus.ERROR)]:
        store.start_trace(
            TraceInfo(
                trace_id=trace_id,
                trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
                request_time=get_current_time_millis(),
                execution_duration=100,
                state=trace_state,
                tags={TraceTagKey.TRACE_NAME: "test_trace"},
            )
        )
    # Three spans on the OK trace.
    store.log_spans(
        exp_id,
        [
            create_test_span("trace1", "span1", span_id=1, span_type="LLM", start_ns=1000000000),
            create_test_span("trace1", "span2", span_id=2, span_type="LLM", start_ns=1100000000),
            create_test_span("trace1", "span3", span_id=3, span_type="CHAIN", start_ns=1200000000),
        ],
    )
    # Two spans on the ERROR trace.
    store.log_spans(
        exp_id,
        [
            create_test_span("trace2", "span4", span_id=4, span_type="LLM", start_ns=1300000000),
            create_test_span("trace2", "span5", span_id=5, span_type="CHAIN", start_ns=1400000000),
        ],
    )
    # Filtering on trace.status = 'OK' counts only the OK trace's spans.
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["trace.status = 'OK'"],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {},
            "values": {"COUNT": 3},
        }
    ]
    # Grouping by span type for the ERROR trace only.
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_TYPE],
        filters=["trace.status = 'ERROR'"],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_TYPE: span_type},
            "values": {"COUNT": 1},
        }
        for span_type in ("CHAIN", "LLM")
    ]
def test_query_span_metrics_across_multiple_traces(store: SqlAlchemyStore):
    """Span counts aggregate across all traces in the experiment."""
    exp_id = store.create_experiment("test_span_across_multiple_traces")
    # Three traces, each contributing one LLM span and one CHAIN span.
    for idx in range(3):
        store.start_trace(
            TraceInfo(
                trace_id=f"trace{idx}",
                trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
                request_time=get_current_time_millis(),
                execution_duration=100,
                state=TraceStatus.OK,
                tags={TraceTagKey.TRACE_NAME: f"workflow_{idx}"},
            )
        )
        store.log_spans(
            exp_id,
            [
                create_test_span(
                    f"trace{idx}",
                    f"span{idx}_1",
                    span_id=idx * 10 + 1,
                    span_type="LLM",
                    start_ns=1000000000 + idx * 100000000,
                ),
                create_test_span(
                    f"trace{idx}",
                    f"span{idx}_2",
                    span_id=idx * 10 + 2,
                    span_type="CHAIN",
                    start_ns=1100000000 + idx * 100000000,
                ),
            ],
        )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_TYPE],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_TYPE: span_type},
            "values": {"COUNT": 3},
        }
        for span_type in ("CHAIN", "LLM")
    ]
def test_query_span_metrics_with_span_status_filter(store: SqlAlchemyStore):
    """span.status filter restricts counting to spans with the matching status.

    Sets up one OK trace carrying OK spans and one ERROR trace carrying ERROR
    spans, then verifies that ``span.status = 'OK'`` counts only the OK spans.

    Fix: the original test logged the identical ``span3`` entry twice with the
    same ``span_id=3``; the expected counts (CHAIN=1) only held because the
    store collapsed the duplicate id. The redundant entry is removed so the
    test no longer depends on duplicate-span-id handling.
    """
    exp_id = store.create_experiment("test_span_with_status_filter")
    trace1 = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace1)
    trace2 = TraceInfo(
        trace_id="trace2",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.ERROR,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace2)
    # OK spans on trace1: two LLM spans and one CHAIN span.
    spans_ok = [
        create_test_span(
            "trace1",
            "span1",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            status=trace_api.StatusCode.OK,
        ),
        create_test_span(
            "trace1",
            "span2",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            status=trace_api.StatusCode.OK,
        ),
        create_test_span(
            "trace1",
            "span3",
            span_id=3,
            span_type="CHAIN",
            start_ns=1200000000,
            status=trace_api.StatusCode.OK,
        ),
    ]
    store.log_spans(exp_id, spans_ok)
    # ERROR spans on trace2: one LLM span and one CHAIN span.
    spans_error = [
        create_test_span(
            "trace2",
            "span4",
            span_id=4,
            span_type="LLM",
            start_ns=1300000000,
            status=trace_api.StatusCode.ERROR,
        ),
        create_test_span(
            "trace2",
            "span5",
            span_id=5,
            span_type="CHAIN",
            start_ns=1400000000,
            status=trace_api.StatusCode.ERROR,
        ),
    ]
    store.log_spans(exp_id, spans_error)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_TYPE],
        filters=["span.status = 'OK'"],
    )
    # Should only count spans from trace1 (OK status)
    assert len(result) == 2
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.SPAN_COUNT,
        "dimensions": {SpanMetricDimensionKey.SPAN_TYPE: "CHAIN"},
        "values": {"COUNT": 1},
    }
    assert asdict(result[1]) == {
        "metric_name": SpanMetricKey.SPAN_COUNT,
        "dimensions": {SpanMetricDimensionKey.SPAN_TYPE: "LLM"},
        "values": {"COUNT": 2},
    }
def test_query_span_metrics_with_span_name_filter(store: SqlAlchemyStore):
    """span.name filter counts only spans whose name matches exactly."""
    exp_id = store.create_experiment("test_span_with_name_filter")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two "generate_response" spans plus one each of two other names.
    named_spans = [
        ("generate_response", "LLM"),
        ("generate_response", "LLM"),
        ("process_input", "CHAIN"),
        ("validate_output", "TOOL"),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                span_name,
                span_id=idx,
                span_type=span_type,
                start_ns=1000000000 + (idx - 1) * 100000000,
            )
            for idx, (span_name, span_type) in enumerate(named_spans, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["span.name = 'generate_response'"],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {},
            "values": {"COUNT": 2},
        }
    ]
def test_query_span_metrics_with_span_type_filter(store: SqlAlchemyStore):
    """span.type filter counts only spans with the matching type."""
    exp_id = store.create_experiment("test_span_with_type_filter")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two TOOL spans among four total.
    span_types = ["LLM", "TOOL", "CHAIN", "TOOL"]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{idx}",
                span_id=idx,
                span_type=span_type,
                start_ns=1000000000 + (idx - 1) * 100000000,
            )
            for idx, span_type in enumerate(span_types, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=["span.type = 'TOOL'"],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {},
            "values": {"COUNT": 2},
        }
    ]
@pytest.mark.parametrize(
    ("filter_string", "error_match"),
    [
        ("status = 'OK'", r"Invalid identifier 'status'"),
        ("span.status != 'OK'", r"Invalid comparator: '!=', only '=' operator is supported"),
        ("span.invalid_field = 'value'", r"Invalid entity 'invalid_field' specified"),
        ("span.status.extra = 'value'", r"does not require a key"),
        ("span.name LIKE 'test%'", r"only '=' operator is supported"),
    ],
)
def test_query_span_metrics_invalid_filters(
    store: SqlAlchemyStore, filter_string: str, error_match: str
):
    """Invalid filter strings for the SPANS view must raise MlflowException."""
    experiment_id = store.create_experiment("test_span_invalid_filters")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    store.log_spans(
        experiment_id,
        [create_test_span("trace1", "span1", span_id=1, span_type="LLM", start_ns=1000000000)],
    )
    with pytest.raises(MlflowException, match=error_match):
        store.query_trace_metrics(
            experiment_ids=[experiment_id],
            view_type=MetricViewType.SPANS,
            metric_name=SpanMetricKey.SPAN_COUNT,
            aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
            filters=[filter_string],
        )
def test_query_span_metrics_count_by_span_name(store: SqlAlchemyStore):
    """Span counts grouped by span_name, with and without a span.type filter."""
    exp_id = store.create_experiment("test_span_count_by_span_name")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Three generate_response, two process_input, one validate_output.
    named_spans = [
        ("generate_response", "LLM"),
        ("generate_response", "LLM"),
        ("generate_response", "LLM"),
        ("process_input", "CHAIN"),
        ("process_input", "CHAIN"),
        ("validate_output", "TOOL"),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                span_name,
                span_id=idx,
                span_type=span_type,
                start_ns=1000000000 + (idx - 1) * 100000000,
            )
            for idx, (span_name, span_type) in enumerate(named_spans, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_NAME],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_NAME: span_name},
            "values": {"COUNT": count},
        }
        for span_name, count in [
            ("generate_response", 3),
            ("process_input", 2),
            ("validate_output", 1),
        ]
    ]
    # Adding a span.type filter narrows the grouping to matching spans only.
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_NAME],
        filters=["span.type = 'TOOL'"],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {SpanMetricDimensionKey.SPAN_NAME: "validate_output"},
            "values": {"COUNT": 1},
        }
    ]
def test_query_span_metrics_count_by_span_name_and_type(store: SqlAlchemyStore):
    """Grouping by both span_name and span_type yields one row per pair."""
    exp_id = store.create_experiment("test_span_count_by_span_name_and_type")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two llm_call/LLM, two tool_call/TOOL, one chain_call/CHAIN.
    named_spans = [
        ("llm_call", "LLM"),
        ("llm_call", "LLM"),
        ("tool_call", "TOOL"),
        ("tool_call", "TOOL"),
        ("chain_call", "CHAIN"),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                span_name,
                span_id=idx,
                span_type=span_type,
                start_ns=1000000000 + (idx - 1) * 100000000,
            )
            for idx, (span_name, span_type) in enumerate(named_spans, start=1)
        ],
    )
    results = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_NAME, SpanMetricDimensionKey.SPAN_TYPE],
    )
    assert [asdict(row) for row in results] == [
        {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {
                SpanMetricDimensionKey.SPAN_NAME: span_name,
                SpanMetricDimensionKey.SPAN_TYPE: span_type,
            },
            "values": {"COUNT": count},
        }
        for span_name, span_type, count in [
            ("chain_call", "CHAIN", 1),
            ("llm_call", "LLM", 2),
            ("tool_call", "TOOL", 2),
        ]
    ]
def test_query_span_metrics_latency_avg(store: SqlAlchemyStore):
    """AVG latency with no dimensions collapses all spans into a single row."""
    exp_id = store.create_experiment("test_span_latency_avg")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (span_id, start_ns, end_ns) — latency in ms is (end_ns - start_ns) / 1e6,
    # giving 100ms, 200ms, and 300ms respectively.
    timings = [
        (1, 1000000000, 1100000000),
        (2, 2000000000, 2200000000),
        (3, 3000000000, 3300000000),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{sid}",
                span_id=sid,
                span_type="LLM",
                start_ns=start,
                end_ns=end,
            )
            for sid, start, end in timings
        ],
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.LATENCY,
        "dimensions": {},
        "values": {"AVG": 200.0},  # mean of 100, 200, 300
    }
def test_query_span_metrics_latency_avg_by_span_name(store: SqlAlchemyStore):
    """AVG latency grouped by span name averages independently within each name."""
    exp_id = store.create_experiment("test_span_latency_avg_by_span_name")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (name, type, span_id, start_ns, end_ns)
    span_specs = [
        # generate_response latencies: 100ms, 200ms, 300ms -> avg = 200ms
        ("generate_response", "LLM", 1, 1000000000, 1100000000),
        ("generate_response", "LLM", 2, 2000000000, 2200000000),
        ("generate_response", "LLM", 3, 3000000000, 3300000000),
        # process_input latencies: 50ms, 150ms -> avg = 100ms
        ("process_input", "CHAIN", 4, 4000000000, 4050000000),
        ("process_input", "CHAIN", 5, 5000000000, 5150000000),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1", name, span_id=sid, span_type=stype, start_ns=start, end_ns=end
            )
            for name, stype, sid, start, end in span_specs
        ],
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[SpanMetricDimensionKey.SPAN_NAME],
    )
    assert len(result) == 2
    for row, (name, avg_ms) in zip(
        result, [("generate_response", 200.0), ("process_input", 100.0)]
    ):
        assert asdict(row) == {
            "metric_name": SpanMetricKey.LATENCY,
            "dimensions": {SpanMetricDimensionKey.SPAN_NAME: name},
            "values": {"AVG": avg_ms},
        }
def test_query_span_metrics_latency_by_span_status(store: SqlAlchemyStore):
    """AVG latency grouped by span status separates OK spans from ERROR spans."""
    exp_id = store.create_experiment("test_span_latency_by_span_status")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (span_id, start_ns, end_ns, status):
    # OK spans at 100ms and 200ms (avg 150ms); ERROR spans at 50ms and 150ms (avg 100ms).
    span_specs = [
        (1, 1000000000, 1100000000, trace_api.StatusCode.OK),
        (2, 2000000000, 2200000000, trace_api.StatusCode.OK),
        (3, 3000000000, 3050000000, trace_api.StatusCode.ERROR),
        (4, 4000000000, 4150000000, trace_api.StatusCode.ERROR),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{sid}",
                span_id=sid,
                span_type="LLM",
                start_ns=start,
                end_ns=end,
                status=status,
            )
            for sid, start, end, status in span_specs
        ],
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[SpanMetricDimensionKey.SPAN_STATUS],
    )
    # Rows are ordered by the status dimension value ("ERROR" sorts before "OK").
    assert len(result) == 2
    for row, (status_name, avg_ms) in zip(result, [("ERROR", 100.0), ("OK", 150.0)]):
        assert asdict(row) == {
            "metric_name": SpanMetricKey.LATENCY,
            "dimensions": {SpanMetricDimensionKey.SPAN_STATUS: status_name},
            "values": {"AVG": avg_ms},
        }
@pytest.mark.parametrize(
    "percentile_value",
    [50.0, 75.0, 90.0, 95.0, 99.0],
)
def test_query_span_metrics_latency_percentiles(
    store: SqlAlchemyStore,
    percentile_value: float,
):
    """A single PERCENTILE aggregation should match numpy's percentile result."""
    exp_id = store.create_experiment(f"test_span_latency_percentile_{percentile_value}")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Five spans; span i starts at i seconds and lasts i*100ms, so the
    # latencies are 100ms, 200ms, 300ms, 400ms, 500ms.
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{i}",
                span_id=i,
                span_type="LLM",
                start_ns=i * 1000000000,
                end_ns=i * 1000000000 + i * 100000000,
            )
            for i in range(1, 6)
        ],
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[
            MetricAggregation(
                aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
            )
        ],
    )
    # numpy is the reference implementation for the expected percentile.
    expected = pytest.approx(
        np.percentile([100.0, 200.0, 300.0, 400.0, 500.0], percentile_value),
        abs=0.01,
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.LATENCY,
        "dimensions": {},
        "values": {f"P{percentile_value}": expected},
    }
def test_query_span_metrics_latency_percentiles_by_span_name(store: SqlAlchemyStore):
    """P50 latency grouped by span name is computed independently per group."""
    exp_id = store.create_experiment("test_span_latency_percentiles_by_span_name")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (name, type, span_id, start_ns, end_ns)
    span_specs = [
        # generate_response latencies: 100ms, 200ms, 300ms
        ("generate_response", "LLM", 1, 1000000000, 1100000000),
        ("generate_response", "LLM", 2, 2000000000, 2200000000),
        ("generate_response", "LLM", 3, 3000000000, 3300000000),
        # process_input latencies: 50ms, 150ms
        ("process_input", "CHAIN", 4, 4000000000, 4050000000),
        ("process_input", "CHAIN", 5, 5000000000, 5150000000),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1", name, span_id=sid, span_type=stype, start_ns=start, end_ns=end
            )
            for name, stype, sid, start, end in span_specs
        ],
    )
    percentile_value = 50.0
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[
            MetricAggregation(
                aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
            )
        ],
        dimensions=[SpanMetricDimensionKey.SPAN_NAME],
    )
    # Expected percentiles per group, computed with numpy as the reference.
    expected_by_name = [
        ("generate_response", np.percentile([100.0, 200.0, 300.0], percentile_value)),
        ("process_input", np.percentile([50.0, 150.0], percentile_value)),
    ]
    assert len(result) == 2
    for row, (name, expected) in zip(result, expected_by_name):
        assert asdict(row) == {
            "metric_name": SpanMetricKey.LATENCY,
            "dimensions": {SpanMetricDimensionKey.SPAN_NAME: name},
            "values": {f"P{percentile_value}": expected},
        }
def test_query_span_metrics_latency_multiple_aggregations(store: SqlAlchemyStore):
    """AVG plus several percentiles can be requested in a single query."""
    exp_id = store.create_experiment("test_span_latency_multiple_aggregations")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Five spans; span i starts at i seconds and lasts i*100ms, giving
    # latencies 100ms, 200ms, 300ms, 400ms, 500ms.
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                f"span{i}",
                span_id=i,
                span_type="LLM",
                start_ns=i * 1000000000,
                end_ns=i * 1000000000 + i * 100000000,
            )
            for i in range(1, 6)
        ],
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[
            MetricAggregation(aggregation_type=AggregationType.AVG),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=50),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=95),
        ],
    )
    latencies = [100.0, 200.0, 300.0, 400.0, 500.0]
    assert len(result) == 1
    # One row carrying all three requested aggregations.
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.LATENCY,
        "dimensions": {},
        "values": {
            "AVG": pytest.approx(300.0, abs=0.01),
            "P50": pytest.approx(np.percentile(latencies, 50.0), abs=0.01),
            "P95": pytest.approx(np.percentile(latencies, 95.0), abs=0.01),
        },
    }
def test_query_span_metrics_latency_by_span_name_and_status(store: SqlAlchemyStore):
    """AVG latency grouped by name and status yields one row per observed pair."""
    exp_id = store.create_experiment("test_span_latency_by_span_name_and_status")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (name, type, span_id, start_ns, end_ns, status)
    span_specs = [
        # generate_response + OK: 100ms, 200ms -> avg = 150ms
        ("generate_response", "LLM", 1, 1000000000, 1100000000, trace_api.StatusCode.OK),
        ("generate_response", "LLM", 2, 2000000000, 2200000000, trace_api.StatusCode.OK),
        # generate_response + ERROR: 300ms
        ("generate_response", "LLM", 3, 3000000000, 3300000000, trace_api.StatusCode.ERROR),
        # process_input + OK: 50ms
        ("process_input", "CHAIN", 4, 4000000000, 4050000000, trace_api.StatusCode.OK),
    ]
    store.log_spans(
        exp_id,
        [
            create_test_span(
                "trace1",
                name,
                span_id=sid,
                span_type=stype,
                start_ns=start,
                end_ns=end,
                status=status,
            )
            for name, stype, sid, start, end, status in span_specs
        ],
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.LATENCY,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[SpanMetricDimensionKey.SPAN_NAME, SpanMetricDimensionKey.SPAN_STATUS],
    )
    # Rows are ordered by (name, status) dimension values.
    expected_rows = [
        ("generate_response", "ERROR", 300.0),
        ("generate_response", "OK", 150.0),
        ("process_input", "OK", 50.0),
    ]
    assert len(result) == 3
    for row, (name, status_name, avg_ms) in zip(result, expected_rows):
        assert asdict(row) == {
            "metric_name": SpanMetricKey.LATENCY,
            "dimensions": {
                SpanMetricDimensionKey.SPAN_NAME: name,
                SpanMetricDimensionKey.SPAN_STATUS: status_name,
            },
            "values": {"AVG": avg_ms},
        }
def test_query_assessment_metrics_count_no_dimensions(store: SqlAlchemyStore):
    """Total count spans assessment types (Feedback and Expectation) with no grouping."""
    exp_id = store.create_experiment("test_assessment_count_no_dimensions")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two feedbacks (human + LLM judge) and one expectation, all on the same trace.
    store.create_assessment(
        Feedback(
            trace_id=trace_id,
            name="correctness",
            value=True,
            source=AssessmentSource(
                source_type=AssessmentSourceType.HUMAN, source_id="user1@test.com"
            ),
        )
    )
    store.create_assessment(
        Feedback(
            trace_id=trace_id,
            name="relevance",
            value=0.8,
            source=AssessmentSource(source_type=AssessmentSourceType.LLM_JUDGE, source_id="gpt-4"),
        )
    )
    store.create_assessment(
        Expectation(
            trace_id=trace_id,
            name="expected_output",
            value="Hello World",
            source=AssessmentSource(source_type=AssessmentSourceType.CODE, source_id="test_suite"),
        )
    )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
        "dimensions": {},
        "values": {"COUNT": 3},
    }
def test_query_assessment_metrics_count_by_name(store: SqlAlchemyStore):
    """Assessment counts grouped by name come back one row per name, ordered."""
    exp_id = store.create_experiment("test_assessment_count_by_name")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two "correctness", three "relevance", one "quality".
    for name, value in [
        ("correctness", True),
        ("correctness", False),
        ("relevance", 0.9),
        ("relevance", 0.8),
        ("relevance", 0.7),
        ("quality", "high"),
    ]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=name,
                value=value,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    assert len(result) == 3
    # Rows are ordered alphabetically by assessment name.
    for row, (name, count) in zip(
        result, [("correctness", 2), ("quality", 1), ("relevance", 3)]
    ):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: name},
            "values": {"COUNT": count},
        }
def test_query_assessment_metrics_count_by_value_and_name(store: SqlAlchemyStore):
    """Grouping by name and value yields one row per distinct (name, value) pair."""
    exp_id = store.create_experiment("test_assessment_count_by_value")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # correctness: 2x True, 1x False; quality: 2x "high", 1x "low".
    for name, value in [
        ("correctness", True),
        ("correctness", True),
        ("correctness", False),
        ("quality", "high"),
        ("quality", "high"),
        ("quality", "low"),
    ]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=name,
                value=value,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[
            AssessmentMetricDimensionKey.ASSESSMENT_NAME,
            AssessmentMetricDimensionKey.ASSESSMENT_VALUE,
        ],
    )
    # Values are stored as JSON strings, so expectations are json.dumps-encoded.
    expected_rows = [
        ("correctness", json.dumps(False), 1),
        ("correctness", json.dumps(True), 2),
        ("quality", json.dumps("high"), 2),
        ("quality", json.dumps("low"), 1),
    ]
    assert len(result) == 4
    for row, (name, encoded_value, count) in zip(result, expected_rows):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {
                AssessmentMetricDimensionKey.ASSESSMENT_NAME: name,
                AssessmentMetricDimensionKey.ASSESSMENT_VALUE: encoded_value,
            },
            "values": {"COUNT": count},
        }
def test_query_assessment_metrics_with_time_interval(store: SqlAlchemyStore):
    """Assessments bucket into hourly windows by their creation timestamp."""
    exp_id = store.create_experiment("test_assessment_with_time_interval")
    # Base time in milliseconds (2020-01-01 00:00:00 UTC).
    base_time_ms = 1577836800000
    hour_ms = 60 * 60 * 1000
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=base_time_ms,
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two assessments in hour 0, two in hour 1, one in hour 2.
    assessment_times = [
        base_time_ms,
        base_time_ms + 10 * 60 * 1000,  # +10 minutes
        base_time_ms + hour_ms,  # +1 hour
        base_time_ms + hour_ms + 30 * 60 * 1000,  # +1.5 hours
        base_time_ms + 2 * hour_ms,  # +2 hours
    ]
    for i, created_at in enumerate(assessment_times):
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=f"quality_{i}",
                value=True,
                create_time_ms=created_at,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        time_interval_seconds=3600,  # 1 hour
        start_time_ms=base_time_ms,
        end_time_ms=base_time_ms + 3 * hour_ms,
    )
    # One row per hourly bucket, labeled with the bucket start in ISO-8601 UTC.
    assert len(result) == 3
    for bucket_index, (row, count) in enumerate(zip(result, [2, 2, 1])):
        bucket_start = datetime.fromtimestamp(
            (base_time_ms + bucket_index * hour_ms) / 1000, tz=timezone.utc
        ).isoformat()
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {"time_bucket": bucket_start},
            "values": {"COUNT": count},
        }
def test_query_assessment_metrics_with_time_interval_and_dimensions(store: SqlAlchemyStore):
    """Time bucketing composes with a name dimension: one row per (bucket, name)."""
    exp_id = store.create_experiment("test_assessment_with_time_interval_and_dimensions")
    # Base time in milliseconds (2020-01-01 00:00:00 UTC).
    base_time_ms = 1577836800000
    hour_ms = 60 * 60 * 1000
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=base_time_ms,
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # One correctness + one relevance in each of the two hourly buckets.
    for created_at, name in [
        (base_time_ms, "correctness"),
        (base_time_ms + 10 * 60 * 1000, "relevance"),
        (base_time_ms + hour_ms, "correctness"),
        (base_time_ms + hour_ms, "relevance"),
    ]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=name,
                value=True,
                create_time_ms=created_at,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
        time_interval_seconds=3600,  # 1 hour
        start_time_ms=base_time_ms,
        end_time_ms=base_time_ms + 2 * hour_ms,
    )
    bucket_starts = [
        datetime.fromtimestamp(
            (base_time_ms + i * hour_ms) / 1000, tz=timezone.utc
        ).isoformat()
        for i in range(2)
    ]
    expected_rows = [
        (bucket_starts[0], "correctness"),
        (bucket_starts[0], "relevance"),
        (bucket_starts[1], "correctness"),
        (bucket_starts[1], "relevance"),
    ]
    assert len(result) == 4
    for row, (bucket, name) in zip(result, expected_rows):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {
                "time_bucket": bucket,
                AssessmentMetricDimensionKey.ASSESSMENT_NAME: name,
            },
            "values": {"COUNT": 1},
        }
def test_query_assessment_metrics_with_filters(store: SqlAlchemyStore):
    """Trace-level filters restrict which traces' assessments are counted."""
    exp_id = store.create_experiment("test_assessment_with_filters")
    # One OK trace carrying 3 assessments and one ERROR trace carrying 2.
    trace_id1 = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id1,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    trace_id2 = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id2,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.ERROR,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    for i in range(3):
        store.create_assessment(
            Feedback(
                trace_id=trace_id1,
                name=f"quality_{i}",
                value=True,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    for i in range(2):
        store.create_assessment(
            Feedback(
                trace_id=trace_id2,
                name=f"error_check_{i}",
                value=False,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    # Each status filter should count only the assessments on matching traces.
    for filter_string, expected_count in [
        ("trace.status = 'OK'", 3),
        ("trace.status = 'ERROR'", 2),
    ]:
        result = store.query_trace_metrics(
            experiment_ids=[exp_id],
            view_type=MetricViewType.ASSESSMENTS,
            metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
            aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
            filters=[filter_string],
        )
        assert len(result) == 1
        assert asdict(result[0]) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {},
            "values": {"COUNT": expected_count},
        }
def test_query_assessment_metrics_across_multiple_traces(store: SqlAlchemyStore):
    """Counts aggregate over assessments from every trace in the experiment."""
    exp_id = store.create_experiment("test_assessment_across_multiple_traces")
    # Three traces, each with one "correctness" and one "relevance" feedback.
    for i in range(3):
        trace_id = f"tr-{uuid.uuid4().hex}"
        store.start_trace(
            TraceInfo(
                trace_id=trace_id,
                trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
                request_time=get_current_time_millis(),
                execution_duration=100,
                state=TraceStatus.OK,
                tags={TraceTagKey.TRACE_NAME: f"workflow_{i}"},
            )
        )
        for name, value in [("correctness", True), ("relevance", 0.9)]:
            store.create_assessment(
                Feedback(
                    trace_id=trace_id,
                    name=name,
                    value=value,
                    source=AssessmentSource(
                        source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                    ),
                )
            )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    # Each name appears once per trace, so both counts are 3.
    assert len(result) == 2
    for row, name in zip(result, ["correctness", "relevance"]):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: name},
            "values": {"COUNT": 3},
        }
def test_query_assessment_value_avg_by_name(store: SqlAlchemyStore):
    """AVG of assessment values per name; non-numeric values drop out of the result."""
    exp_id = store.create_experiment("test_assessment_value_avg_by_name")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Numeric accuracy/precision values plus one non-numeric "quality" value.
    for name, value in [
        ("accuracy", 0.8),
        ("accuracy", 0.9),
        ("accuracy", 0.85),
        ("precision", 0.7),
        ("precision", 0.75),
        ("quality", "high"),
    ]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=name,
                value=value,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    # Only 2 results - "quality" is excluded because non-numeric values result in None
    assert len(result) == 2
    for row, (name, avg) in zip(result, [("accuracy", 0.85), ("precision", 0.725)]):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
            "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: name},
            "values": {"AVG": pytest.approx(avg, abs=0.01)},
        }
def test_query_assessment_value_with_boolean_values(store: SqlAlchemyStore):
    """Boolean feedback averages as the fraction of True values."""
    exp_id = store.create_experiment("test_assessment_value_with_boolean")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Three True and one False -> expected average 3/4.
    for flag in [True, True, False, True]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name="correctness",
                value=flag,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
        "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: "correctness"},
        "values": {"AVG": pytest.approx(3 / 4, abs=0.01)},
    }
def test_query_assessment_value_with_yes_no_string_values(store: SqlAlchemyStore):
    """Categorical YES/NO ratings average as the YES fraction among YES/NO answers."""
    exp_id = store.create_experiment("test_assessment_value_with_yes_no")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # 3x YES, 1x NO, 1x UNKNOWN -> expected average 0.75 (3 YES of 4 YES/NO).
    ratings = [
        CategoricalRating.YES,
        CategoricalRating.YES,
        CategoricalRating.NO,
        CategoricalRating.YES,
        CategoricalRating.UNKNOWN,
    ]
    for rating in ratings:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name="quality",
                value=rating,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
        "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: "quality"},
        "values": {"AVG": pytest.approx(0.75, abs=0.01)},
    }
@pytest.mark.parametrize(
    "percentile_value",
    [50.0, 75.0, 90.0, 95.0, 99.0],
)
def test_query_assessment_value_percentiles(
    store: SqlAlchemyStore,
    percentile_value: float,
):
    """Percentile aggregation is computed independently per assessment name."""
    exp_id = store.create_experiment(f"test_assessment_value_p{percentile_value}")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Two independent assessment names with different value distributions.
    values_by_name = {
        "accuracy": [0.1, 0.2, 0.3, 0.4, 0.5],
        "score": [10.0, 20.0, 30.0],
    }
    for assessment_name, raw_values in values_by_name.items():
        for raw_value in raw_values:
            store.create_assessment(
                Feedback(
                    trace_id=trace_id,
                    name=assessment_name,
                    value=raw_value,
                    source=AssessmentSource(
                        source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                    ),
                )
            )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[
            MetricAggregation(
                aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
            )
        ],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    assert len(result) == 2
    agg_key = f"P{percentile_value}"
    # Results are ordered by assessment name: accuracy before score.
    for row, assessment_name in zip(result, ["accuracy", "score"]):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
            "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: assessment_name},
            "values": {
                agg_key: pytest.approx(
                    np.percentile(values_by_name[assessment_name], percentile_value),
                    abs=0.01,
                )
            },
        }
def test_query_assessment_value_mixed_types(store: SqlAlchemyStore):
    """Numeric and boolean assessments are averaged; string-valued ones are dropped."""
    exp_id = store.create_experiment("test_assessment_value_mixed_types")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    mixed_values = [
        ("rating", 5.0),
        ("rating", 4.5),
        ("rating", 3.0),
        ("status", "good"),
        ("status", "bad"),
        ("passed", True),
        ("passed", False),
        ("passed", True),
    ]
    for assessment_name, raw_value in mixed_values:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=assessment_name,
                value=raw_value,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    # Only 2 results - "status" is excluded because non-numeric values result in None
    assert len(result) == 2
    expected_rows = [
        ("passed", 2.0 / 3.0),  # booleans averaged as 1/0
        ("rating", 12.5 / 3.0),
    ]
    for row, (assessment_name, expected_avg) in zip(result, expected_rows):
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
            "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: assessment_name},
            "values": {"AVG": pytest.approx(expected_avg, abs=0.01)},
        }
def test_query_assessment_value_multiple_aggregations(store: SqlAlchemyStore):
    """AVG plus multiple percentiles can be requested in a single query."""
    exp_id = store.create_experiment("test_assessment_value_multiple_aggs")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    score_values = [10.0, 20.0, 30.0, 40.0, 50.0]
    for score in score_values:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name="score",
                value=score,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[
            MetricAggregation(aggregation_type=AggregationType.AVG),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=50),
            MetricAggregation(aggregation_type=AggregationType.PERCENTILE, percentile_value=95),
        ],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
        "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: "score"},
        "values": {
            "AVG": pytest.approx(30.0, abs=0.01),
            "P50": pytest.approx(np.percentile(score_values, 50.0), abs=0.01),
            "P95": pytest.approx(np.percentile(score_values, 95.0), abs=0.01),
        },
    }
@pytest.mark.parametrize(
    "assessment_type",
    [Feedback, Expectation],
)
def test_query_assessment_value_no_dimensions(
    store: SqlAlchemyStore, assessment_type: type[Assessment]
):
    """Without dimensions, AVG pools every numeric assessment into one row."""
    exp_id = store.create_experiment("test_assessment_value_no_dimensions")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Assessments across several names; all pooled because no grouping dimension.
    scores_by_name = {
        "accuracy": [0.8, 0.9],
        "precision": [0.7],
        "recall": [0.85],
    }
    for assessment_name, scores in scores_by_name.items():
        for score in scores:
            store.create_assessment(
                assessment_type(
                    trace_id=trace_id,
                    name=assessment_name,
                    value=score,
                    source=AssessmentSource(
                        source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                    ),
                )
            )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
        "dimensions": {},
        "values": {"AVG": pytest.approx(3.25 / 4, abs=0.01)},
    }
def test_query_assessment_value_with_time_bucket(store: SqlAlchemyStore):
    """AVG of assessment values is grouped into hourly buckets by creation time."""
    exp_id = store.create_experiment("test_assessment_value_time_bucket")
    # Base time in milliseconds (2020-01-01 00:00:00 UTC)
    base_time_ms = 1577836800000
    hour_ms = 60 * 60 * 1000
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=base_time_ms,
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # (create time, assessment name, value) triples spread across three hours.
    samples = [
        # Hour 0: avg should be (0.8 + 0.9) / 2 = 0.85
        (base_time_ms, "accuracy", 0.8),
        (base_time_ms + 10 * 60 * 1000, "accuracy", 0.9),
        # Hour 1: avg should be (0.7 + 0.75) / 2 = 0.725
        (base_time_ms + hour_ms, "precision", 0.7),
        (base_time_ms + hour_ms + 30 * 60 * 1000, "precision", 0.75),
        # Hour 2: avg should be 0.95
        (base_time_ms + 2 * hour_ms, "recall", 0.95),
    ]
    for created_at_ms, assessment_name, score in samples:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=assessment_name,
                value=score,
                create_time_ms=created_at_ms,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        time_interval_seconds=3600,
        start_time_ms=base_time_ms,
        end_time_ms=base_time_ms + 3 * hour_ms,
    )
    expected_by_hour = [(0, 0.85), (1, 0.725), (2, 0.95)]
    assert len(result) == len(expected_by_hour)
    for row, (hour, expected_avg) in zip(result, expected_by_hour):
        bucket_start = datetime.fromtimestamp(
            (base_time_ms + hour * hour_ms) / 1000, tz=timezone.utc
        ).isoformat()
        assert asdict(row) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
            "dimensions": {"time_bucket": bucket_start},
            "values": {"AVG": pytest.approx(expected_avg, abs=0.01)},
        }
@pytest.mark.parametrize(
    "assessment_type",
    [Feedback, Expectation],
)
def test_query_assessment_invalid_values(store: SqlAlchemyStore, assessment_type: type[Assessment]):
    """Assessments whose values are JSON strings/lists/dicts produce no metric rows."""
    exp_id = store.create_experiment("test_assessment_invalid_values")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    non_numeric_values = {
        "string_score": json.dumps("test"),
        "list_score": json.dumps([1, 2, 3]),
        "dict_score": json.dumps({"a": 1, "b": 2}),
    }
    for assessment_name, encoded_value in non_numeric_values.items():
        store.create_assessment(
            assessment_type(
                trace_id=trace_id,
                name=assessment_name,
                value=encoded_value,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
    )
    # No results - all values are non-numeric so all aggregations return None
    assert len(result) == 0
def test_query_assessment_value_with_null_value(store: SqlAlchemyStore):
    """Errored (null-valued) feedback is ignored; AVG uses only the numeric value."""
    exp_id = store.create_experiment("test_assessment_value_null_value")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # An errored assessment carries no value and must not affect the aggregate.
    store.create_assessment(
        Feedback(
            trace_id=trace_id,
            name="score",
            value=None,
            error=AssessmentError(
                error_message="Null value",
                error_code="NULL_VALUE",
            ),
            source=AssessmentSource(
                source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
            ),
        )
    )
    store.create_assessment(
        Feedback(
            trace_id=trace_id,
            name="score",
            value=12,
            source=AssessmentSource(
                source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
            ),
        )
    )
    # Plain string dimension name is accepted in place of the enum key.
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=["assessment_name"],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
        "dimensions": {"assessment_name": "score"},
        "values": {"AVG": 12},
    }
def test_query_assessment_value_with_assessment_name_filter(store: SqlAlchemyStore):
    """An assessment.name filter restricts results to the matching assessment."""
    exp_id = store.create_experiment("test_assessment_value_with_filter")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Create multiple assessments with different names
    for assessment_name, score in [
        ("accuracy", 0.8),
        ("accuracy", 0.9),
        ("precision", 0.7),
        ("recall", 0.85),
    ]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=assessment_name,
                value=score,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    # Query with assessment_name filter
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_VALUE,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[AssessmentMetricDimensionKey.ASSESSMENT_NAME],
        filters=["assessment.name = 'accuracy'"],
    )
    # Should only return accuracy results
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_VALUE,
        "dimensions": {AssessmentMetricDimensionKey.ASSESSMENT_NAME: "accuracy"},
        "values": {"AVG": pytest.approx(0.85, abs=0.01)},
    }
def test_query_assessment_with_type_filter(store: SqlAlchemyStore):
    """An assessment.type filter counts only feedbacks or only expectations."""
    exp_id = store.create_experiment("test_assessment_with_type_filter")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Create feedback assessments
    for i in range(3):
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=f"feedback_{i}",
                value=0.8,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    # Create expectation assessments
    for i in range(2):
        store.create_assessment(
            Expectation(
                trace_id=trace_id,
                name=f"expectation_{i}",
                value="expected",
                source=AssessmentSource(
                    source_type=AssessmentSourceType.CODE, source_id="test_suite"
                ),
            )
        )
    # Each type filter should count only the assessments of that type.
    for type_name, expected_count in [("feedback", 3), ("expectation", 2)]:
        result = store.query_trace_metrics(
            experiment_ids=[exp_id],
            view_type=MetricViewType.ASSESSMENTS,
            metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
            aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
            filters=[f"assessment.type = '{type_name}'"],
        )
        assert len(result) == 1
        assert asdict(result[0]) == {
            "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
            "dimensions": {},
            "values": {"COUNT": expected_count},
        }
def test_query_assessment_with_combined_name_and_type_filters(store: SqlAlchemyStore):
    """Name and type filters combine conjunctively when counting assessments."""
    exp_id = store.create_experiment("test_assessment_combined_filters")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    # Feedbacks: two named "accuracy", one named "precision".
    for assessment_name, score in [("accuracy", 0.8), ("accuracy", 0.9), ("precision", 0.7)]:
        store.create_assessment(
            Feedback(
                trace_id=trace_id,
                name=assessment_name,
                value=score,
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
                ),
            )
        )
    # Expectations: one also named "accuracy" (must be excluded by the type filter).
    for assessment_name in ["accuracy", "recall"]:
        store.create_assessment(
            Expectation(
                trace_id=trace_id,
                name=assessment_name,
                value="expected_value",
                source=AssessmentSource(
                    source_type=AssessmentSourceType.CODE, source_id="test_suite"
                ),
            )
        )
    # Query with both type and name filters - should only return feedback with name 'accuracy'
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.ASSESSMENTS,
        metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        filters=[
            "assessment.type = 'feedback'",
            "assessment.name = 'accuracy'",
        ],
    )
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": AssessmentMetricKey.ASSESSMENT_COUNT,
        "dimensions": {},
        "values": {"COUNT": 2},
    }
@pytest.mark.parametrize(
    ("filter_string", "error_match"),
    [
        ("name = 'accuracy'", r"Invalid identifier 'name'"),
        (
            "assessment.name != 'accuracy'",
            r"Invalid comparator: '!=', only '=' operator is supported",
        ),
        ("assessment.invalid_field = 'value'", r"Invalid entity 'invalid_field' specified"),
        ("assessment.name.extra = 'value'", r"does not require a key"),
        ("assessment.type LIKE 'feed%'", r"only '=' operator is supported"),
        ("assessment.value = '0.8'", r"Invalid entity 'value' specified"),
    ],
)
def test_query_assessments_metrics_invalid_filters(
    store: SqlAlchemyStore, filter_string: str, error_match: str
):
    """Malformed assessment filters are rejected with descriptive MlflowExceptions."""
    exp_id = store.create_experiment("test_assessment_invalid_filters")
    trace_id = f"tr-{uuid.uuid4().hex}"
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    store.create_assessment(
        Feedback(
            trace_id=trace_id,
            name="accuracy",
            value=0.8,
            source=AssessmentSource(
                source_type=AssessmentSourceType.HUMAN, source_id="user@test.com"
            ),
        )
    )
    with pytest.raises(MlflowException, match=error_match):
        store.query_trace_metrics(
            experiment_ids=[exp_id],
            view_type=MetricViewType.ASSESSMENTS,
            metric_name=AssessmentMetricKey.ASSESSMENT_COUNT,
            aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
            filters=[filter_string],
        )
def test_query_span_metrics_cost_sum(store: SqlAlchemyStore):
    """SUM over all LLM spans' total_cost with no grouping dimensions."""
    exp_id = store.create_experiment("test_span_cost_sum")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    spans = [
        create_test_span(
            "trace1",
            "llm_call_1",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.001,
                    "output_cost": 0.002,
                    "total_cost": 0.003,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
        create_test_span(
            "trace1",
            "llm_call_2",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.002,
                    "output_cost": 0.003,
                    "total_cost": 0.005,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
        create_test_span(
            "trace1",
            "llm_call_3",
            span_id=3,
            span_type="LLM",
            start_ns=1200000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.003,
                    "output_cost": 0.004,
                    "total_cost": 0.007,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
    ]
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
    )
    assert len(result) == 1
    # Compare with approx: a binary-float sum of decimal costs need not equal
    # the decimal literal exactly (consistent with the assessment tests above).
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {},
        "values": {"SUM": pytest.approx(0.015, abs=1e-9)},
    }
def test_query_span_metrics_cost_by_model_name(store: SqlAlchemyStore):
    """SUM of total_cost grouped by model name (results ordered by model name)."""
    exp_id = store.create_experiment("test_span_cost_by_model")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    spans = [
        create_test_span(
            "trace1",
            "gpt4_call_1",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01,
                    "output_cost": 0.02,
                    "total_cost": 0.03,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
        create_test_span(
            "trace1",
            "gpt4_call_2",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01,
                    "output_cost": 0.02,
                    "total_cost": 0.03,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
        create_test_span(
            "trace1",
            "claude_call",
            span_id=3,
            span_type="LLM",
            start_ns=1200000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.005,
                    "output_cost": 0.015,
                    "total_cost": 0.02,
                },
                SpanAttributeKey.MODEL: "claude-3",
            },
        ),
    ]
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[SpanMetricDimensionKey.SPAN_MODEL_NAME],
    )
    assert len(result) == 2
    # Compare with approx: binary-float sums of decimal costs need not equal
    # the decimal literals exactly.
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_NAME: "claude-3"},
        "values": {"SUM": pytest.approx(0.02, abs=1e-9)},
    }
    assert asdict(result[1]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_NAME: "gpt-4"},
        "values": {"SUM": pytest.approx(0.06, abs=1e-9)},
    }
def test_query_span_metrics_cost_avg_by_model_name(store: SqlAlchemyStore):
    """AVG of total_cost grouped by model name."""
    exp_id = store.create_experiment("test_span_cost_avg_by_model")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    spans = [
        create_test_span(
            "trace1",
            "gpt4_call_1",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01,
                    "output_cost": 0.02,
                    "total_cost": 0.03,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
        create_test_span(
            "trace1",
            "gpt4_call_2",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.02,
                    "output_cost": 0.03,
                    "total_cost": 0.05,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
    ]
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[SpanMetricDimensionKey.SPAN_MODEL_NAME],
    )
    assert len(result) == 1
    # Compare with approx: (0.03 + 0.05) / 2 computed in binary floats need not
    # equal the decimal literal 0.04 exactly.
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_NAME: "gpt-4"},
        "values": {"AVG": pytest.approx(0.04, abs=1e-9)},
    }
def test_query_span_metrics_cost_multiple_aggregations(store: SqlAlchemyStore):
    """SUM and AVG of span total_cost can be fetched in a single query."""
    exp_id = store.create_experiment("test_span_cost_multiple_agg")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    spans = []
    for idx in range(1, 6):
        spans.append(
            create_test_span(
                "trace1",
                f"llm_call_{idx}",
                span_id=idx,
                span_type="LLM",
                start_ns=1000000000 + idx * 100000000,
                attributes={
                    SpanAttributeKey.LLM_COST: {
                        "input_cost": idx,
                        "output_cost": idx,
                        "total_cost": 2 * idx,
                    },
                    SpanAttributeKey.MODEL: "gpt-4",
                },
            )
        )
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[
            MetricAggregation(aggregation_type=AggregationType.SUM),
            MetricAggregation(aggregation_type=AggregationType.AVG),
        ],
    )
    assert len(result) == 1
    # Total costs are 2, 4, 6, 8, 10 -> sum 30, mean 6 (integers, exact in floats).
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {},
        "values": {"SUM": 30.0, "AVG": 6.0},
    }
def test_query_span_metrics_input_output_cost(store: SqlAlchemyStore):
    """INPUT_COST and OUTPUT_COST are summed independently over LLM spans."""
    exp_id = store.create_experiment("test_span_input_output_cost")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    spans = [
        create_test_span(
            "trace1",
            "llm_call_1",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01,
                    "output_cost": 0.03,
                    "total_cost": 0.04,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
        create_test_span(
            "trace1",
            "llm_call_2",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.02,
                    "output_cost": 0.04,
                    "total_cost": 0.06,
                },
                SpanAttributeKey.MODEL: "gpt-4",
            },
        ),
    ]
    store.log_spans(exp_id, spans)
    input_result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.INPUT_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
    )
    assert len(input_result) == 1
    # Compare with approx: e.g. 0.01 + 0.02 != 0.03 exactly in IEEE doubles,
    # so exact dict equality on summed costs is fragile.
    assert asdict(input_result[0]) == {
        "metric_name": SpanMetricKey.INPUT_COST,
        "dimensions": {},
        "values": {"SUM": pytest.approx(0.03, abs=1e-9)},
    }
    output_result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.OUTPUT_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
    )
    assert len(output_result) == 1
    assert asdict(output_result[0]) == {
        "metric_name": SpanMetricKey.OUTPUT_COST,
        "dimensions": {},
        "values": {"SUM": pytest.approx(0.07, abs=1e-9)},
    }
def test_query_span_metrics_cost_across_multiple_traces(store: SqlAlchemyStore):
    """Span costs are aggregated across all traces in the experiment."""
    exp_id = store.create_experiment("test_span_cost_multiple_traces")
    for i in range(3):
        trace_info = TraceInfo(
            trace_id=f"trace{i}",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
        store.start_trace(trace_info)
        spans = [
            create_test_span(
                f"trace{i}",
                "llm_call",
                span_id=1,
                span_type="LLM",
                start_ns=1000000000,
                attributes={
                    SpanAttributeKey.LLM_COST: {
                        "input_cost": 0.01,
                        "output_cost": 0.02,
                        "total_cost": 0.03,
                    },
                    SpanAttributeKey.MODEL: "gpt-4",
                },
            ),
        ]
        store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[SpanMetricDimensionKey.SPAN_MODEL_NAME],
    )
    assert len(result) == 1
    # Compare with approx: 0.03 summed three times in binary floats need not
    # equal the decimal literal 0.09 exactly.
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_NAME: "gpt-4"},
        "values": {"SUM": pytest.approx(0.09, abs=1e-9)},
    }
@pytest.mark.parametrize("percentile_value", [50, 90, 95, 99])
def test_query_span_metrics_cost_percentiles(store: SqlAlchemyStore, percentile_value: float):
    """Percentile aggregation over span total_cost matches numpy's percentile."""
    exp_id = store.create_experiment(f"test_span_cost_percentile_{percentile_value}")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )
    spans = []
    for idx in range(1, 11):
        spans.append(
            create_test_span(
                "trace1",
                f"llm_call_{idx}",
                span_id=idx,
                span_type="LLM",
                start_ns=1000000000 + idx * 100000000,
                attributes={
                    SpanAttributeKey.LLM_COST: {
                        "input_cost": 0.005 * idx,
                        "output_cost": 0.005 * idx,
                        "total_cost": 0.01 * idx,
                    },
                    SpanAttributeKey.MODEL: "gpt-4",
                },
            )
        )
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[
            MetricAggregation(
                aggregation_type=AggregationType.PERCENTILE, percentile_value=percentile_value
            )
        ],
    )
    cost_values = [0.01 * idx for idx in range(1, 11)]
    assert len(result) == 1
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {},
        "values": {
            f"P{percentile_value}": pytest.approx(
                np.percentile(cost_values, percentile_value), abs=0.001
            )
        },
    }
def test_query_span_metrics_cost_by_model_provider(store: SqlAlchemyStore):
    """SUM of total_cost grouped by model provider (ordered by provider name)."""
    exp_id = store.create_experiment("test_span_cost_by_provider")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    spans = [
        create_test_span(
            "trace1",
            "openai_call_1",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01,
                    "output_cost": 0.02,
                    "total_cost": 0.03,
                },
                SpanAttributeKey.MODEL: "gpt-4",
                SpanAttributeKey.MODEL_PROVIDER: "openai",
            },
        ),
        create_test_span(
            "trace1",
            "openai_call_2",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.015,
                    "output_cost": 0.025,
                    "total_cost": 0.04,
                },
                SpanAttributeKey.MODEL: "gpt-4-turbo",
                SpanAttributeKey.MODEL_PROVIDER: "openai",
            },
        ),
        create_test_span(
            "trace1",
            "anthropic_call",
            span_id=3,
            span_type="LLM",
            start_ns=1200000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.005,
                    "output_cost": 0.015,
                    "total_cost": 0.02,
                },
                SpanAttributeKey.MODEL: "claude-3-5-sonnet",
                SpanAttributeKey.MODEL_PROVIDER: "anthropic",
            },
        ),
    ]
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[SpanMetricDimensionKey.SPAN_MODEL_PROVIDER],
    )
    assert len(result) == 2
    # Compare with approx: e.g. 0.03 + 0.04 != 0.07 exactly in IEEE doubles,
    # so exact dict equality on summed costs is fragile.
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: "anthropic"},
        "values": {"SUM": pytest.approx(0.02, abs=1e-9)},
    }
    assert asdict(result[1]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: "openai"},
        "values": {"SUM": pytest.approx(0.07, abs=1e-9)},
    }
def test_query_span_metrics_cost_avg_by_model_provider(store: SqlAlchemyStore):
    """AVG of total_cost grouped by model provider."""
    exp_id = store.create_experiment("test_span_cost_avg_by_provider")
    trace_info = TraceInfo(
        trace_id="trace1",
        trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
        request_time=get_current_time_millis(),
        execution_duration=100,
        state=TraceStatus.OK,
        tags={TraceTagKey.TRACE_NAME: "test_trace"},
    )
    store.start_trace(trace_info)
    spans = [
        create_test_span(
            "trace1",
            f"openai_call_{i}",
            span_id=i,
            span_type="LLM",
            start_ns=1000000000 + i * 100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01 * i,
                    "output_cost": 0.01 * i,
                    "total_cost": 0.02 * i,
                },
                SpanAttributeKey.MODEL: "gpt-4",
                SpanAttributeKey.MODEL_PROVIDER: "openai",
            },
        )
        for i in range(1, 4)
    ]
    store.log_spans(exp_id, spans)
    result = store.query_trace_metrics(
        experiment_ids=[exp_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.AVG)],
        dimensions=[SpanMetricDimensionKey.SPAN_MODEL_PROVIDER],
    )
    assert len(result) == 1
    # Compare with approx: (0.02 + 0.04 + 0.06) / 3 computed in binary floats
    # need not equal the decimal literal 0.04 exactly.
    assert asdict(result[0]) == {
        "metric_name": SpanMetricKey.TOTAL_COST,
        "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: "openai"},
        "values": {"AVG": pytest.approx(0.04, abs=1e-9)},
    }
def test_query_span_metrics_cost_by_model_name_and_provider(store: SqlAlchemyStore):
    """SUM of span cost grouped by (model name, provider) yields one row per pair."""
    experiment_id = store.create_experiment("test_span_cost_by_name_and_provider")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )

    # (span name, span id, start_ns, input cost, output cost, total cost, model, provider)
    span_specs = [
        ("gpt4_call", 1, 1000000000, 0.01, 0.02, 0.03, "gpt-4", "openai"),
        ("gpt4_turbo_call", 2, 1100000000, 0.005, 0.015, 0.02, "gpt-4-turbo", "openai"),
        ("claude_call", 3, 1200000000, 0.008, 0.012, 0.02, "claude-3-5-sonnet", "anthropic"),
    ]
    spans = [
        create_test_span(
            "trace1",
            name,
            span_id=span_id,
            span_type="LLM",
            start_ns=start_ns,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": input_cost,
                    "output_cost": output_cost,
                    "total_cost": total_cost,
                },
                SpanAttributeKey.MODEL: model,
                SpanAttributeKey.MODEL_PROVIDER: provider,
            },
        )
        for name, span_id, start_ns, input_cost, output_cost, total_cost, model, provider in (
            span_specs
        )
    ]
    store.log_spans(experiment_id, spans)

    rows = store.query_trace_metrics(
        experiment_ids=[experiment_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.TOTAL_COST,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
        dimensions=[
            SpanMetricDimensionKey.SPAN_MODEL_NAME,
            SpanMetricDimensionKey.SPAN_MODEL_PROVIDER,
        ],
    )

    # Rows come back ordered by dimension values: claude first, then the two GPT models.
    expected_rows = [
        ("claude-3-5-sonnet", "anthropic", 0.02),
        ("gpt-4", "openai", 0.03),
        ("gpt-4-turbo", "openai", 0.02),
    ]
    assert len(rows) == 3
    for row, (model, provider, total) in zip(rows, expected_rows):
        assert asdict(row) == {
            "metric_name": SpanMetricKey.TOTAL_COST,
            "dimensions": {
                SpanMetricDimensionKey.SPAN_MODEL_NAME: model,
                SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: provider,
            },
            "values": {"SUM": total},
        }
def test_query_span_metrics_input_output_cost_by_provider(store: SqlAlchemyStore):
    """INPUT_COST and OUTPUT_COST sums are each grouped by model provider."""
    experiment_id = store.create_experiment("test_span_input_output_cost_by_provider")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )

    spans = [
        create_test_span(
            "trace1",
            "openai_call",
            span_id=1,
            span_type="LLM",
            start_ns=1000000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.01,
                    "output_cost": 0.02,
                    "total_cost": 0.03,
                },
                SpanAttributeKey.MODEL: "gpt-4",
                SpanAttributeKey.MODEL_PROVIDER: "openai",
            },
        ),
        create_test_span(
            "trace1",
            "anthropic_call",
            span_id=2,
            span_type="LLM",
            start_ns=1100000000,
            attributes={
                SpanAttributeKey.LLM_COST: {
                    "input_cost": 0.005,
                    "output_cost": 0.015,
                    "total_cost": 0.02,
                },
                SpanAttributeKey.MODEL: "claude-3-5-sonnet",
                SpanAttributeKey.MODEL_PROVIDER: "anthropic",
            },
        ),
    ]
    store.log_spans(experiment_id, spans)

    # Providers sort alphabetically, so "anthropic" precedes "openai" in each result.
    cases = [
        (SpanMetricKey.INPUT_COST, 0.005, 0.01),
        (SpanMetricKey.OUTPUT_COST, 0.015, 0.02),
    ]
    for metric_key, anthropic_sum, openai_sum in cases:
        rows = store.query_trace_metrics(
            experiment_ids=[experiment_id],
            view_type=MetricViewType.SPANS,
            metric_name=metric_key,
            aggregations=[MetricAggregation(aggregation_type=AggregationType.SUM)],
            dimensions=[SpanMetricDimensionKey.SPAN_MODEL_PROVIDER],
        )
        assert len(rows) == 2
        assert asdict(rows[0]) == {
            "metric_name": metric_key,
            "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: "anthropic"},
            "values": {"SUM": anthropic_sum},
        }
        assert asdict(rows[1]) == {
            "metric_name": metric_key,
            "dimensions": {SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: "openai"},
            "values": {"SUM": openai_sum},
        }
def test_query_span_metrics_count_by_span_status_and_model_provider(store: SqlAlchemyStore):
    """Span counts group by (status, provider); spans without a provider are excluded."""
    experiment_id = store.create_experiment("test_span_count_by_status_and_provider")
    store.start_trace(
        TraceInfo(
            trace_id="trace1",
            trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
            request_time=get_current_time_millis(),
            execution_duration=100,
            state=TraceStatus.OK,
            tags={TraceTagKey.TRACE_NAME: "test_trace"},
        )
    )

    # (span name, status, provider). A None provider means the span carries no
    # model-provider attribute and should be dropped from the grouped results.
    span_specs = [
        ("openai_ok_1", trace_api.StatusCode.OK, "openai"),
        ("openai_ok_2", trace_api.StatusCode.OK, "openai"),
        ("openai_error", trace_api.StatusCode.ERROR, "openai"),
        ("anthropic_ok", trace_api.StatusCode.OK, "anthropic"),
        ("anthropic_error_1", trace_api.StatusCode.ERROR, "anthropic"),
        ("anthropic_error_2", trace_api.StatusCode.ERROR, "anthropic"),
        ("no_provider_ok_1", trace_api.StatusCode.OK, None),
        ("no_provider_ok_2", trace_api.StatusCode.OK, None),
        ("no_provider_error", trace_api.StatusCode.ERROR, None),
    ]
    spans = []
    for idx, (span_name, status, provider) in enumerate(span_specs, start=1):
        kwargs = {
            "span_id": idx,
            "span_type": "LLM" if provider else "CHAIN",
            "start_ns": 900000000 + idx * 100000000,
            "status": status,
        }
        if provider:
            kwargs["attributes"] = {SpanAttributeKey.MODEL_PROVIDER: provider}
        spans.append(create_test_span("trace1", span_name, **kwargs))
    store.log_spans(experiment_id, spans)

    rows = store.query_trace_metrics(
        experiment_ids=[experiment_id],
        view_type=MetricViewType.SPANS,
        metric_name=SpanMetricKey.SPAN_COUNT,
        aggregations=[MetricAggregation(aggregation_type=AggregationType.COUNT)],
        dimensions=[SpanMetricDimensionKey.SPAN_STATUS, SpanMetricDimensionKey.SPAN_MODEL_PROVIDER],
    )

    # Results are sorted by dimensions: first by status, then by provider.
    # Spans without a model provider (None value) are filtered out.
    expected_rows = [
        ("ERROR", "anthropic", 2),
        ("ERROR", "openai", 1),
        ("OK", "anthropic", 1),
        ("OK", "openai", 2),
    ]
    assert len(rows) == len(expected_rows)
    for row, (status_value, provider, count) in zip(rows, expected_rows):
        assert asdict(row) == {
            "metric_name": SpanMetricKey.SPAN_COUNT,
            "dimensions": {
                SpanMetricDimensionKey.SPAN_STATUS: status_value,
                SpanMetricDimensionKey.SPAN_MODEL_PROVIDER: provider,
            },
            "values": {"COUNT": count},
        }
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/store/tracking/test_sqlalchemy_store_query_trace_metrics.py",
"license": "Apache License 2.0",
"lines": 4232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/forbidden_make_judge_in_builtin_scorers.py | import ast
from pathlib import Path
from clint.resolver import Resolver
from clint.rules.base import Rule
class ForbiddenMakeJudgeInBuiltinScorers(Rule):
"""Ensure make_judge is not used in builtin_scorers.py.
After switching to InstructionsJudge in builtin_scorers.py, this rule
prevents future regressions by detecting any usage of make_judge in that file.
"""
def _message(self) -> str:
return (
"Usage of `make_judge` is forbidden in builtin_scorers.py. "
"Use `InstructionsJudge` directly instead."
)
@staticmethod
def check(node: ast.Call, resolver: Resolver, path: Path) -> bool:
"""Check if this is a call to make_judge in builtin_scorers.py.
Args:
node: The AST Call node to check
resolver: Resolver instance to resolve fully qualified names
path: Path to the file being linted
Returns:
True if this is a forbidden make_judge call, False otherwise
"""
if path.name != "builtin_scorers.py":
return False
if names := resolver.resolve(node):
match names:
case ["mlflow", "genai", "judges", "make_judge", *_]:
return True
case ["mlflow", "genai", "make_judge", *_]:
return True
case ["make_judge", *_]:
return True
case _:
return False
return False
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/forbidden_make_judge_in_builtin_scorers.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_forbidden_make_judge_in_builtin_scorers.py | from pathlib import Path
from clint.config import Config
from clint.linter import lint_file
from clint.rules.forbidden_make_judge_in_builtin_scorers import (
ForbiddenMakeJudgeInBuiltinScorers,
)
def test_forbidden_make_judge_in_builtin_scorers(index_path: Path) -> None:
    """Direct and module-qualified make_judge usages are flagged in builtin_scorers.py."""
    source = """
from mlflow.genai.judges.make_judge import make_judge
from mlflow.genai.judges import InstructionsJudge
# BAD - direct call after import
judge1 = make_judge(name="test", instructions="test")
# BAD - module qualified call
from mlflow.genai import judges
judge2 = judges.make_judge(name="test", instructions="test")
# GOOD - using InstructionsJudge instead
judge3 = InstructionsJudge(name="test", instructions="test")
"""
    cfg = Config(select={ForbiddenMakeJudgeInBuiltinScorers.name})
    results = lint_file(Path("builtin_scorers.py"), source, cfg, index_path)
    # Expect one violation for the import plus one per offending call (3 total).
    assert len(results) == 3
    for violation in results:
        assert isinstance(violation.rule, ForbiddenMakeJudgeInBuiltinScorers)
def test_make_judge_allowed_in_other_files(index_path: Path) -> None:
    """make_judge stays legal in any file other than builtin_scorers.py."""
    source = """
from mlflow.genai.judges.make_judge import make_judge
# GOOD - allowed in other files
judge = make_judge(name="test", instructions="test")
"""
    cfg = Config(select={ForbiddenMakeJudgeInBuiltinScorers.name})
    results = lint_file(Path("some_other_file.py"), source, cfg, index_path)
    assert not results
def test_instructions_judge_not_flagged(index_path: Path) -> None:
    """Using InstructionsJudge (the sanctioned replacement) produces no violations."""
    source = """
from mlflow.genai.judges import InstructionsJudge
# GOOD - InstructionsJudge is the correct approach
judge = InstructionsJudge(name="test", instructions="test")
"""
    cfg = Config(select={ForbiddenMakeJudgeInBuiltinScorers.name})
    results = lint_file(Path("builtin_scorers.py"), source, cfg, index_path)
    assert not results
def test_nested_make_judge_call(index_path: Path) -> None:
    """make_judge is detected even when nested inside another call expression."""
    source = """
from mlflow.genai.judges.make_judge import make_judge
# BAD - nested call
result = some_function(make_judge(name="test", instructions="test"))
"""
    cfg = Config(select={ForbiddenMakeJudgeInBuiltinScorers.name})
    results = lint_file(Path("builtin_scorers.py"), source, cfg, index_path)
    # Expect one violation for the import plus one for the nested call (2 total).
    assert len(results) == 2
    for violation in results:
        assert isinstance(violation.rule, ForbiddenMakeJudgeInBuiltinScorers)
def test_make_judge_in_comment_not_flagged(index_path: Path) -> None:
    """Mentioning make_judge in a comment does not trigger the rule."""
    source = """
from mlflow.genai.judges import InstructionsJudge
# This comment mentions make_judge but should not trigger
judge = InstructionsJudge(name="test", instructions="test")
"""
    cfg = Config(select={ForbiddenMakeJudgeInBuiltinScorers.name})
    results = lint_file(Path("builtin_scorers.py"), source, cfg, index_path)
    assert not results
| {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_forbidden_make_judge_in_builtin_scorers.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/knowledge_retention.py | # NB: User-facing name for the knowledge retention assessment.
KNOWLEDGE_RETENTION_ASSESSMENT_NAME = "knowledge_retention"
KNOWLEDGE_RETENTION_PROMPT = """\
Your task is to evaluate the LAST AI response in the {{ conversation }} and determine if it:
- Correctly uses or references information the user provided in earlier turns
- Avoids contradicting information the user provided in earlier turns
- Accurately recalls details without distortion
Output "yes" if the AI's last response correctly retains any referenced prior user information.
Output "no" if the AI's last response:
- Contradicts information the user provided earlier
- Misrepresents or inaccurately recalls user-provided information
- Forgets or ignores information that is directly relevant to answering the current user question
IMPORTANT GUIDELINES:
1. Only evaluate information explicitly provided by the USER in prior turns
2. Focus on factual information (names, dates, preferences, context) rather than opinions
3. Not all prior information needs to be referenced in every response - only evaluate information
that is directly relevant to the current user's question or request
4. If the AI doesn't reference prior information because it's not relevant to the current turn,
that's acceptable (output "yes")
5. Only output "no" if there's a clear contradiction, distortion, or problematic forgetting of
information that should have been used
6. Evaluate ONLY the last AI response, not the entire conversation
Base your judgment strictly on the conversation content provided. Do not use outside knowledge.
"""
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/knowledge_retention.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/prompts/tool_call_correctness.py | import json
from typing import TYPE_CHECKING
from mlflow.genai.judges.utils.formatting_utils import (
format_available_tools,
format_tools_called,
)
from mlflow.genai.prompts.utils import format_prompt
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import ChatTool
# NB: User-facing name for the tool-call correctness assessment.
TOOL_CALL_CORRECTNESS_FEEDBACK_NAME = "tool_call_correctness"

# Shared output format for all prompt variants
_OUTPUT_FORMAT = """
Please evaluate whether the agent's tool calls and their arguments are correct and reasonable using only the following json format. Return "yes" if the tool calls and arguments are correct and reasonable, otherwise return "no".
Do not use any markdown formatting or output additional lines.
{
"rationale": "Reason for the assessment. If incorrect or unreasonable tool calls are found, identify which specific calls or arguments are problematic and explain why. If all tool calls and arguments are correct, explain why they are appropriate. Start each rationale with `Let's think step by step`",
"result": "yes|no"
}\
"""  # noqa: E501

# Ordering instruction variants: the caller picks one depending on whether
# tool-call order should be part of the evaluation.
ORDERING_INSTRUCTION_CHECK = (
    "3) Ordering\n- Consider whether the order of tool calls matches the expected order."
)
ORDERING_INSTRUCTION_IGNORE = (
    "Note: The order of tool calls does not need to match. You should not penalize the agent for "
    "calling tools in a different order than the expected order."
)

# Evaluation criteria for ground-truth-free mode
_GROUND_TRUTH_FREE_CRITERIA = """\
1) Need for tools
- Was using any tool necessary or helpful for this request?
- Did the agent fail to use an obviously appropriate tool that was available?
2) Tool selection
- For each step, is the chosen tool a good match for the subtask, given the tool descriptions?
- Did the agent avoid tools that are clearly irrelevant, overpowered, or disallowed for the request?
3) Arguments and intent alignment
- Do the arguments match the tool's schema?
- Are the arguments clearly grounded in the user's request and the tool's documented purpose?
- Across calls, are key parameters provided in ways that logically follow from prior tool outputs
or user messages, rather than arbitrary changes?
4) Tool flow and combinations
- When multiple tools are used, is the overall sequence of tool choices logically sound?
- Are follow-up tool calls justified by what the agent appears to be trying to achieve with respect
to the user's request?
{{ordering_instruction}}"""

# Evaluation criteria for full expectations (names + arguments)
_FULL_EXPECTATIONS_CRITERIA = """\
1) Tool selection match
- Are the same tools being called (or semantically equivalent alternatives)?
- Are there any missing or extra tool calls compared to expectations?
2) Argument match
- Do the actual arguments convey the same intent as the expected arguments, even if phrased
differently?
- Are there any significant differences in argument values that would change the outcome?
{{ordering_instruction}}"""

# Evaluation criteria for partial expectations (names only)
_PARTIAL_EXPECTATIONS_CRITERIA = """\
1) Tool selection match
- Do the actual tool calls match the expected tool names?
- Are there any missing or extra tools compared to expectations?
2) Argument reasonableness
- Are the arguments provided reasonable given the user request and tool definitions?
- Do the arguments match the tool's schema and documented purpose?
{{ordering_instruction}}"""

# Unified prompt template; get_prompt fills the placeholders depending on mode.
_PROMPT_TEMPLATE = """\
{{preamble}}
Focus only on the choice of tools and the arguments passed to them. Do NOT judge whether the tools'
outputs or implementations are correct.
Evaluate:
{{evaluation_criteria}}
<request>
{{request}}
</request>
{{expected_section}}<actual_tool_calls>
{{tools_called}}
</actual_tool_calls>
<available_tools>
{{available_tools}}
</available_tools>"""

# Preamble variants
_GROUND_TRUTH_FREE_PREAMBLE = """\
Consider whether the agent selected appropriate tools and called with the correct arguments for the
task.
Given the user's request, the available tools (including their described capabilities/constraints),
and the sequence of tool calls made by the agent, evaluate if the agent chose suitable tools and
used them in a reasonable way."""

_FULL_EXPECTATIONS_PREAMBLE = """\
Compare the actual tool calls against the expected tool calls to determine if they are correct.
Given the user's request, the expected tool calls (ground truth), and the actual tool calls made
by the agent, evaluate whether the actual tool calls semantically match the expected ones."""

_PARTIAL_EXPECTATIONS_PREAMBLE = """\
Evaluate tool call correctness by comparing tool selection against expected tool names, and
evaluating whether arguments are reasonable.
Given the user's request, the expected tool names (ground truth), the available tools (including
their described capabilities/constraints), and the actual tool calls made by the agent, evaluate
whether the agent selected the correct tools and used reasonable arguments.\
"""

# Used by ToolCallCorrectness.instructions property for serialization
TOOL_CALL_CORRECTNESS_PROMPT_INSTRUCTIONS = (
    _GROUND_TRUTH_FREE_PREAMBLE
    + "\n\nFocus only on the choice of tools and the arguments passed to them. Do NOT judge "
    "whether the tools'\noutputs or implementations are correct.\n\nEvaluate:\n\n"
    + _GROUND_TRUTH_FREE_CRITERIA
    + "\n\n<request>\n{{request}}\n</request>\n\n<available_tools>\n{{available_tools}}\n"
    "</available_tools>\n\n<tools_called>\n{{tools_called}}\n</tools_called>"
)
def _format_expected_calls(expected_calls: list["FunctionCall"], include_arguments: bool) -> str:
    """Render expected tool calls as numbered lines for the judge prompt.

    Args:
        expected_calls: Ground-truth tool calls to render.
        include_arguments: When True, emit each call's JSON-encoded arguments
            (or "empty" for calls without any); when False, emit names only.

    Returns:
        One line per expected call (plus argument lines when requested), or a
        fixed placeholder string when no expected calls were given.
    """
    if not expected_calls:
        return "No expected tool calls provided."
    rendered: list[str] = []
    for index, call in enumerate(expected_calls, 1):
        if not include_arguments:
            rendered.append(f"Expected Tool {index}: {call.name}")
            continue
        rendered.append(f"Expected Tool Call {index}: {call.name}")
        if call.arguments:
            rendered.append(f" Arguments: {json.dumps(call.arguments)}")
        else:
            rendered.append(" Arguments: empty")
    return "\n".join(rendered)
def get_prompt(
    request: str,
    tools_called: list["FunctionCall"],
    available_tools: list["ChatTool"],
    expected_calls: list["FunctionCall"] | None = None,
    include_arguments: bool = True,
    check_order: bool = False,
) -> str:
    """
    Generate the tool call correctness evaluation prompt.

    Args:
        request: The original user request that the agent is trying to fulfill.
        tools_called: The sequence of tools that were called by the agent.
        available_tools: The set of available tools.
        expected_calls: Optional list of expected tool calls for ground-truth
            comparison. If None, uses ground-truth-free evaluation.
        include_arguments: If True, compare both tool names and arguments
            (full expectations). If False, compare only tool names (partial
            expectations).
        check_order: If True, ask the LLM to consider the ordering of tool calls.
    """
    ordering = ORDERING_INSTRUCTION_CHECK if check_order else ORDERING_INSTRUCTION_IGNORE

    if expected_calls is None:
        # Ground-truth-free mode: judge tool choice purely against the request.
        preamble = _GROUND_TRUTH_FREE_PREAMBLE
        criteria_template = _GROUND_TRUTH_FREE_CRITERIA
        expected_section = ""
    else:
        # Expectation-based modes share the expected-calls section; they differ
        # only in whether arguments are compared alongside tool names.
        if include_arguments:
            preamble = _FULL_EXPECTATIONS_PREAMBLE
            criteria_template = _FULL_EXPECTATIONS_CRITERIA
        else:
            preamble = _PARTIAL_EXPECTATIONS_PREAMBLE
            criteria_template = _PARTIAL_EXPECTATIONS_CRITERIA
        formatted_expected = _format_expected_calls(expected_calls, include_arguments)
        expected_section = (
            f"<expected_tool_calls>\n{formatted_expected}\n</expected_tool_calls>\n\n"
        )

    return format_prompt(
        _PROMPT_TEMPLATE + _OUTPUT_FORMAT,
        preamble=preamble,
        evaluation_criteria=criteria_template.replace("{{ordering_instruction}}", ordering),
        expected_section=expected_section,
        request=request,
        available_tools=format_available_tools(available_tools),
        tools_called=format_tools_called(tools_called),
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/tool_call_correctness.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/utils/formatting_utils.py | import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import ChatTool
_logger = logging.getLogger(__name__)
def format_available_tools(available_tools: list["ChatTool"]) -> str:
    """Render the tool catalog as a bulleted, human-readable listing.

    Args:
        available_tools: The set of available tools.

    Returns:
        Formatted string representation of available tools, one entry per
        tool with its description and parameter details (name, required /
        optional marker, type, description). Tools whose function definition
        is missing are skipped with a warning; an empty input yields
        "No tools available".
    """
    entries: list[str] = []
    for tool in available_tools:
        fn = tool.function
        if not fn:
            _logger.warning(f"Skipping tool with missing function definition: {tool}")
            continue
        header = f"- {fn.name}"
        if fn.description:
            header += f": {fn.description}"
        lines = [header]
        params = fn.parameters
        if params and params.properties:
            required_names = set(params.required or [])
            for prop_name, prop in params.properties.items():
                marker = " (required)" if prop_name in required_names else " (optional)"
                line = f" - {prop_name}{marker}"
                # Some property objects may lack a type field entirely.
                if hasattr(prop, "type") and prop.type:
                    line += f": {prop.type}"
                if prop.description:
                    line += f" - {prop.description}"
                lines.append(line)
        entries.append("\n".join(lines))
    return "\n\n".join(entries) if entries else "No tools available"
def format_tools_called(tools_called: list["FunctionCall"]) -> str:
    """Render the sequence of executed tool calls for inclusion in a prompt.

    Args:
        tools_called: The sequence of tools that were called by the agent.
            Each element should be a FunctionCall object.

    Returns:
        A numbered entry per call showing its arguments, output (or a
        "(no output)" placeholder), and the exception when one was recorded;
        "No tools called" for an empty input.
    """
    if not tools_called:
        return "No tools called"
    entries = []
    for step, call in enumerate(tools_called, start=1):
        lines = [
            f"Tool Call {step}: {call.name}",
            f" Input Arguments: {call.arguments or {}}",
            f" Output: {call.outputs or '(no output)'}",
        ]
        if call.exception:
            lines.append(f" Exception: {call.exception}")
        entries.append("\n".join(lines))
    return "\n\n".join(entries)
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/utils/formatting_utils.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/judges/utils/test_formatting_utils.py | import pytest
from mlflow.genai.judges.utils.formatting_utils import format_available_tools, format_tools_called
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import (
ChatTool,
FunctionParams,
FunctionToolDefinition,
ParamProperty,
)
# Each case: the ChatTool list to render and the exact expected string.
@pytest.mark.parametrize(
    ("tools", "expected"),
    [
        pytest.param(
            [
                ChatTool(
                    type="function",
                    function=FunctionToolDefinition(
                        name="get_weather",
                        description="Get current weather for a location",
                    ),
                )
            ],
            "- get_weather: Get current weather for a location",
            id="basic",
        ),
        pytest.param(
            [
                ChatTool(
                    type="function",
                    function=FunctionToolDefinition(
                        name="search",
                        description="Search for information",
                        parameters=FunctionParams(
                            properties={
                                "query": ParamProperty(
                                    type="string", description="The search query"
                                ),
                                "max_results": ParamProperty(
                                    type="integer", description="Maximum number of results"
                                ),
                            },
                            required=["query"],
                        ),
                    ),
                )
            ],
            (
                "- search: Search for information\n"
                " - query (required): string - The search query\n"
                " - max_results (optional): integer - Maximum number of results"
            ),
            id="with_parameters",
        ),
        pytest.param(
            [
                ChatTool(
                    type="function",
                    function=FunctionToolDefinition(name="tool1", description="First tool"),
                ),
                ChatTool(
                    type="function",
                    function=FunctionToolDefinition(name="tool2", description="Second tool"),
                ),
            ],
            "- tool1: First tool\n\n- tool2: Second tool",
            id="multiple",
        ),
        pytest.param(
            [],
            "No tools available",
            id="empty",
        ),
        # A tool with no function definition is skipped; only the valid one renders.
        pytest.param(
            [
                ChatTool(type="function", function=None),
                ChatTool(
                    type="function",
                    function=FunctionToolDefinition(name="valid_tool", description="Valid tool"),
                ),
            ],
            "- valid_tool: Valid tool",
            id="missing_function",
        ),
        pytest.param(
            [
                ChatTool(
                    type="function",
                    function=FunctionToolDefinition(
                        name="calc",
                        parameters=FunctionParams(
                            properties={
                                "x": ParamProperty(type="number"),
                                "y": ParamProperty(type="number"),
                            },
                            required=["x", "y"],
                        ),
                    ),
                )
            ],
            "- calc\n - x (required): number\n - y (required): number",
            id="parameter_without_description",
        ),
    ],
)
def test_format_available_tools(tools, expected):
    """format_available_tools renders each parametrized tool list exactly as expected."""
    result = format_available_tools(tools)
    assert result == expected
# Each case: the FunctionCall sequence to render and the exact expected string.
@pytest.mark.parametrize(
    ("tools_called", "expected"),
    [
        pytest.param(
            [
                FunctionCall(
                    name="get_weather",
                    arguments={"city": "Paris"},
                    outputs="Sunny, 22°C",
                )
            ],
            (
                "Tool Call 1: get_weather\n"
                " Input Arguments: {'city': 'Paris'}\n"
                " Output: Sunny, 22°C"
            ),
            id="basic",
        ),
        pytest.param(
            [
                FunctionCall(
                    name="search",
                    arguments={"query": "capital of France"},
                    outputs="Paris",
                ),
                FunctionCall(
                    name="translate",
                    arguments={"text": "Paris", "target": "es"},
                    outputs="París",
                ),
            ],
            (
                "Tool Call 1: search\n"
                " Input Arguments: {'query': 'capital of France'}\n"
                " Output: Paris\n"
                "\n"
                "Tool Call 2: translate\n"
                " Input Arguments: {'text': 'Paris', 'target': 'es'}\n"
                " Output: París"
            ),
            id="multiple",
        ),
        # A recorded exception is appended as an extra line after the output.
        pytest.param(
            [
                FunctionCall(
                    name="get_weather",
                    arguments={"city": "InvalidCity"},
                    outputs=None,
                    exception="ValueError: City not found",
                )
            ],
            (
                "Tool Call 1: get_weather\n"
                " Input Arguments: {'city': 'InvalidCity'}\n"
                " Output: (no output)\n"
                " Exception: ValueError: City not found"
            ),
            id="with_exception",
        ),
        pytest.param(
            [
                FunctionCall(
                    name="stream_data",
                    arguments={"source": "api"},
                    outputs={"items": [1, 2]},
                    exception="TimeoutError: Connection lost",
                )
            ],
            (
                "Tool Call 1: stream_data\n"
                " Input Arguments: {'source': 'api'}\n"
                " Output: {'items': [1, 2]}\n"
                " Exception: TimeoutError: Connection lost"
            ),
            id="with_partial_output_and_exception",
        ),
        pytest.param(
            [
                FunctionCall(
                    name="send_notification",
                    arguments={"message": "Hello"},
                    outputs=None,
                )
            ],
            (
                "Tool Call 1: send_notification\n"
                " Input Arguments: {'message': 'Hello'}\n"
                " Output: (no output)"
            ),
            id="no_output",
        ),
        pytest.param(
            [],
            "No tools called",
            id="empty",
        ),
        # Missing arguments render as an empty dict.
        pytest.param(
            [
                FunctionCall(
                    name="get_time",
                    arguments=None,
                    outputs="12:00 PM",
                )
            ],
            "Tool Call 1: get_time\n Input Arguments: {}\n Output: 12:00 PM",
            id="empty_arguments",
        ),
    ],
)
def test_format_tools_called(tools_called, expected):
    """format_tools_called renders each parametrized call sequence exactly as expected."""
    result = format_tools_called(tools_called)
    assert result == expected
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/utils/test_formatting_utils.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/tool_call_efficiency.py | from typing import TYPE_CHECKING
from mlflow.genai.judges.utils.formatting_utils import (
format_available_tools,
format_tools_called,
)
from mlflow.genai.prompts.utils import format_prompt
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import ChatTool
# NB: User-facing name for the is_tool_call_efficient assessment.
TOOL_CALL_EFFICIENCY_FEEDBACK_NAME = "tool_call_efficiency"
# NB: Instruction half of the judge prompt. The {{request}}, {{available_tools}},
# and {{tools_called}} placeholders are filled in by format_prompt (see get_prompt).
TOOL_CALL_EFFICIENCY_PROMPT_INSTRUCTIONS = """\
Consider the agent's tool usage for redundancy and inefficiency.
Given the user's request, the available tools, and the sequence of tools called by the agent, \
determine whether any tool calls were unnecessary or could have been made more efficient. In your \
analysis, treat retries caused by temporary tool failures (e.g., timeouts, transient errors) as \
efficient and not redundant.
Consider in particular:
Calls to the same tool with identical or very similar arguments
Repeated calls to the same tool with the same parameters
Multiple calls that could reasonably have been consolidated into a single call
<request>
{{request}}
</request>
<available_tools>
{{available_tools}}
</available_tools>
<tools_called>
{{tools_called}}
</tools_called>"""
# Output-contract half of the prompt: constrains the judge's answer to a strict
# two-field JSON object ("rationale" plus a yes/no "result").
TOOL_CALL_EFFICIENCY_PROMPT_OUTPUT = """
Please evaluate whether the agent's tool usage is efficient and free of redundancy using only the following json format. Return "yes" if the tool usage is efficient and free of redundancy, otherwise return "no".
Do not use any markdown formatting or output additional lines.
{
"rationale": "Reason for the assessment. If redundant tool calls are found, identify which specific calls are redundant and explain why. If no redundancy is found, explain why the tool usage is efficient. Start each rationale with `Let's think step by step`",
"result": "yes|no"
}\
"""  # noqa: E501
# Full prompt template sent to the judge: instructions followed by the output contract.
TOOL_CALL_EFFICIENCY_PROMPT = (
    TOOL_CALL_EFFICIENCY_PROMPT_INSTRUCTIONS + TOOL_CALL_EFFICIENCY_PROMPT_OUTPUT
)
def get_prompt(
    request: str,
    tools_called: list["FunctionCall"],
    available_tools: list["ChatTool"],
) -> str:
    """Build the tool-call-efficiency judge prompt for one agent run.

    Args:
        request: The original user request that the agent is trying to fulfill.
        tools_called: The sequence of tools that were called by the agent,
            as FunctionCall objects.
        available_tools: The set of tools that were available to the agent.

    Returns:
        The fully formatted prompt string.
    """
    return format_prompt(
        TOOL_CALL_EFFICIENCY_PROMPT,
        request=request,
        available_tools=format_available_tools(available_tools),
        tools_called=format_tools_called(tools_called),
    )
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/tool_call_efficiency.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/utils/type.py | from __future__ import annotations
from typing import Any
from mlflow.types.chat import Function
class FunctionCall(Function):
    """A record of a single tool/function invocation, extending the chat Function type.

    Captures what the agent passed to the tool and what came back, for use when
    formatting judge prompts.
    """

    # Raw (string) or parsed (dict) call arguments; None means no arguments.
    arguments: str | dict[str, Any] | None = None
    # Value returned by the tool, if any.
    outputs: Any | None = None
    # String form of an exception raised during the call, if one occurred.
    exception: str | None = None
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/utils/type.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/utils/prompts/available_tools_extraction.py | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from mlflow.types.llm import ChatMessage
# System prompt for the available-tools extraction agent. `{output_example}` is a
# real str.format placeholder; the doubled braces ({{...}}) survive formatting as
# literal braces in the rendered prompt.
AVAILABLE_TOOLS_EXTRACTION_SYSTEM_PROMPT = """You are an expert in analyzing agent execution traces.
Your task is to examine an MLflow trace and identify all tools or functions that were
available to the LLM, not which tools were actually called.
CRITICAL: You MUST return ONLY valid JSON matching the schema below.
Do NOT return explanations, comments, or natural language. Return ONLY the JSON object.
## How You Should Analyze the Trace
Use the tools available to you to thoroughly inspect the trace:
1. Use list_spans
Retrieve the list of spans in the trace. Each span may contain tool definitions in
attributes or inputs.
2. Use GetSpanTool
For any span returned by list_spans, inspect its content—especially:
- inputs
- attributes
- metadata
These may include tool definitions or schemas given to the LLM.
3. Use SearchTraceRegexTool
Search the trace for keywords commonly associated with tool definitions, such as:
- "definition"
- "schema"
- "tool"
- "parameters"
- "functions"
Use these results to locate spans likely to contain tool schemas.
You must base your findings only on information contained in the trace.
Do not rely on or confuse the tools that you can use (like list_spans or GetSpanTool)
with the tools that were available to the LLM inside the trace. Only identify tools that
the trace itself shows were provided to the LLM.
## What to Look For
Search the trace for tool definitions or schemas that were provided to the LLM,
regardless of where they appear (span attributes, inputs, metadata, etc.).
A "tool definition" includes:
- The tool/function name
- An optional description
- An optional JSON schema for parameters
## Required Output Format
You MUST return a valid JSON object in exactly this format:
{output_example}
For every tool definition found, extract and return:
- type — Always "function"
- function.name — The tool's name (required)
- function.description — A description of the tool (use empty string "" if not available)
- function.parameters — The JSON parameter schema (use empty object {{}} if not available)
## Rules
- Return ONLY valid JSON. No explanations, no markdown, no comments.
- Return only unique tools. Two tool definitions should be treated as duplicates if
any of the following are true: Their tool names/descriptions/parameter schemas are
identical or nearly identical.
- If no tool definitions are present in the trace, return: {{"tools": []}}
- Only identify tools that the trace explicitly provides; do not infer or invent tools.
"""
# User prompt: contains no format placeholders and is sent verbatim alongside the
# formatted system prompt (see get_available_tools_extraction_prompts).
AVAILABLE_TOOLS_EXTRACTION_USER_PROMPT = """
Please analyze the trace with the tools available to you and return the tools that were
available to the LLM in the trace.
Remember: respond with ONLY valid JSON matching the schema provided.
- Use double quotes for all property names and string values.
- Do not include comments, trailing commas, or explanatory text.
- Do not wrap the JSON in markdown (no ``` blocks).
- Do not include any text before or after the JSON.
- The response must be directly parseable by json.loads().
If no tools are found, return {"tools": []}.
"""
def get_available_tools_extraction_prompts(
    output_example: str,
) -> list["ChatMessage"]:
    """
    Build the system/user chat messages for extracting available tools from a trace.

    Args:
        output_example: JSON string example of the expected output format.

    Returns:
        A list of chat messages [system_message, user_message] for tool extraction.
    """
    from mlflow.types.llm import ChatMessage

    # The system prompt carries the {output_example} placeholder; the user prompt
    # is sent verbatim.
    return [
        ChatMessage(
            role="system",
            content=AVAILABLE_TOOLS_EXTRACTION_SYSTEM_PROMPT.format(
                output_example=output_example
            ),
        ),
        ChatMessage(role="user", content=AVAILABLE_TOOLS_EXTRACTION_USER_PROMPT),
    ]
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/utils/prompts/available_tools_extraction.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/gateway/providers/litellm.py | from __future__ import annotations
import json
from typing import Any, AsyncIterable
from mlflow.gateway.config import EndpointConfig, LiteLLMConfig
from mlflow.gateway.providers.base import BaseProvider, PassthroughAction, ProviderAdapter
from mlflow.gateway.schemas import chat, embeddings
from mlflow.gateway.utils import parse_sse_lines
from mlflow.tracing.constant import TokenUsageKey
class LiteLLMAdapter(ProviderAdapter):
    """Translate gateway chat/embeddings payloads to and from LiteLLM's format."""

    @classmethod
    def _get_litellm_model_name(cls, config: EndpointConfig) -> str:
        """Return the model name, prefixed with the LiteLLM provider when one is set."""
        provider = config.model.config.litellm_provider
        return f"{provider}/{config.model.name}" if provider else config.model.name

    @classmethod
    def chat_to_model(cls, payload: dict[str, Any], config: EndpointConfig) -> dict[str, Any]:
        """Attach the resolved model name to an outgoing chat payload."""
        return {"model": cls._get_litellm_model_name(config), **payload}

    @classmethod
    def embeddings_to_model(cls, payload: dict[str, Any], config: EndpointConfig) -> dict[str, Any]:
        """Attach the resolved model name to an outgoing embeddings payload."""
        return {"model": cls._get_litellm_model_name(config), **payload}

    @classmethod
    def model_to_chat(cls, resp: dict[str, Any], config: EndpointConfig) -> chat.ResponsePayload:
        """Validate a raw response dict into a chat ResponsePayload."""
        return chat.ResponsePayload.model_validate(resp)

    @classmethod
    def model_to_chat_streaming(
        cls, resp: dict[str, Any], config: EndpointConfig
    ) -> chat.StreamResponsePayload:
        """Validate a raw streaming-chunk dict into a chat StreamResponsePayload."""
        return chat.StreamResponsePayload.model_validate(resp)

    @classmethod
    def model_to_embeddings(
        cls, resp: dict[str, Any], config: EndpointConfig
    ) -> embeddings.ResponsePayload:
        """Validate a raw response dict into an embeddings ResponsePayload."""
        return embeddings.ResponsePayload.model_validate(resp)
class LiteLLMProvider(BaseProvider):
    """
    Provider that uses LiteLLM library to support any LLM provider.
    This serves as a fallback for providers not natively supported.
    """

    NAME = "LiteLLM"
    CONFIG_TYPE = LiteLLMConfig
    # NOTE(review): per-action API path suffixes; the "{model}" entries look like
    # templates substituted by the routing layer — confirm against BaseProvider.
    PASSTHROUGH_PROVIDER_PATHS = {
        PassthroughAction.OPENAI_CHAT: "chat/completions",
        PassthroughAction.OPENAI_EMBEDDINGS: "embeddings",
        PassthroughAction.OPENAI_RESPONSES: "responses",
        PassthroughAction.ANTHROPIC_MESSAGES: "messages",
        PassthroughAction.GEMINI_GENERATE_CONTENT: "{model}:generateContent",
        PassthroughAction.GEMINI_STREAM_GENERATE_CONTENT: "{model}:streamGenerateContent",
    }

    def __init__(self, config: EndpointConfig, enable_tracing: bool = False) -> None:
        super().__init__(config, enable_tracing=enable_tracing)
        # Fail fast if the endpoint was configured with a non-LiteLLM model config.
        if config.model.config is None or not isinstance(config.model.config, LiteLLMConfig):
            raise TypeError(f"Unexpected config type {config.model.config}")
        self.litellm_config: LiteLLMConfig = config.model.config

    def get_provider_name(self) -> str:
        """
        Return the actual underlying provider name instead of "LiteLLM".
        For example, if litellm_provider is "anthropic", returns "anthropic"
        instead of "LiteLLM" for more accurate tracing and metrics.
        """
        if self.litellm_config.litellm_provider:
            return self.litellm_config.litellm_provider
        return self.NAME

    @property
    def adapter_class(self):
        """Adapter used to translate payloads between the gateway and LiteLLM."""
        return LiteLLMAdapter

    def _build_litellm_kwargs(self, payload: dict[str, Any]) -> dict[str, Any]:
        """Copy the payload and merge in any configured LiteLLM auth kwargs."""
        kwargs = {**payload}
        if self.litellm_config.litellm_auth_config:
            kwargs.update(self.litellm_config.litellm_auth_config)
        return kwargs

    async def _chat(self, payload: chat.RequestPayload) -> chat.ResponsePayload:
        """Handle a non-streaming chat request via litellm.acompletion."""
        import litellm
        from fastapi.encoders import jsonable_encoder
        payload_dict = jsonable_encoder(payload, exclude_none=True)
        self.check_for_model_field(payload_dict)
        kwargs = self._build_litellm_kwargs(
            self.adapter_class.chat_to_model(payload_dict, self.config)
        )
        response = await litellm.acompletion(**kwargs)
        # Convert to dict for adapter processing
        resp_dict = {
            "id": response.id,
            "object": response.object,
            "created": response.created,
            "model": response.model,
            "choices": [
                {
                    "index": choice.index,
                    "message": {
                        "role": choice.message.role,
                        "content": choice.message.content,
                        "tool_calls": (
                            [
                                {
                                    "id": tc.id,
                                    "type": tc.type,
                                    "function": {
                                        "name": tc.function.name,
                                        "arguments": tc.function.arguments,
                                    },
                                }
                                for tc in choice.message.tool_calls
                            ]
                            if choice.message.tool_calls
                            else None
                        ),
                    },
                    "finish_reason": choice.finish_reason,
                }
                for choice in response.choices
            ],
            "usage": {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens,
            },
        }
        return self.adapter_class.model_to_chat(resp_dict, self.config)

    async def _chat_stream(
        self, payload: chat.RequestPayload
    ) -> AsyncIterable[chat.StreamResponsePayload]:
        """Handle a streaming chat request, yielding one payload per LiteLLM chunk."""
        import litellm
        from fastapi.encoders import jsonable_encoder
        payload_dict = jsonable_encoder(payload, exclude_none=True)
        self.check_for_model_field(payload_dict)
        kwargs = self._build_litellm_kwargs(
            self.adapter_class.chat_to_model(payload_dict, self.config)
        )
        kwargs["stream"] = True
        # Inject stream_options.include_usage so token usage is reported in the
        # stream (only needed when tracing records token counts).
        if self._enable_tracing:
            if kwargs.get("stream_options") is None:
                kwargs["stream_options"] = {"include_usage": True}
            elif "include_usage" not in kwargs["stream_options"]:
                kwargs["stream_options"]["include_usage"] = True
        response = await litellm.acompletion(**kwargs)
        async for chunk in response:
            # Convert chunk to dict for adapter processing
            resp_dict = {
                "id": chunk.id,
                "object": chunk.object,
                "created": chunk.created,
                "model": chunk.model,
                "choices": [
                    {
                        "index": choice.index,
                        "finish_reason": choice.finish_reason,
                        "delta": {
                            "role": getattr(choice.delta, "role", None),
                            "content": getattr(choice.delta, "content", None),
                            "tool_calls": (
                                [
                                    {
                                        "index": tc_idx,
                                        "id": getattr(tc, "id", None),
                                        "type": getattr(tc, "type", None),
                                        "function": {
                                            "name": getattr(tc.function, "name", None),
                                            "arguments": getattr(tc.function, "arguments", None),
                                        }
                                        if hasattr(tc, "function")
                                        else None,
                                    }
                                    for tc_idx, tc in enumerate(choice.delta.tool_calls)
                                ]
                                if getattr(choice.delta, "tool_calls", None)
                                else None
                            ),
                        },
                    }
                    for choice in chunk.choices
                ],
            }
            yield self.adapter_class.model_to_chat_streaming(resp_dict, self.config)

    async def _embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
        """Handle an embeddings request via litellm.aembedding."""
        import litellm
        from fastapi.encoders import jsonable_encoder
        payload_dict = jsonable_encoder(payload, exclude_none=True)
        self.check_for_model_field(payload_dict)
        kwargs = self._build_litellm_kwargs(
            self.adapter_class.embeddings_to_model(payload_dict, self.config)
        )
        response = await litellm.aembedding(**kwargs)
        # Convert to dict for adapter processing
        resp_dict = {
            "data": [
                {"embedding": data["embedding"], "index": idx}
                for idx, data in enumerate(response.data)
            ],
            "model": response.model,
            "usage": {
                "prompt_tokens": response.usage.prompt_tokens,
                "total_tokens": response.usage.total_tokens,
            },
        }
        return self.adapter_class.model_to_embeddings(resp_dict, self.config)

    def _extract_passthrough_token_usage(
        self, action: PassthroughAction, result: dict[str, Any]
    ) -> dict[str, int] | None:
        """
        Extract token usage from LiteLLM passthrough response.
        LiteLLM normalizes responses to different formats depending on the action:
        - OpenAI-style actions: usage.prompt_tokens, completion_tokens, total_tokens
        - Anthropic-style actions: usage.input_tokens, output_tokens
        - Gemini-style actions: usageMetadata.promptTokenCount, candidatesTokenCount,
        totalTokenCount
        """
        # NB: ``action`` is not consulted here; every known usage format is probed
        # in order until one matches.
        usage = result.get("usage")
        # Try OpenAI format first (most common)
        if token_usage := self._extract_token_usage_from_dict(
            usage,
            "prompt_tokens",
            "completion_tokens",
            "total_tokens",
            cache_read_key="prompt_tokens_details.cached_tokens",
        ):
            return token_usage
        # Try Anthropic format
        if token_usage := self._extract_token_usage_from_dict(
            usage,
            "input_tokens",
            "output_tokens",
            cache_read_key="cache_read_input_tokens",
            cache_creation_key="cache_creation_input_tokens",
        ):
            return token_usage
        # Try Gemini format
        return self._extract_token_usage_from_dict(
            result.get("usageMetadata"),
            "promptTokenCount",
            "candidatesTokenCount",
            "totalTokenCount",
            cache_read_key="cachedContentTokenCount",
        )

    def _extract_streaming_token_usage(self, chunk: Any) -> dict[str, int]:
        """
        Extract token usage from LiteLLM streaming chunks.
        LiteLLM handles multiple provider formats:
        - OpenAI: {"usage": {"prompt_tokens": X, "completion_tokens": Y, "total_tokens": Z}}
        - Anthropic: message_start/message_delta events with usage data
        - Gemini: {"usageMetadata": {"promptTokenCount": X, ...}}
        Chunk can be:
        - bytes: SSE-formatted data (for Anthropic passthrough)
        - dict: Direct response dict
        - object with model_dump: LiteLLM response object (OpenAI/Gemini)
        Returns:
            A dictionary with token usage found in this chunk.
        """
        usage: dict[str, int] = {}
        try:
            # Handle bytes (SSE format from Anthropic)
            if isinstance(chunk, bytes):
                for data in parse_sse_lines(chunk):
                    usage.update(self._extract_usage_from_data(data))
            elif isinstance(chunk, dict):
                usage.update(self._extract_usage_from_data(chunk))
            elif hasattr(chunk, "model_dump"):
                data = chunk.model_dump()
                usage.update(self._extract_usage_from_data(data))
        except AttributeError:
            # Malformed chunks are ignored rather than failing the stream.
            pass
        return usage

    def _extract_usage_from_data(self, data: dict[str, Any]) -> dict[str, int]:
        """Extract token usage from a parsed data dictionary."""
        usage = data.get("usage")
        # OpenAI format (in chunk.usage)
        if token_usage := self._extract_token_usage_from_dict(
            usage,
            "prompt_tokens",
            "completion_tokens",
            "total_tokens",
            cache_read_key="prompt_tokens_details.cached_tokens",
        ):
            return token_usage
        # OpenAI Responses API format (usage nested in response object)
        resp_usage = data.get("response", {}).get("usage")
        if token_usage := self._extract_token_usage_from_dict(
            resp_usage,
            "input_tokens",
            "output_tokens",
            "total_tokens",
            cache_read_key="input_tokens_details.cached_tokens",
        ):
            return token_usage
        # Anthropic format (in chunk.usage)
        if token_usage := self._extract_token_usage_from_dict(
            usage,
            "input_tokens",
            "output_tokens",
            cache_read_key="cache_read_input_tokens",
            cache_creation_key="cache_creation_input_tokens",
        ):
            return token_usage
        # Anthropic message_start format (input_tokens in message.usage)
        if data.get("type") == "message_start":
            msg_usage = data.get("message", {}).get("usage")
            if token_usage := self._extract_token_usage_from_dict(
                msg_usage,
                "input_tokens",
                "output_tokens",
                cache_read_key="cache_read_input_tokens",
                cache_creation_key="cache_creation_input_tokens",
            ):
                return token_usage
        # Gemini format
        if token_usage := self._extract_token_usage_from_dict(
            data.get("usageMetadata"),
            "promptTokenCount",
            "candidatesTokenCount",
            "totalTokenCount",
            cache_read_key="cachedContentTokenCount",
        ):
            return token_usage
        return {}

    async def _stream_passthrough_with_usage(
        self, stream: AsyncIterable[Any]
    ) -> AsyncIterable[bytes]:
        """
        Stream passthrough response while accumulating token usage.
        LiteLLM streaming methods yield different types:
        - Anthropic: bytes (SSE format)
        - OpenAI/Gemini: objects with model_dump
        Token usage is only extracted when tracing is enabled.
        """
        accumulated_usage: dict[str, int] = {} if self._enable_tracing else None
        try:
            async for chunk in stream:
                if accumulated_usage is not None:
                    chunk_usage = self._extract_streaming_token_usage(chunk)
                    accumulated_usage.update(chunk_usage)
                if isinstance(chunk, bytes):
                    yield chunk
                else:
                    # Non-bytes chunks are re-serialized as SSE "data:" events.
                    data = json.dumps(self._response_to_dict(chunk))
                    yield f"data: {data}\n\n".encode()
        finally:
            if accumulated_usage is not None:
                # Calculate total if we have input and output but no total
                if (
                    TokenUsageKey.INPUT_TOKENS in accumulated_usage
                    and TokenUsageKey.OUTPUT_TOKENS in accumulated_usage
                    and TokenUsageKey.TOTAL_TOKENS not in accumulated_usage
                ):
                    accumulated_usage[TokenUsageKey.TOTAL_TOKENS] = (
                        accumulated_usage[TokenUsageKey.INPUT_TOKENS]
                        + accumulated_usage[TokenUsageKey.OUTPUT_TOKENS]
                    )
                self._set_span_token_usage(accumulated_usage)

    async def _passthrough(
        self,
        action: PassthroughAction,
        payload: dict[str, Any],
        headers: dict[str, str] | None = None,
    ) -> dict[str, Any] | AsyncIterable[Any]:
        """
        Passthrough endpoint for raw API requests using LiteLLM.
        Routes requests to the appropriate LiteLLM SDK method based on the action.
        The headers parameter is unused because LiteLLM handles auth via kwargs.
        """
        self._validate_passthrough_action(action)
        model_name = self.adapter_class._get_litellm_model_name(self.config)
        kwargs = self._build_litellm_kwargs(payload)
        kwargs["model"] = model_name
        match action:
            case PassthroughAction.OPENAI_RESPONSES:
                result = await self._passthrough_openai_responses(kwargs)
            case PassthroughAction.ANTHROPIC_MESSAGES:
                result = await self._passthrough_anthropic_messages(kwargs)
            case PassthroughAction.GEMINI_GENERATE_CONTENT:
                result = await self._passthrough_gemini_generate_content(kwargs)
            case PassthroughAction.GEMINI_STREAM_GENERATE_CONTENT:
                result = self._passthrough_gemini_stream_generate_content(kwargs)
            case PassthroughAction.OPENAI_CHAT:
                result = await self._passthrough_openai_chat(kwargs)
            case PassthroughAction.OPENAI_EMBEDDINGS:
                result = await self._passthrough_openai_embeddings(kwargs)
            case _:
                raise ValueError(f"Unsupported passthrough action: {action!r}")
        # Wrap streaming responses with token usage extraction
        if not isinstance(result, dict):
            return self._stream_passthrough_with_usage(result)
        return result

    async def _passthrough_openai_responses(self, kwargs: dict[str, Any]) -> dict[str, Any]:
        """Passthrough for OpenAI Response API using litellm.aresponses()."""
        import litellm
        if kwargs.pop("stream", False):
            return self._stream_openai_responses(kwargs)
        response = await litellm.aresponses(**kwargs)
        return self._response_to_dict(response)

    def _stream_openai_responses(self, kwargs: dict[str, Any]) -> AsyncIterable[Any]:
        """Stream OpenAI Response API responses."""
        # Inject stream_options.include_usage=true to get usage in final chunk (only when tracing)
        if self._enable_tracing:
            if kwargs.get("stream_options") is None:
                kwargs["stream_options"] = {"include_usage": True}
            elif "include_usage" not in kwargs["stream_options"]:
                kwargs["stream_options"]["include_usage"] = True
        async def stream_generator():
            import litellm
            response = await litellm.aresponses(**kwargs, stream=True)
            async for chunk in response:
                yield chunk
        return stream_generator()

    async def _passthrough_anthropic_messages(self, kwargs: dict[str, Any]) -> dict[str, Any]:
        """Passthrough for Anthropic Messages API using litellm.anthropic.messages.acreate()."""
        import litellm
        if kwargs.pop("stream", False):
            return self._stream_anthropic_messages(kwargs)
        response = await litellm.anthropic.messages.acreate(**kwargs)
        return self._response_to_dict(response)

    def _stream_anthropic_messages(self, kwargs: dict[str, Any]) -> AsyncIterable[bytes]:
        """Stream Anthropic Messages API responses."""
        async def stream_generator():
            import litellm
            response = await litellm.anthropic.messages.acreate(**kwargs, stream=True)
            async for chunk in response:
                # LiteLLM returns bytes directly, so we can yield them directly
                yield chunk
        return stream_generator()

    async def _passthrough_gemini_generate_content(self, kwargs: dict[str, Any]) -> dict[str, Any]:
        """Passthrough for Gemini generateContent API."""
        from litellm.google_genai import agenerate_content
        response = await agenerate_content(**kwargs)
        return self._response_to_dict(response)

    def _passthrough_gemini_stream_generate_content(
        self, kwargs: dict[str, Any]
    ) -> AsyncIterable[Any]:
        """Passthrough for Gemini streamGenerateContent API."""
        async def stream_generator():
            from litellm.google_genai import agenerate_content
            response = await agenerate_content(**kwargs, stream=True)
            async for chunk in response:
                yield chunk
        return stream_generator()

    async def _passthrough_openai_chat(self, kwargs: dict[str, Any]) -> dict[str, Any]:
        """Passthrough for OpenAI Chat Completions API."""
        import litellm
        if kwargs.pop("stream", False):
            return self._stream_openai_chat(kwargs)
        response = await litellm.acompletion(**kwargs)
        return self._response_to_dict(response)

    def _stream_openai_chat(self, kwargs: dict[str, Any]) -> AsyncIterable[Any]:
        """Stream OpenAI Chat Completions API responses."""
        # Inject stream_options.include_usage=true to get usage in final chunk (only when tracing)
        if self._enable_tracing:
            if kwargs.get("stream_options") is None:
                kwargs["stream_options"] = {"include_usage": True}
            elif "include_usage" not in kwargs["stream_options"]:
                kwargs["stream_options"]["include_usage"] = True
        async def stream_generator():
            import litellm
            response = await litellm.acompletion(**kwargs, stream=True)
            async for chunk in response:
                yield chunk
        return stream_generator()

    async def _passthrough_openai_embeddings(self, kwargs: dict[str, Any]) -> dict[str, Any]:
        """Passthrough for OpenAI Embeddings API."""
        import litellm
        response = await litellm.aembedding(**kwargs)
        return self._response_to_dict(response)

    def _response_to_dict(self, response: Any) -> dict[str, Any]:
        """Convert a LiteLLM response object to a dictionary."""
        match response:
            case dict():
                return response
            case _ if hasattr(response, "model_dump"):
                return response.model_dump()
            case _:
                raise TypeError(f"Unexpected response type: {type(response).__name__}")
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/gateway/providers/litellm.py",
"license": "Apache License 2.0",
"lines": 469,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.