import argparse
import os
import sqlite3
from datetime import datetime, timedelta
from pathlib import Path
import mlflow
import numpy as np
import pandas as pd
from dotenv import load_dotenv
# Evidently 0.7.x API: Report is in evidently root, presets moved to evidently.presets
from evidently import Report
from evidently.presets import DataDriftPreset
load_dotenv(dotenv_path=".env")
# AG News test set has exactly 1,900 examples per class (25% each).
# This is the distribution the model was validated on; it is our baseline.
REFERENCE_COUNTS = {"World": 1900, "Sports": 1900, "Business": 1900, "Sci/Tech": 1900}
# From our test run: model averaged ~0.97 confidence across the AG News test set.
REFERENCE_CONFIDENCE_MEAN = 0.97
REFERENCE_CONFIDENCE_STD = 0.05
MIN_SAMPLES = 30 # statistical tests are unreliable below this
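
# The reference set below is synthesized rather than loaded from disk: we only
# need the label/confidence *distribution* the model was validated on, so a
# seeded RNG reproduces it deterministically. The normal-then-clip shape is an
# assumption; swap in real validation-set confidences if you have them stored.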
def build_reference_df() -> pd.DataFrame:
    rng = np.random.default_rng(seed=42)
    rows = []
    for label, count in REFERENCE_COUNTS.items():
        confidences = np.clip(
            rng.normal(REFERENCE_CONFIDENCE_MEAN, REFERENCE_CONFIDENCE_STD, count),
            0.5,
            1.0,
        )
        for conf in confidences:
            rows.append({"label": label, "confidence": round(float(conf), 4)})
    return pd.DataFrame(rows)
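
# Pull the live window from SQLite. `cached = 0` keeps only rows the model
# actually scored (cache hits would replay old predictions and skew the
# distribution). This assumes `created_at` is stored as a UTC ISO-8601 string,
# so lexicographic comparison against the cutoff works.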
def load_current_df(db_path: str, hours: int) -> pd.DataFrame:
    cutoff = (datetime.utcnow() - timedelta(hours=hours)).isoformat()
    conn = sqlite3.connect(db_path)
    df = pd.read_sql_query(
        """
        SELECT label, confidence, created_at
        FROM classifications
        WHERE created_at >= ? AND cached = 0
        ORDER BY created_at
        """,
        conn,
        params=(cutoff,),
    )
    conn.close()
    return df
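
# Only the two shared columns are passed to Evidently, so `created_at`
# (present in the current data only) cannot confuse the preset.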
def run_evidently(reference_df: pd.DataFrame, current_df: pd.DataFrame, html_path: str):
    # In evidently 0.7.x, Report.run() returns a Snapshot object.
    # The Snapshot holds results and has save_html() / dict() methods.
    snapshot = Report([DataDriftPreset()]).run(
        reference_data=reference_df[["label", "confidence"]],
        current_data=current_df[["label", "confidence"]],
    )
    snapshot.save_html(html_path)
    return snapshot.dict()
def extract_drift_result(report_dict: dict) -> tuple[bool, float, list[str]]:
    """
    Parse the Evidently 0.7 snapshot dict.
    DriftedColumnsCount gives the overall share of drifted columns.
    ValueDrift per column gives p-values; drift is flagged when p_value < threshold (0.05).
    """
    drifted_share = 0.0
    drifted_columns = []
    for metric in report_dict.get("metrics", []):
        # Snapshot dicts identify metrics by a "metric_id" string such as
        # "ValueDrift(column=label)"; fall back to "metric_name" in case the
        # schema differs across 0.7.x point releases.
        name = metric.get("metric_id") or metric.get("metric_name", "")
        value = metric.get("value")
        if "DriftedColumnsCount" in name and isinstance(value, dict):
            drifted_share = value.get("share", 0.0)
        if "ValueDrift" in name and isinstance(value, (int, float)):
            # value is the p-value; drift detected when p_value < threshold (0.05)
            config = metric.get("config", {})
            column = config.get("column", "")
            if not column and "column=" in name:
                # Recover the column name from the metric id string.
                column = name.split("column=", 1)[1].rstrip(")")
            threshold = config.get("threshold", 0.05)
            if value < threshold:
                drifted_columns.append(column)
    # Dataset-level drift: triggered when ≥50% of columns drift (Evidently default)
    drift_detected = drifted_share >= 0.5
    return drift_detected, drifted_share, drifted_columns
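
# Log everything to the MLflow tracking server. This assumes MLFLOW_TRACKING_URI
# is set in .env (e.g. a DagsHub-hosted MLflow endpoint, per the log message in
# run() below); the lookup fails fast with a KeyError if it is missing.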
def log_to_mlflow(
    drift_detected: bool,
    drift_share: float,
    drifted_columns: list[str],
    current_df: pd.DataFrame,
    html_path: str,
):
    mlflow.set_tracking_uri(os.environ["MLFLOW_TRACKING_URI"])
    mlflow.set_experiment("drift-monitoring")
    run_name = f"drift-{datetime.utcnow().strftime('%Y%m%d-%H%M')}"
    with mlflow.start_run(run_name=run_name):
        mlflow.log_metric("drift_detected", int(drift_detected))
        mlflow.log_metric("drift_share_of_columns", round(drift_share, 4))
        mlflow.log_metric("sample_count", len(current_df))
        mlflow.log_metric("mean_confidence", round(current_df["confidence"].mean(), 4))
        mlflow.log_metric("p10_confidence", round(current_df["confidence"].quantile(0.10), 4))
        label_dist = current_df["label"].value_counts(normalize=True)
        for label, share in label_dist.items():
            key = label.lower().replace("/", "_")
            mlflow.log_metric(f"label_share_{key}", round(share, 4))
        if drifted_columns:
            mlflow.set_tag("drifted_columns", ", ".join(drifted_columns))
        mlflow.log_artifact(html_path)
    print(f"[drift] metrics logged to MLflow run '{run_name}'")
def run(db_path: str = "classifications.db", hours: int = 24, output_dir: str = "reports"):
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M")
    html_path = str(Path(output_dir) / f"drift_{timestamp}.html")
    print("[drift] building reference distribution (AG News test set, 25% per label)...")
    reference_df = build_reference_df()
    print(f"[drift] loading current data from '{db_path}' (last {hours}h, model-only rows)...")
    current_df = load_current_df(db_path, hours)
    if len(current_df) < MIN_SAMPLES:
        print(
            f"[drift] only {len(current_df)} rows in window "
            f"(need ≥ {MIN_SAMPLES}). Run the pipeline longer and retry."
        )
        return
    ref_dist = reference_df["label"].value_counts(normalize=True).round(3)
    cur_dist = current_df["label"].value_counts(normalize=True).round(3)
    print("\n Label distribution comparison:")
    print(f" {'Label':<12} {'Reference':>10} {'Current':>10}")
    for label in REFERENCE_COUNTS:
        print(f" {label:<12} {ref_dist.get(label, 0):>10.1%} {cur_dist.get(label, 0):>10.1%}")
    print(
        f"\n Current confidence - mean: {current_df['confidence'].mean():.3f} "
        f"p10: {current_df['confidence'].quantile(0.1):.3f}\n"
    )
    print("[drift] running Evidently drift report...")
    report_dict = run_evidently(reference_df, current_df, html_path)
    print(f"[drift] report saved → {html_path}")
    drift_detected, drift_share, drifted_columns = extract_drift_result(report_dict)
    print("[drift] logging metrics to MLflow (DagsHub)...")
    log_to_mlflow(drift_detected, drift_share, drifted_columns, current_df, html_path)
    status = "DRIFT DETECTED" if drift_detected else "no drift detected"
    col_info = f" drifted columns: {drifted_columns}" if drifted_columns else ""
    print(f"\n[drift] result: {status}{col_info}")
    print(f"[drift] open {html_path} in a browser for the full visual report")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run drift detection on classifications.db")
    parser.add_argument("--db", default="classifications.db", help="Path to SQLite DB")
    parser.add_argument("--hours", type=int, default=24, help="Lookback window in hours")
    parser.add_argument("--output-dir", default="reports", help="Where to save HTML reports")
    args = parser.parse_args()
    run(db_path=args.db, hours=args.hours, output_dir=args.output_dir)
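
# Example invocation (the script name here is illustrative; adjust to this
# file's actual name):
#   python check_drift.py --db classifications.db --hours 48 --output-dir reports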