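"""Scoring script: compares a predictions CSV against a ground-truth CSV.

Both files are expected to contain a single ``label_*`` column with binary
labels (inferred from the metric names below: 1 = cascade, 0 = safe) and,
optionally, a ``scenario_id`` column used to align rows. Reports accuracy,
precision, recall, F1, the confusion matrix, and a false-safe rate (the
fraction of true cascades predicted as safe).
"""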
import sys

import pandas as pd
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    f1_score,
    precision_score,
    recall_score,
)

def resolve_label(df: pd.DataFrame) -> pd.Series:
    """Return the single ``label_*`` column of ``df``; raise if zero or several exist."""
    label_cols = [c for c in df.columns if c.startswith("label_")]
    if len(label_cols) == 1:
        return df[label_cols[0]]
    raise ValueError(f"Expected one label column, found: {label_cols}")

def align_frames(preds: pd.DataFrame, truth: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Align predictions and truth row-for-row, sorting on ``scenario_id`` when present."""
    if len(preds) != len(truth):
        raise ValueError(
            f"Row count mismatch: predictions has {len(preds)} rows, truth has {len(truth)} rows"
        )
    if "scenario_id" in preds.columns and "scenario_id" in truth.columns:
        preds = preds.sort_values("scenario_id").reset_index(drop=True)
        truth = truth.sort_values("scenario_id").reset_index(drop=True)
        if not preds["scenario_id"].equals(truth["scenario_id"]):
            raise ValueError("scenario_id mismatch after alignment between predictions and truth")
    # Without a scenario_id column, rows are assumed to already be in matching order.
    return preds, truth

def score(predictions_path: str, ground_truth_path: str) -> dict:
    """Compute binary classification metrics for a predictions CSV against ground truth."""
    preds = pd.read_csv(predictions_path)
    truth = pd.read_csv(ground_truth_path)
    preds, truth = align_frames(preds, truth)
    y_pred = resolve_label(preds)
    y_true = resolve_label(truth)

    # Standard binary metrics; labels are assumed to be 0/1 with 1 as the positive class.
    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred, zero_division=0)
    recall = recall_score(y_true, y_pred, zero_division=0)
    f1 = f1_score(y_true, y_pred, zero_division=0)

    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    # False-safe rate: fraction of true positives (cascades) missed, i.e. predicted safe.
    false_safe_rate = fn / (fn + tp) if (fn + tp) > 0 else 0.0

    return {
        # Cast to plain floats so the result prints and serializes cleanly.
        "accuracy": float(accuracy),
        "precision": float(precision),
        "recall_cascade_detection": float(recall),
        "false_safe_rate": float(false_safe_rate),
        "f1": float(f1),
        "confusion_matrix": {
            "tp": int(tp),
            "fp": int(fp),
            "tn": int(tn),
            "fn": int(fn),
        },
    }

if __name__ == "__main__":
    if len(sys.argv) != 3:
        raise SystemExit("Usage: python scorer.py <predictions.csv> <ground_truth.csv>")
    print(score(sys.argv[1], sys.argv[2]))
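
# Example invocation (hypothetical file names):
#   python scorer.py predictions.csv ground_truth.csv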