"""Scorer for the shock-boundary-transition detection task.

Compares a predictions CSV against a ground-truth CSV and reports accuracy,
precision, recall, F1, and safety-oriented rates derived from the confusion
matrix.
"""

import sys

import pandas as pd
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    f1_score,
    precision_score,
    recall_score,
)

# Canonical label column name expected in the ground-truth file.
TARGET_LABEL = "label_shock_boundary_transition"


def normalize_label(df):
    """Rename the single label_* column to "label".

    Accepts a frame that already has a "label" column, or one with exactly
    one column whose name starts with "label_" (typically TARGET_LABEL).
    Raises if no label column, or more than one, is found.
    """
    if "label" in df.columns:
        return df

    label_cols = [c for c in df.columns if c.startswith("label_")]

    if len(label_cols) == 1:
        return df.rename(columns={label_cols[0]: "label"})

    if len(label_cols) == 0:
        raise ValueError(
            f"No label column found. Expected a 'label' column, {TARGET_LABEL}, "
            f"or a single label_* column."
        )

    raise ValueError(f"Multiple label columns found: {label_cols}")


def validate_binary_labels(series, name):
    """Ensure a label series contains only the binary values 0 and 1."""
    unique_values = set(series.dropna().unique())
    if not unique_values.issubset({0, 1}):
        raise ValueError(
            f"{name} must contain only binary values 0/1. Found: {sorted(unique_values)}"
        )


def validate_lengths(y_true, y_pred):
    """Ensure predictions and ground truth have the same number of rows.

    Rows are compared positionally, so the two files are assumed to list
    scenarios in the same order.
    """
    if len(y_true) != len(y_pred):
        raise ValueError(
            f"Prediction length mismatch. predictions={len(y_pred)} ground_truth={len(y_true)}"
        )


def validate_prediction_columns(preds):
    """Reject unexpected columns in the (normalized) predictions frame."""
    allowed = {"scenario_id", "label"}
    extra_cols = [c for c in preds.columns if c not in allowed]
    if extra_cols:
        raise ValueError(
            "Predictions file should contain only scenario_id and label columns "
            f"after normalization. Extra columns found: {extra_cols}"
        )
|
|
|
|
def score(predictions_path, ground_truth_path):
    """Score a predictions CSV against a ground-truth CSV.

    Each file must provide a label column (either "label" or a single
    label_* column such as TARGET_LABEL); the predictions file may also
    carry a scenario_id column. Rows are compared positionally.
    """
    preds = pd.read_csv(predictions_path)
    truth = pd.read_csv(ground_truth_path)

    truth = normalize_label(truth)
    preds = normalize_label(preds)

    if "label" not in preds.columns:
        raise ValueError("Predictions file must contain a label column")

    if "label" not in truth.columns:
        raise ValueError("Ground truth file must contain a label column")

    validate_prediction_columns(preds)

    y_true = truth["label"]
    y_pred = preds["label"]

    validate_lengths(y_true, y_pred)
    validate_binary_labels(y_true, "Ground truth labels")
    validate_binary_labels(y_pred, "Prediction labels")

    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred, zero_division=0)
    recall = recall_score(y_true, y_pred, zero_division=0)
    f1 = f1_score(y_true, y_pred, zero_division=0)

    # Fix the label order so the unpacking below stays stable even when one
    # class is absent from the data.
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    tn, fp, fn, tp = cm.ravel()

    # Safety-oriented rates: false_safe_rate is the miss rate (true transitions
    # predicted as safe), true_safe_rate is specificity, and
    # positive_prediction_rate is the fraction of scenarios flagged positive.
    false_safe_rate = fn / (fn + tp) if (fn + tp) > 0 else 0.0
    true_safe_rate = tn / (tn + fp) if (tn + fp) > 0 else 0.0
    positive_prediction_rate = (tp + fp) / len(y_pred) if len(y_pred) > 0 else 0.0

    return {
        "target_label": TARGET_LABEL,
        "n_samples": int(len(y_true)),
        "accuracy": float(accuracy),
        "precision": float(precision),
        "recall_cascade_detection": float(recall),
        "false_safe_rate": float(false_safe_rate),
        "true_safe_rate": float(true_safe_rate),
        "positive_prediction_rate": float(positive_prediction_rate),
        "f1": float(f1),
        "confusion_matrix": cm.tolist(),
    }
|
|
|
|
if __name__ == "__main__":
    if len(sys.argv) != 3:
        raise SystemExit("Usage: python scorer.py <predictions.csv> <ground_truth.csv>")

    predictions_path = sys.argv[1]
    ground_truth_path = sys.argv[2]

    print(score(predictions_path, ground_truth_path))