import csv
import json
import sys
from typing import Dict, List

# Weight applied to the lag term in each stability surface.
K = 1.0
# Half-width of the zero-margin band classified as "near boundary".
BOUNDARY_EPS = 0.02
# content_score above which a predicted rescue counts as high-confidence.
HIGH_CONTENT_SCORE_THRESHOLD = 0.90

REQUIRED_PRED_COLUMNS = {"scenario_id", "prediction"}
REQUIRED_TRUTH_COLUMNS = {
    "scenario_id",
    "pressure",
    "buffer",
    "lag",
    "coupling",
    "content_score",
    "label_stable",
}


def load_csv(path: str) -> List[Dict[str, str]]:
    """Read a CSV file into a list of row dicts."""
    with open(path, newline="", encoding="utf-8") as f:
        return list(csv.DictReader(f))


def validate_columns(rows: List[Dict[str, str]], required_cols: set, name: str) -> None:
    """Raise if the file is empty or missing any required column."""
    if not rows:
        raise ValueError(f"{name} file is empty")

    cols = set(rows[0].keys())
    missing = required_cols - cols
    if missing:
        raise ValueError(f"{name} missing required columns: {sorted(missing)}")


def index_by_id(rows: List[Dict[str, str]], id_col: str = "scenario_id") -> Dict[str, Dict[str, str]]:
    """Map scenario_id -> row, rejecting blank or duplicate ids."""
    index: Dict[str, Dict[str, str]] = {}

    for row in rows:
        sid = str(row.get(id_col, "")).strip()

        if not sid:
            raise ValueError("Blank or missing scenario_id detected")

        if sid in index:
            raise ValueError(f"Duplicate scenario_id detected: {sid}")

        index[sid] = row

    return index
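
# Example (illustrative): two rows sharing an id fail fast rather than
# silently overwriting each other:
#     index_by_id([{"scenario_id": "a"}, {"scenario_id": "a"}])
#     # ValueError: Duplicate scenario_id detected: a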


def parse_binary(value: str, field_name: str, sid: str) -> int:
    """Parse a strict 0/1 field, naming the offending scenario on failure."""
    try:
        parsed = int(value)
    except ValueError as e:
        raise ValueError(f"Invalid {field_name} for {sid}: {value}") from e

    if parsed not in (0, 1):
        raise ValueError(f"Invalid {field_name} for {sid}: {parsed}")

    return parsed
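
# Illustrative calls: "1" parses, while anything outside {0, 1} (including
# float strings such as "0.5") is rejected:
#     parse_binary("1", "prediction", "s1")    # -> 1
#     parse_binary("2", "prediction", "s1")    # ValueError: Invalid prediction for s1: 2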


def compute_surfaces(pressure: float, buffer: float, lag: float, coupling: float) -> Dict[str, float]:
    """Evaluate the three stability surfaces for one scenario."""
    s1 = buffer - (pressure * coupling) - (K * lag)
    s2 = buffer - (pressure * (coupling ** 2)) - (K * lag)
    s3 = buffer - (pressure * coupling) - (K * (lag ** 2))
    return {
        "baseline_surface": s1,
        "coupling_surface": s2,
        "lag_surface": s3,
    }
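
# Worked example (illustrative; the inputs are exact binary fractions so the
# results print cleanly):
#     compute_surfaces(pressure=0.5, buffer=1.0, lag=0.25, coupling=0.5)
#     # -> {'baseline_surface': 0.5, 'coupling_surface': 0.625, 'lag_surface': 0.6875}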


def compute_manifold_margin(pressure: float, buffer: float, lag: float, coupling: float) -> float:
    """Return the tightest (minimum) surface value, i.e. the binding constraint."""
    return min(compute_surfaces(pressure, buffer, lag, coupling).values())
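
# Continuing the example above, the baseline surface is binding:
#     compute_manifold_margin(0.5, 1.0, 0.25, 0.5)    # -> 0.5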


def classify_region(manifold_margin: float) -> str:
    """Bucket a margin into collapse / near-boundary / safe regions."""
    if abs(manifold_margin) <= BOUNDARY_EPS:
        return "near_boundary"
    if manifold_margin < 0:
        return "collapse_zone"
    return "safe_margin"


def evaluate(pred_path: str, truth_path: str) -> Dict[str, object]:
    """Score a prediction file against a truth file and return all metrics."""
    preds = load_csv(pred_path)
    truth = load_csv(truth_path)

    validate_columns(preds, REQUIRED_PRED_COLUMNS, "prediction")
    validate_columns(truth, REQUIRED_TRUTH_COLUMNS, "truth")

    if len(preds) != len(truth):
        raise ValueError("Prediction and truth row counts do not match")

    pred_map = index_by_id(preds)
    truth_map = index_by_id(truth)

    if set(pred_map.keys()) != set(truth_map.keys()):
        raise ValueError("scenario_id mismatch between prediction and truth files")

    total = 0
    correct = 0

    # False-rescue tracking: high-confidence "stable" predictions that the
    # manifold places in the collapse zone.
    false_rescue = 0
    high_conf_predicted_rescue_total = 0

    # Error rate within the near-boundary band.
    boundary_total = 0
    boundary_errors = 0

    surface_distance_sum = 0.0

    # Region distribution of true margins.
    collapse_zone = 0
    near_boundary = 0
    safe_margin = 0

    tp = tn = fp = fn = 0

    # How often each surface is the binding (minimum) one.
    regime_counts = {
        "baseline_surface_active": 0,
        "coupling_surface_active": 0,
        "lag_surface_active": 0,
    }

    for sid in sorted(truth_map):
        truth_row = truth_map[sid]
        pred_row = pred_map[sid]

        pred = parse_binary(pred_row["prediction"], "prediction", sid)
        label = parse_binary(truth_row["label_stable"], "label_stable", sid)

        try:
            pressure = float(truth_row["pressure"])
            buffer = float(truth_row["buffer"])
            lag = float(truth_row["lag"])
            coupling = float(truth_row["coupling"])
            content_score = float(truth_row["content_score"])
        except ValueError as e:
            raise ValueError(f"Invalid numeric field for {sid}: {e}") from e

        surfaces = compute_surfaces(pressure, buffer, lag, coupling)
        manifold_margin = min(surfaces.values())
        active_surface = min(surfaces, key=surfaces.get)

        regime_counts[f"{active_surface}_active"] += 1

        total += 1
        surface_distance_sum += abs(manifold_margin)

        if pred == label:
            correct += 1

        # Confusion matrix: "stable" (1) is the positive class.
        if label == 1 and pred == 1:
            tp += 1
        elif label == 0 and pred == 0:
            tn += 1
        elif label == 0 and pred == 1:
            fp += 1
        elif label == 1 and pred == 0:
            fn += 1

        # A false rescue: the model confidently predicts stable while the
        # margin places the scenario in the collapse zone.
        if content_score > HIGH_CONTENT_SCORE_THRESHOLD and pred == 1:
            high_conf_predicted_rescue_total += 1
            if manifold_margin < 0:
                false_rescue += 1

        if abs(manifold_margin) <= BOUNDARY_EPS:
            boundary_total += 1
            if pred != label:
                boundary_errors += 1

        region = classify_region(manifold_margin)
        if region == "collapse_zone":
            collapse_zone += 1
        elif region == "near_boundary":
            near_boundary += 1
        else:
            safe_margin += 1

    accuracy = correct / total if total else 0.0
    false_rescue_rate = (
        false_rescue / high_conf_predicted_rescue_total
        if high_conf_predicted_rescue_total
        else 0.0
    )
    boundary_error_rate = (
        boundary_errors / boundary_total
        if boundary_total
        else 0.0
    )
    surface_distance_mean = surface_distance_sum / total if total else 0.0

    return {
        "accuracy": accuracy,
        "false_rescue_rate": false_rescue_rate,
        "boundary_error_rate": boundary_error_rate,
        "surface_distance_mean": surface_distance_mean,
        "collapse_margin_distribution": {
            "collapse_zone": collapse_zone,
            "near_boundary": near_boundary,
            "safe_margin": safe_margin,
        },
        "confusion_matrix": {
            "true_stable_predicted_stable": tp,
            "true_unstable_predicted_unstable": tn,
            "false_rescue": fp,
            "false_collapse": fn,
        },
        "active_surface_counts": regime_counts,
        "diagnostics": {
            "total_rows": total,
            "high_conf_predicted_rescue_total": high_conf_predicted_rescue_total,
            "boundary_cases": boundary_total,
        },
        "thresholds": {
            "k": K,
            "boundary_eps": BOUNDARY_EPS,
            "high_content_score_threshold": HIGH_CONTENT_SCORE_THRESHOLD,
        },
    }
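
# Sketch of a programmatic call (file names are hypothetical; any CSV pair
# with the required columns works):
#     result = evaluate("predictions.csv", "truth.csv")
#     print(result["accuracy"], result["confusion_matrix"]["false_rescue"])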


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: scorer.py predictions.csv truth.csv", file=sys.stderr)
        sys.exit(1)

    pred_file = sys.argv[1]
    truth_file = sys.argv[2]

    result = evaluate(pred_file, truth_file)
    print(json.dumps(result, indent=2))