| import csv |
| import re |
| from dataclasses import dataclass |
| from typing import Dict, List, Tuple |
|
|
@dataclass
class RowResult:
    """Per-row grading outcome for one CSV record."""

    id: str  # row identifier, taken from the CSV "id" column
    score: float  # fraction of expected labels matched (0.0, 0.5, or 1.0)
    passed: bool  # True only when score == 1.0 (both labels matched)
    reasons: List[str]  # human-readable mismatch descriptions, empty when passed
|
|
| def _parse_expected(s: str) -> Dict[str, int]: |
| out = {} |
| for part in (s or "").split(";"): |
| if part.strip(): |
| k, v = part.split("=") |
| out[k.strip()] = int(v.strip()) |
| return out |
|
|
| def _norm(s: str) -> str: |
| return re.sub(r"\s+", " ", (s or "").lower().strip()) |
|
|
def _justification_gap(observations: str, analysis: str, response: str) -> bool:
    """Return True when the response takes a clinical action that neither the
    response itself nor the analysis justifies.

    Note: `observations` is accepted for signature symmetry but not consulted.
    """
    analysis_text = _norm(analysis)
    response_text = _norm(response)

    # No action proposed -> no gap possible.
    action_verbs = ("start", "order", "admit", "prescribe", "initiate")
    if not any(verb in response_text for verb in action_verbs):
        return False

    # The response states its own reasoning -> justified.
    reason_cues = ("because", "given", "due to", "based on", "since")
    if any(cue in response_text for cue in reason_cues):
        return False

    # Otherwise a gap exists only if the analysis also lacks a clinical driver.
    drivers = ("risk", "elevated", "confirmed", "positive", "fracture", "score")
    return not any(driver in analysis_text for driver in drivers)
|
|
def _hidden_premise(observations: str, analysis: str, response: str) -> bool:
    """Return True when the response rests on a premise absent from (or
    contradicted by) the observations/analysis."""
    obs_text = _norm(observations)
    analysis_text = _norm(analysis)
    response_text = _norm(response)

    # Escalation (admit/iv/mri) without any documented severity cue
    # in either the observations or the analysis.
    escalations = ("admit", "iv", "mri")
    severity_cues = ("severe", "red flag", "failed", "high risk")
    documented = obs_text + " " + analysis_text
    if any(term in response_text for term in escalations):
        if not any(cue in documented for cue in severity_cues):
            return True

    # Treatment that contradicts the stated finding in the analysis.
    contradictions = (
        ("prediabetes", "insulin"),
        ("viral", "antibiotic"),
    )
    return any(
        finding in analysis_text and treatment in response_text
        for finding, treatment in contradictions
    )
|
|
def score_csv(path: str) -> Tuple[float, List[RowResult]]:
    """Score each row of the CSV at *path* against its expected labels.

    Expected columns: "id", "observations", "analysis", "model_response",
    and "labels_expected" (a ';'-separated 'key=value' string parsed by
    _parse_expected).

    Each row is scored on two detectors (justification_gap, hidden_premise);
    every detector that agrees with the expected label contributes 0.5, so a
    row score is 0.0, 0.5, or 1.0.

    Returns:
        (overall, results) where overall is the mean row score (0.0 for an
        empty file) and results holds one RowResult per row.
    """
    results: List[RowResult] = []
    with open(path, newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            exp = _parse_expected(row["labels_expected"])

            got_gap = int(_justification_gap(
                row["observations"], row["analysis"], row["model_response"]
            ))
            got_hidden = int(_hidden_premise(
                row["observations"], row["analysis"], row["model_response"]
            ))

            reasons = []
            if got_gap != exp.get("justification_gap", 0):
                reasons.append("justification_gap mismatch")
            if got_hidden != exp.get("hidden_premise", 0):
                reasons.append("hidden_premise mismatch")

            # Each of the two detectors contributes half of the row score.
            matches = sum([
                got_gap == exp.get("justification_gap", 0),
                got_hidden == exp.get("hidden_premise", 0),
            ])
            score = matches / 2.0

            results.append(RowResult(
                id=row["id"],
                score=score,
                passed=(score == 1.0),
                reasons=reasons,
            ))

    # BUG FIX: the original divided by len(results) unconditionally, raising
    # ZeroDivisionError on a header-only or empty CSV; report 0.0 instead.
    overall = sum(r.score for r in results) / len(results) if results else 0.0
    return overall, results
|
|
if __name__ == "__main__":
    # CLI entry point: score a CSV file and emit a JSON report on stdout.
    import argparse
    import json

    parser = argparse.ArgumentParser()
    parser.add_argument("--csv", required=True)
    cli_args = parser.parse_args()

    overall, row_results = score_csv(cli_args.csv)
    payload = {
        "overall_score": overall,
        "rows": [r.__dict__ for r in row_results],
    }
    print(json.dumps(payload, indent=2))
|
|