ClarusC64 committed
Commit db4dbe8 · verified · 1 Parent(s): 2de6832

Create scorer.py

Files changed (1)
  1. scorer.py +102 -0
scorer.py ADDED
@@ -0,0 +1,102 @@
import csv
import re
from dataclasses import dataclass
from typing import Dict, List, Tuple


@dataclass
class RowResult:
    id: str
    score: float
    passed: bool
    reasons: List[str]


def _parse_expected(s: str) -> Dict[str, int]:
    # Parse an expected-labels string such as "justification_gap=1;hidden_premise=0".
    out: Dict[str, int] = {}
    for part in (s or "").split(";"):
        if part.strip():
            k, v = part.split("=", 1)
            out[k.strip()] = int(v.strip())
    return out


def _norm(s: str) -> str:
    # Lowercase and collapse whitespace so keyword checks are robust to formatting.
    return re.sub(r"\s+", " ", (s or "").lower().strip())


def _justification_gap(observations: str, analysis: str, response: str) -> bool:
    a = _norm(analysis)
    r = _norm(response)

    # Action verbs without linkage
    action_verbs = ["start", "order", "admit", "prescribe", "initiate"]
    if any(v in r for v in action_verbs):
        # Look for causal connectors
        if not any(w in r for w in ["because", "given", "due to", "based on", "since"]):
            # Allow implicit justification if analysis clearly names the driver
            drivers = ["risk", "elevated", "confirmed", "positive", "fracture", "score"]
            if not any(d in a for d in drivers):
                return True
    return False


def _hidden_premise(observations: str, analysis: str, response: str) -> bool:
    o = _norm(observations)
    a = _norm(analysis)
    r = _norm(response)

    # Escalations without stated severity
    if any(w in r for w in ["admit", "iv", "mri"]):
        if not any(w in o + " " + a for w in ["severe", "red flag", "failed", "high risk"]):
            return True

    # Therapy contradicts analysis scope
    if "prediabetes" in a and "insulin" in r:
        return True
    if "viral" in a and "antibiotic" in r:
        return True

    return False


def score_csv(path: str) -> Tuple[float, List[RowResult]]:
    results: List[RowResult] = []
    with open(path, newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            exp = _parse_expected(row["labels_expected"])

            got_gap = 1 if _justification_gap(
                row["observations"], row["analysis"], row["model_response"]
            ) else 0
            got_hidden = 1 if _hidden_premise(
                row["observations"], row["analysis"], row["model_response"]
            ) else 0

            reasons = []
            if got_gap != exp.get("justification_gap", 0):
                reasons.append("justification_gap mismatch")
            if got_hidden != exp.get("hidden_premise", 0):
                reasons.append("hidden_premise mismatch")

            matches = sum([
                got_gap == exp.get("justification_gap", 0),
                got_hidden == exp.get("hidden_premise", 0),
            ])
            score = matches / 2.0

            results.append(RowResult(
                id=row["id"],
                score=score,
                passed=(score == 1.0),
                reasons=reasons,
            ))

    # Guard against an empty CSV to avoid a ZeroDivisionError.
    overall = sum(r.score for r in results) / len(results) if results else 0.0
    return overall, results


if __name__ == "__main__":
    import argparse, json

    ap = argparse.ArgumentParser()
    ap.add_argument("--csv", required=True)
    args = ap.parse_args()
    overall, rows = score_csv(args.csv)
    print(json.dumps({
        "overall_score": overall,
        "rows": [r.__dict__ for r in rows],
    }, indent=2))
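
A minimal usage sketch (the filename "rows.csv" is a hypothetical example; the column names id, observations, analysis, model_response, and labels_expected, with labels in "justification_gap=0;hidden_premise=1" form, are the ones score_csv reads):

    # usage_example.py (illustrative only)
    from scorer import score_csv

    # "rows.csv" is an assumed filename for an evaluation CSV with the
    # columns listed above.
    overall, rows = score_csv("rows.csv")
    print(f"overall score: {overall:.2f}")
    for r in rows:
        if not r.passed:
            print(r.id, r.reasons)

The same scoring can be run from the command line with `python scorer.py --csv rows.csv`, which prints the overall score and per-row results as JSON.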