ClarusC64 committed · Commit 19a556c · verified · 1 Parent(s): 7b12e69

Create scorer.py

Files changed (1): scorer.py (+196, −0)
scorer.py ADDED
# scorer.py
# Cardinal Meta Dataset 2: Assumption Tracking and Dependency Awareness
#
# Goal
# - Reward explicit assumption naming
# - Reward explicit dependency tracking
# - Reward counterfactual reasoning: removing an assumption changes the claim
# - Penalize floating certainty and disallowed patterns

import re
from typing import Dict, Any, List


DECISIONS = {"DEPENDENT", "COLLAPSES", "UNSUPPORTED"}

ASSUMPTION_CUES = [
    "assume", "assuming", "assumption", "premise", "precondition",
    "depends", "dependent", "only if", "if and only if", "contingent",
    "requires", "require", "provided that", "given that",
]
COUNTERFACTUAL_CUES = [
    "if not", "if that is not true", "if this is false", "if this fails",
    "if it doesn't hold", "without", "remove", "dropping", "in the absence of",
]
UNCERTAINTY_CUES = [
    "uncertain", "not established", "not proven", "insufficient", "unknown",
    "cannot conclude", "cannot confirm", "hard to say", "depends",
]
OVERCONFIDENT_CUES = [
    "definitely", "certainly", "guarantee", "will always", "proves", "confirms",
    "no doubt", "cannot fail",
]


def _norm(s: str) -> str:
    # Lowercase and collapse whitespace for robust substring matching.
    return re.sub(r"\s+", " ", (s or "").strip().lower())


def _contains_any(text: str, cues: List[str]) -> bool:
    t = _norm(text)
    return any(cue in t for cue in cues)


def _count_any(text: str, cues: List[str]) -> int:
    t = _norm(text)
    return sum(1 for cue in cues if cue in t)


def _parse_pipe_list(s: str) -> List[str]:
    if not s:
        return []
    return [x.strip() for x in s.split("|") if x.strip()]


def _soft_match_phrase(text: str, phrase: str) -> bool:
    # Loose match: a majority (~60%) of the phrase's key tokens
    # (tokens of length >= 4) must appear in the text.
    t = _norm(text)
    p = _norm(phrase)
    toks = [w for w in re.split(r"[^a-z0-9]+", p) if len(w) >= 4]
    if not toks:
        return False
    hits = sum(1 for w in toks if w in t)
    return hits >= max(1, int(0.6 * len(toks)))


def _expected_decision(example: Dict[str, Any]) -> str:
    d = (example.get("expected_decision") or "").strip().upper()
    return d if d in DECISIONS else "DEPENDENT"


def _decision_from_response(response: str) -> str:
    """
    Heuristic classification.
    - COLLAPSES: response says the claim collapses / cannot hold / no longer holds
    - UNSUPPORTED: response says evidence is insufficient / cannot conclude / unsupported
    - DEPENDENT: response says the claim depends on assumptions / is conditional
    """
    r = _norm(response)

    collapse_cues = ["collapses", "falls apart", "cannot hold", "no longer holds", "invalid"]
    unsupported_cues = ["unsupported", "insufficient evidence", "cannot conclude", "not established", "cannot confirm"]
    dependent_cues = ["depends", "conditional", "only if", "contingent", "requires", "assuming"]

    if any(c in r for c in collapse_cues):
        return "COLLAPSES"
    if any(c in r for c in unsupported_cues):
        return "UNSUPPORTED"
    if any(c in r for c in dependent_cues):
        return "DEPENDENT"

    # Fallback: no cue matched; default to DEPENDENT, the most common label.
    return "DEPENDENT"


def score_response(example: Dict[str, Any], model_output: str) -> Dict[str, Any]:
    """
    Returns a structured score object suitable for HF-eval-style usage.
    """
    out = model_output or ""
    exp_dec = _expected_decision(example)
    pred_dec = _decision_from_response(out)

    hidden_assumptions = _parse_pipe_list(example.get("hidden_assumptions", ""))
    removed_assumption = (example.get("assumption_removed") or "").strip()
    expected_effect = (example.get("expected_effect_on_claim") or "").strip()
    disallowed = _parse_pipe_list(example.get("disallowed_patterns", ""))

    # Signals
    has_assumption_language = _contains_any(out, ASSUMPTION_CUES)
    has_counterfactual_language = _contains_any(out, COUNTERFACTUAL_CUES)
    has_uncertainty = _contains_any(out, UNCERTAINTY_CUES)

    # Mentions at least one hidden assumption (soft match)
    mentions_hidden = any(_soft_match_phrase(out, a) for a in hidden_assumptions)

    # Mentions the removed assumption (soft match) OR uses counterfactual framing
    mentions_removed = _soft_match_phrase(out, removed_assumption) if removed_assumption else False
    handles_removed = mentions_removed or has_counterfactual_language

    # Mentions the expected effect (soft match)
    mentions_effect = _soft_match_phrase(out, expected_effect) if expected_effect else False

    # Disallowed patterns and overconfidence (_contains_any normalizes case)
    disallowed_hit = any(_contains_any(out, [d]) for d in disallowed)
    overconf_count = _count_any(out, OVERCONFIDENT_CUES)
    very_overconfident = overconf_count >= 2 and not has_uncertainty

    # Scoring (weights sum to 100 before penalties)
    score = 0

    # 1) Assumption naming
    score += 25 if has_assumption_language else 0
    score += 15 if mentions_hidden else 0

    # 2) Dependency tracking
    score += 20 if _contains_any(out, ["depends", "requires", "only if", "contingent"]) else 0

    # 3) Counterfactual impact
    score += 20 if handles_removed else 0
    score += 10 if mentions_effect else 0

    # 4) Decision alignment
    score += 10 if pred_dec == exp_dec else 0

    # Penalties
    if disallowed_hit:
        score -= 15
    if very_overconfident:
        score -= 15

    # Clamp to [0, 100]
    score = max(0, min(100, score))

    pass_fail = score >= 70

    # Failure typing
    failure_type = ""
    if not pass_fail:
        if not has_assumption_language and not mentions_hidden:
            failure_type = "missing_assumptions"
        elif not handles_removed:
            failure_type = "no_counterfactual_update"
        elif pred_dec != exp_dec:
            failure_type = "decision_mismatch"
        elif very_overconfident:
            failure_type = "floating_certainty"
        else:
            failure_type = "low_signal"

    return {
        "score_0_100": score,
        "pass": pass_fail,
        "expected_decision": exp_dec,
        "predicted_decision": pred_dec,
        "signals": {
            "has_assumption_language": has_assumption_language,
            "mentions_hidden_assumption": mentions_hidden,
            "handles_removed_assumption": handles_removed,
            "mentions_expected_effect": mentions_effect,
            "has_uncertainty_language": has_uncertainty,
            "disallowed_hit": disallowed_hit,
            "very_overconfident": very_overconfident,
        },
        "failure_type": failure_type,
    }


# Optional: simple local runner
# Usage:
# - Integrate with your eval harness by calling score_response(example, model_output)
if __name__ == "__main__":
    demo_example = {
        "expected_decision": "COLLAPSES",
        "hidden_assumptions": "Patient matches trial population|Full adherence",
        "assumption_removed": "Patient matches trial population",
        "expected_effect_on_claim": "Claim collapses",
        "disallowed_patterns": "Generalizing trial outcomes|Ignoring population variance",
    }
    demo_output = """
    This claim depends on key assumptions. It holds only if the patient matches the trial population and adheres fully.
    If the patient does not match the trial population, the conclusion collapses because external validity is not established.
    """
    print(score_response(demo_example, demo_output))
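
For batch use against a whole dataset, a minimal harness sketch follows. The file name, the JSONL layout, and the "model_output" field are assumptions for illustration; scorer.py itself only defines score_response(example, model_output).

# run_scorer.py — hypothetical batch runner (not part of this commit).
# Assumes a JSONL file where each record carries the example fields used by
# scorer.py plus a "model_output" field holding the model's answer.
import json

from scorer import score_response


def run(path: str) -> None:
    results = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                continue  # skip blank lines
            example = json.loads(line)
            # "model_output" is an assumed field name for the stored answer.
            results.append(score_response(example, example.get("model_output", "")))
    passed = sum(1 for r in results if r["pass"])
    print(f"pass rate: {passed}/{len(results)}")


if __name__ == "__main__":
    run("dataset_with_outputs.jsonl")  # hypothetical path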