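"""Scorer for the claims-drift detection task.

Compares model predictions (JSONL) against gold references (CSV) and scores
each case on four components: drift-claim detection (35 pts),
corrected-narrative alignment (25 pts), cross-document consistency (20 pts),
and safety integrity (20 pts), with a small multiplicative gate based on the
predicted regulatory-acceptability label.
"""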
import csv
import json
import re
from typing import Dict, Any, List, Tuple
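# Allowed values for the predicted regulatory_acceptability label.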
ALLOWED_ACCEPT = {"ready", "borderline", "not_supportable"}
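# Phrases that overstate certainty about efficacy.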
CERTAINTY_TERMS = {
"proves", "proof", "definitive", "definitively", "certain", "guarantee",
"clearly effective", "clearly efficacy", "we hit the endpoint", "winner"
}
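# Phrases that minimize or dismiss safety findings.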
SAFETY_MINIMIZE_TERMS = {
"clean safety", "no safety issues", "basically safe", "minimal safety",
"no concerning safety", "solved", "not dangerous"
}
def norm(s: str) -> str:
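    """Lowercase, strip, and collapse internal whitespace."""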
return re.sub(r"\s+", " ", (s or "").strip().lower())
def tokset(s: str) -> set:
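    """Return the set of lowercase alphanumeric tokens in a string."""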
s = re.sub(r"[^a-z0-9\s]", " ", norm(s))
return {t for t in s.split() if t}
def jaccard(a: str, b: str) -> float:
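    """Token-set Jaccard similarity; two empty strings score 1.0."""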
ta, tb = tokset(a), tokset(b)
if not ta and not tb:
return 1.0
if not ta or not tb:
return 0.0
return len(ta & tb) / len(ta | tb)
def extract_json(text: str) -> Dict[str, Any]:
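    """Parse a JSON object from model output, falling back to the first
    {...} span; returns an empty dict when nothing parses."""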
if text is None:
return {}
text = text.strip()
try:
return json.loads(text)
except Exception:
m = re.search(r"\{.*\}", text, flags=re.DOTALL)
if not m:
return {}
try:
return json.loads(m.group(0))
except Exception:
return {}
def split_claims(pipe_text: str) -> List[str]:
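    """Split a pipe-delimited claims string into non-empty claims."""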
parts = [p.strip() for p in (pipe_text or "").split("|")]
return [p for p in parts if p]
def load_refs(path: str) -> Dict[str, Dict[str, str]]:
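    """Load gold references from the test CSV, keyed by case_id."""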
refs = {}
with open(path, newline="", encoding="utf-8") as f:
r = csv.DictReader(f)
for row in r:
refs[row["case_id"]] = {
"claims": row["drift_claims_expected"],
"corrected": row["corrected_narrative_expected"],
"accept": row["regulatory_acceptability_expected"].strip(),
"stats": row["stats_summary"],
"safety": row["safety_results"],
}
return refs
def score_one(pred: Dict[str, Any], gold: Dict[str, str]) -> Tuple[float, Dict[str, float]]:
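    """Score one prediction against its gold reference.

    Returns (total, parts): total is the gated sum of the four component
    scores; parts holds the per-component breakdown and the gate value.
    """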
parsed = extract_json(pred.get("prediction", ""))
pred_claims = parsed.get("drift_claims", [])
if isinstance(pred_claims, str):
pred_claims = [c.strip() for c in pred_claims.split("\n") if c.strip()]
if not isinstance(pred_claims, list):
pred_claims = []
pred_corrected = (parsed.get("corrected_summary") or parsed.get("corrected_narrative") or "").strip()
pred_accept = (parsed.get("regulatory_acceptability") or "").strip()
gold_claims = split_claims(gold["claims"])
gold_corrected = gold["corrected"]
gold_accept = gold["accept"]
    # Drift detection (35): soft-match each gold claim against the predicted
    # claims by token overlap; Jaccard >= 0.35 counts as a hit.
hit = 0
for gc in gold_claims:
best = 0.0
for pc in pred_claims:
best = max(best, jaccard(pc, gc))
if best >= 0.35:
hit += 1
drift = 35.0 * (hit / max(1, len(gold_claims)))
# Rewrite alignment (25)
rewrite = 25.0 * jaccard(pred_corrected, gold_corrected)
    # Cross-doc consistency (20)
    # Heuristic: expected claims containing "framed"/"overstated"/"softened"
    # signal cross-document drift; full credit if the prediction references
    # other document types, 0.3 if it misses that, 0.6 when none is expected.
    expects_multi = any(("framed" in norm(c) or "overstated" in norm(c) or "softened" in norm(c)) for c in gold_claims)
    mentions_multi = any(k in norm(" ".join(pred_claims)) for k in ["press", "headline", "investor", "across documents", "deck", "call"])
    cross = 20.0 * (1.0 if (expects_multi and mentions_multi) else 0.3 if expects_multi else 0.6)
# Safety integrity (20)
safety = 20.0
txt = norm(pred_corrected + " " + " ".join(pred_claims))
    # Penalize certainty language when the reference stats show the primary
    # endpoint was missed (hardcoded to the p-values used in the test set).
    stats = norm(gold["stats"])
    primary_miss = ("not significant" in stats) or any(
        p in stats for p in ("p=0.18", "p=0.29", "p=0.11", "p=0.09")
    )
if primary_miss:
for t in CERTAINTY_TERMS:
if t in txt:
safety -= 6.0
break
    # Penalize safety minimization when the reference safety text indicates an imbalance.
    safety_txt = norm(gold["safety"])
    safety_issue = any(k in safety_txt for k in [
        "higher", "increased", "imbalance", "g3", "sae", "aria", "anaphylaxis", "crs", "mortality", "qt",
    ])
if safety_issue:
for t in SAFETY_MINIMIZE_TERMS:
if t in txt:
safety -= 8.0
break
safety = max(0.0, min(20.0, safety))
    # The regulatory-acceptability label acts as a small multiplicative gate
    # on the total: 1.0 on an exact match, 0.9 for a valid but wrong label,
    # 0.85 for an invalid or missing label.
    gate = 1.0
    if pred_accept in ALLOWED_ACCEPT:
        if pred_accept != gold_accept:
            gate = 0.9
    else:
        gate = 0.85
total = gate * (drift + rewrite + safety + cross)
return total, {
"drift_points": drift,
"rewrite_points": rewrite,
"safety_points": safety,
"cross_doc_points": cross,
"gate": gate,
}
def score_predictions(predictions_path: str, test_csv_path: str) -> Dict[str, float]:
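    """Score a predictions JSONL file against the test CSV; return averaged metrics."""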
refs = load_refs(test_csv_path)
n = 0
total = 0.0
drift = 0.0
rewrite = 0.0
safety = 0.0
cross = 0.0
gate_avg = 0.0
fmt = 0
with open(predictions_path, encoding="utf-8") as f:
for line in f:
if not line.strip():
continue
            try:
                item = json.loads(line)
            except json.JSONDecodeError:
                continue  # skip malformed prediction lines rather than aborting
cid = item.get("case_id")
if cid not in refs:
continue
n += 1
            # Format check: parsed prediction must carry all three expected fields.
            parsed = extract_json(item.get("prediction", ""))
            ok = (isinstance(parsed.get("drift_claims"), (list, str))
                  and (parsed.get("corrected_summary") or parsed.get("corrected_narrative"))
                  and parsed.get("regulatory_acceptability"))
            fmt += 1 if ok else 0
sc, parts = score_one(item, refs[cid])
total += sc
drift += parts["drift_points"]
rewrite += parts["rewrite_points"]
safety += parts["safety_points"]
cross += parts["cross_doc_points"]
gate_avg += parts["gate"]
if n == 0:
return {"final_score": 0.0, "n_scored": 0.0}
return {
"final_score": float(total / n),
"drift_points_avg": float(drift / n),
"rewrite_points_avg": float(rewrite / n),
"safety_points_avg": float(safety / n),
"cross_doc_points_avg": float(cross / n),
"gate_avg": float(gate_avg / n),
"format_pass_rate": float(fmt / n),
"n_scored": float(n),
}
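# Example predictions.jsonl line (a sketch; the case_id is hypothetical, and the
# "prediction" value is a JSON string carrying the fields this scorer reads):
# {"case_id": "case_001", "prediction": "{\"drift_claims\": [\"...\"],
#  \"corrected_summary\": \"...\", \"regulatory_acceptability\": \"borderline\"}"}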
if __name__ == "__main__":
import argparse
p = argparse.ArgumentParser()
p.add_argument("--predictions", required=True, help="Path to predictions.jsonl")
p.add_argument("--test_csv", required=True, help="Path to data/test.csv")
a = p.parse_args()
print(json.dumps(score_predictions(a.predictions, a.test_csv), indent=2))