import csv
import json
import re
from typing import Dict, Tuple


# Accepted values for the predicted "uncertainty_type" field.
ALLOWED_TYPES = {
    "low_uncertainty",
    "moderate_confidence",
    "confidence_borderline",
    "confidence_low",
    "disorder_high",
    "model_disagreement",
    "high_disorder",
}


def norm(s) -> str:
    """Lowercase, trim, and collapse runs of whitespace to single spaces.

    Tolerates None and non-string values (e.g. JSON booleans) by coercing.
    """
    s = "" if s is None else str(s)
    return re.sub(r"\s+", " ", s.strip().lower())


def token_set(s) -> set:
    """Split a normalized string into a set of alphanumeric tokens."""
    s = re.sub(r"[^a-z0-9\s]", " ", norm(s))
    return {t for t in s.split() if t}


def jaccard(a, b) -> float:
    """Token-level Jaccard similarity; 0.0 when either side is empty."""
    ta, tb = token_set(a), token_set(b)
    if not ta or not tb:
        return 0.0
    return len(ta & tb) / len(ta | tb)
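
# Worked example (hypothetical strings, not drawn from the dataset):
# jaccard("flag for review", "flag result for manual review") compares
# {"flag", "for", "review"} with {"flag", "result", "for", "manual", "review"}:
# 3 shared tokens over 5 distinct tokens, i.e. 0.6.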


def load_refs(test_csv: str) -> Dict[str, Tuple[str, str, str]]:
    """Load gold (flag, type, recommendation) tuples keyed by example id."""
    refs = {}
    with open(test_csv, encoding="utf-8") as f:
        for row in csv.DictReader(f):
            refs[row["id"]] = (
                norm(row["gold_uncertainty_flag"]),
                # Normalized so the type comparison matches the
                # case-insensitive handling of the flag.
                norm(row["gold_uncertainty_type"]),
                row["gold_recommendation"],
            )
    return refs
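
# The test CSV is expected to contain at least the four columns read above;
# an illustrative (not real) row:
#   id,gold_uncertainty_flag,gold_uncertainty_type,gold_recommendation
#   ex001,yes,confidence_borderline,flag for manual review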


def extract_json(text) -> dict:
    """Parse a JSON object from model output, with a brace-matching fallback."""
    try:
        return json.loads(text)
    except (json.JSONDecodeError, TypeError):
        # Models often wrap the JSON in prose; grab the outermost {...} span.
        # re.DOTALL lets the match cross line breaks in pretty-printed output.
        m = re.search(r"\{.*\}", text or "", re.DOTALL)
        if m:
            try:
                return json.loads(m.group(0))
            except json.JSONDecodeError:
                return {}
        return {}
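
# Example (hypothetical model output):
#   extract_json('Sure! {"uncertainty_flag": "yes"} Hope that helps.')
# returns {"uncertainty_flag": "yes"}; unparseable output yields {} and
# simply scores zero on every field downstream.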


def score(pred_path: str, test_csv: str) -> dict:
    """Score a JSONL predictions file against the gold test CSV."""
    refs = load_refs(test_csv)

    n = 0
    flag_hits = 0
    type_hits = 0
    rec_sim = 0.0
    fmt_hits = 0

    with open(pred_path, encoding="utf-8") as f:
        preds = [json.loads(x) for x in f if x.strip()]
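
    # Each non-empty line is one JSON object; a line might look like
    # (hypothetical id and payload):
    #   {"id": "ex001", "prediction": "{\"uncertainty_flag\": \"yes\"}"}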

    for p in preds:
        pid = p["id"]
        if pid not in refs:
            continue

        n += 1
        parsed = extract_json(p["prediction"])

        pred_flag = norm(parsed.get("uncertainty_flag"))
        pred_type = norm(parsed.get("uncertainty_type", ""))
        pred_rec = parsed.get("recommendation", "")

        gold_flag, gold_type, gold_rec = refs[pid]

        if pred_flag == gold_flag:
            flag_hits += 1

        if pred_type == gold_type:
            type_hits += 1

        rec_sim += jaccard(pred_rec, gold_rec)

        # Format check: a parseable yes/no flag plus a type from the
        # allowed vocabulary.
        if pred_flag in {"yes", "no"} and pred_type in ALLOWED_TYPES:
            fmt_hits += 1

    if n == 0:
        return {"final_score": 0.0, "n": 0}

    acc_flag = flag_hits / n
    acc_type = type_hits / n
    sim_rec = rec_sim / n
    fmt = fmt_hits / n

    # Weighted composite: flag accuracy carries the most weight, followed by
    # type accuracy, recommendation similarity, and format compliance.
    final = 0.4 * acc_flag + 0.3 * acc_type + 0.2 * sim_rec + 0.1 * fmt
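    # For example (made-up numbers): acc_flag=0.9, acc_type=0.7, sim_rec=0.5,
    # fmt=1.0 gives 0.4*0.9 + 0.3*0.7 + 0.2*0.5 + 0.1*1.0 = 0.77.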

    return {
        "final_score": final,
        "uncertainty_flag_accuracy": acc_flag,
        "uncertainty_type_accuracy": acc_type,
        "recommendation_similarity": sim_rec,
        "format_pass_rate": fmt,
        "n": n,
    }
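

# Typical invocation (file names are illustrative):
#   python score.py --predictions preds.jsonl --test_csv test.csv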
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Score uncertainty predictions.")
    parser.add_argument("--predictions", required=True, help="Predictions JSONL path")
    parser.add_argument("--test_csv", required=True, help="Gold test CSV path")
    args = parser.parse_args()
    print(score(args.predictions, args.test_csv))