# scorer.py
# Provenance: uploaded by ClarusC64 ("Create scorer.py", commit ccc941d, verified).
import csv
import json
import re
from typing import Dict, Tuple
# Closed vocabulary of error_type labels a prediction may emit; a predicted
# error_type outside this set fails the format check in score_predictions.
ALLOWED_ERROR_TYPES = {
"no_error",
"low_confidence_region_overtrust",
"interdomain_orientation_overclaim",
"disorder_as_structure",
"loop_position_overtrust",
"complex_negation_from_monomer",
"interface_overconfidence",
"cofactor_absence_misread",
"low_complexity_as_domain",
}
def _norm(s: str) -> str:
return re.sub(r"\s+", " ", (s or "").strip().lower())
def _token_set(s: str) -> set:
    """Return the set of lowercase alphanumeric tokens in *s*.

    Punctuation and other non-alphanumeric characters act as token
    separators; empty tokens are dropped.
    """
    cleaned = re.sub(r"[^a-z0-9\s]", " ", _norm(s))
    return {tok for tok in cleaned.split(" ") if tok}
def _jaccard(a: str, b: str) -> float:
    """Jaccard similarity between the token sets of two strings.

    Two empty token sets are considered identical (1.0); exactly one
    empty set scores 0.0.
    """
    left, right = _token_set(a), _token_set(b)
    if not left and not right:
        return 1.0
    if not left or not right:
        return 0.0
    overlap = left & right
    union = left | right
    return len(overlap) / len(union)
def _extract_json(text: str) -> Dict:
if text is None:
return {}
text = text.strip()
try:
return json.loads(text)
except Exception:
pass
m = re.search(r"\{.*\}", text, flags=re.DOTALL)
if not m:
return {}
try:
return json.loads(m.group(0))
except Exception:
return {}
def load_refs(test_csv_path: str) -> Dict[str, Tuple[str, str, str]]:
    """Load the gold-answer CSV keyed by example id.

    Returns a mapping ``id -> (normalized misinterpretation, error type,
    correction)``; the misinterpretation is whitespace/case-normalized via
    ``_norm``, the other two fields are only stripped.
    """
    refs: Dict[str, Tuple[str, str, str]] = {}
    with open(test_csv_path, newline="", encoding="utf-8") as handle:
        for row in csv.DictReader(handle):
            gold = (
                _norm(row["gold_misinterpretation"]),
                row["gold_error_type"].strip(),
                row["gold_correction"].strip(),
            )
            refs[row["id"]] = gold
    return refs
def score_predictions(predictions_path: str, test_csv_path: str) -> Dict[str, float]:
    """Score a JSONL predictions file against the gold CSV.

    Each prediction line is a JSON object with ``id`` and ``prediction``
    (raw model text). The raw text is parsed for the keys
    ``misinterpretation`` / ``error_type`` / ``correction`` and compared to
    the gold reference. Lines whose id is absent from the gold CSV are
    skipped. Returns the component metrics plus a weighted final score
    (0.4 misinterpretation + 0.3 type + 0.2 correction Jaccard + 0.1 format).
    """
    refs = load_refs(test_csv_path)
    scored = 0
    mis_correct = 0
    type_correct = 0
    corr_total = 0.0
    fmt_ok = 0
    with open(predictions_path, encoding="utf-8") as handle:
        records = [json.loads(line) for line in handle if line.strip()]
    for rec in records:
        ex_id = rec.get("id", "")
        if ex_id not in refs:
            continue
        scored += 1
        parsed = _extract_json(rec.get("prediction", ""))
        pred_mis = _norm(parsed.get("misinterpretation", ""))
        pred_type = (parsed.get("error_type") or "").strip()
        pred_corr = (parsed.get("correction") or "").strip()
        gold_mis, gold_type, gold_corr = refs[ex_id]
        if pred_mis == gold_mis:
            mis_correct += 1
        if pred_type == gold_type:
            type_correct += 1
        corr_total += _jaccard(pred_corr, gold_corr)
        # Format gate: all three keys present, type from the allowed
        # vocabulary, and the yes/no flag coherent with the error type.
        complete = pred_mis in {"yes", "no"} and pred_type != "" and pred_corr != ""
        allowed = pred_type in ALLOWED_ERROR_TYPES
        coherent = (pred_mis == "no" and pred_type == "no_error") or (
            pred_mis == "yes" and pred_type != "no_error"
        )
        if complete and allowed and coherent:
            fmt_ok += 1
    if scored == 0:
        return {
            "final_score": 0.0,
            "misinterpretation_accuracy": 0.0,
            "error_type_accuracy": 0.0,
            "correction_similarity": 0.0,
            "format_pass_rate": 0.0,
            "n_scored": 0.0,
        }
    mis_acc = mis_correct / scored
    type_acc = type_correct / scored
    corr_sim = corr_total / scored
    fmt_rate = fmt_ok / scored
    weighted = 0.4 * mis_acc + 0.3 * type_acc + 0.2 * corr_sim + 0.1 * fmt_rate
    return {
        "final_score": float(weighted),
        "misinterpretation_accuracy": float(mis_acc),
        "error_type_accuracy": float(type_acc),
        "correction_similarity": float(corr_sim),
        "format_pass_rate": float(fmt_rate),
        "n_scored": float(scored),
    }
if __name__ == "__main__":
    # CLI entry point: score a predictions file and print the metrics dict.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--predictions", required=True)
    parser.add_argument("--test_csv", required=True)
    cli = parser.parse_args()
    metrics = score_predictions(cli.predictions, cli.test_csv)
    print(json.dumps(metrics, indent=2))