ClarusC64 committed on
Commit
ccc941d
·
verified ·
1 Parent(s): fc2ffbd

Create scorer.py

Browse files
Files changed (1) hide show
  1. scorer.py +131 -0
scorer.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import re
4
+ from typing import Dict, Tuple
5
+
6
# Closed vocabulary for a prediction's "error_type" field. A predicted type
# must match one of these labels exactly (case-sensitive) to pass the
# format check in score_predictions.
ALLOWED_ERROR_TYPES = {
    "no_error",
    "low_confidence_region_overtrust",
    "interdomain_orientation_overclaim",
    "disorder_as_structure",
    "loop_position_overtrust",
    "complex_negation_from_monomer",
    "interface_overconfidence",
    "cofactor_absence_misread",
    "low_complexity_as_domain",
}
17
+
18
+ def _norm(s: str) -> str:
19
+ return re.sub(r"\s+", " ", (s or "").strip().lower())
20
+
21
+ def _token_set(s: str) -> set:
22
+ s = _norm(s)
23
+ s = re.sub(r"[^a-z0-9\s]", " ", s)
24
+ return {t for t in s.split(" ") if t}
25
+
26
def _jaccard(a: str, b: str) -> float:
    """Token-level Jaccard similarity between two strings.

    Two empty token sets are treated as identical (1.0); exactly one
    empty set yields 0.0.
    """
    left = _token_set(a)
    right = _token_set(b)
    if not left and not right:
        return 1.0
    if left and right:
        return len(left & right) / len(left | right)
    return 0.0
34
+
35
+ def _extract_json(text: str) -> Dict:
36
+ if text is None:
37
+ return {}
38
+ text = text.strip()
39
+ try:
40
+ return json.loads(text)
41
+ except Exception:
42
+ pass
43
+ m = re.search(r"\{.*\}", text, flags=re.DOTALL)
44
+ if not m:
45
+ return {}
46
+ try:
47
+ return json.loads(m.group(0))
48
+ except Exception:
49
+ return {}
50
+
51
def load_refs(test_csv_path: str) -> Dict[str, Tuple[str, str, str]]:
    """Read the gold-answer CSV into {id: (misinterpretation, error_type, correction)}.

    The gold misinterpretation is lowercased and whitespace-normalized;
    the error type and correction are merely stripped.
    """
    refs: Dict[str, Tuple[str, str, str]] = {}
    with open(test_csv_path, newline="", encoding="utf-8") as handle:
        for row in csv.DictReader(handle):
            # Normalization inlined from _norm: lowercase, strip, collapse whitespace.
            gold_mis = re.sub(r"\s+", " ", (row["gold_misinterpretation"] or "").strip().lower())
            refs[row["id"]] = (
                gold_mis,
                row["gold_error_type"].strip(),
                row["gold_correction"].strip(),
            )
    return refs
62
+
63
def _parse_prediction(raw: str) -> Tuple[str, str, str]:
    """Extract (misinterpretation, error_type, correction) from a raw model reply.

    Robust to malformed replies: a non-dict JSON payload (e.g. a bare
    array) is treated as empty, and non-string field values are coerced
    with str() instead of crashing on .strip()/.lower().
    """
    parsed = _extract_json(raw)
    if not isinstance(parsed, dict):
        parsed = {}
    pred_mis = _norm(str(parsed.get("misinterpretation") or ""))
    pred_type = str(parsed.get("error_type") or "").strip()
    pred_corr = str(parsed.get("correction") or "").strip()
    return pred_mis, pred_type, pred_corr


def _format_ok(pred_mis: str, pred_type: str, pred_corr: str) -> bool:
    """Format gate: all three fields present, type in the allowed vocabulary,
    and the yes/no flag consistent with whether an error type was named."""
    has_keys = pred_mis in {"yes", "no"} and pred_type != "" and pred_corr != ""
    # Given has_keys, this equals: (no -> no_error) and (yes -> real error).
    consistent = (pred_mis == "no") == (pred_type == "no_error")
    return has_keys and pred_type in ALLOWED_ERROR_TYPES and consistent


def _empty_result() -> Dict[str, float]:
    """All-zero metrics, returned when no prediction id matches the references."""
    return {
        "final_score": 0.0,
        "misinterpretation_accuracy": 0.0,
        "error_type_accuracy": 0.0,
        "correction_similarity": 0.0,
        "format_pass_rate": 0.0,
        "n_scored": 0.0,
    }


def score_predictions(predictions_path: str, test_csv_path: str) -> Dict[str, float]:
    """Score a JSONL predictions file against the gold CSV.

    Each line of *predictions_path* is a JSON object with "id" and
    "prediction" (the raw model output, which may embed a JSON object).
    Predictions whose id is absent from the reference CSV are skipped.

    Returns a metrics dict whose "final_score" is the weighted sum
    0.4 * misinterpretation accuracy + 0.3 * error-type accuracy
    + 0.2 * correction Jaccard similarity + 0.1 * format pass rate.
    """
    refs = load_refs(test_csv_path)

    with open(predictions_path, encoding="utf-8") as f:
        preds = [json.loads(line) for line in f if line.strip()]

    n = 0
    mis_hits = 0
    type_hits = 0
    corr_sim_sum = 0.0
    format_hits = 0

    for item in preds:
        ex_id = item.get("id", "")
        if ex_id not in refs:
            continue
        n += 1

        pred_mis, pred_type, pred_corr = _parse_prediction(item.get("prediction", ""))
        gold_mis, gold_type, gold_corr = refs[ex_id]

        mis_hits += int(pred_mis == gold_mis)
        type_hits += int(pred_type == gold_type)
        corr_sim_sum += _jaccard(pred_corr, gold_corr)
        format_hits += int(_format_ok(pred_mis, pred_type, pred_corr))

    if n == 0:
        return _empty_result()

    mis_acc = mis_hits / n
    type_acc = type_hits / n
    corr_sim = corr_sim_sum / n
    fmt = format_hits / n

    return {
        "final_score": float(0.4 * mis_acc + 0.3 * type_acc + 0.2 * corr_sim + 0.1 * fmt),
        "misinterpretation_accuracy": float(mis_acc),
        "error_type_accuracy": float(type_acc),
        "correction_similarity": float(corr_sim),
        "format_pass_rate": float(fmt),
        "n_scored": float(n),
    }
124
+
125
if __name__ == "__main__":
    # CLI entry point: score a predictions file against the gold CSV and
    # print the metrics as pretty-printed JSON.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--predictions", required=True)
    parser.add_argument("--test_csv", required=True)
    opts = parser.parse_args()
    report = score_predictions(opts.predictions, opts.test_csv)
    print(json.dumps(report, indent=2))