ClarusC64 commited on
Commit
0bf3f02
·
verified ·
1 Parent(s): 3a960e3

Create scorer.py

Browse files
Files changed (1) hide show
  1. scorer.py +73 -0
scorer.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import re
4
+ from dataclasses import dataclass
5
+ from typing import Dict, List, Tuple, Optional
6
+
7
+
8
@dataclass
class ScoredItem:
    """One scored prediction for a single gold sample."""

    sample_id: str   # identifier joining the gold CSV row to its prediction
    gold: str        # gold label from the CSV, normalized to upper case
    pred: str        # parsed model choice ("A"/"B"), "" when parsing failed
    is_correct: int  # 1 if pred equals gold, else 0
    parsed_ok: int   # 1 if a choice could be extracted from the model output
15
+
16
+
17
# First standalone A/B token anywhere in the text.
# NOTE(review): re.IGNORECASE makes this also match the English article
# "a" as a word (e.g. "it is a tie, but B" matches "a" first) — confirm
# this is intended for free-text model outputs.
CHOICE_RE = re.compile(r"\b([AB])\b", re.IGNORECASE)
18
+
19
def parse_choice(model_output: str) -> Tuple[Optional[str], int]:
    """Extract a binary choice ("A" or "B") from raw model output.

    Tries, in order: a JSON object with a known answer key, an explicitly
    labelled answer ("Answer: B", "option a", ...), a standalone letter,
    and finally the first character of the text.

    Args:
        model_output: Raw text produced by the model; may be None.

    Returns:
        (choice, parsed_ok) where choice is "A"/"B" (or None when nothing
        could be extracted) and parsed_ok is 1 on success, else 0.
    """
    if model_output is None:
        return None, 0

    text = model_output.strip()

    # 1) JSON object form, e.g. {"choice": "A"}.
    if text.startswith("{") and text.endswith("}"):
        try:
            obj = json.loads(text)
        except ValueError:  # json.JSONDecodeError subclasses ValueError
            obj = None
        if isinstance(obj, dict):
            for key in ("choice", "answer", "selected", "option"):
                value = obj.get(key)
                if isinstance(value, str):
                    candidate = value.strip().upper()
                    if candidate in ("A", "B"):
                        return candidate, 1

    # 2) Explicitly labelled answer: "Answer: B", "the option is a", ...
    m = re.search(
        r"\b(?:answer|choice|option|selected)\b(?:\s+is)?\s*[:=\-]*\s*\(?([AB])\b",
        text,
        re.IGNORECASE,
    )
    if m:
        return m.group(1).upper(), 1

    # 3) Standalone letter. BUG FIX: the original case-insensitive
    # \b([AB])\b also matched the English article "a" as a word (e.g.
    # "it is a close call, but B" parsed as "A"). Lowercase "a" is
    # therefore excluded here; lowercase "b" is still accepted because
    # it is not an English word.
    m = re.search(r"\b(A|[Bb])\b", text)
    if m:
        return m.group(1).upper(), 1

    # 4) Fall back to the first character ("a." / "B)" style replies).
    if text and text[0].upper() in ("A", "B"):
        return text[0].upper(), 1

    return None, 0
44
+
45
+
46
def score_row(row: Dict[str, str], model_output: str) -> ScoredItem:
    """Score one gold CSV row against the model's raw output.

    Args:
        row: Gold record with at least "sample_id" and "correct_option".
        model_output: Raw model text for this sample.

    Returns:
        A ScoredItem recording the parsed choice and correctness flags.
    """
    gold_label = row["correct_option"].strip().upper()
    parsed_choice, parsed_ok = parse_choice(model_output)
    return ScoredItem(
        sample_id=row["sample_id"],
        gold=gold_label,
        pred=parsed_choice or "",
        is_correct=int(parsed_choice == gold_label),
        parsed_ok=parsed_ok,
    )
52
+
53
+
54
def score_file(gold_csv_path: str, predictions: Dict[str, str]) -> Dict[str, float]:
    """Compute accuracy and parse-rate metrics over a gold CSV.

    Args:
        gold_csv_path: Path to a CSV with "sample_id" and "correct_option"
            columns.
        predictions: Mapping of sample_id -> raw model output; samples with
            no entry are scored as unparsed/incorrect.

    Returns:
        {"accuracy": ..., "parse_rate": ..., "n": ...}; all zeros when the
        CSV contains no data rows.
    """
    with open(gold_csv_path, "r", newline="", encoding="utf-8") as handle:
        items: List[ScoredItem] = [
            score_row(row, predictions.get(row["sample_id"], ""))
            for row in csv.DictReader(handle)
        ]

    total = len(items)
    if total == 0:
        return {"accuracy": 0.0, "parse_rate": 0.0, "n": 0}

    correct = sum(item.is_correct for item in items)
    parsed = sum(item.parsed_ok for item in items)
    return {"accuracy": correct / total, "parse_rate": parsed / total, "n": total}
69
+
70
+
71
if __name__ == "__main__":
    # Manual smoke run: one bare-letter prediction, one labelled one.
    sample_predictions = {"CVSC-0001": "A", "CVSC-0002": "Answer: A"}
    metrics = score_file("data/train.csv", sample_predictions)
    print(metrics)