# scorer.py — A/B answer scorer for ClarusC64/description-granularity-consistency-v0.1
# (originally committed as 341b9cb)
import csv
import json
import re
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Any
@dataclass
class ScoredItem:
    """One scored sample: gold vs. predicted choice plus bookkeeping flags."""

    sample_id: str   # unique id of the gold sample
    gold: str        # gold answer, "A" or "B"
    pred: str        # parsed model choice, "" when nothing could be parsed
    is_correct: int  # 1 when pred equals gold, else 0
    parsed_ok: int   # 1 when a choice was recovered from the raw output
# Standalone "A"/"B" token anywhere in the text.
# NOTE(review): IGNORECASE means the English article "a" also matches — a
# known source of false positives on free-form prose outputs.
CHOICE_RE = re.compile(r"\b([AB])\b", re.IGNORECASE)
# First (non-greedy) {...} span; used to probe for an embedded JSON object.
JSON_OBJ_RE = re.compile(r"\{.*?\}", re.DOTALL)


def parse_choice(model_output: Optional[str]) -> Tuple[Optional[str], int]:
    """Extract an A/B choice from raw model output.

    For ClarusC64/description-granularity-consistency-v0.1. Accepted forms:
      - bare letter:       "A"
      - prefixed:          "Answer: B"
      - embedded in prose: "I choose A because..."
      - JSON anywhere:     {"choice":"A"} / {"answer":"B"} / {"selected":"A"}
                           / {"option":"B"}

    Returns:
        (choice, parsed_ok): choice is "A"/"B" or None; parsed_ok is 1 when
        a choice was recovered, else 0.
    """
    if model_output is None:
        return None, 0
    stripped = str(model_output).strip()
    if not stripped:
        return None, 0

    # 1) Prefer an embedded JSON object carrying a known answer key.
    json_match = JSON_OBJ_RE.search(stripped)
    if json_match:
        try:
            payload = json.loads(json_match.group(0))
        except Exception:
            payload = None  # malformed JSON: fall through to text heuristics
        if isinstance(payload, dict):
            for key in ("choice", "answer", "selected", "option"):
                value = payload.get(key)
                if isinstance(value, str):
                    candidate = value.strip().upper()
                    if candidate in ("A", "B"):
                        return candidate, 1

    # 2) Fall back to the first standalone A/B token in the text.
    token = CHOICE_RE.search(stripped)
    if token:
        return token.group(1).upper(), 1

    # 3) Last resort: the very first character of the output.
    first = stripped[0].upper()
    if first in ("A", "B"):
        return first, 1
    return None, 0
def score_file(gold_csv_path: str, predictions: Dict[str, str]) -> Dict[str, Any]:
    """Score A/B predictions against a gold CSV.

    Args:
        gold_csv_path: path to the gold file (typically "data/train.csv");
            must contain 'sample_id' and 'correct_option' columns.
        predictions: {sample_id: raw model output string}. Gold ids absent
            from this mapping are counted in 'missing_predictions' and are
            scored as unparsed/incorrect.

    Returns:
        Dict with:
          - "accuracy" (float): fraction of rows whose parsed choice equals
            the gold option,
          - "parse_rate" (float): fraction of outputs a choice was parsed from,
          - "n" (int): number of gold rows scored,
          - "missing_predictions" (int): gold ids with no prediction.
        (The annotation is Dict[str, Any] because the counts are ints, not
        floats.)

    Raises:
        KeyError: if a required column is missing from the CSV header.
        ValueError: on an empty sample_id or a correct_option outside {A, B}.
    """
    scored: List[ScoredItem] = []
    missing_predictions = 0
    with open(gold_csv_path, "r", newline="", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        # A completely empty file has no header: treat as zero samples.
        if not reader.fieldnames:
            return {"accuracy": 0.0, "parse_rate": 0.0, "n": 0, "missing_predictions": 0}
        if "sample_id" not in reader.fieldnames:
            raise KeyError("CSV missing required column: sample_id")
        if "correct_option" not in reader.fieldnames:
            raise KeyError("CSV missing required column: correct_option")
        for row in reader:
            sid = (row.get("sample_id") or "").strip()
            gold = (row.get("correct_option") or "").strip().upper()
            if not sid:
                raise ValueError("Empty sample_id encountered")
            if gold not in ("A", "B"):
                raise ValueError(f"Invalid correct_option for {sid}: {gold!r}")
            if sid not in predictions:
                missing_predictions += 1
            # A missing prediction parses as "" -> (None, 0): unparsed, incorrect.
            choice, ok = parse_choice(predictions.get(sid, ""))
            pred = choice or ""
            is_correct = 1 if choice == gold else 0
            scored.append(ScoredItem(sid, gold, pred, is_correct, ok))
    n = len(scored)
    if n == 0:
        return {"accuracy": 0.0, "parse_rate": 0.0, "n": 0, "missing_predictions": 0}
    return {
        "accuracy": sum(s.is_correct for s in scored) / n,
        "parse_rate": sum(s.parsed_ok for s in scored) / n,
        "n": n,
        "missing_predictions": missing_predictions,
    }
def load_predictions_csv(pred_csv_path: str) -> Dict[str, str]:
    """Load {sample_id: output} pairs from a predictions CSV.

    The file must provide 'sample_id' and 'output' columns. Rows with a
    blank sample_id are skipped; a duplicate id overwrites earlier rows.
    An empty file (no header) yields an empty dict.

    Raises:
        KeyError: if either required column is absent from the header.
    """
    loaded: Dict[str, str] = {}
    with open(pred_csv_path, "r", newline="", encoding="utf-8") as handle:
        rows = csv.DictReader(handle)
        header = rows.fieldnames
        if not header:
            return loaded
        if "sample_id" not in header or "output" not in header:
            raise KeyError("Predictions CSV must include columns: sample_id, output")
        for record in rows:
            key = (record.get("sample_id") or "").strip()
            if key:
                loaded[key] = record.get("output") or ""
    return loaded
if __name__ == "__main__":
    # Quick smoke test using this dataset's CDGC-style sample ids.
    demo_predictions = {
        "CDGC-0001": "A",
        "CDGC-0002": "Answer: A",
        "CDGC-0003": '{"choice":"B"}',
    }
    print(score_file("data/train.csv", demo_predictions))
    # Alternative:
    # preds = load_predictions_csv("preds.csv")
    # print(score_file("data/train.csv", preds))