| import re |
| from dataclasses import dataclass |
| from typing import Dict, Any, List |
|
|
# Closed set of verdict labels a prediction is expected to contain.
# "consistent" is the positive verdict; every other label names a
# specific category of world-model inconsistency.
LABELS = {
    "consistent",
    "inconsistent-duplication",
    "inconsistent-impossible-state",
    "inconsistent-missing-object",
    "inconsistent-stale-entity",
    "inconsistent-teleport",
    "inconsistent-wrong-identity",
}
|
|
@dataclass
class ScoreResult:
    """Outcome of scoring a single prediction.

    Carries the numeric score together with a per-criterion breakdown
    so callers can inspect why a prediction scored the way it did.
    """

    # Aggregate score for one prediction, clamped to [0.0, 1.0].
    score: float
    # Per-criterion flags plus echoed sample metadata (see score()).
    details: Dict[str, Any]
|
|
| def _has(t: str, pats: List[str]) -> bool: |
| t = (t or "").lower() |
| return any(re.search(p, t) for p in pats) |
|
|
def score(sample: Dict[str, Any], prediction: str) -> ScoreResult:
    """Score a free-text prediction against four weighted criteria.

    Criteria (weights sum to 1.0):
      - words_ok (0.25): prediction is at most 260 whitespace-separated words.
      - label_ok (0.40): prediction mentions at least one label from LABELS.
      - compare_ref (0.20): prediction references model/observation concepts.
      - inconsistency_ref (0.15): prediction references inconsistency concepts.

    Args:
        sample: the evaluated sample; "consistency_pressure" and "scenario"
            are echoed into the result details when present.
        prediction: model output text; None is treated as "".

    Returns:
        ScoreResult with the clamped [0.0, 1.0] score and a per-criterion
        breakdown in ``details``.
    """
    pred = (prediction or "").strip()
    # Fix: lowercase once and match labels against it, so the label check is
    # case-insensitive like the _has() regex checks (which lowercase their
    # input). Previously "Consistent" failed label_ok while still passing the
    # regex criteria.
    pred_lower = pred.lower()

    words_ok = len(pred.split()) <= 260

    # NOTE(review): substring matching is deliberately lenient — e.g.
    # "consistent" also occurs inside every "inconsistent-*" label, so any
    # label mention at all earns label_ok.
    label_ok = 1 if any(lbl in pred_lower for lbl in LABELS) else 0
    compare_ref = 1 if _has(pred, [r"model", r"world model", r"observed", r"perception", r"memory"]) else 0
    inconsistency_ref = 1 if _has(pred, [r"inconsist", r"dup", r"teleport", r"stale", r"missing", r"contradic", r"swap"]) else 0

    raw = (
        0.25 * int(words_ok)
        + 0.40 * label_ok
        + 0.20 * compare_ref
        + 0.15 * inconsistency_ref
    )
    # Weights already sum to 1.0; the clamp is a defensive guard.
    final = max(0.0, min(1.0, raw))

    return ScoreResult(
        score=final,
        details={
            "words_ok": words_ok,
            "label_ok": label_ok,
            "compare_ref": compare_ref,
            "inconsistency_ref": inconsistency_ref,
            "consistency_pressure": sample.get("consistency_pressure"),
            "scenario": sample.get("scenario"),
        },
    )
|
|
def aggregate(results: List[ScoreResult]) -> Dict[str, Any]:
    """Reduce per-sample results to a summary dict.

    Returns ``{"mean": <average score>, "n": <count>}``; an empty input
    yields ``{"mean": 0.0, "n": 0}`` rather than dividing by zero.
    """
    if not results:
        return {"mean": 0.0, "n": 0}
    count = len(results)
    total = sum(item.score for item in results)
    return {"mean": total / count, "n": count}
|
|