| import re |
| from dataclasses import dataclass |
| from typing import Dict, Any, List |
|
|
| LABELS = { |
| "compliant", |
| "violation-collision", |
| "violation-joint-limit", |
| "violation-torque", |
| "violation-speed", |
| "violation-clearance", |
| "violation-singularity", |
| "violation-human-safety", |
| } |
|
|
| @dataclass |
| class ScoreResult: |
| score: float |
| details: Dict[str, Any] |
|
|
| def _has(t: str, pats: List[str]) -> bool: |
| t = (t or "").lower() |
| return any(re.search(p, t) for p in pats) |
|
|
| def score(sample: Dict[str, Any], prediction: str) -> ScoreResult: |
| pred = (prediction or "").strip() |
| words_ok = len(pred.split()) <= 220 |
|
|
| label_ok = 1 if any(l in pred for l in LABELS) else 0 |
| constraint_ref = 1 if _has(pred, [ |
| r"collision", r"joint", r"limit", r"torque", r"speed", r"clearance", r"singular", r"human" |
| ]) else 0 |
| env_ref = 1 if _has(pred, [r"table", r"shelf", r"human", r"frame", r"edge", r"corridor"]) else 0 |
| action_ref = 1 if _has(pred, [r"path", r"trajectory", r"move", r"reach", r"place", r"lift"]) else 0 |
|
|
| raw = ( |
| 0.25 * int(words_ok) + |
| 0.35 * label_ok + |
| 0.20 * constraint_ref + |
| 0.20 * (env_ref or action_ref) |
| ) |
| final = max(0.0, min(1.0, raw)) |
|
|
| return ScoreResult( |
| score=final, |
| details={ |
| "words_ok": words_ok, |
| "label_ok": label_ok, |
| "constraint_ref": constraint_ref, |
| "env_or_action_ref": int(env_ref or action_ref), |
| "constraint_pressure": sample.get("constraint_pressure"), |
| "scenario": sample.get("scenario"), |
| } |
| ) |
|
|
| def aggregate(results: List[ScoreResult]) -> Dict[str, Any]: |
| if not results: |
| return {"mean": 0.0, "n": 0} |
| return {"mean": sum(r.score for r in results) / len(results), "n": len(results)} |
|
|