"""Evaluate one generated answer with proxy factual/style/worldview scores."""

from __future__ import annotations

import argparse
import json

from fic_agent.config import RuntimeConfig
from fic_agent.eval.judge import score_response_proxy, score_response_llm
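
# Example invocation (the script filename below is illustrative, not taken from the repo;
# the flags match the parser defined in main()):
#   python score_answer.py --result-json runs/meta_qa_result.json --mode proxy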


def _load_json(path: str):
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


def _build_compact_report(result: dict) -> dict:
    mode = str(result.get("mode", "")).strip()
    scores = result.get("scores") if isinstance(result.get("scores"), dict) else {}
    issues_obj = result.get("issues") if isinstance(result.get("issues"), dict) else {}
    critical = [str(x).strip() for x in issues_obj.get("critical", []) if str(x).strip()]
    major = [str(x).strip() for x in issues_obj.get("major", []) if str(x).strip()]
    minor = [str(x).strip() for x in issues_obj.get("minor", []) if str(x).strip()]
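    # Proxy results carry only heuristic scores, so the summary stays minimal.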
    if mode == "proxy":
        return {
            "mode": mode,
            "scores": scores,
            "key_conclusion": "Proxy-only heuristic scores (fast check, not final LLM judgment).",
        }
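    # LLM judge mode: pull verdict fields, falling back to the flat scores dict where the scorecard lacks them.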
    same_character = result.get("same_character")
    confidence_100 = result.get("confidence_100")
    scorecard = result.get("scorecard") if isinstance(result.get("scorecard"), dict) else {}
    penalties = result.get("penalties") if isinstance(result.get("penalties"), dict) else {}
    overall_100 = scorecard.get("overall_100")
    if overall_100 is None:
        overall_100 = scores.get("overall")
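    # Derive usefulness on a 0-100 scale from the 0-5 module score when no direct value is present.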
    usefulness_100 = scores.get("usefulness")
    if usefulness_100 is None and isinstance(scorecard.get("response_usefulness"), dict):
        usefulness_module = scorecard.get("response_usefulness", {}).get("module_score")
        if usefulness_module is not None:
            try:
                usefulness_100 = round((float(usefulness_module) / 5.0) * 100.0, 2)
            except Exception:
                usefulness_100 = None
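    # Verdict precedence: critical issues, then major issues, then the role-consistency call.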
    if critical:
        verdict = "High-risk answer: critical consistency issues detected."
    elif major:
        verdict = "Usable with caution: major issues remain."
    elif same_character == "Yes":
        verdict = "Good result: role consistency and overall quality are acceptable."
    else:
        verdict = "Role consistency is insufficient."
    return {
        "mode": mode or "llm",
        "scores": scores,
        "overall_100": overall_100,
        "usefulness_100": usefulness_100,
        "same_character": same_character,
        "confidence_100": confidence_100,
        "issues": {
            "critical": critical,
            "major": major,
            "minor": minor[:3],
        },
        "penalty": {
            "formula": penalties.get("formula"),
            "additive_deduction": penalties.get("additive_deduction"),
            "multiplier": penalties.get("multiplier"),
            "overall_deduction": penalties.get("overall_deduction"),
        },
        "key_conclusion": verdict,
    }


def main() -> None:
    parser = argparse.ArgumentParser(description="Evaluate a generated answer")
    parser.add_argument("--result-json", required=True, help="Path produced by run_meta_qa --save-json")
    parser.add_argument("--character", default=None, help="Character override")
    parser.add_argument("--processed-dir", default="data/processed", help="Processed directory")
    parser.add_argument("--mode", choices=["proxy", "llm"], default="llm", help="Scoring mode")
    parser.add_argument("--model", default=None, help="Judge model override for LLM mode")
    parser.add_argument("--rounds", type=int, default=3, help="Judge rounds for LLM mode")
    parser.add_argument("--temperature", type=float, default=0.2, help="Judge temperature for LLM mode")
    parser.add_argument("--top-n", type=int, default=6, help="Evidence items per lane shown to LLM judge")
    parser.add_argument("--full-report", action="store_true", help="Keep full detailed report instead of compact summary")
    parser.add_argument("--save-json", default=None, help="Optional path to save the printed report (compact or full)")
    args = parser.parse_args()
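    # Load the generation result that run_meta_qa wrote with --save-json.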
    obj = _load_json(args.result_json)
    query = obj.get("query", "")
    response = obj.get("answer", "")
    evidence = obj.get("evidence", {})
    character = args.character or obj.get("character")
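    # Proxy mode: fast local heuristics, no judge-model call.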
    if args.mode == "proxy":
        scores = score_response_proxy(
            response=response,
            evidence=evidence,
            character=character,
            processed_dir=args.processed_dir,
        )
        result = {"mode": "proxy", "scores": scores}
    else:
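        # LLM judge mode: run the configured judge model for the requested number of rounds.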
        cfg = RuntimeConfig()
        result = score_response_llm(
            query=query,
            response=response,
            evidence=evidence,
            cfg=cfg,
            character=character,
            model=args.model,
            rounds=args.rounds,
            temperature=args.temperature,
            top_n=args.top_n,
        )
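    # Print a compact summary by default; --full-report keeps the raw scoring output.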
    output = result if args.full_report else _build_compact_report(result)
    print(json.dumps(output, ensure_ascii=False, indent=2))
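    # Optionally persist whatever was printed (compact or full) as JSON.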
    if args.save_json:
        with open(args.save_json, "w", encoding="utf-8") as f:
            json.dump(output, f, ensure_ascii=False, indent=2)


if __name__ == "__main__":
    main()