# fic-agent/scripts/run_scene_experiments.py
# (GitHub page export residue converted to comments: uploaded by "t1eautomat",
#  commit 9b7e0a7 "update latest code and outputs".)
"""Run scene-level roleplay experiments on ficset dataset.
Experiment protocol (per target scene):
1) Locate scenes with is_two_person_dialogue_scene=true.
2) Use all prior scenes as retrieval knowledge corpus.
3) Take the first spoken line (first non-environment dialog) as query.
4) Ask the model to answer as the other character in that scene.
The retrieval and meta-cognitive QA algorithms are reused from existing modules.
"""
from __future__ import annotations
import argparse
from dataclasses import dataclass
import json
from pathlib import Path
import re
import sys
import time
from typing import Dict, Iterable, List, Optional, Tuple
from fic_agent.config import RuntimeConfig
from fic_agent.eval.judge import (
compare_responses_pairwise_llm,
score_response_llm,
score_response_proxy,
)
from fic_agent.generation.compose import run_tri_retrieve_and_compose
from fic_agent.generation.meta_loop import run_meta_cognitive_qa
from fic_agent.generation.token_usage import merge_token_usage, new_token_usage, record_token_usage
from fic_agent.ingest.pipeline import (
build_document_layer,
chunks_to_dicts,
save_jsonl,
)
from fic_agent.persona.profile import (
build_persona_profile,
render_persona_prompt,
save_persona_profile,
save_persona_prompt,
)
from fic_agent.retrieval.retriever import build_index_for_texts
from fic_agent.utils.retry import retry_call
# Matches chapter filenames like "Chapter-12..." and captures the chapter number.
CHAPTER_RE = re.compile(r"Chapter-(\d+)")
# Ablation matrix: (ablation name, active retrieval lanes, generation mode).
# Mode "meta" runs the meta-cognitive QA loop; "single_pass_rag" is one retrieve+compose pass.
ABLATION_SPECS: List[Tuple[str, List[str], str]] = [
    ("l1_only", ["facts"], "meta"),
    ("l1_l2", ["facts", "persona"], "meta"),
    ("l1_only_rag", ["facts"], "single_pass_rag"),
]
# Token-usage stage names billed to the QA answer itself; every other stage is
# counted as RAG-internal overhead (see _split_qa_token_usage).
QA_ONLY_STAGES = {"compose_draft", "style_rewrite"}
@dataclass
class SceneRecord:
    """A single scene positioned within the whole-book timeline."""
    chapter_num: int  # chapter number parsed from the chapter filename
    scene_id: int  # scene index within its chapter (as stored in the dataset)
    global_idx: int  # position of this scene across the entire book timeline
    chapter_base: str  # chapter filename with the ".txt.json" suffix stripped
    scene: Dict  # raw scene object as loaded from the chapter JSON
def _read_json(path: Path):
return json.loads(path.read_text(encoding="utf-8"))
def _write_json(path: Path, obj: Dict) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps(obj, ensure_ascii=False, indent=2), encoding="utf-8")
def _build_run_token_totals(rows: List[Dict], key: str = "token_usage") -> Dict:
    """Aggregate per-row token-usage dicts across all rows with status == "ok"."""
    totals = new_token_usage()
    for row in rows:
        if row.get("status") != "ok":
            continue
        usage = row.get(key)
        # Non-dict usage entries (missing / malformed) are silently skipped.
        if isinstance(usage, dict):
            merge_token_usage(totals, usage)
    return totals
def _usage_slice_by_stages(usage: Dict, stages: set[str]) -> Dict:
    """Return a token-usage dict covering only the named stages of *usage*."""
    out = new_token_usage()
    stages_obj = usage.get("stages") if isinstance(usage, dict) else {}
    if not isinstance(stages_obj, dict):
        return out

    def _counts(stats: Dict) -> Dict:
        # Coerce every counter to int, treating missing/None values as zero.
        return {
            field: int(stats.get(field, 0) or 0)
            for field in ("calls", "prompt_tokens", "completion_tokens", "total_tokens")
        }

    for stage, stats in stages_obj.items():
        if str(stage) not in stages:
            continue
        if not isinstance(stats, dict):
            continue
        counts = _counts(stats)
        stage_usage = dict(counts)
        # Mirror the totals under the per-stage breakdown so merge accumulates both.
        stage_usage["stages"] = {str(stage): dict(counts)}
        stage_usage["models"] = {}
        merge_token_usage(out, stage_usage)
    return out
def _split_qa_token_usage(usage: Dict) -> Tuple[Dict, Dict]:
    """Split *usage* into (QA-only stages, remaining RAG-internal stages)."""
    qa_only = _usage_slice_by_stages(usage, QA_ONLY_STAGES)
    stages_obj = usage.get("stages") if isinstance(usage, dict) else {}
    if isinstance(stages_obj, dict):
        rag_stage_names = set(stages_obj.keys()) - QA_ONLY_STAGES
    else:
        rag_stage_names = set()
    rag_internal = _usage_slice_by_stages(usage, rag_stage_names)
    return qa_only, rag_internal
def _build_run_latency_stats(rows: List[Dict], key: str = "generation_latency_sec") -> Dict:
vals: List[float] = []
for row in rows:
if row.get("status") != "ok":
continue
try:
v = float(row.get(key, 0.0))
except Exception:
continue
if v > 0:
vals.append(v)
if not vals:
return {"count": 0, "sum_sec": 0.0, "avg_sec": 0.0, "min_sec": 0.0, "max_sec": 0.0}
return {
"count": len(vals),
"sum_sec": round(sum(vals), 3),
"avg_sec": round(sum(vals) / len(vals), 3),
"min_sec": round(min(vals), 3),
"max_sec": round(max(vals), 3),
}
def _build_compare_win_counts(rows: List[Dict], winner_key: str = "compare_winner_overall") -> Dict:
out = {"available": 0, "ficrag_better": 0, "plain_llm_better": 0, "tie": 0}
for row in rows:
if row.get("status") != "ok":
continue
winner = str(row.get(winner_key, "")).strip().lower()
if not winner:
continue
out["available"] += 1
if winner == "ficrag":
out["ficrag_better"] += 1
elif winner == "plain_llm":
out["plain_llm_better"] += 1
else:
out["tie"] += 1
return out
def _print_progress(
    *,
    done: int,
    total: int,
    ok: int,
    failed: int,
    skipped: int,
    case_key: str,
    status: str,
    start_ts: float,
) -> None:
    """Render a single-line, carriage-return progress bar on stdout."""
    width = 30
    ratio = 1.0 if total <= 0 else max(0.0, min(1.0, done / total))
    filled = int(round(width * ratio))
    bar = "=" * filled + "-" * (width - filled)
    elapsed = max(0.0, time.time() - start_ts)
    speed = done / elapsed if elapsed > 0 else 0.0
    # ETA is only meaningful while work remains and a speed has been measured.
    eta_sec = (total - done) / speed if speed > 0 and done < total else 0.0
    eta_m, eta_s = divmod(int(round(eta_sec)), 60)
    ela_m, ela_s = divmod(int(round(elapsed)), 60)
    line = (
        f"[{bar}] {done}/{total} "
        f"ok={ok} failed={failed} skipped={skipped} "
        f"speed={speed:.2f}/s eta={eta_m:02d}:{eta_s:02d} elapsed={ela_m:02d}:{ela_s:02d} "
        f"{case_key} -> {status}"
    )
    # Clip to 220 chars so the line never wraps on narrow terminals.
    sys.stdout.write("\r" + line[:220])
    sys.stdout.flush()
    if done >= total:
        # Finish with a newline so the next print starts on a fresh line.
        sys.stdout.write("\n")
        sys.stdout.flush()
def _build_compact_eval_report(result: Dict) -> Dict:
mode = str(result.get("mode", "")).strip()
scores = result.get("scores") if isinstance(result.get("scores"), dict) else {}
issues_obj = result.get("issues") if isinstance(result.get("issues"), dict) else {}
critical = [str(x).strip() for x in issues_obj.get("critical", []) if str(x).strip()]
major = [str(x).strip() for x in issues_obj.get("major", []) if str(x).strip()]
minor = [str(x).strip() for x in issues_obj.get("minor", []) if str(x).strip()]
if mode == "proxy":
return {
"mode": mode,
"scores": scores,
"key_conclusion": "Proxy-only heuristic scores (fast check, not final LLM judgment).",
}
same_character = result.get("same_character")
confidence_100 = result.get("confidence_100")
scorecard = result.get("scorecard") if isinstance(result.get("scorecard"), dict) else {}
penalties = result.get("penalties") if isinstance(result.get("penalties"), dict) else {}
overall_100 = scorecard.get("overall_100")
if overall_100 is None:
overall_100 = scores.get("overall_100")
comp = scorecard.get("overall_components") if isinstance(scorecard.get("overall_components"), dict) else {}
overall_quality_100 = comp.get("overall_quality_100")
overall_role_strict_100 = comp.get("overall_role_strict_100")
if critical:
verdict = "High-risk answer: critical consistency issues detected."
elif major:
verdict = "Usable with caution: major issues remain."
elif same_character == "Yes":
verdict = "Good result: role consistency and overall quality are acceptable."
else:
verdict = "Role consistency is insufficient."
return {
"mode": mode or "llm",
"scores": scores,
"overall_100": overall_100,
"overall_breakdown": {
"quality_100": overall_quality_100,
"role_strict_100": overall_role_strict_100,
},
"same_character": same_character,
"confidence_100": confidence_100,
"issues": {
"critical": critical,
"major": major,
"minor": minor[:3],
},
"penalty": {
"formula": penalties.get("formula"),
"additive_deduction": penalties.get("additive_deduction"),
"multiplier": penalties.get("multiplier"),
"quality_deduction": penalties.get("quality_deduction"),
"overall_deduction": penalties.get("overall_deduction"),
},
"key_conclusion": verdict,
}
def _run_plain_llm_baseline(
    *,
    query: str,
    answerer: str,
    cfg: RuntimeConfig,
    model: Optional[str] = None,
) -> Dict:
    """Generate a no-retrieval baseline answer via a single chat completion.

    Args:
        query: The user line the target character must respond to.
        answerer: Name of the character the model should answer as.
        cfg: Runtime configuration providing LLM credentials and retry policy.
        model: Optional model override; defaults to ``cfg.llm_model``.

    Returns:
        Dict with keys ``answer``, ``token_usage``, ``generation_latency_sec``,
        ``generation_started_at_epoch`` and ``model``.

    Raises:
        ValueError: If no LLM API key is configured.
        ImportError: If the ``openai`` package is not installed.
    """
    if not cfg.llm_api_key:
        raise ValueError("llm_api_key is required for plain LLM baseline generation.")
    try:
        from openai import OpenAI  # type: ignore
    except Exception as e:
        raise ImportError("openai package is required for plain LLM baseline generation.") from e
    model_name = model or cfg.llm_model
    client = OpenAI(base_url=cfg.llm_base_url, api_key=cfg.llm_api_key)
    token_usage = new_token_usage()
    # Wall-clock start is recorded separately from the monotonic latency timer.
    started_at = time.time()
    t0 = time.perf_counter()
    def _call():
        # Single chat completion; deliberately no retrieval context is injected,
        # so this measures what the bare model can do from the query alone.
        return client.chat.completions.create(
            model=model_name,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are a general-purpose assistant in fiction dialogue mode. "
                        "Answer the user's line as the target character. "
                        "Use only general world knowledge and local conversational cues."
                    ),
                },
                {
                    "role": "user",
                    "content": f"Target character: {answerer}\nUser line: {query}\nRespond in one concise reply.",
                },
            ],
            temperature=0.3,
            max_tokens=700,
        )
    def _on_retry(attempt: int, err: Exception, delay: float) -> None:
        # Progress note for each retry so long backoffs are visible in the log.
        print(
            f"[plain_llm_baseline][retry] attempt={attempt + 1}/{max(1, int(cfg.api_retry_attempts))} "
            f"sleep={delay:.1f}s err={err}",
            flush=True,
        )
    resp = retry_call(
        _call,
        max_attempts=max(1, int(cfg.api_retry_attempts)),
        base_delay_sec=float(cfg.api_retry_base_delay_sec),
        max_delay_sec=float(cfg.api_retry_max_delay_sec),
        jitter_sec=float(cfg.api_retry_jitter_sec),
        on_retry=_on_retry,
    )
    # Latency covers all attempts including retry backoff sleeps.
    latency_sec = round(max(0.0, time.perf_counter() - t0), 3)
    record_token_usage(
        token_usage,
        response=resp,
        stage="plain_llm_baseline",
        model=model_name,
    )
    answer = str(resp.choices[0].message.content or "").strip()
    if not answer:
        # Keep downstream evaluation runnable even on an empty completion.
        answer = "(empty baseline response)"
    return {
        "answer": answer,
        "token_usage": token_usage,
        "generation_latency_sec": latency_sec,
        "generation_started_at_epoch": round(started_at, 3),
        "model": model_name,
    }
def _safe_float(value: object) -> Optional[float]:
try:
if value is None:
return None
return float(value)
except Exception:
return None
def _build_compare_compact(
    *,
    case_key: str,
    query: str,
    answerer: str,
    ficrag_eval_compact: Optional[Dict],
    baseline_eval_compact: Optional[Dict],
    ficrag_token_total: int,
    baseline_token_total: int,
    ficrag_latency_sec: float,
    baseline_latency_sec: float,
    pairwise_obj: Optional[Dict] = None,
) -> Dict:
    """Build the side-by-side ficrag-vs-plain-LLM comparison summary for one case."""

    def _nested(obj: Optional[Dict], outer: str, inner: str) -> Optional[float]:
        # Fetch obj[outer][inner] as float, tolerating missing/non-dict levels.
        if not isinstance(obj, dict):
            return None
        section = obj.get(outer)
        if not isinstance(section, dict):
            return None
        return _safe_float(section.get(inner))

    def _score(obj: Optional[Dict], key: str) -> Optional[float]:
        return _nested(obj, "scores", key)

    def _overall(obj: Optional[Dict]) -> Optional[float]:
        if not isinstance(obj, dict):
            return None
        return _safe_float(obj.get("overall_100"))

    overall_f = _overall(ficrag_eval_compact)
    overall_b = _overall(baseline_eval_compact)
    overall_quality_f = _nested(ficrag_eval_compact, "overall_breakdown", "quality_100")
    overall_quality_b = _nested(baseline_eval_compact, "overall_breakdown", "quality_100")
    overall_strict_f = _nested(ficrag_eval_compact, "overall_breakdown", "role_strict_100")
    overall_strict_b = _nested(baseline_eval_compact, "overall_breakdown", "role_strict_100")

    def _winner(a: Optional[float], b: Optional[float]) -> Optional[str]:
        # None when either side is unscored; near-equal scores count as a tie.
        if a is None or b is None:
            return None
        if abs(a - b) < 1e-9:
            return "tie"
        return "ficrag" if a > b else "plain_llm"

    def _delta(a: Optional[float], b: Optional[float]) -> Optional[float]:
        if a is None or b is None:
            return None
        return round(a - b, 2)

    return {
        "case_key": case_key,
        "query": query,
        "character": answerer,
        "winner_overall": _winner(overall_f, overall_b),
        "winner_quality": _winner(overall_quality_f, overall_quality_b),
        "winner_pairwise": (pairwise_obj or {}).get("winner"),
        "ficrag": ficrag_eval_compact,
        "plain_llm": baseline_eval_compact,
        "pairwise": pairwise_obj,
        "delta": {
            "facts": _delta(_score(ficrag_eval_compact, "facts"), _score(baseline_eval_compact, "facts")),
            "persona": _delta(_score(ficrag_eval_compact, "persona"), _score(baseline_eval_compact, "persona")),
            "worldview": _delta(_score(ficrag_eval_compact, "worldview"), _score(baseline_eval_compact, "worldview")),
            "usefulness": _delta(_score(ficrag_eval_compact, "usefulness"), _score(baseline_eval_compact, "usefulness")),
            "overall_quality": _delta(overall_quality_f, overall_quality_b),
            "overall_role_strict": _delta(overall_strict_f, overall_strict_b),
            "overall": _delta(overall_f, overall_b),
            "token_total": ficrag_token_total - baseline_token_total,
            "latency_sec": round(float(ficrag_latency_sec) - float(baseline_latency_sec), 3),
        },
    }
def _chapter_num_from_path(path: Path) -> int:
    """Extract the chapter number embedded in a chapter filename."""
    match = CHAPTER_RE.search(path.name)
    if match is None:
        raise ValueError(f"Cannot parse chapter number from: {path}")
    return int(match.group(1))
def _iter_chapter_files(book_dir: Path) -> List[Path]:
    """List the chapter JSON files in *book_dir*, ordered by chapter number then name."""
    return sorted(book_dir.glob("*.json"), key=lambda p: (_chapter_num_from_path(p), p.name))
def _normalize_name(name: Optional[str]) -> str:
return re.sub(r"\s+", " ", str(name or "").strip()).lower()
def _dedup_keep_order(items: Iterable[str]) -> List[str]:
    """Drop blanks and duplicates (compared by normalized name), keeping first-seen order."""
    seen: set = set()
    result: List[str] = []
    for item in items:
        text = str(item or "").strip()
        if not text:
            continue
        norm = _normalize_name(text)
        if norm not in seen:
            seen.add(norm)
            # The original (un-normalized) spelling is what gets kept.
            result.append(text)
    return result
def _build_timeline(book_dir: Path) -> List[SceneRecord]:
    """Flatten every chapter's scenes into one ordered SceneRecord timeline."""
    timeline: List[SceneRecord] = []
    next_global_idx = 0
    for chapter_file in _iter_chapter_files(book_dir):
        chapter_num = _chapter_num_from_path(chapter_file)
        chapter_base = chapter_file.name.replace(".txt.json", "")
        payload = _read_json(chapter_file)
        # Chapter files that are not JSON arrays are skipped entirely.
        if not isinstance(payload, list):
            continue
        for item in payload:
            if not isinstance(item, dict):
                continue
            scene_id = int(item.get("scene_id", -1))
            if scene_id < 0:
                continue
            record = SceneRecord(
                chapter_num=chapter_num,
                scene_id=scene_id,
                global_idx=next_global_idx,
                chapter_base=chapter_base,
                scene=item,
            )
            timeline.append(record)
            next_global_idx += 1
    return timeline
def _load_scene_text(scene_text_dir: Path, record: SceneRecord) -> Optional[str]:
    """Read the exported scene text for *record*, or None when absent/blank."""
    # Scene text filenames are 1-based: scene-(scene_id + 1).
    path = scene_text_dir / f"{record.chapter_base}-scene-{record.scene_id + 1}.txt"
    if not path.exists():
        return None
    text = path.read_text(encoding="utf-8").strip()
    return text or None
def _fallback_scene_text(scene: Dict) -> str:
dialogs = scene.get("dialogs")
if not isinstance(dialogs, list):
return ""
lines: List[str] = []
for d in dialogs:
if not isinstance(d, dict):
continue
c = str(d.get("content", "")).strip()
if c:
lines.append(c)
return "\n\n".join(lines).strip()
def _build_knowledge_text(
    prior_scenes: List[SceneRecord],
    scene_text_dir: Path,
) -> str:
    """Concatenate prior-scene texts into one chapter/scene-tagged retrieval corpus."""
    blocks: List[str] = []
    for record in prior_scenes:
        # Prefer the exported scene text file; fall back to joined dialog lines.
        text = _load_scene_text(scene_text_dir, record) or _fallback_scene_text(record.scene)
        if text:
            blocks.append(f"[Chapter {record.chapter_num} Scene {record.scene_id}]\n{text}")
    return "\n\n".join(blocks).strip()
def _extract_first_speaker_query(scene: Dict) -> Tuple[Optional[str], Optional[str]]:
    """Return (speaker, line) of the first non-environment spoken dialog, else (None, None)."""
    dialogs = scene.get("dialogs")
    if isinstance(dialogs, list):
        for entry in dialogs:
            if not isinstance(entry, dict):
                continue
            speaker = str(entry.get("from", "")).strip()
            if not speaker or _normalize_name(speaker) == "environment":
                continue
            content = str(entry.get("content", "")).strip()
            if content:
                return speaker, content
    return None, None
def _extract_reference_answer(
    scene: Dict,
    *,
    asker: Optional[str],
    answerer: Optional[str],
) -> Optional[str]:
    """Find the ground-truth reply to the scene's opening spoken line.

    Search order:
    1) locate the first spoken (non-environment) line — the query;
    2) after it, prefer the answerer's first line, else the first line not by the asker;
    3) last resort: the answerer's first line anywhere in the scene.
    Returns None when no suitable reply exists.
    """
    dialogs = scene.get("dialogs")
    if not isinstance(dialogs, list):
        return None
    asker_norm = _normalize_name(asker)
    answerer_norm = _normalize_name(answerer)
    # Step 1: index of the first spoken line (the query this answer responds to).
    query_idx: Optional[int] = None
    for i, d in enumerate(dialogs):
        if not isinstance(d, dict):
            continue
        spk = str(d.get("from", "")).strip()
        txt = str(d.get("content", "")).strip()
        if not spk or not txt or _normalize_name(spk) == "environment":
            continue
        query_idx = i
        break
    if query_idx is None:
        return None
    # Step 2: first reply after the query — answerer preferred, otherwise any non-asker.
    for d in dialogs[query_idx + 1 :]:
        if not isinstance(d, dict):
            continue
        spk = str(d.get("from", "")).strip()
        txt = str(d.get("content", "")).strip()
        if not spk or not txt or _normalize_name(spk) == "environment":
            continue
        spk_norm = _normalize_name(spk)
        if answerer_norm and spk_norm == answerer_norm:
            return txt
        if spk_norm != asker_norm:
            return txt
    # Step 3: fall back to the answerer's first line anywhere in the scene
    # (covers scenes where the answerer speaks before the detected query line).
    for d in dialogs:
        if not isinstance(d, dict):
            continue
        spk = str(d.get("from", "")).strip()
        txt = str(d.get("content", "")).strip()
        if not spk or not txt or _normalize_name(spk) == "environment":
            continue
        if answerer_norm and _normalize_name(spk) == answerer_norm:
            return txt
    return None
def _pick_other_character(
    scene: Dict,
    first_speaker: Optional[str],
    *,
    character_candidates: Optional[List[str]] = None,
) -> Optional[str]:
    """Choose the character who should answer the first speaker's line.

    Preference order:
    1) another participant listed in the scene ("charectors" key or dialog speakers);
    2) a known candidate explicitly mentioned in the scene's dialog text;
    3) any known candidate other than the first speaker (keeps every scene runnable).
    Returns None when no distinct character can be found.

    Fixes vs. previous version: removed the dead `if not pool: pool = []`
    no-op (pool is already a list, and an empty list stays empty) and the
    redundant re-fetch of `scene["dialogs"]`.
    """
    # NOTE: "charectors" is the dataset's (misspelled) key — do not "fix" it here.
    chs = scene.get("charectors")
    chars = _dedup_keep_order(chs if isinstance(chs, list) else [])
    dialogs = scene.get("dialogs")
    dialog_speakers: List[str] = []
    if isinstance(dialogs, list):
        for d in dialogs:
            if not isinstance(d, dict):
                continue
            spk = str(d.get("from", "")).strip()
            if not spk or _normalize_name(spk) == "environment":
                continue
            dialog_speakers.append(spk)
    dialog_speakers = _dedup_keep_order(dialog_speakers)
    # Listed characters first, then any extra dialog speakers (first-seen order).
    pool = _dedup_keep_order(chars + dialog_speakers)
    asker_norm = _normalize_name(first_speaker)
    for name in pool:
        if _normalize_name(name) != asker_norm:
            return name
    # Fallback 1: find another known character explicitly mentioned in this scene text.
    scene_blob_parts: List[str] = []
    if isinstance(dialogs, list):
        for d in dialogs:
            if not isinstance(d, dict):
                continue
            c = str(d.get("content", "")).strip()
            if c:
                scene_blob_parts.append(c)
    scene_blob = "\n".join(scene_blob_parts)
    for cand in character_candidates or []:
        cand_norm = _normalize_name(cand)
        if not cand_norm or cand_norm == asker_norm:
            continue
        # NOTE(review): \b assumes the candidate name starts/ends with word
        # characters; names with leading/trailing punctuation may never match.
        if re.search(rf"\b{re.escape(cand)}\b", scene_blob):
            return cand
    # Fallback 2: global candidate fallback to keep every true scene runnable.
    for cand in character_candidates or []:
        if _normalize_name(cand) != asker_norm:
            return cand
    return None
def _load_character_candidates(path: Path) -> List[str]:
    """Load the deduplicated character-candidate list from a JSON file, if present."""
    if not path.exists():
        return []
    payload = _read_json(path)
    raw = payload.get("characters") if isinstance(payload, dict) else []
    if not isinstance(raw, list):
        return []
    return _dedup_keep_order(str(x) for x in raw)
def _build_runtime_cfg(processed_dir: Path, index_dir: Path, output_dir: Path) -> RuntimeConfig:
    """Create a RuntimeConfig pointed at this case's processed/index/output dirs."""
    cfg = RuntimeConfig()
    for attr, value in (
        ("data_processed_dir", processed_dir),
        ("data_index_dir", index_dir),
        ("output_dir", output_dir),
    ):
        setattr(cfg, attr, str(value))
    return cfg
def _validate_runtime_requirements() -> None:
    """Fail fast when required API keys or packages are missing.

    Raises:
        ValueError: When the embedding or LLM API key is not configured.
        ImportError: When `openai` or `faiss` cannot be imported.
    """
    cfg = RuntimeConfig()
    if not cfg.embedding_api_key:
        raise ValueError(
            "Missing embedding API key. Set EMBEDDING_API_KEY (or OPENAI_API_KEY / OPENROUTER_API_KEY)."
        )
    if not cfg.llm_api_key:
        raise ValueError(
            "Missing LLM API key. Set LLM_API_KEY (or OPENAI_API_KEY / OPENROUTER_API_KEY)."
        )
    # Import probes only — the modules are used elsewhere, not here.
    try:
        import openai  # noqa: F401
    except Exception as e:
        raise ImportError(
            "Package `openai` is required for embeddings + generation. Install requirements first."
        ) from e
    try:
        import faiss  # noqa: F401
    except Exception as e:
        raise ImportError(
            "Package `faiss-cpu` is required for retrieval indexes. Install requirements first."
        ) from e
def _build_annotated_dialogues(prior_scenes: List[SceneRecord]) -> List[Dict]:
    """Flatten prior scenes into speaker-annotated dialogue rows for the ingest pipeline."""
    rows: List[Dict] = []
    position = 0
    for record in prior_scenes:
        dialogs = record.scene.get("dialogs")
        if not isinstance(dialogs, list):
            continue
        for entry in dialogs:
            if not isinstance(entry, dict):
                continue
            speaker = str(entry.get("from", "")).strip()
            utterance = str(entry.get("content", "")).strip()
            # Skip blank rows and narrator ("environment") lines.
            if not speaker or not utterance or _normalize_name(speaker) == "environment":
                continue
            rows.append(
                {
                    "speaker": speaker,
                    "speaker_method": "dataset",
                    "utterance": utterance,
                    "context_before": "",
                    "context_after": "",
                    "chunk_id": f"scene-{record.chapter_num}-{record.scene_id}",
                    "chapter_id": record.chapter_num,
                    "position": position,
                }
            )
            position += 1
    return rows
def _save_core_artifacts(
    cfg: RuntimeConfig,
    knowledge_text: str,
    book_id: str,
    answerer: str,
    character_candidates: List[str],
    prior_scenes: List[SceneRecord],
) -> Dict[str, int]:
    """Build and persist all per-case artifacts: chunks, dialogues, worldview
    notes, the answerer's persona profile/prompt, and the three retrieval
    indexes (facts / persona / worldview).

    Args:
        cfg: Runtime config whose processed/index dirs receive the artifacts.
        knowledge_text: Concatenated prior-scene text used as the fact corpus.
        book_id: Identifier attached to the document chunks.
        answerer: Character whose persona profile is built.
        character_candidates: Known character names (used as excluded terms).
        prior_scenes: Scenes preceding the target scene (dialogue source).

    Returns:
        Dict of artifact counts: chunk_count, dialogue_count, worldview_count,
        persona_utterance_count.
    """
    processed_dir = Path(cfg.data_processed_dir)
    processed_dir.mkdir(parents=True, exist_ok=True)
    # Document layer: fixed-size overlapping chunks of the knowledge corpus.
    chunks = build_document_layer(knowledge_text, book_id=book_id, max_chars=2000, overlap=200)
    chunk_dicts = chunks_to_dicts(chunks)
    save_jsonl(chunk_dicts, str(processed_dir / "chunks.jsonl"))
    dialogues = _build_annotated_dialogues(prior_scenes=prior_scenes)
    save_jsonl(dialogues, str(processed_dir / "dialogues.jsonl"))
    # Rule-based worldview extraction (fast path, no LLM refinement).
    try:
        from fic_agent.worldview.worldview import build_worldview_notes, save_worldview_notes
    except Exception as e:
        raise ImportError(
            "Failed to import worldview builders. Ensure dependencies are installed."
        ) from e
    worldview_notes: List[Dict] = build_worldview_notes(chunk_dicts)
    save_worldview_notes(worldview_notes, str(processed_dir / "worldview_notes.jsonl"))
    # Persona inputs: the answerer's own lines vs. everyone else's (background).
    utterances = [
        str(d.get("utterance", "")).strip()
        for d in dialogues
        if _normalize_name(d.get("speaker")) == _normalize_name(answerer) and str(d.get("utterance", "")).strip()
    ]
    bg_utterances = [
        str(d.get("utterance", "")).strip()
        for d in dialogues
        if _normalize_name(d.get("speaker")) != _normalize_name(answerer) and str(d.get("utterance", "")).strip()
    ]
    # All known names are excluded terms so they don't surface as persona traits.
    excluded = _dedup_keep_order(character_candidates + [str(d.get("speaker", "")).strip() for d in dialogues if str(d.get("speaker", "")).strip()])
    profile = build_persona_profile(
        name=answerer,
        utterances=utterances,
        background_utterances=bg_utterances,
        excluded_terms=excluded,
        worldview_notes=[],
    )
    # "/" in a name would create nested paths; replace it for the filename only.
    safe_answerer = answerer.replace("/", "_")
    save_persona_profile(profile, str(processed_dir / f"persona_{safe_answerer}.json"))
    save_persona_prompt(render_persona_prompt(profile), str(processed_dir / f"persona_{safe_answerer}_prompt.txt"))
    # Build all three indexes; if a lane is empty, create a placeholder row to keep retrieval pipeline runnable.
    fact_texts = [c["text"] for c in chunk_dicts] or ["(no prior scene knowledge)"]
    fact_meta = (
        [{"id": c["chunk_id"], "text": c["text"], "chapter_id": c["chapter_id"]} for c in chunk_dicts]
        if chunk_dicts
        else [{"id": "facts-placeholder", "text": "(no prior scene knowledge)", "chapter_id": 0}]
    )
    build_index_for_texts(fact_texts, fact_meta, cfg, "facts")
    # Persona lane indexes every spoken utterance (not just the answerer's).
    persona_texts = [str(d.get("utterance", "")).strip() for d in dialogues if str(d.get("utterance", "")).strip()]
    persona_meta = [
        {
            "id": f"dlg-{i}",
            "text": str(d.get("utterance", "")).strip(),
            "speaker": d.get("speaker"),
            "chunk_id": d.get("chunk_id"),
        }
        for i, d in enumerate(dialogues)
        if str(d.get("utterance", "")).strip()
    ]
    if not persona_texts:
        persona_texts = [f"(no dialogue found for {answerer})"]
        persona_meta = [
            {
                "id": "persona-placeholder",
                "text": f"(no dialogue found for {answerer})",
                "speaker": answerer,
                "chunk_id": "none",
            }
        ]
    build_index_for_texts(persona_texts, persona_meta, cfg, "persona")
    # Worldview lane: de-duplicate notes by whitespace-normalized lowercase text.
    worldview_rows = [w for w in worldview_notes if str(w.get("text", "")).strip()]
    worldview_seen_text = set()
    worldview_meta = []
    worldview_texts = []
    for i, w in enumerate(worldview_rows):
        text = str(w.get("text", "")).strip()
        norm = re.sub(r"\s+", " ", text).lower()
        if not norm or norm in worldview_seen_text:
            continue
        worldview_seen_text.add(norm)
        worldview_texts.append(text)
        worldview_meta.append(
            {
                "id": f"wv-{i}",
                "text": text,
                "type": w.get("type"),
                "entity": w.get("entity"),
                "source_chunk": w.get("source_chunk"),
            }
        )
    if not worldview_texts:
        worldview_texts = ["(no worldview note extracted)"]
        worldview_meta = [
            {
                "id": "worldview-placeholder",
                "text": "(no worldview note extracted)",
                "type": "note",
                "entity": None,
                "source_chunk": None,
            }
        ]
    build_index_for_texts(worldview_texts, worldview_meta, cfg, "worldview")
    return {
        "chunk_count": len(chunk_dicts),
        "dialogue_count": len(dialogues),
        "worldview_count": len(worldview_notes),
        "persona_utterance_count": len(utterances),
    }
def _run_one_case(
*,
case_dir: Path,
run_outputs_dir: Path,
book: str,
knowledge_text: str,
query: str,
answerer: str,
reference_answer: Optional[str],
character_candidates: List[str],
prior_scenes: List[SceneRecord],
style_correct: bool,
max_iter: Optional[int],
eval_mode: str,
keep_eval_full: bool,
eval_rounds: int,
eval_temperature: float,
eval_top_n: int,
with_plain_llm_baseline: bool,
baseline_model: Optional[str],
with_ablation_experiments: bool,
) -> Dict:
processed_dir = case_dir / "processed"
index_dir = case_dir / "indexes"
cfg = _build_runtime_cfg(processed_dir=processed_dir, index_dir=index_dir, output_dir=run_outputs_dir)
stats = _save_core_artifacts(
cfg=cfg,
knowledge_text=knowledge_text,
book_id=book,
answerer=answerer,
character_candidates=character_candidates,
prior_scenes=prior_scenes,
)
gen_started_at = time.time()
gen_t0 = time.perf_counter()
qa = run_meta_cognitive_qa(
query=query,
cfg=cfg,
character=answerer,
style_correct=style_correct,
max_iterations=max_iter,
)
generation_latency_sec = round(max(0.0, time.perf_counter() - gen_t0), 3)
qa_obj = {
"query": query,
"character": answerer,
"reference_answer": reference_answer,
"answer": qa.answer,
"trace": [s.__dict__ for s in qa.trace],
"evidence": qa.evidence,
"token_usage": qa.token_usage,
"generation_latency_sec": generation_latency_sec,
"generation_started_at_epoch": round(gen_started_at, 3),
}
qa_usage_qa_only, qa_usage_rag_internal = _split_qa_token_usage(qa.token_usage)
qa_obj["token_usage_qa_only"] = qa_usage_qa_only
qa_obj["token_usage_rag_internal"] = qa_usage_rag_internal
_write_json(case_dir / "qa_full.json", qa_obj)
eval_obj = None
if eval_mode == "proxy":
eval_obj = {"mode": "proxy", "scores": score_response_proxy(qa.answer, qa.evidence, character=answerer, processed_dir=str(processed_dir))}
elif eval_mode == "llm":
eval_obj = score_response_llm(
query=query,
response=qa.answer,
evidence=qa.evidence,
cfg=cfg,
character=answerer,
rounds=eval_rounds,
temperature=eval_temperature,
top_n=eval_top_n,
reference_answer=reference_answer,
generation_mode="ficrag_meta",
)
ficrag_token_total = int(qa_usage_qa_only.get("total_tokens", 0))
ficrag_rag_internal_token_total = int(qa_usage_rag_internal.get("total_tokens", 0))
if eval_obj is not None:
compact = _build_compact_eval_report(eval_obj)
compact["qa"] = {
"query": query,
"character": answerer,
"reference_answer": reference_answer,
"answer": qa.answer,
}
compact["qa_metrics"] = {
"generation_latency_sec": generation_latency_sec,
"token_usage": qa_usage_qa_only,
"token_total": ficrag_token_total,
"rag_internal_token_usage": qa_usage_rag_internal,
"rag_internal_token_total": ficrag_rag_internal_token_total,
}
compact["evidence_counts"] = {
"facts": len(qa.evidence.get("facts", [])),
"persona": len(qa.evidence.get("persona", [])),
"worldview": len(qa.evidence.get("worldview", [])),
}
_write_json(case_dir / "eval_compact.json", compact)
if keep_eval_full:
_write_json(case_dir / "eval_full.json", eval_obj)
else:
compact = None
baseline = None
baseline_compact = None
baseline_eval_obj = None
baseline_usage_qa_only = new_token_usage()
baseline_usage_rag_internal = new_token_usage()
if with_plain_llm_baseline:
baseline = _run_plain_llm_baseline(
query=query,
answerer=answerer,
cfg=cfg,
model=baseline_model,
)
baseline_usage_qa_only, baseline_usage_rag_internal = _split_qa_token_usage(baseline["token_usage"])
baseline_qa_obj = {
"query": query,
"character": answerer,
"reference_answer": reference_answer,
"answer": baseline["answer"],
"token_usage": baseline["token_usage"],
"token_usage_qa_only": baseline_usage_qa_only,
"token_usage_rag_internal": baseline_usage_rag_internal,
"generation_latency_sec": baseline["generation_latency_sec"],
"generation_started_at_epoch": baseline["generation_started_at_epoch"],
"model": baseline["model"],
}
_write_json(case_dir / "qa_baseline.json", baseline_qa_obj)
if eval_mode == "proxy":
baseline_eval_obj = {
"mode": "proxy",
"scores": score_response_proxy(
baseline["answer"],
qa.evidence,
character=answerer,
processed_dir=str(processed_dir),
),
}
elif eval_mode == "llm":
baseline_eval_obj = score_response_llm(
query=query,
response=baseline["answer"],
evidence=qa.evidence,
cfg=cfg,
character=answerer,
rounds=eval_rounds,
temperature=eval_temperature,
top_n=eval_top_n,
reference_answer=reference_answer,
generation_mode="plain_llm",
)
if baseline_eval_obj is not None:
baseline_compact = _build_compact_eval_report(baseline_eval_obj)
baseline_compact["qa"] = {
"query": query,
"character": answerer,
"reference_answer": reference_answer,
"answer": baseline["answer"],
}
baseline_compact["qa_metrics"] = {
"generation_latency_sec": baseline["generation_latency_sec"],
"token_usage": baseline_usage_qa_only,
"token_total": int(baseline_usage_qa_only.get("total_tokens", 0)),
"rag_internal_token_usage": baseline_usage_rag_internal,
"rag_internal_token_total": int(baseline_usage_rag_internal.get("total_tokens", 0)),
}
baseline_compact["evidence_counts"] = {
"facts": len(qa.evidence.get("facts", [])),
"persona": len(qa.evidence.get("persona", [])),
"worldview": len(qa.evidence.get("worldview", [])),
}
_write_json(case_dir / "eval_baseline_compact.json", baseline_compact)
if keep_eval_full:
_write_json(case_dir / "eval_baseline_full.json", baseline_eval_obj)
pairwise_obj = None
if eval_mode == "llm" and compact is not None and baseline_compact is not None:
pairwise_obj = compare_responses_pairwise_llm(
query=query,
answer_a=qa.answer,
answer_b=baseline["answer"],
evidence=qa.evidence,
cfg=cfg,
character=answerer,
label_a="ficrag",
label_b="plain_llm",
rounds=max(1, min(3, eval_rounds)),
temperature=eval_temperature,
top_n=eval_top_n,
reference_answer=reference_answer,
)
if keep_eval_full:
_write_json(case_dir / "compare_pairwise_full.json", pairwise_obj)
compare_compact = _build_compare_compact(
case_key=case_dir.name,
query=query,
answerer=answerer,
ficrag_eval_compact=compact,
baseline_eval_compact=baseline_compact,
ficrag_token_total=ficrag_token_total,
baseline_token_total=int(baseline_usage_qa_only.get("total_tokens", 0)),
ficrag_latency_sec=generation_latency_sec,
baseline_latency_sec=float(baseline["generation_latency_sec"]),
pairwise_obj=pairwise_obj,
)
_write_json(case_dir / "compare_compact.json", compare_compact)
else:
compare_compact = None
ablation_payload = None
ablation_token_usage = new_token_usage()
ablation_token_usage_qa_only = new_token_usage()
ablation_token_usage_rag_internal = new_token_usage()
ablation_latency_total = 0.0
if with_ablation_experiments:
ablation_results: Dict[str, Dict] = {}
for ablation_name, lanes, ablation_mode in ABLATION_SPECS:
ab_t0 = time.perf_counter()
if ablation_mode == "single_pass_rag":
ab_out = run_tri_retrieve_and_compose(
query=query,
cfg=cfg,
character=answerer,
style_correct=style_correct,
active_lanes=lanes,
)
ab_answer = ab_out.answer
ab_evidence = ab_out.evidence
ab_trace = [
{
"iteration": 1,
"probe": query,
"mode": "single_pass_rag",
"sufficient": True,
"confidence": 1.0,
}
]
ab_token_usage_raw = ab_out.token_usage
else:
ab_qa = run_meta_cognitive_qa(
query=query,
cfg=cfg,
character=answerer,
style_correct=style_correct,
max_iterations=max_iter,
active_lanes=lanes,
)
ab_answer = ab_qa.answer
ab_evidence = ab_qa.evidence
ab_trace = [s.__dict__ for s in ab_qa.trace]
ab_token_usage_raw = ab_qa.token_usage
ab_latency = round(max(0.0, time.perf_counter() - ab_t0), 3)
ablation_latency_total += ab_latency
merge_token_usage(ablation_token_usage, ab_token_usage_raw)
ab_usage_qa_only, ab_usage_rag_internal = _split_qa_token_usage(ab_token_usage_raw)
merge_token_usage(ablation_token_usage_qa_only, ab_usage_qa_only)
merge_token_usage(ablation_token_usage_rag_internal, ab_usage_rag_internal)
ab_token_total = int(ab_usage_qa_only.get("total_tokens", 0))
ab_rag_internal_token_total = int(ab_usage_rag_internal.get("total_tokens", 0))
ab_qa_obj = {
"query": query,
"character": answerer,
"active_lanes": lanes,
"ablation_mode": ablation_mode,
"answer": ab_answer,
"trace": ab_trace,
"evidence": ab_evidence,
"token_usage": ab_token_usage_raw,
"token_usage_qa_only": ab_usage_qa_only,
"token_usage_rag_internal": ab_usage_rag_internal,
"generation_latency_sec": ab_latency,
}
ab_qa_path = case_dir / f"qa_{ablation_name}.json"
_write_json(ab_qa_path, ab_qa_obj)
ab_eval_obj = None
if eval_mode == "proxy":
ab_eval_obj = {
"mode": "proxy",
"scores": score_response_proxy(
ab_answer,
ab_evidence,
character=answerer,
processed_dir=str(processed_dir),
),
}
elif eval_mode == "llm":
ab_eval_obj = score_response_llm(
query=query,
response=ab_answer,
evidence=ab_evidence,
cfg=cfg,
character=answerer,
rounds=eval_rounds,
temperature=eval_temperature,
top_n=eval_top_n,
reference_answer=reference_answer,
generation_mode=(
"ablation_single_pass_rag"
if ablation_mode == "single_pass_rag"
else "ablation_meta"
),
)
ab_compact = None
ab_eval_path = None
if ab_eval_obj is not None:
ab_compact = _build_compact_eval_report(ab_eval_obj)
ab_compact["qa"] = {
"query": query,
"character": answerer,
"reference_answer": reference_answer,
"answer": ab_answer,
"active_lanes": lanes,
"ablation_mode": ablation_mode,
}
ab_compact["qa_metrics"] = {
"generation_latency_sec": ab_latency,
"token_usage": ab_usage_qa_only,
"token_total": ab_token_total,
"rag_internal_token_usage": ab_usage_rag_internal,
"rag_internal_token_total": ab_rag_internal_token_total,
}
ab_compact["evidence_counts"] = {
"facts": len(ab_evidence.get("facts", [])),
"persona": len(ab_evidence.get("persona", [])),
"worldview": len(ab_evidence.get("worldview", [])),
}
ab_eval_path = case_dir / f"eval_{ablation_name}_compact.json"
_write_json(ab_eval_path, ab_compact)
if keep_eval_full:
_write_json(case_dir / f"eval_{ablation_name}_full.json", ab_eval_obj)
ablation_results[ablation_name] = {
"active_lanes": lanes,
"ablation_mode": ablation_mode,
"qa_path": str(ab_qa_path),
"eval_path": str(ab_eval_path) if ab_eval_path is not None else None,
"scores": (ab_compact or {}).get("scores"),
"overall_100": (ab_compact or {}).get("overall_100"),
"same_character": (ab_compact or {}).get("same_character"),
"token_total": ab_token_total,
"rag_internal_token_total": ab_rag_internal_token_total,
"generation_latency_sec": ab_latency,
"answer_preview": ab_answer[:160],
}
ablation_payload = {
"query": query,
"character": answerer,
"ablations": ablation_results,
}
_write_json(case_dir / "ablation_compact.json", ablation_payload)
return {
"qa_path": str(case_dir / "qa_full.json"),
"eval_path": str(case_dir / "eval_compact.json") if eval_obj is not None else None,
"qa_baseline_path": str(case_dir / "qa_baseline.json") if baseline is not None else None,
"eval_baseline_path": str(case_dir / "eval_baseline_compact.json") if baseline_eval_obj is not None else None,
"compare_path": str(case_dir / "compare_compact.json") if compare_compact is not None else None,
"answer_preview": qa.answer[:160],
"token_usage": qa_usage_qa_only,
"token_total": ficrag_token_total,
"rag_internal_token_usage": qa_usage_rag_internal,
"rag_internal_token_total": ficrag_rag_internal_token_total,
"generation_latency_sec": generation_latency_sec,
"baseline_token_usage": (
baseline_usage_qa_only if baseline is not None else None
),
"baseline_token_total": (
int(baseline_usage_qa_only.get("total_tokens", 0))
if baseline is not None
else 0
),
"baseline_rag_internal_token_usage": (
baseline_usage_rag_internal if baseline is not None else None
),
"baseline_rag_internal_token_total": (
int(baseline_usage_rag_internal.get("total_tokens", 0))
if baseline is not None
else 0
),
"baseline_generation_latency_sec": float(baseline["generation_latency_sec"]) if baseline is not None else 0.0,
"compare_winner_overall": compare_compact.get("winner_overall") if isinstance(compare_compact, dict) else None,
"compare_winner_quality": compare_compact.get("winner_quality") if isinstance(compare_compact, dict) else None,
"compare_winner_pairwise": compare_compact.get("winner_pairwise") if isinstance(compare_compact, dict) else None,
"compare_overall_delta": (
compare_compact.get("delta", {}).get("overall")
if isinstance(compare_compact, dict)
else None
),
"ablation_path": str(case_dir / "ablation_compact.json") if ablation_payload is not None else None,
"ablation_count": len((ablation_payload or {}).get("ablations", {})),
"ablation_token_usage": ablation_token_usage_qa_only if ablation_payload is not None else None,
"ablation_token_total": (
int(ablation_token_usage_qa_only.get("total_tokens", 0))
if ablation_payload is not None
else 0
),
"ablation_rag_internal_token_usage": (
ablation_token_usage_rag_internal if ablation_payload is not None else None
),
"ablation_rag_internal_token_total": (
int(ablation_token_usage_rag_internal.get("total_tokens", 0))
if ablation_payload is not None
else 0
),
"ablation_generation_latency_sec_total": round(ablation_latency_total, 3) if ablation_payload is not None else 0.0,
"ablation_overall_scores": {
k: v.get("overall_100")
for k, v in ((ablation_payload or {}).get("ablations", {})).items()
} if ablation_payload is not None else {},
"evidence_counts": {
"facts": len(qa.evidence.get("facts", [])),
"persona": len(qa.evidence.get("persona", [])),
"worldview": len(qa.evidence.get("worldview", [])),
},
"eval_summary": compact,
**stats,
}
def main() -> None:
    """CLI entry point: run scene-level roleplay experiments over one ficset book.

    Workflow:
      1) Build the scene timeline and select two-person dialogue scenes as cases.
      2) Per case, use all prior scenes as the retrieval corpus, take the first
         speaker's line as the query, and answer as the other character.
      3) Optionally evaluate (proxy/LLM), run a plain-LLM baseline, and run
         retrieval-layer ablations.

    Writes per-case artifacts under <run_dir>/cases/, plus an aggregate
    summary.json and cases.jsonl in the run directory.
    """
    parser = argparse.ArgumentParser(description="Run scene-level experiments on ficset dataset")
    parser.add_argument("--book", default="105_Persuasion", help="Book id under data/raw/dataset/ficset/")
    parser.add_argument("--dataset-root", default="data/raw/dataset", help="Dataset root")
    parser.add_argument("--output-root", default="outputs/experiments", help="Experiment output root")
    parser.add_argument("--run-name", default=None, help="Optional run directory name")
    parser.add_argument("--start-case", type=int, default=0, help="0-based index in target true-scene list")
    parser.add_argument("--max-cases", type=int, default=None, help="Maximum number of target cases to run")
    parser.add_argument("--style-correct", action="store_true", help="Apply style correction in final generation")
    parser.add_argument("--max-iter", type=int, default=None, help="Override meta loop max iterations")
    parser.add_argument("--eval-mode", choices=["none", "proxy", "llm"], default="llm", help="Post-answer evaluation mode (default: llm)")
    parser.add_argument("--keep-eval-full", action="store_true", help="Also save full verbose eval json in addition to compact output")
    parser.add_argument("--eval-rounds", type=int, default=3, help="LLM eval rounds if eval-mode=llm")
    parser.add_argument("--eval-temperature", type=float, default=0.2, help="LLM eval temperature")
    parser.add_argument("--eval-top-n", type=int, default=6, help="Evidence top-n for LLM eval prompt")
    parser.add_argument(
        "--with-plain-llm-baseline",
        action="store_true",
        help="Also run a direct plain-LLM baseline answer for comparison.",
    )
    parser.add_argument("--baseline-model", default=None, help="Optional model override for plain LLM baseline")
    parser.add_argument(
        "--with-ablation-experiments",
        action="store_true",
        # Fixed: help text now matches ABLATION_SPECS (it previously claimed a
        # non-existent "L2+L3" ablation; the third spec is L1-only single-pass RAG).
        help="Run retrieval-layer ablations: L1 only (meta), L1+L2 (meta), L1 only (single-pass RAG).",
    )
    parser.add_argument("--dry-run", action="store_true", help="Only build case list; do not call model APIs")
    parser.add_argument("--no-progress", action="store_true", help="Disable progress bar output")
    args = parser.parse_args()
    if not args.dry_run:
        _validate_runtime_requirements()

    # Resolve dataset/output layout relative to the repository root.
    project_root = Path(__file__).resolve().parents[1]
    dataset_root = (project_root / args.dataset_root).resolve()
    book = args.book
    book_dir = dataset_root / "ficset" / book
    scene_text_dir = dataset_root / "ficset_scene_texts" / book
    characters_path = dataset_root / "ficset_characters" / f"{book}.json"
    if not book_dir.exists():
        raise FileNotFoundError(f"Book directory not found: {book_dir}")
    if not scene_text_dir.exists():
        raise FileNotFoundError(f"Scene text directory not found: {scene_text_dir}")
    run_name = args.run_name or f"{book}_{time.strftime('%Y%m%d_%H%M%S')}"
    run_dir = (project_root / args.output_root / run_name).resolve()
    cases_dir = run_dir / "cases"
    run_outputs_dir = run_dir / "qa_outputs"
    run_dir.mkdir(parents=True, exist_ok=True)
    cases_dir.mkdir(parents=True, exist_ok=True)
    run_outputs_dir.mkdir(parents=True, exist_ok=True)

    character_candidates = _load_character_candidates(characters_path)
    timeline = _build_timeline(book_dir)
    # Only two-person dialogue scenes are valid targets for the protocol.
    targets = [x for x in timeline if bool(x.scene.get("is_two_person_dialogue_scene", False))]
    selected = targets[args.start_case :]
    if args.max_cases is not None:
        selected = selected[: max(0, args.max_cases)]
    total_cases = len(selected)
    summary_rows: List[Dict] = []
    start_ts = time.time()

    def _emit_progress(done: int, case_key: str, status: str) -> None:
        # Single choke point for progress output; ok/failed/skipped tallies are
        # recomputed from summary_rows so every call site reports consistently.
        if args.no_progress:
            return
        _print_progress(
            done=done,
            total=total_cases,
            ok=sum(1 for r in summary_rows if r["status"] == "ok"),
            failed=sum(1 for r in summary_rows if r["status"] == "failed"),
            skipped=sum(1 for r in summary_rows if r["status"] == "skipped"),
            case_key=case_key,
            status=status,
            start_ts=start_ts,
        )

    if total_cases:
        _emit_progress(done=0, case_key="(start)", status="pending")

    for i, target in enumerate(selected):
        case_idx = args.start_case + i
        case_key = f"case_{case_idx:04d}_ch{target.chapter_num:03d}_sc{target.scene_id:03d}"
        case_dir = cases_dir / case_key
        case_dir.mkdir(parents=True, exist_ok=True)
        asker, query = _extract_first_speaker_query(target.scene)
        answerer = _pick_other_character(
            target.scene,
            asker,
            character_candidates=character_candidates,
        )
        reference_answer = _extract_reference_answer(
            target.scene,
            asker=asker,
            answerer=answerer,
        )
        # All scenes strictly before this one form the retrieval corpus.
        prior = [s for s in timeline if s.global_idx < target.global_idx]
        knowledge_text = _build_knowledge_text(prior_scenes=prior, scene_text_dir=scene_text_dir)
        base_row = {
            "case_key": case_key,
            "book": book,
            "chapter_num": target.chapter_num,
            "scene_id": target.scene_id,
            "scene_global_idx": target.global_idx,
            "asker": asker,
            "answerer": answerer,
            "query": query,
            "reference_answer": reference_answer,
            "prior_scene_count": len(prior),
            "knowledge_chars": len(knowledge_text),
            "status": "planned" if args.dry_run else "pending",
        }

        # Skip cases that cannot form a valid query/answer pair; the reason is
        # recorded so downstream analysis can distinguish skip causes.
        skip_reason = None
        if not query:
            skip_reason = "missing_first_speaker_utterance"
        elif not answerer:
            skip_reason = "missing_other_character"
        elif not knowledge_text:
            skip_reason = "empty_prior_knowledge"
        if skip_reason is not None:
            base_row["status"] = "skipped"
            base_row["reason"] = skip_reason
            _write_json(case_dir / "case_summary.json", base_row)
            summary_rows.append(base_row)
            _emit_progress(done=i + 1, case_key=case_key, status=base_row["status"])
            continue
        if args.dry_run:
            _write_json(case_dir / "case_summary.json", base_row)
            summary_rows.append(base_row)
            _emit_progress(done=i + 1, case_key=case_key, status=base_row["status"])
            continue

        try:
            # Save the exact knowledge corpus used for traceability.
            (case_dir / "knowledge.txt").write_text(knowledge_text, encoding="utf-8")
            run_data = _run_one_case(
                case_dir=case_dir,
                run_outputs_dir=run_outputs_dir,
                book=book,
                knowledge_text=knowledge_text,
                query=query,
                answerer=answerer,
                reference_answer=reference_answer,
                character_candidates=character_candidates,
                prior_scenes=prior,
                style_correct=args.style_correct,
                max_iter=args.max_iter,
                eval_mode=args.eval_mode,
                keep_eval_full=args.keep_eval_full,
                eval_rounds=args.eval_rounds,
                eval_temperature=args.eval_temperature,
                eval_top_n=args.eval_top_n,
                with_plain_llm_baseline=args.with_plain_llm_baseline,
                baseline_model=args.baseline_model,
                with_ablation_experiments=args.with_ablation_experiments,
            )
            base_row.update(run_data)
            base_row["status"] = "ok"
        except Exception as e:  # Keep the batch running on single-case failures.
            base_row["status"] = "failed"
            base_row["error"] = str(e)
        _write_json(case_dir / "case_summary.json", base_row)
        summary_rows.append(base_row)
        _emit_progress(done=i + 1, case_key=case_key, status=base_row["status"])

    summary = {
        "book": book,
        "dataset_root": str(dataset_root),
        "run_dir": str(run_dir),
        "dry_run": bool(args.dry_run),
        "target_true_scene_count": len(targets),
        "selected_case_count": len(selected),
        "result_counts": {
            "ok": sum(1 for r in summary_rows if r["status"] == "ok"),
            "failed": sum(1 for r in summary_rows if r["status"] == "failed"),
            "skipped": sum(1 for r in summary_rows if r["status"] == "skipped"),
            "planned": sum(1 for r in summary_rows if r["status"] == "planned"),
        },
        "token_usage_totals": _build_run_token_totals(summary_rows),
        "rag_internal_token_usage_totals": _build_run_token_totals(summary_rows, key="rag_internal_token_usage"),
        "generation_latency_stats": _build_run_latency_stats(summary_rows),
        "plain_llm_baseline_enabled": bool(args.with_plain_llm_baseline),
        "baseline_token_usage_totals": _build_run_token_totals(summary_rows, key="baseline_token_usage"),
        "baseline_rag_internal_token_usage_totals": _build_run_token_totals(
            summary_rows, key="baseline_rag_internal_token_usage"
        ),
        "baseline_generation_latency_stats": _build_run_latency_stats(
            summary_rows, key="baseline_generation_latency_sec"
        ),
        "compare_win_counts": _build_compare_win_counts(summary_rows, winner_key="compare_winner_overall"),
        "compare_win_counts_quality": _build_compare_win_counts(summary_rows, winner_key="compare_winner_quality"),
        "compare_win_counts_pairwise": _build_compare_win_counts(summary_rows, winner_key="compare_winner_pairwise"),
        "ablation_enabled": bool(args.with_ablation_experiments),
        "ablation_token_usage_totals": _build_run_token_totals(summary_rows, key="ablation_token_usage"),
        "ablation_rag_internal_token_usage_totals": _build_run_token_totals(
            summary_rows, key="ablation_rag_internal_token_usage"
        ),
        "ablation_generation_latency_stats": _build_run_latency_stats(
            summary_rows, key="ablation_generation_latency_sec_total"
        ),
        "cases": summary_rows,
    }
    _write_json(run_dir / "summary.json", summary)
    # cases.jsonl mirrors summary["cases"] one row per line for easy streaming.
    with open(run_dir / "cases.jsonl", "w", encoding="utf-8") as f:
        for row in summary_rows:
            f.write(json.dumps(row, ensure_ascii=False) + "\n")
    print(json.dumps(summary["result_counts"], ensure_ascii=False))
    print(f"Saved summary to {run_dir / 'summary.json'}")
# Standard script entry-point guard: run the experiment CLI when executed directly.
if __name__ == "__main__":
    main()