# NOTE(review): removed GitHub web-page artifacts (username caption, commit
# message, commit hash) that were pasted above the module docstring and made
# the file unparseable.
"""Response composition with fact-first persona-biased fusion."""
from __future__ import annotations
from dataclasses import dataclass
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence
from ..config import RuntimeConfig
from ..retrieval.retriever import tri_retrieve
from ..utils.retry import retry_call
from .token_usage import new_token_usage, record_token_usage
@dataclass
class FusionResult:
    """Result bundle produced by run_tri_retrieve_and_compose."""
    # Final answer text (the style-corrected version when style_correct=True).
    answer: str
    # Fact-first draft produced before any style rewrite.
    draft: str
    # Tri-lane retrieval output; prompt building reads the keys
    # "facts", "worldview", "persona", and "query_plan".
    evidence: Dict[str, object]
    # Token accounting dict seeded by new_token_usage() and filled per LLM stage.
    token_usage: Dict[str, Any]
def _load_persona_profile(cfg: RuntimeConfig, character: Optional[str]) -> Dict:
if not character:
return {}
safe = character.replace("/", "_")
profile_path = Path(cfg.data_processed_dir) / f"persona_{safe}.json"
if not profile_path.exists():
return {}
try:
with open(profile_path, "r", encoding="utf-8") as f:
return json.load(f)
except Exception:
return {}
def _format_lane_rows(rows: List[Dict], lane_tag: str, max_rows: int = 6) -> str:
if not rows:
return "(none)"
lines: List[str] = []
for i, row in enumerate(rows[:max_rows], start=1):
text = str(row.get("text", "")).strip().replace("\n", " ")
score = float(row.get("score", 0.0))
extra = ""
if lane_tag == "P":
speaker = row.get("speaker")
extra = f" speaker={speaker}" if speaker else ""
if lane_tag == "W":
t = row.get("type")
ent = row.get("entity")
parts = []
if t:
parts.append(f"type={t}")
if ent:
parts.append(f"entity={ent}")
extra = (" " + " ".join(parts)) if parts else ""
lines.append(f"[{lane_tag}{i}] score={score:.3f}{extra} text={text}")
return "\n".join(lines)
def _build_fusion_prompts(
    query: str,
    evidence: Dict[str, object],
    character: Optional[str],
    persona_profile: Dict,
) -> tuple[str, str]:
    """Build the (system, user) prompt pair for the fact-first fusion draft.

    The system prompt fixes the priority order (facts > worldview > persona
    style); the user prompt lays out the query, persona cues, and the three
    evidence lanes rendered via _format_lane_rows.
    """
    fact_rows = evidence.get("facts", [])
    persona_rows = evidence.get("persona", [])
    world_rows = evidence.get("worldview", [])
    plan = evidence.get("query_plan", {})

    markers = persona_profile.get("style_markers", [])
    rules = persona_profile.get("speaking_rules", [])
    values = persona_profile.get("values", [])

    system_prompt = "\n".join(
        (
            "You are a retrieval-grounded fiction dialogue assistant.",
            "Priority policy:",
            "1) Facts evidence is highest priority.",
            "2) Worldview evidence is second priority.",
            "3) Persona style is third priority.",
            "If style conflicts with facts/worldview, keep facts/worldview.",
            "Do not invent unsupported facts.",
            "When possible, cite evidence IDs like [F1], [W2], [P1].",
        )
    )

    marker_text = ", ".join(markers) if markers else "(none)"
    rule_text = "; ".join(rules) if rules else "(none)"
    value_text = ", ".join(values) if values else "(none)"

    user_prompt = "".join(
        (
            f"User query:\n{query}\n\n",
            f"Character:\n{character or 'Unknown'}\n\n",
            f"Query decomposition:\n{json.dumps(plan, ensure_ascii=False)}\n\n",
            f"Character style markers:\n{marker_text}\n",
            f"Character speaking rules:\n{rule_text}\n",
            f"Character values:\n{value_text}\n\n",
            f"Facts evidence:\n{_format_lane_rows(fact_rows, 'F')}\n\n",
            f"Worldview evidence:\n{_format_lane_rows(world_rows, 'W')}\n\n",
            f"Persona evidence:\n{_format_lane_rows(persona_rows, 'P')}\n\n",
            "Generate a concise in-character answer with factual consistency.",
        )
    )
    return system_prompt, user_prompt
def _build_style_correction_prompts(
draft: str,
character: Optional[str],
persona_profile: Dict,
) -> tuple[str, str]:
style_markers = persona_profile.get("style_markers", [])
speaking_rules = persona_profile.get("speaking_rules", [])
examples = persona_profile.get("examples", [])
system_prompt = (
"You are a style editor for fictional roleplay.\n"
"Rewrite only style/tone/wording to match the target character.\n"
"Do NOT change factual content or claims.\n"
"Do NOT add new facts."
)
user_prompt = (
f"Character: {character or 'Unknown'}\n"
f"Style markers: {', '.join(style_markers) if style_markers else '(none)'}\n"
f"Speaking rules: {'; '.join(speaking_rules) if speaking_rules else '(none)'}\n"
f"Example lines: {' | '.join(examples[:3]) if examples else '(none)'}\n\n"
f"Draft answer:\n{draft}\n\n"
"Return revised answer only."
)
return system_prompt, user_prompt
def _heuristic_fallback_answer(query: str, evidence: Dict[str, object], character: Optional[str]) -> str:
facts = evidence.get("facts", [])[:3]
worldview = evidence.get("worldview", [])[:2]
persona = evidence.get("persona", [])[:2]
lines = [f"Q: {query}", ""]
if character:
lines.append(f"Character: {character}")
lines.append("Fact-grounded points:")
if facts:
for row in facts:
lines.append(f"- {row.get('text', '')}")
else:
lines.append("- (no strong fact evidence found)")
lines.append("World consistency notes:")
if worldview:
for row in worldview:
lines.append(f"- {row.get('text', '')}")
else:
lines.append("- (no worldview constraints retrieved)")
lines.append("Persona cues:")
if persona:
for row in persona:
lines.append(f"- {row.get('text', '')}")
else:
lines.append("- (no persona cue retrieved)")
return "\n".join(lines)
def _chat_completion_with_retry(
    client: Any,
    cfg: RuntimeConfig,
    system_prompt: str,
    user_prompt: str,
    *,
    temperature: float,
    retry_stage: str,
    usage_stage: str,
    token_usage: Optional[Dict[str, Any]],
) -> str:
    """Run one chat-completion call with retry/backoff; return stripped text.

    Retries are logged with *retry_stage* in the message; token usage is
    recorded under *usage_stage* when *token_usage* is provided. Returns ""
    when the model yields no content.
    """

    def _call():
        return client.chat.completions.create(
            model=cfg.llm_model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=temperature,
            max_tokens=700,
        )

    def _on_retry(attempt: int, err: Exception, delay: float) -> None:
        print(
            f"[compose][retry] stage={retry_stage} attempt={attempt + 1}/{max(1, int(cfg.api_retry_attempts))} sleep={delay:.1f}s err={err}",
            flush=True,
        )

    resp = retry_call(
        _call,
        max_attempts=max(1, int(cfg.api_retry_attempts)),
        base_delay_sec=float(cfg.api_retry_base_delay_sec),
        max_delay_sec=float(cfg.api_retry_max_delay_sec),
        jitter_sec=float(cfg.api_retry_jitter_sec),
        on_retry=_on_retry,
    )
    if token_usage is not None:
        record_token_usage(
            token_usage,
            response=resp,
            stage=usage_stage,
            model=cfg.llm_model,
        )
    return (resp.choices[0].message.content or "").strip()


def compose_response(
    query: str,
    evidence: Dict[str, object],
    cfg: RuntimeConfig,
    character: Optional[str] = None,
    style_correct: bool = False,
    token_usage: Optional[Dict[str, Any]] = None,
) -> str:
    """Compose a fact-first, persona-biased answer from tri-lane evidence.

    Falls back to a deterministic heuristic summary when no LLM API key is
    configured, or when the model returns an empty draft. When *style_correct*
    is true, a second LLM pass rewrites style/tone only; if that rewrite comes
    back empty the draft is kept.

    Raises ImportError when an API key is configured but the ``openai``
    package is not installed.
    """
    profile = _load_persona_profile(cfg, character)
    if not cfg.llm_api_key:
        return _heuristic_fallback_answer(query=query, evidence=evidence, character=character)
    try:
        from openai import OpenAI  # type: ignore
    except Exception as e:
        raise ImportError(
            "openai package is required for LLM composition. Install dependencies first."
        ) from e
    client = OpenAI(base_url=cfg.llm_base_url, api_key=cfg.llm_api_key)
    sys_prompt, user_prompt = _build_fusion_prompts(
        query=query,
        evidence=evidence,
        character=character,
        persona_profile=profile,
    )
    # Stage 1: fact-grounded draft. Low temperature favors evidence fidelity.
    # NOTE: the retry log tag ("draft") differs from the usage-accounting tag
    # ("compose_draft") to preserve the original log/usage labels exactly.
    draft = _chat_completion_with_retry(
        client,
        cfg,
        sys_prompt,
        user_prompt,
        temperature=0.2,
        retry_stage="draft",
        usage_stage="compose_draft",
        token_usage=token_usage,
    )
    if not draft:
        return _heuristic_fallback_answer(query=query, evidence=evidence, character=character)
    if not style_correct:
        return draft
    # Stage 2: optional style-only rewrite; factual content must not change.
    style_sys, style_user = _build_style_correction_prompts(
        draft=draft,
        character=character,
        persona_profile=profile,
    )
    revised = _chat_completion_with_retry(
        client,
        cfg,
        style_sys,
        style_user,
        temperature=0.1,
        retry_stage="style_rewrite",
        usage_stage="style_rewrite",
        token_usage=token_usage,
    )
    return revised or draft
def run_tri_retrieve_and_compose(
    query: str,
    cfg: RuntimeConfig,
    character: Optional[str] = None,
    style_correct: bool = False,
    active_lanes: Optional[Sequence[str]] = None,
) -> FusionResult:
    """Retrieve tri-lane evidence, then compose (and optionally restyle) an answer.

    Always produces a plain draft first; when *style_correct* is true a second
    compose_response call produces the style-corrected final answer. Both calls
    share one token-usage accumulator, which is returned in the result.
    """
    usage: Dict[str, Any] = new_token_usage()
    evidence = tri_retrieve(query=query, cfg=cfg, character=character, active_lanes=active_lanes)
    draft = compose_response(
        query=query,
        evidence=evidence,
        cfg=cfg,
        character=character,
        style_correct=False,
        token_usage=usage,
    )
    if style_correct:
        final_answer = compose_response(
            query=query,
            evidence=evidence,
            cfg=cfg,
            character=character,
            style_correct=True,
            token_usage=usage,
        )
    else:
        final_answer = draft
    return FusionResult(answer=final_answer, draft=draft, evidence=evidence, token_usage=usage)