# Provenance note (was a web-scrape artifact): uploader "t1eautomat",
# commit "update latest code and outputs", hash 9b7e0a7.
"""Meta-cognitive QA loop: retrieve -> assess -> probe -> retrieve -> compose."""
from __future__ import annotations
from dataclasses import dataclass, field
import json
from typing import Any, Dict, List, Optional, Sequence
from ..config import RuntimeConfig
from ..retrieval.retriever import decompose_query, tri_retrieve
from ..utils.retry import retry_call
from .compose import compose_response
from .token_usage import new_token_usage, record_token_usage
@dataclass
class LoopStep:
    """Trace record for one iteration of the retrieve -> assess -> probe loop."""

    iteration: int  # 1-based iteration number within the loop
    probe: str  # retrieval query issued this iteration
    sufficient: bool  # judge verdict: evidence pool deemed sufficient to answer
    confidence: float  # judge confidence (heuristic judge emits 0.75 / 0.35)
    missing: List[str] = field(default_factory=list)  # short descriptions of evidence gaps
    next_probe: Optional[str] = None  # judge-suggested follow-up query, if any
    evidence_counts: Dict[str, int] = field(default_factory=dict)  # rows per lane after merging
@dataclass
class MetaLoopResult:
    """Final output of run_meta_cognitive_qa."""

    answer: str  # composed natural-language answer
    evidence: Dict[str, object]  # merged evidence pool (facts/persona/worldview + metadata)
    trace: List[LoopStep]  # per-iteration loop trace
    token_usage: Dict[str, Any]  # accumulated LLM token accounting across all stages
def _safe_text(v: object) -> str:
return str(v or "").strip()
def _dedup_merge_rows(existing: List[Dict], new_rows: List[Dict], lane: str) -> List[Dict]:
    """Append *new_rows* to *existing*, skipping duplicates, sorted by score desc.

    A row's identity is the tuple (lane, id, text, speaker, entity) with each
    field normalized through _safe_text.
    """

    def identity(row: Dict) -> tuple:
        # Normalized identity tuple used for duplicate detection.
        return (
            lane,
            _safe_text(row.get("id")),
            _safe_text(row.get("text")),
            _safe_text(row.get("speaker")),
            _safe_text(row.get("entity")),
        )

    merged = list(existing)
    seen = set(map(identity, existing))
    for row in new_rows:
        key = identity(row)
        if key not in seen:
            seen.add(key)
            merged.append(row)
    merged.sort(key=lambda row: float(row.get("score", 0.0)), reverse=True)
    return merged
def _merge_evidence_pool(pool: Dict[str, object], delta: Dict[str, object]) -> Dict[str, object]:
    """Fold one retrieval result (*delta*) into a copy of the evidence *pool*."""
    combined = dict(pool)
    for lane_name in ("facts", "persona", "worldview"):
        prior = list(combined.get(lane_name, []))
        fresh = list(delta.get(lane_name, []))
        combined[lane_name] = _dedup_merge_rows(existing=prior, new_rows=fresh, lane=lane_name)
    # Newest non-empty metadata wins; otherwise keep what the pool already had.
    combined["query_plan"] = delta.get("query_plan") or combined.get("query_plan") or {}
    combined["budgets"] = delta.get("budgets") or combined.get("budgets") or {}
    return combined
def _sample_rows(rows: List[Dict], top_n: int) -> List[Dict]:
    """Project the first *top_n* rows into compact judge-facing summaries."""
    return [
        {
            "rank": row.get("rank"),
            "score": row.get("score"),
            # Clip evidence text so the judge prompt stays small.
            "text": _safe_text(row.get("text"))[:260],
            "speaker": row.get("speaker"),
            "type": row.get("type"),
            "entity": row.get("entity"),
        }
        for row in rows[:top_n]
    ]
def _parse_json_object(text: str) -> Optional[Dict]:
if not text:
return None
try:
obj = json.loads(text)
if isinstance(obj, dict):
return obj
except json.JSONDecodeError:
pass
s = text.find("{")
e = text.rfind("}")
if s != -1 and e != -1 and e > s:
try:
obj = json.loads(text[s : e + 1])
if isinstance(obj, dict):
return obj
except json.JSONDecodeError:
return None
return None
def _heuristic_assess(pool: Dict[str, object], cfg: RuntimeConfig) -> Dict:
    """Back-compat wrapper: heuristic assessment with every lane active."""
    return _heuristic_assess_with_lanes(pool, cfg)
def _heuristic_assess_with_lanes(
pool: Dict[str, object],
cfg: RuntimeConfig,
active_lanes: Optional[Sequence[str]] = None,
) -> Dict:
facts = list(pool.get("facts", []))
persona = list(pool.get("persona", []))
worldview = list(pool.get("worldview", []))
enabled = {str(x).strip().lower() for x in (active_lanes or ["facts", "persona", "worldview"])}
fact_max = max((float(r.get("score", 0.0)) for r in facts), default=0.0)
checks: List[bool] = []
if "facts" in enabled:
checks.append(len(facts) >= cfg.tri_k_min and fact_max >= cfg.tri_tau_low)
if "worldview" in enabled:
checks.append(len(worldview) >= 1)
if "persona" in enabled:
checks.append(len(persona) >= 1)
sufficient = all(checks) if checks else True
missing: List[str] = []
if "facts" in enabled and len(facts) < cfg.tri_k_min:
missing.append("more veridical facts")
if "worldview" in enabled and len(worldview) < 1:
missing.append("world rules/setting evidence")
if "persona" in enabled and len(persona) < 1:
missing.append("character speaking evidence")
next_probe = None
if missing:
next_probe = "Focus on: " + "; ".join(missing)
return {
"sufficient": sufficient,
"confidence": 0.75 if sufficient else 0.35,
"missing": missing,
"next_probe": next_probe,
"need_narrative": False,
}
def _llm_assess(
    query: str,
    character: Optional[str],
    pool: Dict[str, object],
    cfg: RuntimeConfig,
    token_usage: Optional[Dict[str, Any]] = None,
    active_lanes: Optional[Sequence[str]] = None,
) -> Optional[Dict]:
    """Ask an LLM judge whether the evidence pool is sufficient.

    Returns a normalized verdict dict (sufficient, confidence, need_narrative,
    missing, next_probe) or None when the judge is unavailable or fails for
    any reason — callers are expected to fall back to the heuristic judge.
    """
    # Best-effort: no API key or no installed openai package means no LLM judge.
    if not cfg.llm_api_key:
        return None
    try:
        from openai import OpenAI  # type: ignore
    except Exception:
        return None
    client = OpenAI(base_url=cfg.llm_base_url, api_key=cfg.llm_api_key)
    # Prefer a dedicated judge model when configured, else the main chat model.
    model = cfg.meta_judge_model or cfg.llm_model
    enabled = [str(x).strip().lower() for x in (active_lanes or ["facts", "persona", "worldview"]) if str(x).strip()]
    # Only a truncated top-N sample per lane is shown to keep the prompt small.
    payload = {
        "query": query,
        "character": character,
        "active_lanes": enabled,
        "facts": _sample_rows(list(pool.get("facts", [])), cfg.meta_trace_top_n),
        "persona": _sample_rows(list(pool.get("persona", [])), cfg.meta_trace_top_n),
        "worldview": _sample_rows(list(pool.get("worldview", [])), cfg.meta_trace_top_n),
    }
    system_prompt = (
        "You are an evidence sufficiency judge for a fiction QA agent.\n"
        "Return JSON only with fields:\n"
        "sufficient (bool), confidence (0-1), need_narrative (bool),\n"
        "missing (array of short strings), next_probe (string).\n"
        "Only judge sufficiency for active_lanes in payload; ignore disabled lanes.\n"
        "If evidence is weak, provide a concrete next_probe query for retrieval.\n"
        "Keep next_probe under 20 words."
    )
    user_prompt = "Assess if evidence is sufficient.\n\n" + json.dumps(payload, ensure_ascii=False)
    try:
        def _call():
            # Single chat-completion request; retried by retry_call below.
            return client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                temperature=cfg.meta_judge_temperature,
                max_tokens=cfg.meta_new_probe_max_tokens,
            )
        def _on_retry(attempt: int, err: Exception, delay: float) -> None:
            # Progress log for retry attempts (attempt is 0-based from retry_call).
            print(
                f"[meta_assess][retry] attempt={attempt + 1}/{max(1, int(cfg.api_retry_attempts))} sleep={delay:.1f}s err={err}",
                flush=True,
            )
        resp = retry_call(
            _call,
            max_attempts=max(1, int(cfg.api_retry_attempts)),
            base_delay_sec=float(cfg.api_retry_base_delay_sec),
            max_delay_sec=float(cfg.api_retry_max_delay_sec),
            jitter_sec=float(cfg.api_retry_jitter_sec),
            on_retry=_on_retry,
        )
        # Record usage before parsing so accounting survives a parse failure.
        if token_usage is not None:
            record_token_usage(
                token_usage,
                response=resp,
                stage="meta_assess",
                model=model,
            )
        content = _safe_text(resp.choices[0].message.content)
        obj = _parse_json_object(content)
        if not obj:
            return None
        # Normalize field types so callers never see raw judge output.
        return {
            "sufficient": bool(obj.get("sufficient", False)),
            "confidence": float(obj.get("confidence", 0.0)),
            "need_narrative": bool(obj.get("need_narrative", False)),
            "missing": [str(x) for x in obj.get("missing", []) if str(x).strip()],
            "next_probe": _safe_text(obj.get("next_probe")) or None,
        }
    except Exception:
        # Deliberate best-effort swallow: any API/parse failure falls back
        # to the heuristic judge via the None return.
        return None
def _build_next_probe(
    query: str,
    assess: Dict,
    previous_probes: List[str],
) -> Optional[str]:
    """Pick the next retrieval probe, skipping case-insensitive repeats.

    Preference order: the judge's suggested next_probe, then a fallback built
    from the query plus up to two missing-evidence hints. Returns None when
    every candidate was already tried.
    """
    used = {p.lower() for p in previous_probes}
    suggested = _safe_text(assess.get("next_probe"))
    if suggested and suggested.lower() not in used:
        return suggested
    gaps = [str(item).strip() for item in assess.get("missing", []) if str(item).strip()]
    if gaps:
        candidate = f"{query}. Focus on {', '.join(gaps[:2])}."
        if candidate.lower() not in used:
            return candidate
    return None
def run_meta_cognitive_qa(
    query: str,
    cfg: RuntimeConfig,
    character: Optional[str] = None,
    style_correct: bool = False,
    max_iterations: Optional[int] = None,
    active_lanes: Optional[Sequence[str]] = None,
) -> MetaLoopResult:
    """Run the retrieve -> assess -> probe loop, then compose a final answer.

    Each iteration retrieves evidence for the current probe, merges it into
    the pool, and asks the LLM judge (heuristic fallback) whether the pool is
    sufficient. The loop stops on sufficiency, probe exhaustion, or reaching
    the iteration cap; compose_response then produces the answer.
    """
    iteration_cap = int(max_iterations or cfg.meta_max_iterations)
    pool: Dict[str, object] = {"facts": [], "persona": [], "worldview": [], "query_plan": {}}
    trace: List[LoopStep] = []
    probes: List[str] = [query]
    usage: Dict[str, Any] = new_token_usage()
    lanes = [str(x).strip().lower() for x in (active_lanes or ["facts", "persona", "worldview"]) if str(x).strip()]
    if not lanes:
        lanes = ["facts", "persona", "worldview"]
    for iteration in range(1, iteration_cap + 1):
        current_probe = probes[-1]
        delta = tri_retrieve(query=current_probe, cfg=cfg, character=character, active_lanes=lanes)
        pool = _merge_evidence_pool(pool, delta)
        verdict = _llm_assess(
            query=query,
            character=character,
            pool=pool,
            cfg=cfg,
            token_usage=usage,
            active_lanes=lanes,
        )
        if verdict is None:
            # LLM judge unavailable or failed: fall back to the rule-based judge.
            verdict = _heuristic_assess_with_lanes(pool=pool, cfg=cfg, active_lanes=lanes)
        step = LoopStep(
            iteration=iteration,
            probe=current_probe,
            sufficient=bool(verdict.get("sufficient", False)),
            confidence=float(verdict.get("confidence", 0.0)),
            missing=[str(m) for m in verdict.get("missing", [])],
            next_probe=_safe_text(verdict.get("next_probe")) or None,
            evidence_counts={
                lane: len(list(pool.get(lane, [])))
                for lane in ("facts", "persona", "worldview")
            },
        )
        trace.append(step)
        if step.sufficient:
            break
        follow_up = _build_next_probe(query=query, assess=verdict, previous_probes=probes)
        if not follow_up:
            break
        probes.append(follow_up)
    # Keep decomposition aligned with original user query.
    pool["query_plan"] = decompose_query(query=query, character=character)
    pool["active_lanes"] = lanes
    answer = compose_response(
        query=query,
        evidence=pool,
        cfg=cfg,
        character=character,
        style_correct=style_correct,
        token_usage=usage,
    )
    return MetaLoopResult(answer=answer, evidence=pool, trace=trace, token_usage=usage)