File size: 2,810 Bytes
15c3265
 
 
 
 
 
 
9b7e0a7
15c3265
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b7e0a7
15c3265
 
 
 
 
 
 
9b7e0a7
15c3265
 
 
9b7e0a7
 
 
 
 
 
 
15c3265
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b7e0a7
 
15c3265
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
"""Run meta-cognitive QA loop end-to-end."""

from __future__ import annotations

import argparse
import json
from pathlib import Path
import time

from fic_agent.config import RuntimeConfig
from fic_agent.generation.meta_loop import run_meta_cognitive_qa


def main() -> None:
    """CLI entry point for the meta-cognitive QA loop.

    Parses command-line flags, runs the loop once, prints the answer plus
    token/latency stats, and optionally dumps the trace, the evidence pool,
    or a JSON artifact of the whole run.
    """
    parser = argparse.ArgumentParser(description="fic-agent meta-cognitive QA")
    add = parser.add_argument
    add("--query", required=True, help="User question")
    add("--character", default=None, help="Target character")
    add("--max-iter", type=int, default=None, help="Max retrieval loops")
    add("--style-correct", action="store_true", help="Apply style-only rewrite pass")
    add("--dump-evidence", action="store_true", help="Print merged evidence pool")
    add("--dump-trace", action="store_true", help="Print loop trace")
    add("--save-json", default=None, help="Optional output json path")
    args = parser.parse_args()

    # Build the config before starting the clock so only generation is timed.
    cfg = RuntimeConfig()
    started = time.perf_counter()
    result = run_meta_cognitive_qa(
        query=args.query,
        cfg=cfg,
        character=args.character,
        style_correct=args.style_correct,
        max_iterations=args.max_iter,
    )
    # Clamp at zero to guard against clock oddities; round for display/output.
    elapsed = round(max(0.0, time.perf_counter() - started), 3)

    usage = result.token_usage
    print("=== FINAL ANSWER ===")
    print(result.answer)
    print(
        f"\n=== TOKEN USAGE === total={usage.get('total_tokens', 0)} "
        f"(prompt={usage.get('prompt_tokens', 0)}, "
        f"completion={usage.get('completion_tokens', 0)}, "
        f"calls={usage.get('calls', 0)})"
    )
    print(f"=== GENERATION LATENCY === {elapsed:.3f}s")

    if args.dump_trace:
        print("\n=== LOOP TRACE ===")
        for step in result.trace:
            summary = (
                f"iter={step.iteration} sufficient={step.sufficient} conf={step.confidence:.2f} "
                f"probe={step.probe!r} missing={step.missing}"
            )
            print(summary)

    if args.dump_evidence:
        print("\n=== EVIDENCE POOL ===")
        print(json.dumps(result.evidence, ensure_ascii=False, indent=2))

    if args.save_json:
        # Serialize trace steps via __dict__ — assumes plain attribute
        # containers (e.g. dataclasses); verify against the trace step type.
        payload = {
            "query": args.query,
            "character": args.character,
            "answer": result.answer,
            "trace": [step.__dict__ for step in result.trace],
            "evidence": result.evidence,
            "token_usage": usage,
            "generation_latency_sec": elapsed,
        }
        target = Path(args.save_json)
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8")
        print(f"\nSaved json to {target}")


# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()