"""
Free-chat mode for Tensegrity.

The cognitive layer is the agent. Each user turn is a perception cycle that
runs the full agent stack (UnifiedField + FreeEnergyEngine + EpistemicMemory
+ EpisodicMemory + AssociativeMemory + log-likelihood CausalArena +
EnergyCausalArena + TopologyMapper). The LLM enters at exactly one place:
the final Broca verbalization, where logit grafting under semantic vocabulary
grounding shapes the LLM's tokens to be coherent with the agent's converged
beliefs.

This is the architecture from the Sensorium paper: the manifold reasons; the
LLM narrates.

Usage:
    python scripts/chat.py
    python scripts/chat.py --hypotheses "explanation_a,explanation_b,explanation_c"
    python scripts/chat.py --mode offline   # no LLM; the agent prints its converged belief

Type :state to dump the agent's BeliefState, :memory to dump episodic
memory, and :quit to exit.
"""
from __future__ import annotations

import argparse
import json
import sys
import traceback

from tensegrity.graft.pipeline import HybridPipeline
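

# CLI. The defaults give a four-way hypothesis space and a small instruct
# model for narration; --mode offline bypasses the LLM entirely.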
def parse_args():
    ap = argparse.ArgumentParser(description="Tensegrity free-chat mode")
    ap.add_argument(
        "--hypotheses",
        default="positive,neutral,negative,uncertain",
        help="Comma-separated initial hypothesis labels (the agent reasons over these).",
    )
    ap.add_argument(
        "--mode",
        default="local",
        choices=["local", "remote", "offline"],
        help="LLM mode for narration. 'offline' bypasses the LLM entirely.",
    )
    ap.add_argument(
        "--model",
        default="meta-llama/Llama-3.2-1B-Instruct",
        help="HF model id for narration.",
    )
    ap.add_argument(
        "--scale", type=float, default=2.5, help="Logit graft scale.",
    )
    ap.add_argument(
        "--entropy-gate", type=float, default=0.85,
        help="Above this normalized entropy, no graft is applied (LLM speaks freely).",
    )
    return ap.parse_args()
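

# Print a startup banner summarizing the run configuration.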
def banner(args):
    print("=" * 78)
    print(" TENSEGRITY CHAT")
    print(f" hypotheses : {args.hypotheses}")
    print(f" mode       : {args.mode}")
    if args.mode != "offline":
        print(f" model      : {args.model}")
        print(" graft      : semantic grounding (sbert phrase projection)")
    print(" commands   : :state :memory :quit")
    print("=" * 78)
def dump_state(pipe: HybridPipeline) -> None:
    bs = pipe.controller.belief_state
    rows = [
        {
            "hypothesis": h.description,
            "p": round(h.probability, 3),
            "supports": len(h.supporting_evidence),
            "contradicts": len(h.contradicting_evidence),
        }
        for h in bs.hypotheses
    ]
    rows.sort(key=lambda r: r["p"], reverse=True)
    print(json.dumps({
        "turn": bs.turn,
        "tension": round(bs.current_tension, 3),
        "free_energy": round(bs.free_energy, 3),
        "epistemic_urgency": round(bs.epistemic_urgency, 3),
        "eliminated": bs.eliminated_hypotheses,
        "hypotheses": rows,
        "confirmed_facts": bs.confirmed_facts[-5:],
    }, indent=2))
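

# Handler for the :memory command: dump the episode count and the episodic
# memory's statistics as JSON.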
def dump_memory(pipe: HybridPipeline) -> None:
    ep = pipe.controller.agent.episodic
    print(json.dumps({
        "n_episodes": len(ep.episodes),
        "stats": ep.statistics,
    }, indent=2, default=str))
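

# REPL entry point: parse flags, build the hybrid pipeline, then loop over
# user turns.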
def main():
    args = parse_args()
    hypotheses = [h.strip() for h in args.hypotheses.split(",") if h.strip()]
    if len(hypotheses) < 2:
        print("error: need at least two hypotheses", file=sys.stderr)
        sys.exit(2)

    pipe = HybridPipeline(
        hypothesis_labels=hypotheses,
        model_name=args.model,
        mode=args.mode,
        scale=args.scale,
        entropy_gate=args.entropy_gate,
        async_graft=True,
        semantic_grounding=(args.mode != "offline"),
    )
    banner(args)
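
    # Each turn: read input, handle REPL commands, then perceive and narrate.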
    while True:
        try:
            line = input("\nyou> ").strip()
        except (EOFError, KeyboardInterrupt):
            print()
            break
        if not line:
            continue
        if line == ":quit":
            break
        if line == ":state":
            dump_state(pipe)
            continue
        if line == ":memory":
            dump_memory(pipe)
            continue

        # Perception updates the agent's belief state. This runs the full
        # cognitive stack; no LLM in this step.
        try:
            pipe.process_observation(line)
        except Exception as e:
            print(f"[perception failed: {type(e).__name__}: {e}]")
            traceback.print_exc()
            continue

        # Generation: the LLM narrates the converged belief, with semantically
        # grounded logit grafting from the cognitive layer.
        try:
            res = pipe.generate_response(
                "Given everything observed so far, what is the agent's best summary?",
                max_tokens=100,
            )
        except Exception as e:
            print(f"[generation failed: {type(e).__name__}: {e}]")
            traceback.print_exc()
            continue
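
        # Render the narration plus the agent's current top hypothesis.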
        text = res.get("text", "").strip() or "(no narration)"
        beliefs = res.get("beliefs", {})
        mode_label = res.get("mode", "?")
        top = max(beliefs, key=beliefs.get) if beliefs else "(none)"
        top_p = beliefs.get(top, 0.0)
        print(f"agent[{mode_label}] {text}")
        print(f" → top hypothesis: {top} (p={top_p:.2f})")


if __name__ == "__main__":
    main()