feat: V3 — SBERT-native cognitive stack (NGC, Hopfield, falsification all in embedding space)
#4
by theapemachine - opened
- scripts/compare_iterative.py +0 -196
- tensegrity/.DS_Store +0 -0
- tensegrity/__init__.py +41 -62
- tensegrity/broca/controller.py +6 -27
- tensegrity/core/__init__.py +0 -54
- tensegrity/core/agent.py +0 -14
- tensegrity/core/blanket.py +0 -14
- tensegrity/core/morton.py +0 -15
- tensegrity/core/unified_field.py +0 -5
- tensegrity/engine/__init__.py +5 -5
- tensegrity/engine/agent.py +217 -0
- tensegrity/engine/scoring.py +0 -309
- tensegrity/legacy/__init__.py +0 -5
- tensegrity/legacy/v1/__init__.py +0 -19
- tensegrity/legacy/v1/agent.py +0 -497
- tensegrity/legacy/v1/blanket.py +0 -218
- tensegrity/legacy/v1/morton.py +0 -308
- tensegrity/pipeline/canonical.py +103 -153
- tensegrity/pipeline/iterative.py +0 -466
- tests/.DS_Store +0 -0
scripts/compare_iterative.py
DELETED
|
@@ -1,196 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Compare single-shot ScoringBridge vs iterative cognitive scorer on a slice
|
| 3 |
-
of the benchmark. No LLM in either path — this isolates the cognitive
|
| 4 |
-
contribution.
|
| 5 |
-
"""
|
| 6 |
-
from __future__ import annotations
|
| 7 |
-
|
| 8 |
-
import time
|
| 9 |
-
import argparse
|
| 10 |
-
import logging
|
| 11 |
-
import warnings
|
| 12 |
-
|
| 13 |
-
import numpy as np
|
| 14 |
-
|
| 15 |
-
from tensegrity.bench.tasks import load_task_samples
|
| 16 |
-
from tensegrity.engine.scoring import ScoringBridge
|
| 17 |
-
from tensegrity.pipeline.iterative import IterativeCognitiveScorer
|
| 18 |
-
|
| 19 |
-
logging.basicConfig(level=logging.WARNING)
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
TASKS_TO_RUN = [
|
| 23 |
-
("truthfulqa", 30), # graft-friendly today
|
| 24 |
-
("mmlu_philosophy", 30), # graft-hostile
|
| 25 |
-
("winogrande", 30), # graft-dead
|
| 26 |
-
("arc_challenge", 30), # mid
|
| 27 |
-
("copa", 20), # causal, small
|
| 28 |
-
("logical_deduction", 30), # logic
|
| 29 |
-
]
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
def run_task(task_name: str, n: int):
|
| 33 |
-
samples = load_task_samples(task_name, max_samples=n)
|
| 34 |
-
if not samples:
|
| 35 |
-
print(f" [{task_name}] no samples")
|
| 36 |
-
return None
|
| 37 |
-
|
| 38 |
-
shared_params = {
|
| 39 |
-
"obs_dim": 256,
|
| 40 |
-
"hidden_dims": [128, 32],
|
| 41 |
-
"fhrr_dim": 2048,
|
| 42 |
-
"ngc_settle_steps": 30,
|
| 43 |
-
"ngc_learning_rate": 0.01,
|
| 44 |
-
"hopfield_beta": 0.05,
|
| 45 |
-
"context_settle_steps": 40,
|
| 46 |
-
"choice_settle_steps": 25,
|
| 47 |
-
"context_learning_epochs": 3,
|
| 48 |
-
}
|
| 49 |
-
single = ScoringBridge(
|
| 50 |
-
**shared_params,
|
| 51 |
-
confidence_threshold=0.15,
|
| 52 |
-
)
|
| 53 |
-
iterative = IterativeCognitiveScorer(
|
| 54 |
-
**shared_params,
|
| 55 |
-
max_iterations=6,
|
| 56 |
-
convergence_top_p=0.75,
|
| 57 |
-
w_sbert=1.0,
|
| 58 |
-
w_fhrr=0.3,
|
| 59 |
-
w_ngc=0.6,
|
| 60 |
-
belief_step=0.6,
|
| 61 |
-
shaping_lr_scale=0.5,
|
| 62 |
-
use_hopfield=True,
|
| 63 |
-
hopfield_steps=2,
|
| 64 |
-
)
|
| 65 |
-
|
| 66 |
-
n_total = len(samples)
|
| 67 |
-
n_single_correct = 0
|
| 68 |
-
n_iter_correct = 0
|
| 69 |
-
n_iter_used_total = 0
|
| 70 |
-
n_iter_converged = 0
|
| 71 |
-
n_disagree = 0
|
| 72 |
-
n_iter_better = 0
|
| 73 |
-
n_single_better = 0
|
| 74 |
-
|
| 75 |
-
t_single = 0.0
|
| 76 |
-
t_iter = 0.0
|
| 77 |
-
|
| 78 |
-
for s in samples:
|
| 79 |
-
# Single-shot
|
| 80 |
-
single.reset()
|
| 81 |
-
t0 = time.time()
|
| 82 |
-
scores_s, _ = single.score_choices(s.prompt, s.choices)
|
| 83 |
-
t_single += time.time() - t0
|
| 84 |
-
# If gated to all zeros, fall back to sbert-only argmax — matches benchmark.
|
| 85 |
-
sa = np.array(scores_s)
|
| 86 |
-
if np.allclose(sa, 0.0):
|
| 87 |
-
# use raw sbert sim as tiebreaker (single's gate = uninformative)
|
| 88 |
-
if hasattr(single, "sentence_similarities"):
|
| 89 |
-
sims = single.sentence_similarities(s.prompt, s.choices)
|
| 90 |
-
elif hasattr(single, "_sentence_similarities"):
|
| 91 |
-
warnings.warn(
|
| 92 |
-
"ScoringBridge has no public sentence_similarities(); using "
|
| 93 |
-
"_sentence_similarities (private). Prefer adding a stable public API.",
|
| 94 |
-
UserWarning,
|
| 95 |
-
stacklevel=2,
|
| 96 |
-
)
|
| 97 |
-
sims = single._sentence_similarities(s.prompt, s.choices)
|
| 98 |
-
else:
|
| 99 |
-
raise AttributeError(
|
| 100 |
-
"ScoringBridge exposes no sentence_similarities() or "
|
| 101 |
-
"_sentence_similarities(); add a public API on ScoringBridge for tie-breaks.",
|
| 102 |
-
)
|
| 103 |
-
pred_s = int(np.argmax(sims))
|
| 104 |
-
else:
|
| 105 |
-
pred_s = int(np.argmax(sa))
|
| 106 |
-
|
| 107 |
-
# Iterative
|
| 108 |
-
iterative.reset()
|
| 109 |
-
t0 = time.time()
|
| 110 |
-
result = iterative.score(s.prompt, s.choices)
|
| 111 |
-
t_iter += time.time() - t0
|
| 112 |
-
pred_i = result.committed_idx
|
| 113 |
-
|
| 114 |
-
ok_s = (pred_s == s.gold)
|
| 115 |
-
ok_i = (pred_i == s.gold)
|
| 116 |
-
n_single_correct += int(ok_s)
|
| 117 |
-
n_iter_correct += int(ok_i)
|
| 118 |
-
n_iter_used_total += result.iterations_used
|
| 119 |
-
n_iter_converged += int(result.converged)
|
| 120 |
-
if pred_s != pred_i:
|
| 121 |
-
n_disagree += 1
|
| 122 |
-
if ok_i and not ok_s:
|
| 123 |
-
n_iter_better += 1
|
| 124 |
-
elif ok_s and not ok_i:
|
| 125 |
-
n_single_better += 1
|
| 126 |
-
|
| 127 |
-
acc_s = n_single_correct / n_total
|
| 128 |
-
acc_i = n_iter_correct / n_total
|
| 129 |
-
print(
|
| 130 |
-
f" [{task_name:<22}] N={n_total:3d} "
|
| 131 |
-
f"single={acc_s:5.1%} iter={acc_i:5.1%} "
|
| 132 |
-
f"Δ={(acc_i-acc_s):+5.1%} "
|
| 133 |
-
f"disagree={n_disagree:2d} "
|
| 134 |
-
f"iter→✓={n_iter_better} iter→✗={n_single_better} "
|
| 135 |
-
f"avg_iters={n_iter_used_total/n_total:.1f} "
|
| 136 |
-
f"conv={n_iter_converged}/{n_total} "
|
| 137 |
-
f"t_s={t_single:.1f}s t_i={t_iter:.1f}s"
|
| 138 |
-
)
|
| 139 |
-
return {
|
| 140 |
-
"task": task_name, "n": n_total,
|
| 141 |
-
"single": acc_s, "iter": acc_i, "delta": acc_i - acc_s,
|
| 142 |
-
"disagree": n_disagree, "iter_better": n_iter_better,
|
| 143 |
-
"single_better": n_single_better,
|
| 144 |
-
"avg_iters": n_iter_used_total / n_total,
|
| 145 |
-
"converged": n_iter_converged,
|
| 146 |
-
"t_single_s": t_single, "t_iter_s": t_iter,
|
| 147 |
-
}
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
def main():
|
| 151 |
-
ap = argparse.ArgumentParser()
|
| 152 |
-
ap.add_argument("--tasks", nargs="*", default=None,
|
| 153 |
-
help="task names; default = small fixed slice")
|
| 154 |
-
ap.add_argument("--n", type=int, default=None,
|
| 155 |
-
help="override per-task sample count")
|
| 156 |
-
args = ap.parse_args()
|
| 157 |
-
|
| 158 |
-
if args.tasks:
|
| 159 |
-
plan = [(t, args.n or 30) for t in args.tasks]
|
| 160 |
-
else:
|
| 161 |
-
plan = TASKS_TO_RUN
|
| 162 |
-
if args.n is not None:
|
| 163 |
-
plan = [(t, args.n) for t, _ in plan]
|
| 164 |
-
|
| 165 |
-
print("=" * 110)
|
| 166 |
-
print("Single-shot ScoringBridge vs Iterative cognitive scorer (LLM-free)")
|
| 167 |
-
print("=" * 110)
|
| 168 |
-
rows = []
|
| 169 |
-
for t, n in plan:
|
| 170 |
-
try:
|
| 171 |
-
r = run_task(t, n)
|
| 172 |
-
except Exception as e:
|
| 173 |
-
print(f" [{t}] FAILED: {type(e).__name__}: {e}")
|
| 174 |
-
continue
|
| 175 |
-
if r is not None:
|
| 176 |
-
rows.append(r)
|
| 177 |
-
|
| 178 |
-
if not rows:
|
| 179 |
-
return
|
| 180 |
-
|
| 181 |
-
print("-" * 110)
|
| 182 |
-
total_n = sum(r["n"] for r in rows)
|
| 183 |
-
sum_s = sum(r["single"] * r["n"] for r in rows) / total_n
|
| 184 |
-
sum_i = sum(r["iter"] * r["n"] for r in rows) / total_n
|
| 185 |
-
print(
|
| 186 |
-
f" {'OVERALL':<24} N={total_n:3d} "
|
| 187 |
-
f"single={sum_s:5.1%} iter={sum_i:5.1%} "
|
| 188 |
-
f"Δ={(sum_i-sum_s):+5.1%} "
|
| 189 |
-
f"disagree={sum(r['disagree'] for r in rows):3d} "
|
| 190 |
-
f"iter→✓={sum(r['iter_better'] for r in rows):3d} "
|
| 191 |
-
f"iter→✗={sum(r['single_better'] for r in rows):3d}"
|
| 192 |
-
)
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
if __name__ == "__main__":
|
| 196 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/.DS_Store
DELETED
|
Binary file (8.2 kB)
|
|
|
tensegrity/__init__.py
CHANGED
|
@@ -1,34 +1,27 @@
|
|
| 1 |
"""
|
| 2 |
-
Tensegrity: a non-gradient cognitive architecture
|
| 3 |
-
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
importable from ``tensegrity.legacy.v1``. Several other names are re-exported lazily
|
| 12 |
-
via ``tensegrity`` for migration only: ``EpistemicMemory``, ``EpisodicMemory``, and
|
| 13 |
-
``AssociativeMemory`` from ``tensegrity.memory.*``; ``CausalArena`` and
|
| 14 |
-
``StructuralCausalModel`` from ``tensegrity.causal.*``; ``FreeEnergyEngine`` and
|
| 15 |
-
``BeliefPropagator`` from ``tensegrity.inference.*``. Those are **not** defined under
|
| 16 |
-
``tensegrity.legacy.v1``—use the module paths above when importing explicitly.
|
| 17 |
-
|
| 18 |
-
Top-level exports intentionally expose the unified field as the default
|
| 19 |
-
architecture. Deprecated V1 names are resolved lazily for migration only.
|
| 20 |
"""
|
| 21 |
|
| 22 |
-
from
|
| 23 |
-
from typing import Any
|
| 24 |
-
import warnings
|
| 25 |
-
|
| 26 |
-
from tensegrity.engine import (
|
| 27 |
UnifiedField,
|
| 28 |
HopfieldMemoryBank,
|
| 29 |
EnergyDecomposition,
|
|
|
|
|
|
|
| 30 |
PredictiveCodingCircuit,
|
| 31 |
LayerState,
|
|
|
|
|
|
|
| 32 |
FHRREncoder,
|
| 33 |
FHRRCodebook,
|
| 34 |
SemanticFHRRCodebook,
|
|
@@ -36,24 +29,39 @@ from tensegrity.engine import (
|
|
| 36 |
bundle,
|
| 37 |
unbind,
|
| 38 |
permute,
|
|
|
|
|
|
|
| 39 |
EnergyCausalArena,
|
| 40 |
CausalEnergyTerm,
|
| 41 |
TopologyMapper,
|
| 42 |
TopologyMapping,
|
| 43 |
VirtualParent,
|
| 44 |
-
ScoringBridge,
|
| 45 |
-
NGCLogitsProcessor,
|
| 46 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
-
__version__ = "0.
|
| 49 |
|
| 50 |
__all__ = (
|
| 51 |
"__version__",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
"UnifiedField",
|
| 53 |
"HopfieldMemoryBank",
|
| 54 |
"EnergyDecomposition",
|
| 55 |
"PredictiveCodingCircuit",
|
| 56 |
"LayerState",
|
|
|
|
| 57 |
"FHRREncoder",
|
| 58 |
"FHRRCodebook",
|
| 59 |
"SemanticFHRRCodebook",
|
|
@@ -61,47 +69,18 @@ __all__ = (
|
|
| 61 |
"bundle",
|
| 62 |
"unbind",
|
| 63 |
"permute",
|
|
|
|
| 64 |
"EnergyCausalArena",
|
| 65 |
"CausalEnergyTerm",
|
| 66 |
"TopologyMapper",
|
| 67 |
"TopologyMapping",
|
| 68 |
"VirtualParent",
|
| 69 |
-
"
|
| 70 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
)
|
| 72 |
-
|
| 73 |
-
_LEGACY_EXPORTS = {
|
| 74 |
-
"TensegrityAgent": ("tensegrity.legacy.v1.agent", "TensegrityAgent"),
|
| 75 |
-
"MortonEncoder": ("tensegrity.legacy.v1.morton", "MortonEncoder"),
|
| 76 |
-
"MarkovBlanket": ("tensegrity.legacy.v1.blanket", "MarkovBlanket"),
|
| 77 |
-
"EpistemicMemory": ("tensegrity.memory.epistemic", "EpistemicMemory"),
|
| 78 |
-
"EpisodicMemory": ("tensegrity.memory.episodic", "EpisodicMemory"),
|
| 79 |
-
"AssociativeMemory": ("tensegrity.memory.associative", "AssociativeMemory"),
|
| 80 |
-
"CausalArena": ("tensegrity.causal.arena", "CausalArena"),
|
| 81 |
-
"StructuralCausalModel": ("tensegrity.causal.scm", "StructuralCausalModel"),
|
| 82 |
-
"FreeEnergyEngine": ("tensegrity.inference.free_energy", "FreeEnergyEngine"),
|
| 83 |
-
"BeliefPropagator": ("tensegrity.inference.belief_propagation", "BeliefPropagator"),
|
| 84 |
-
}
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
def __getattr__(name: str) -> Any:
|
| 88 |
-
"""Resolve deprecated top-level V1 names with an explicit migration warning."""
|
| 89 |
-
target = _LEGACY_EXPORTS.get(name)
|
| 90 |
-
|
| 91 |
-
if target is None:
|
| 92 |
-
raise AttributeError(f"module 'tensegrity' has no attribute {name!r}")
|
| 93 |
-
|
| 94 |
-
module_name, attr = target
|
| 95 |
-
|
| 96 |
-
warnings.warn(
|
| 97 |
-
f"tensegrity.{name} is not part of the primary V2 export surface. "
|
| 98 |
-
f"Import {name} from {module_name} explicitly, or use "
|
| 99 |
-
"tensegrity.UnifiedField for the unified energy engine.",
|
| 100 |
-
DeprecationWarning,
|
| 101 |
-
stacklevel=2,
|
| 102 |
-
)
|
| 103 |
-
|
| 104 |
-
value = getattr(import_module(module_name), attr)
|
| 105 |
-
globals()[name] = value
|
| 106 |
-
|
| 107 |
-
return value
|
|
|
|
| 1 |
"""
|
| 2 |
+
Tensegrity: a non-gradient cognitive architecture operating in SBERT
|
| 3 |
+
embedding space.
|
| 4 |
|
| 5 |
+
V3 architecture:
|
| 6 |
+
SBERT embedding → NGC predictive coding → Hopfield memory
|
| 7 |
+
→ causal energy terms → Bayesian belief integration
|
| 8 |
|
| 9 |
+
Primary exports:
|
| 10 |
+
CognitiveAgent — the complete agent (replaces V1 TensegrityAgent)
|
| 11 |
+
UnifiedField — SBERT-native NGC + Hopfield
|
| 12 |
+
CanonicalPipeline — benchmark/chat entry point
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
"""
|
| 14 |
|
| 15 |
+
from tensegrity.engine.unified_field import (
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
UnifiedField,
|
| 17 |
HopfieldMemoryBank,
|
| 18 |
EnergyDecomposition,
|
| 19 |
+
)
|
| 20 |
+
from tensegrity.engine.ngc import (
|
| 21 |
PredictiveCodingCircuit,
|
| 22 |
LayerState,
|
| 23 |
+
)
|
| 24 |
+
from tensegrity.engine.fhrr import (
|
| 25 |
FHRREncoder,
|
| 26 |
FHRRCodebook,
|
| 27 |
SemanticFHRRCodebook,
|
|
|
|
| 29 |
bundle,
|
| 30 |
unbind,
|
| 31 |
permute,
|
| 32 |
+
)
|
| 33 |
+
from tensegrity.engine.causal_energy import (
|
| 34 |
EnergyCausalArena,
|
| 35 |
CausalEnergyTerm,
|
| 36 |
TopologyMapper,
|
| 37 |
TopologyMapping,
|
| 38 |
VirtualParent,
|
|
|
|
|
|
|
| 39 |
)
|
| 40 |
+
from tensegrity.engine.agent import (
|
| 41 |
+
CognitiveAgent,
|
| 42 |
+
DEFAULT_MEDIATED_SCM_NAME,
|
| 43 |
+
)
|
| 44 |
+
from tensegrity.causal.scm import StructuralCausalModel
|
| 45 |
+
from tensegrity.causal.arena import CausalArena
|
| 46 |
+
from tensegrity.inference.free_energy import FreeEnergyEngine
|
| 47 |
+
from tensegrity.inference.belief_propagation import BeliefPropagator
|
| 48 |
+
from tensegrity.memory.episodic import EpisodicMemory
|
| 49 |
+
from tensegrity.memory.epistemic import EpistemicMemory
|
| 50 |
|
| 51 |
+
__version__ = "0.3.0"
|
| 52 |
|
| 53 |
__all__ = (
|
| 54 |
"__version__",
|
| 55 |
+
# Agent
|
| 56 |
+
"CognitiveAgent",
|
| 57 |
+
"DEFAULT_MEDIATED_SCM_NAME",
|
| 58 |
+
# Engine
|
| 59 |
"UnifiedField",
|
| 60 |
"HopfieldMemoryBank",
|
| 61 |
"EnergyDecomposition",
|
| 62 |
"PredictiveCodingCircuit",
|
| 63 |
"LayerState",
|
| 64 |
+
# FHRR
|
| 65 |
"FHRREncoder",
|
| 66 |
"FHRRCodebook",
|
| 67 |
"SemanticFHRRCodebook",
|
|
|
|
| 69 |
"bundle",
|
| 70 |
"unbind",
|
| 71 |
"permute",
|
| 72 |
+
# Causal
|
| 73 |
"EnergyCausalArena",
|
| 74 |
"CausalEnergyTerm",
|
| 75 |
"TopologyMapper",
|
| 76 |
"TopologyMapping",
|
| 77 |
"VirtualParent",
|
| 78 |
+
"StructuralCausalModel",
|
| 79 |
+
"CausalArena",
|
| 80 |
+
# Inference
|
| 81 |
+
"FreeEnergyEngine",
|
| 82 |
+
"BeliefPropagator",
|
| 83 |
+
# Memory
|
| 84 |
+
"EpisodicMemory",
|
| 85 |
+
"EpistemicMemory",
|
| 86 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/broca/controller.py
CHANGED
|
@@ -22,7 +22,7 @@ import logging
|
|
| 22 |
import re
|
| 23 |
from difflib import SequenceMatcher
|
| 24 |
|
| 25 |
-
from tensegrity.
|
| 26 |
from tensegrity.broca.schemas import (
|
| 27 |
ParsedObservation,
|
| 28 |
ParsedFeedback,
|
|
@@ -92,7 +92,7 @@ class CognitiveController:
|
|
| 92 |
|
| 93 |
def __init__(
|
| 94 |
self,
|
| 95 |
-
agent: Optional[
|
| 96 |
broca: Optional[BrocaInterface] = None,
|
| 97 |
n_hypotheses: int = 8,
|
| 98 |
hypothesis_labels: Optional[List[str]] = None,
|
|
@@ -102,7 +102,7 @@ class CognitiveController:
|
|
| 102 |
):
|
| 103 |
"""
|
| 104 |
Args:
|
| 105 |
-
agent:
|
| 106 |
broca: BrocaInterface instance. Created with defaults if None.
|
| 107 |
n_hypotheses: Number of competing hypotheses to maintain
|
| 108 |
hypothesis_labels: Labels for the hypothesis space
|
|
@@ -127,11 +127,11 @@ class CognitiveController:
|
|
| 127 |
n_obs = n_states * 4 # Observation space > hypothesis space
|
| 128 |
n_actions = 4 # ask, state, eliminate, conclude
|
| 129 |
|
| 130 |
-
self.agent = agent or
|
| 131 |
n_states=n_states,
|
| 132 |
n_observations=n_obs,
|
| 133 |
n_actions=n_actions,
|
| 134 |
-
sensory_dims=n_states,
|
| 135 |
sensory_bits=4,
|
| 136 |
context_dim=32,
|
| 137 |
associative_dim=64,
|
|
@@ -189,7 +189,7 @@ class CognitiveController:
|
|
| 189 |
while len(labels) < n_hyp:
|
| 190 |
labels.append(f"_empty_{len(labels)}")
|
| 191 |
|
| 192 |
-
self.agent =
|
| 193 |
n_states=n_hyp,
|
| 194 |
n_observations=n_hyp * 4,
|
| 195 |
n_actions=4,
|
|
@@ -395,27 +395,6 @@ class CognitiveController:
|
|
| 395 |
n = len(self.belief_state.hypotheses) or self.agent.n_states
|
| 396 |
features = np.zeros(n)
|
| 397 |
|
| 398 |
-
# Detect binary yes/no tasks. For these tasks, the template parser's
|
| 399 |
-
# keyword-based polarity detection is systematically wrong because
|
| 400 |
-
# passages about questions almost always contain negation words
|
| 401 |
-
# ("not", "doesn't") that have nothing to do with the answer.
|
| 402 |
-
# When we detect a binary yes/no task, we suppress the template
|
| 403 |
-
# parser's relation-based evidence entirely and let SBERT carry
|
| 404 |
-
# the signal. This fixes the BoolQ -12% regression.
|
| 405 |
-
active_labels = [
|
| 406 |
-
h.description.lower() for h in self.belief_state.hypotheses
|
| 407 |
-
if not h.description.startswith("_empty_")
|
| 408 |
-
]
|
| 409 |
-
is_binary_yesno = (
|
| 410 |
-
len(active_labels) == 2
|
| 411 |
-
and any(l in ("yes", "no", "true", "false") for l in active_labels)
|
| 412 |
-
)
|
| 413 |
-
if is_binary_yesno:
|
| 414 |
-
# For binary yes/no: return zero vector (no template-parser evidence).
|
| 415 |
-
# SBERT sentence similarity in the canonical pipeline will provide
|
| 416 |
-
# the actual signal. The template parser does more harm than good here.
|
| 417 |
-
return features
|
| 418 |
-
|
| 419 |
# Map entities and relations to hypothesis dimensions using the
|
| 420 |
# known hypothesis labels. The LLM parser (or template fallback)
|
| 421 |
# extracts entities that may match hypothesis names.
|
|
|
|
| 22 |
import re
|
| 23 |
from difflib import SequenceMatcher
|
| 24 |
|
| 25 |
+
from tensegrity.engine.agent import CognitiveAgent, DEFAULT_MEDIATED_SCM_NAME
|
| 26 |
from tensegrity.broca.schemas import (
|
| 27 |
ParsedObservation,
|
| 28 |
ParsedFeedback,
|
|
|
|
| 92 |
|
| 93 |
def __init__(
|
| 94 |
self,
|
| 95 |
+
agent: Optional[CognitiveAgent] = None,
|
| 96 |
broca: Optional[BrocaInterface] = None,
|
| 97 |
n_hypotheses: int = 8,
|
| 98 |
hypothesis_labels: Optional[List[str]] = None,
|
|
|
|
| 102 |
):
|
| 103 |
"""
|
| 104 |
Args:
|
| 105 |
+
agent: CognitiveAgent instance. Created with defaults if None.
|
| 106 |
broca: BrocaInterface instance. Created with defaults if None.
|
| 107 |
n_hypotheses: Number of competing hypotheses to maintain
|
| 108 |
hypothesis_labels: Labels for the hypothesis space
|
|
|
|
| 127 |
n_obs = n_states * 4 # Observation space > hypothesis space
|
| 128 |
n_actions = 4 # ask, state, eliminate, conclude
|
| 129 |
|
| 130 |
+
self.agent = agent or CognitiveAgent(
|
| 131 |
n_states=n_states,
|
| 132 |
n_observations=n_obs,
|
| 133 |
n_actions=n_actions,
|
| 134 |
+
sensory_dims=n_states,
|
| 135 |
sensory_bits=4,
|
| 136 |
context_dim=32,
|
| 137 |
associative_dim=64,
|
|
|
|
| 189 |
while len(labels) < n_hyp:
|
| 190 |
labels.append(f"_empty_{len(labels)}")
|
| 191 |
|
| 192 |
+
self.agent = CognitiveAgent(
|
| 193 |
n_states=n_hyp,
|
| 194 |
n_observations=n_hyp * 4,
|
| 195 |
n_actions=4,
|
|
|
|
| 395 |
n = len(self.belief_state.hypotheses) or self.agent.n_states
|
| 396 |
features = np.zeros(n)
|
| 397 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 398 |
# Map entities and relations to hypothesis dimensions using the
|
| 399 |
# known hypothesis labels. The LLM parser (or template fallback)
|
| 400 |
# extracts entities that may match hypothesis names.
|
tensegrity/core/__init__.py
DELETED
|
@@ -1,54 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Primary V2 core: the unified energy landscape.
|
| 3 |
-
|
| 4 |
-
The old Morton-coded V1 API lives under ``tensegrity.legacy.v1``. Compatibility
|
| 5 |
-
modules remain at ``tensegrity.core.agent``, ``tensegrity.core.morton``, and
|
| 6 |
-
``tensegrity.core.blanket`` for migration, but they are no longer part of the
|
| 7 |
-
primary export surface.
|
| 8 |
-
"""
|
| 9 |
-
|
| 10 |
-
from tensegrity.engine.unified_field import (
|
| 11 |
-
UnifiedField,
|
| 12 |
-
HopfieldMemoryBank,
|
| 13 |
-
EnergyDecomposition,
|
| 14 |
-
)
|
| 15 |
-
from tensegrity.engine.ngc import PredictiveCodingCircuit, LayerState
|
| 16 |
-
from tensegrity.engine.fhrr import (
|
| 17 |
-
FHRREncoder,
|
| 18 |
-
FHRRCodebook,
|
| 19 |
-
SemanticFHRRCodebook,
|
| 20 |
-
bind,
|
| 21 |
-
bundle,
|
| 22 |
-
unbind,
|
| 23 |
-
permute,
|
| 24 |
-
)
|
| 25 |
-
from tensegrity.engine.causal_energy import (
|
| 26 |
-
EnergyCausalArena,
|
| 27 |
-
CausalEnergyTerm,
|
| 28 |
-
TopologyMapper,
|
| 29 |
-
TopologyMapping,
|
| 30 |
-
VirtualParent,
|
| 31 |
-
)
|
| 32 |
-
from tensegrity.engine.scoring import ScoringBridge, NGCLogitsProcessor
|
| 33 |
-
|
| 34 |
-
__all__ = (
|
| 35 |
-
"UnifiedField",
|
| 36 |
-
"HopfieldMemoryBank",
|
| 37 |
-
"EnergyDecomposition",
|
| 38 |
-
"PredictiveCodingCircuit",
|
| 39 |
-
"LayerState",
|
| 40 |
-
"FHRREncoder",
|
| 41 |
-
"FHRRCodebook",
|
| 42 |
-
"SemanticFHRRCodebook",
|
| 43 |
-
"bind",
|
| 44 |
-
"bundle",
|
| 45 |
-
"unbind",
|
| 46 |
-
"permute",
|
| 47 |
-
"EnergyCausalArena",
|
| 48 |
-
"CausalEnergyTerm",
|
| 49 |
-
"TopologyMapper",
|
| 50 |
-
"TopologyMapping",
|
| 51 |
-
"VirtualParent",
|
| 52 |
-
"ScoringBridge",
|
| 53 |
-
"NGCLogitsProcessor",
|
| 54 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/core/agent.py
DELETED
|
@@ -1,14 +0,0 @@
|
|
| 1 |
-
"""Deprecated V1 agent shim. Import from ``tensegrity.legacy.v1.agent``."""
|
| 2 |
-
|
| 3 |
-
import warnings
|
| 4 |
-
|
| 5 |
-
warnings.warn(
|
| 6 |
-
"tensegrity.core.agent is legacy V1; use tensegrity.legacy.v1.agent for "
|
| 7 |
-
"the Morton/POMDP agent or tensegrity.core.UnifiedField for the V2 engine.",
|
| 8 |
-
DeprecationWarning,
|
| 9 |
-
stacklevel=2,
|
| 10 |
-
)
|
| 11 |
-
|
| 12 |
-
from tensegrity.legacy.v1.agent import DEFAULT_MEDIATED_SCM_NAME, TensegrityAgent
|
| 13 |
-
|
| 14 |
-
__all__ = ("DEFAULT_MEDIATED_SCM_NAME", "TensegrityAgent")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/core/blanket.py
DELETED
|
@@ -1,14 +0,0 @@
|
|
| 1 |
-
"""Deprecated V1 Markov blanket shim. Import from ``tensegrity.legacy.v1.blanket``."""
|
| 2 |
-
|
| 3 |
-
import warnings
|
| 4 |
-
|
| 5 |
-
warnings.warn(
|
| 6 |
-
"tensegrity.core.blanket is legacy V1; use tensegrity.legacy.v1.blanket "
|
| 7 |
-
"for the old Morton-coded frontend.",
|
| 8 |
-
DeprecationWarning,
|
| 9 |
-
stacklevel=2,
|
| 10 |
-
)
|
| 11 |
-
|
| 12 |
-
from tensegrity.legacy.v1.blanket import MarkovBlanket
|
| 13 |
-
|
| 14 |
-
__all__ = ("MarkovBlanket",)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/core/morton.py
DELETED
|
@@ -1,15 +0,0 @@
|
|
| 1 |
-
"""Deprecated V1 Morton encoder shim. Import from ``tensegrity.legacy.v1.morton``."""
|
| 2 |
-
|
| 3 |
-
import warnings
|
| 4 |
-
|
| 5 |
-
warnings.warn(
|
| 6 |
-
"tensegrity.core.morton is legacy V1; import from tensegrity.legacy.v1.morton "
|
| 7 |
-
"for the Morton-coded frontend (same API — re-export only). There is no "
|
| 8 |
-
"alternative module beyond legacy.v1 for this shim.",
|
| 9 |
-
DeprecationWarning,
|
| 10 |
-
stacklevel=2,
|
| 11 |
-
)
|
| 12 |
-
|
| 13 |
-
from tensegrity.legacy.v1.morton import MortonEncoder
|
| 14 |
-
|
| 15 |
-
__all__ = ("MortonEncoder",)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/core/unified_field.py
DELETED
|
@@ -1,5 +0,0 @@
|
|
| 1 |
-
"""Canonical V2 field module; implementation lives in ``tensegrity.engine``."""
|
| 2 |
-
|
| 3 |
-
from tensegrity.engine.unified_field import EnergyDecomposition, HopfieldMemoryBank, UnifiedField
|
| 4 |
-
|
| 5 |
-
__all__ = ("UnifiedField", "HopfieldMemoryBank", "EnergyDecomposition")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/engine/__init__.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
"""
|
| 2 |
-
Unified cognitive engine:
|
| 3 |
-
|
| 4 |
"""
|
| 5 |
|
| 6 |
from tensegrity.engine.unified_field import UnifiedField, HopfieldMemoryBank, EnergyDecomposition
|
|
@@ -21,7 +21,7 @@ from tensegrity.engine.causal_energy import (
|
|
| 21 |
TopologyMapping,
|
| 22 |
VirtualParent,
|
| 23 |
)
|
| 24 |
-
from tensegrity.engine.
|
| 25 |
|
| 26 |
__all__ = (
|
| 27 |
"UnifiedField",
|
|
@@ -41,6 +41,6 @@ __all__ = (
|
|
| 41 |
"TopologyMapper",
|
| 42 |
"TopologyMapping",
|
| 43 |
"VirtualParent",
|
| 44 |
-
"
|
| 45 |
-
"
|
| 46 |
)
|
|
|
|
| 1 |
"""
|
| 2 |
+
Unified cognitive engine: SBERT-native predictive coding, Hopfield memory,
|
| 3 |
+
FHRR compositional encoding, and energy-based causal competition.
|
| 4 |
"""
|
| 5 |
|
| 6 |
from tensegrity.engine.unified_field import UnifiedField, HopfieldMemoryBank, EnergyDecomposition
|
|
|
|
| 21 |
TopologyMapping,
|
| 22 |
VirtualParent,
|
| 23 |
)
|
| 24 |
+
from tensegrity.engine.agent import CognitiveAgent, DEFAULT_MEDIATED_SCM_NAME
|
| 25 |
|
| 26 |
__all__ = (
|
| 27 |
"UnifiedField",
|
|
|
|
| 41 |
"TopologyMapper",
|
| 42 |
"TopologyMapping",
|
| 43 |
"VirtualParent",
|
| 44 |
+
"CognitiveAgent",
|
| 45 |
+
"DEFAULT_MEDIATED_SCM_NAME",
|
| 46 |
)
|
tensegrity/engine/agent.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CognitiveAgent — V3 clean agent without legacy V1 baggage.
|
| 3 |
+
|
| 4 |
+
Replaces TensegrityAgent (legacy/v1/agent.py). Composes:
|
| 5 |
+
- UnifiedField (SBERT-native NGC + Hopfield)
|
| 6 |
+
- FreeEnergyEngine (discrete active inference)
|
| 7 |
+
- CausalArena (competing SCMs)
|
| 8 |
+
- EpisodicMemory (cross-item recall)
|
| 9 |
+
|
| 10 |
+
No Morton codes. No MarkovBlanket. No associative memory random projections.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import hashlib
|
| 14 |
+
import numpy as np
|
| 15 |
+
from typing import Optional, Dict, List, Any
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
from tensegrity.engine.unified_field import UnifiedField
|
| 19 |
+
from tensegrity.inference.free_energy import FreeEnergyEngine
|
| 20 |
+
from tensegrity.causal.arena import CausalArena
|
| 21 |
+
from tensegrity.causal.scm import StructuralCausalModel
|
| 22 |
+
from tensegrity.memory.episodic import EpisodicMemory
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
DEFAULT_MEDIATED_SCM_NAME = "mediated_causal"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class CognitiveAgent:
|
| 30 |
+
"""
|
| 31 |
+
V3 cognitive agent operating in SBERT embedding space.
|
| 32 |
+
|
| 33 |
+
Provides the same interface that CognitiveController expects
|
| 34 |
+
(field, perceive(), arena, episodic, experience_replay, n_states)
|
| 35 |
+
without any V1 legacy code.
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
def __init__(
|
| 39 |
+
self,
|
| 40 |
+
n_states: int = 16,
|
| 41 |
+
n_observations: int = 32,
|
| 42 |
+
n_actions: int = 4,
|
| 43 |
+
planning_horizon: int = 3,
|
| 44 |
+
precision: float = 4.0,
|
| 45 |
+
context_dim: int = 32,
|
| 46 |
+
# UnifiedField parameters
|
| 47 |
+
obs_dim: int = 256,
|
| 48 |
+
hidden_dims: Optional[List[int]] = None,
|
| 49 |
+
fhrr_dim: int = 2048,
|
| 50 |
+
hopfield_beta: float = 0.05,
|
| 51 |
+
ngc_settle_steps: int = 20,
|
| 52 |
+
ngc_learning_rate: float = 0.005,
|
| 53 |
+
sbert_dim: Optional[int] = None,
|
| 54 |
+
# Legacy compat: these are accepted but ignored
|
| 55 |
+
sensory_dims: int = 4,
|
| 56 |
+
sensory_bits: int = 4,
|
| 57 |
+
associative_dim: int = 64,
|
| 58 |
+
):
|
| 59 |
+
self.n_states = n_states
|
| 60 |
+
self.n_obs = n_observations
|
| 61 |
+
self.n_actions = n_actions
|
| 62 |
+
|
| 63 |
+
# === Unified Field (SBERT-native NGC + Hopfield) ===
|
| 64 |
+
self.field = UnifiedField(
|
| 65 |
+
obs_dim=obs_dim,
|
| 66 |
+
hidden_dims=hidden_dims or [128, 32],
|
| 67 |
+
fhrr_dim=fhrr_dim,
|
| 68 |
+
hopfield_beta=hopfield_beta,
|
| 69 |
+
ngc_settle_steps=ngc_settle_steps,
|
| 70 |
+
ngc_learning_rate=ngc_learning_rate,
|
| 71 |
+
sbert_dim=sbert_dim,
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
# === Free Energy Engine (discrete active inference) ===
|
| 75 |
+
self.engine = FreeEnergyEngine(
|
| 76 |
+
n_states=n_states,
|
| 77 |
+
n_observations=n_observations,
|
| 78 |
+
n_actions=n_actions,
|
| 79 |
+
planning_horizon=planning_horizon,
|
| 80 |
+
precision=precision,
|
| 81 |
+
policy_depth=min(planning_horizon, 3),
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
# === Causal Arena ===
|
| 85 |
+
self.arena = CausalArena(
|
| 86 |
+
prior_concentration=1.0,
|
| 87 |
+
falsification_threshold=-100.0,
|
| 88 |
+
min_models=2,
|
| 89 |
+
)
|
| 90 |
+
self._init_default_models()
|
| 91 |
+
|
| 92 |
+
# === Episodic Memory ===
|
| 93 |
+
self.episodic = EpisodicMemory(
|
| 94 |
+
context_dim=context_dim,
|
| 95 |
+
capacity=10000,
|
| 96 |
+
drift_rate=0.95,
|
| 97 |
+
encoding_strength=0.3,
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
# Agent state
|
| 101 |
+
self._step_count = 0
|
| 102 |
+
self._prev_belief: Optional[np.ndarray] = None
|
| 103 |
+
|
| 104 |
+
def _init_default_models(self):
|
| 105 |
+
"""Initialize causal arena with default competing SCMs."""
|
| 106 |
+
model_a = StructuralCausalModel(name="direct_causal")
|
| 107 |
+
model_a.add_variable("state", n_values=self.n_states)
|
| 108 |
+
model_a.add_variable("observation", n_values=self.n_obs, parents=["state"])
|
| 109 |
+
|
| 110 |
+
model_b = StructuralCausalModel(name=DEFAULT_MEDIATED_SCM_NAME)
|
| 111 |
+
model_b.add_variable("cause", n_values=self.n_states)
|
| 112 |
+
model_b.add_variable("state", n_values=self.n_states, parents=["cause"])
|
| 113 |
+
model_b.add_variable("observation", n_values=self.n_obs, parents=["state"])
|
| 114 |
+
|
| 115 |
+
self.arena.register_model(model_a)
|
| 116 |
+
self.arena.register_model(model_b)
|
| 117 |
+
|
| 118 |
+
    def perceive(self, raw_observation: np.ndarray) -> Dict[str, Any]:
        """
        Perception: observation → UnifiedField → active inference → causal arena.

        One full perception cycle:

        1. Settle the raw vector through the unified field (NGC + Hopfield).
        2. Hash the settled observation into a discrete index for the
           free-energy engine.
        3. Run one active-inference step and update the lazily-created
           epistemic memory (likelihood + transition counts).
        4. Feed the MAP state into the causal arena competition.
        5. Encode the episode into episodic memory.

        Args:
            raw_observation: Numeric observation; flattened to 1-D float64.
                (Shape/range expectations are delegated to
                ``UnifiedField.observe`` — not constrained here.)

        Returns:
            Dict with belief state, free energy, surprise, selected action
            and confidence, arena result, and the raw field cycle.
        """
        self._step_count += 1
        raw = np.asarray(raw_observation, dtype=np.float64).ravel()

        # Run through unified field (SBERT-native settling)
        cycle = self.field.observe(raw, input_type="numeric")
        obs_vec = cycle["observation"]
        decomp = cycle["energy"]
        surprise = float(decomp.surprise)

        # Discrete observation index for the FEE.
        # SHA-256 of the settled vector's bytes gives a deterministic,
        # well-spread bucket in [0, n_obs).
        h = hashlib.sha256(obs_vec.astype(np.float64).tobytes()).digest()
        obs_idx = int.from_bytes(h[:8], byteorder="big", signed=False) % max(self.n_obs, 1)

        # Active inference step. The engine's `_A` attribute doubles as a
        # "matrices initialized" flag: on the first call we build the
        # epistemic memory and cache A on the engine.
        A = getattr(self.engine, '_A', None)
        if A is None:
            # Use epistemic memory-style matrices if available,
            # otherwise use engine defaults
            from tensegrity.memory.epistemic import EpistemicMemory
            em = EpistemicMemory(
                n_states=self.n_states,
                n_observations=self.n_obs,
                n_actions=self.n_actions,
            )
            self._epistemic = em
            A, B, C, D = em.A, em.B, em.C, em.D
            log_A = em.log_A
            self.engine._A = A  # cache
        else:
            # `_epistemic` exists because the branch above ran first.
            em = self._epistemic
            A, B, C, D = em.A, em.B, em.C, em.D
            log_A = em.log_A

        # Capture the action chosen on the *previous* step before the engine
        # overwrites it — needed for the transition-count update below.
        previous_action = self.engine.prev_action
        inference_result = self.engine.step(obs_idx, A, B, C, D, log_A)
        q_states = inference_result["belief_state"]
        F = float(inference_result["free_energy"])

        # Update epistemic memory
        em.update_likelihood(obs_idx, q_states)
        if previous_action is not None and self._prev_belief is not None:
            em.update_transition(self._prev_belief, q_states, previous_action)
        self._prev_belief = q_states.copy()

        # Causal arena competition: MAP state + discrete observation.
        causal_obs = {
            "state": int(np.argmax(q_states)),
            "observation": obs_idx,
        }
        if DEFAULT_MEDIATED_SCM_NAME in self.arena.models:
            # The mediated SCM requires a value for its "cause" variable;
            # the MAP state is used as a proxy.
            causal_obs["cause"] = int(np.argmax(q_states))
        arena_result = self.arena.compete(causal_obs)

        # Episodic memory encoding. `morton_code` is a legacy field name;
        # here it carries the hashed observation index.
        self.episodic.encode(
            observation=raw,
            morton_code=np.array([obs_idx], dtype=np.int64),
            belief_state=q_states,
            action=int(inference_result["action"]),
            surprise=surprise,
            free_energy=F,
            metadata={
                "obs_idx": obs_idx,
                "field_energy": float(decomp.total),
            },
        )

        return {
            "step": self._step_count,
            "observation_index": obs_idx,
            "belief_state": q_states,
            "free_energy": F,
            "surprise": surprise,
            "action": inference_result["action"],
            "action_confidence": inference_result["action_confidence"],
            "arena": arena_result,
            "epistemic_value": self.engine.epistemic_value,
            "field_cycle": cycle,
        }
|
| 201 |
+
|
| 202 |
+
def experience_replay(self, n_episodes: int = 10) -> Dict[str, Any]:
|
| 203 |
+
"""Replay past episodes to strengthen beliefs."""
|
| 204 |
+
episodes = self.episodic.replay(n_episodes)
|
| 205 |
+
em = getattr(self, '_epistemic', None)
|
| 206 |
+
if em is not None:
|
| 207 |
+
for ep in episodes:
|
| 208 |
+
obs_idx = ep.metadata.get('obs_idx', 0)
|
| 209 |
+
em.update_likelihood(obs_idx, ep.belief_state)
|
| 210 |
+
return {
|
| 211 |
+
'episodes_replayed': len(episodes),
|
| 212 |
+
'mean_surprise': np.mean([ep.surprise for ep in episodes]) if episodes else 0,
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
def add_causal_model(self, model: StructuralCausalModel):
|
| 216 |
+
"""Add a competing causal model to the arena."""
|
| 217 |
+
self.arena.register_model(model)
|
tensegrity/engine/scoring.py
DELETED
|
@@ -1,309 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Semantic scoring bridge + NGC logit bias injection (part of the unified engine).
|
| 3 |
-
"""
|
| 4 |
-
|
| 5 |
-
import numpy as np
|
| 6 |
-
from typing import Dict, List, Optional, Callable, Set, Tuple, Any
|
| 7 |
-
import math
|
| 8 |
-
import logging
|
| 9 |
-
import re
|
| 10 |
-
import threading
|
| 11 |
-
|
| 12 |
-
logger = logging.getLogger(__name__)
|
| 13 |
-
|
| 14 |
-
torch = None
|
| 15 |
-
def _ensure_torch():
|
| 16 |
-
global torch
|
| 17 |
-
if torch is None:
|
| 18 |
-
import importlib
|
| 19 |
-
torch = importlib.import_module('torch')
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
class NGCLogitsProcessor:
    """NGC prediction errors → per-step logit biases during LLM decoding.

    A Hugging Face-style logits processor: ``__call__(input_ids, scores)``
    returns ``scores`` plus a vocabulary-sized bias vector derived from the
    NGC layer prediction errors of ``field``.

    With ``async_cognitive=True`` (default), settling runs on a daemon
    worker thread; each decode step submits the latest token window and
    applies the most recently *completed* bias — possibly one computed for
    an earlier step. With ``async_cognitive=False``, settling runs inline
    on every decode step.

    Batch size must be 1 (asserted in ``__call__``);
    ``supports_continuous_batching`` is False accordingly.
    """

    supports_continuous_batching = False

    def __init__(self, field, tokenizer, vocab_projections=None,
                 scale=1.0, energy_gate=0.1, max_settle_steps=30, max_bias=5.0,
                 async_cognitive: bool = True):
        # field: UnifiedField instance (provides .encoder, .ngc, ._fhrr_to_obs)
        # tokenizer: HF-style tokenizer (needs .vocab_size and .decode)
        # scale: global multiplier on the emitted bias
        # energy_gate: bias is suppressed unless the settle energy trace has
        #   converged to within this delta over its last two entries
        # max_bias: per-token clip bound on the bias vector
        _ensure_torch()
        self.field = field
        self.tokenizer = tokenizer
        self.scale = scale
        self.energy_gate = energy_gate
        self.max_settle_steps = max_settle_steps
        self.max_bias = max_bias
        self.async_cognitive = async_cognitive
        self.vocab_size = tokenizer.vocab_size
        # One random projection matrix per NGC layer (vocab_size x layer_size);
        # callers may inject their own via vocab_projections.
        self.projections = vocab_projections or self._build_projections()
        self._step_count = 0
        self._emissions = 0
        self._total_settle_steps = 0

        # Worker-thread plumbing: _pending_ids is the latest submitted token
        # window, _latest_bias_np the latest completed bias. Both are guarded
        # by _lock; _wake signals new work, _halt requests shutdown.
        self._lock = threading.Lock()
        self._halt = threading.Event()
        self._wake = threading.Event()
        self._pending_ids: Optional[List[int]] = None
        self._latest_bias_np: Optional[np.ndarray] = None
        self._worker: Optional[threading.Thread] = None
        if self.async_cognitive:
            self._worker = threading.Thread(target=self._cognitive_loop, daemon=True)
            self._worker.start()

    def close(self):
        """Stop the async worker (if any); safe to call multiple times."""
        self._halt.set()
        self._wake.set()
        if self._worker is not None:
            self._worker.join(timeout=2.0)
            if self._worker.is_alive():
                logger.warning(
                    "NGCLogitsProcessor worker did not stop within 2.0s (belief_fn may block)"
                )
            self._worker = None

    def _build_projections(self):
        """Build fixed random layer→vocab projections (seeded for determinism).

        Deeper layers are scaled up by 2**layer while each matrix is
        normalized by sqrt(layer_size), so abstract-layer errors carry more
        weight in the final bias.
        """
        projections = []
        rng = np.random.RandomState(7777)
        for ell, size in enumerate(self.field.ngc.layer_sizes):
            P = rng.randn(self.vocab_size, size).astype(np.float64)
            P *= (2.0 ** ell) / np.sqrt(size)
            projections.append(P)
        return projections

    def _compute_bias_from_ids(self, ids: List[int]) -> Optional[np.ndarray]:
        """Decode ids → settle NGC → project layer errors into vocab space.

        Returns None (no bias) when the decoded text has no tokens or when
        the settle has not converged within ``energy_gate``.
        """
        text = self.tokenizer.decode(ids, skip_special_tokens=True)
        tokens = text.lower().split()
        if not tokens:
            return None
        obs = self.field._fhrr_to_obs(self.field.encoder.encode_sequence(tokens))
        settle = self.field.ngc.settle(obs)
        self._total_settle_steps += int(settle.get("settle_steps", self.max_settle_steps))
        et = settle["energy_trace"]
        # Convergence gate: only emit a bias from a settled (stable) state.
        if len(et) < 2 or abs(et[-1] - et[-2]) >= self.energy_gate:
            return None
        bias = np.zeros(self.vocab_size, dtype=np.float64)
        for ell in range(self.field.ngc.n_layers):
            err = self.field.ngc.layers[ell].error
            if np.linalg.norm(err) > 1e-10:
                bias += self.projections[ell] @ err
        bias /= max(self.field.ngc.n_layers, 1)
        # Lower residual energy → higher confidence → stronger bias.
        confidence = 1.0 / (1.0 + settle["final_energy"])
        bias *= self.scale * confidence
        np.clip(bias, -self.max_bias, self.max_bias, out=bias)
        self._emissions += 1
        return bias

    def _cognitive_loop(self):
        """Worker-thread loop: wait for work, compute bias, publish result."""
        while not self._halt.is_set():
            # Short timeout so shutdown via _halt is noticed promptly.
            if not self._wake.wait(timeout=0.05):
                continue
            self._wake.clear()
            if self._halt.is_set():
                break
            with self._lock:
                ids = self._pending_ids
            if ids is None:
                continue
            try:
                bias_np = self._compute_bias_from_ids(ids)
            except Exception as e:
                # Cognitive failures must never crash decoding; degrade to
                # "no bias" and log at debug level.
                logger.debug("NGC cognitive worker: %s", e)
                bias_np = None
            with self._lock:
                self._latest_bias_np = bias_np

    def __call__(self, input_ids, scores):
        """Apply the NGC bias to one decoding step's logits.

        Accepts input_ids as a torch tensor or anything np.asarray handles;
        2-D inputs use the last row (batch-of-1 assumption). Only the last
        16 token ids feed the cognitive computation.
        """
        self._step_count += 1
        _ensure_torch()
        if not isinstance(input_ids, torch.Tensor):
            arr = np.asarray(input_ids)
            if arr.ndim == 1:
                flat = arr.tolist()
            elif arr.ndim == 2:
                flat = arr[-1].tolist()
            else:
                raise ValueError(f"input_ids must be 1D or 2D, got shape {arr.shape}")
        else:
            if input_ids.dim() == 1:
                flat = input_ids.detach().cpu().tolist()
            elif input_ids.dim() == 2:
                flat = input_ids[-1].detach().cpu().tolist()
            else:
                raise ValueError(f"input_ids must be 1D or 2D, got shape {tuple(input_ids.shape)}")
        ids = flat[-16:]
        if self.async_cognitive:
            # Submit the new window, then apply whatever bias the worker has
            # finished so far (may lag the current step by one or more).
            with self._lock:
                self._pending_ids = list(ids)
            self._wake.set()
            with self._lock:
                bias_np = None if self._latest_bias_np is None else self._latest_bias_np.copy()
            if bias_np is None:
                return scores
            _ensure_torch()
            assert scores.shape[0] == 1, (
                f"NGCLogitsProcessor expects batch size 1, got {scores.shape[0]}"
            )
            return scores + torch.tensor(bias_np, device=scores.device, dtype=scores.dtype).unsqueeze(0)

        # Synchronous path: settle inline; any failure degrades to no bias.
        try:
            bias_np = self._compute_bias_from_ids(ids)
        except Exception as e:
            logger.debug("NGCLogitsProcessor: %s", e)
            return scores
        if bias_np is None:
            return scores
        assert scores.shape[0] == 1, (
            f"NGCLogitsProcessor expects batch size 1, got {scores.shape[0]}"
        )
        return scores + torch.tensor(bias_np, device=scores.device, dtype=scores.dtype).unsqueeze(0)

    @property
    def statistics(self):
        # emission_rate: fraction of decode steps at which a bias was
        # actually produced (gated emissions lower it).
        return {
            "decode_steps": self._step_count, "emissions": self._emissions,
            "emission_rate": self._emissions / max(self._step_count, 1),
            "ngc_energy": self.field.ngc.total_energy,
        }
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
class ScoringBridge:
    """
    Semantic scoring bridge for benchmark evaluation.

    Combines sentence-level sbert similarity (primary signal) with
    token-level semantic FHRR and NGC energy (complementary signals).

    ``score_choices`` is the entry point: it returns per-choice scores plus
    a normalized-entropy confidence, abstaining (all-zero scores) when the
    signals do not separate the choices.
    """

    def __init__(self, field=None, obs_dim=256, hidden_dims=None,
                 fhrr_dim=2048, ngc_settle_steps=30, ngc_learning_rate=0.01,
                 hopfield_beta=0.05, confidence_threshold=0.15,
                 context_settle_steps=40, choice_settle_steps=25,
                 context_learning_epochs=3):
        # Local import avoids a circular import at module load time
        # (unified_field ↔ scoring live in the same package).
        from tensegrity.engine.unified_field import UnifiedField
        self.field = field or UnifiedField(
            obs_dim=obs_dim, hidden_dims=hidden_dims or [128, 32],
            fhrr_dim=fhrr_dim, hopfield_beta=hopfield_beta,
            ngc_settle_steps=ngc_settle_steps, ngc_learning_rate=ngc_learning_rate,
        )
        # confidence_threshold: base abstention threshold on the
        # coefficient-of-variation of sentence similarities (scaled per
        # choice count in score_choices).
        self.confidence_threshold = confidence_threshold
        self.context_settle_steps = context_settle_steps
        self.choice_settle_steps = choice_settle_steps
        self.context_learning_epochs = context_learning_epochs
        self._total_scored = 0
        self._total_gated = 0

    def _tokenize_smart(self, text: str, max_tokens: int = 48) -> List[str]:
        """Lowercase word/number tokens; keeps only the *last* max_tokens."""
        return re.findall(r"[a-zA-Z]+(?:'[a-z]+)?|[0-9]+(?:\.[0-9]+)?", text.lower())[-max_tokens:]

    def _encode_and_settle(self, tokens, settle_steps, learn=False):
        """FHRR-encode tokens, settle the NGC on them, optionally learn.

        Empty token lists return a neutral result (zero energy/vectors)
        without touching the field.
        """
        if not tokens:
            return {"energy": 0.0, "obs_vec": np.zeros(self.field.obs_dim),
                    "abstract_state": np.zeros(self.field.ngc.layer_sizes[-1]),
                    "fhrr_vec": np.ones(self.field.fhrr_dim, dtype=np.complex64),
                    "settle": {"final_energy": 0.0, "energy_trace": [0.0]}}
        fhrr_vec = self.field.encoder.encode_sequence(tokens)
        obs_vec = self.field._fhrr_to_obs(fhrr_vec)
        settle_result = self.field.ngc.settle(obs_vec, steps=settle_steps)
        if learn:
            # One Hebbian update so the context leaves a trace in the NGC
            # weights before choices are evaluated against it.
            self.field.ngc.learn(modulation=1.0)
        return {"energy": settle_result["final_energy"], "obs_vec": obs_vec,
                "abstract_state": self.field.ngc.get_abstract_state(level=-1),
                "fhrr_vec": fhrr_vec, "settle": settle_result}

    def score_choices(self, prompt: str, choices: List[str]) -> Tuple[List[float], float]:
        """Score choices via sentence similarity + semantic FHRR + NGC energy.

        Returns:
            (scores, entropy): per-choice combined z-scores and the
            normalized entropy of their softmax. When the gate fires
            (signals too flat / too uncertain), returns all-zero scores
            with entropy 1.0 — i.e. an explicit abstention.
        """
        self._total_scored += 1
        n = len(choices)

        # 1. Sentence-level similarity (primary)
        sentence_sims = self._sentence_similarities(prompt, choices)

        # 2. Token-level FHRR similarity
        pt = self._tokenize_smart(prompt, max_tokens=64)
        pf = self.field.encoder.encode_sequence(pt) if pt else np.ones(self.field.fhrr_dim, dtype=np.complex64)
        fhrr_sims = []
        for choice in choices:
            ct = self._tokenize_smart(choice, max_tokens=32)
            enc_c = (
                self.field.encoder.encode_sequence(ct)
                if ct
                else np.ones(self.field.fhrr_dim, dtype=np.complex64)
            )
            fhrr_sims.append(self.field.encoder.similarity(pf, enc_c))

        # 3. NGC energy: learn the prompt once, then settle each
        # prompt+choice continuation from the same saved NGC state so the
        # choices are compared fairly. Lower settle energy = better fit,
        # hence the negation.
        self._encode_and_settle(pt, settle_steps=self.context_settle_steps, learn=True)
        base_state = self.field.ngc.save_state()
        ngc_energies = []
        for choice in choices:
            self.field.ngc.restore_state(base_state)
            r = self._encode_and_settle(
                self._tokenize_smart(prompt + " " + choice, 64),
                self.choice_settle_steps,
                False,
            )
            ngc_energies.append(-r["energy"])

        # 4. Combine: z-normalize each signal, then fixed weights
        # (sentence 1.0, FHRR 0.3, NGC 0.2).
        def znorm(a):
            s = a.std()
            return (a - a.mean()) / s if s > 1e-10 else np.zeros_like(a)

        scores_arr = znorm(np.array(sentence_sims)) * 1.0 + znorm(np.array(fhrr_sims)) * 0.3 + znorm(np.array(ngc_energies)) * 0.2

        # 5. Gate: abstain when sentence similarities barely separate the
        # choices (low coefficient of variation) or the softmax over the
        # combined scores is near-uniform (normalized entropy > 0.97).
        sa = np.array(sentence_sims)
        spread = float(sa.max() - sa.min())
        mean = float(np.abs(sa).mean())
        cv = spread / mean if mean > 1e-8 else 0.0

        shifted = scores_arr - scores_arr.max()
        probs = np.exp(shifted)
        probs = probs / probs.sum() if probs.sum() > 0 else np.ones(n) / n
        # Entropy normalized by log(n) so it lies in [0, 1] regardless of
        # the number of choices (max(n, 2) guards n == 1).
        entropy = float(-np.sum(probs * np.log(probs + 1e-16)) / np.log(max(n, 2)))

        # Fewer choices → stricter (scaled-up) CV threshold.
        thresh = self.confidence_threshold * (3.0 if n <= 2 else 2.0 if n <= 3 else 1.5)
        if cv < thresh or entropy > 0.97:
            self._total_gated += 1
            return [0.0] * n, 1.0
        return scores_arr.tolist(), entropy

    def _sentence_similarities(self, prompt, choices):
        """Cosine similarities prompt↔choice via SBERT, FHRR as fallback.

        Uses the encoder's SBERT model when it loads; otherwise falls back
        to token-level FHRR similarity so the method always returns a value
        per choice.
        """
        features = self.field.encoder.features
        # Lazily trigger the SBERT load if the feature extractor supports it.
        if hasattr(features, '_ensure_sbert') and getattr(features, '_sbert', None) is None:
            features._ensure_sbert()
        if hasattr(features, '_sbert') and features._sbert is not None and features._sbert != "FALLBACK":
            embs = features._sbert.encode([prompt] + choices, show_progress_bar=False)
            pe, pn = embs[0], np.linalg.norm(embs[0])
            # Zero-norm embeddings (degenerate inputs) score 0.0.
            return [float(np.dot(pe, embs[i+1]) / (pn * np.linalg.norm(embs[i+1])))
                    if pn > 1e-8 and np.linalg.norm(embs[i+1]) > 1e-8 else 0.0
                    for i in range(len(choices))]
        # FHRR fallback path (SBERT unavailable).
        pt = self._tokenize_smart(prompt, 64)
        pf = self.field.encoder.encode_sequence(pt) if pt else np.ones(self.field.fhrr_dim, dtype=np.complex64)
        out = []
        for c in choices:
            ct = self._tokenize_smart(c, 32)
            enc = self.field.encoder.encode_sequence(ct) if ct else np.ones(self.field.fhrr_dim, dtype=np.complex64)
            out.append(self.field.encoder.similarity(pf, enc))
        return out

    def sentence_similarities(self, prompt, choices):
        """Public alias for SBERT/FHRR sentence-level similarity tie-breaks (see ``_sentence_similarities``)."""
        return self._sentence_similarities(prompt, choices)

    def reset(self):
        """Reset field state between benchmark tasks (fixed NGC reseed).

        NOTE(review): reaches into Hopfield-memory internals (_matrix,
        _dirty) — keep in sync with the memory implementation.
        """
        self.field.ngc.reinitialize(12345)
        self.field.memory.patterns.clear()
        self.field.memory._matrix = None
        self.field.memory._dirty = True
        self.field.energy_history.clear()
        self.field._step_count = 0

    @property
    def statistics(self):
        # gate_rate: fraction of score_choices calls that abstained.
        return {"total_scored": self._total_scored, "total_gated": self._total_gated,
                "gate_rate": self._total_gated / max(self._total_scored, 1)}
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/legacy/__init__.py
DELETED
|
@@ -1,5 +0,0 @@
|
|
| 1 |
-
"""Legacy compatibility modules for architectures superseded by the unified field."""
|
| 2 |
-
|
| 3 |
-
from . import v1
|
| 4 |
-
|
| 5 |
-
__all__ = ("v1",)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/legacy/v1/__init__.py
DELETED
|
@@ -1,19 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Legacy V1 substrate.
|
| 3 |
-
|
| 4 |
-
This package contains the Morton-coded Markov blanket, flat POMDP active
|
| 5 |
-
inference loop, and compatibility ``TensegrityAgent`` facade. New integrations
|
| 6 |
-
should prefer ``tensegrity.core`` / ``tensegrity.engine`` and the
|
| 7 |
-
``UnifiedField`` energy landscape.
|
| 8 |
-
"""
|
| 9 |
-
|
| 10 |
-
from tensegrity.legacy.v1.agent import DEFAULT_MEDIATED_SCM_NAME, TensegrityAgent
|
| 11 |
-
from tensegrity.legacy.v1.blanket import MarkovBlanket
|
| 12 |
-
from tensegrity.legacy.v1.morton import MortonEncoder
|
| 13 |
-
|
| 14 |
-
__all__ = (
|
| 15 |
-
"DEFAULT_MEDIATED_SCM_NAME",
|
| 16 |
-
"TensegrityAgent",
|
| 17 |
-
"MarkovBlanket",
|
| 18 |
-
"MortonEncoder",
|
| 19 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/legacy/v1/agent.py
DELETED
|
@@ -1,497 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
TensegrityAgent: The complete cognitive architecture.
|
| 3 |
-
|
| 4 |
-
Integrates all components into a single agent that:
|
| 5 |
-
1. Receives modality-agnostic observations (Morton-encoded)
|
| 6 |
-
2. Updates beliefs via free energy minimization (no gradients)
|
| 7 |
-
3. Maintains three memory systems (epistemic, episodic, associative)
|
| 8 |
-
4. Runs competing causal models in the arena
|
| 9 |
-
5. Selects actions that minimize expected free energy
|
| 10 |
-
6. Generates epistemic actions to resolve model uncertainty
|
| 11 |
-
|
| 12 |
-
The name "Tensegrity" comes from the architectural principle where
|
| 13 |
-
structural integrity comes from the balance of tension and compression.
|
| 14 |
-
Here, the system's cognitive integrity comes from the tension between
|
| 15 |
-
competing causal models (compression = model evidence, tension = model
|
| 16 |
-
disagreement) balanced by the free energy principle.
|
| 17 |
-
"""
|
| 18 |
-
|
| 19 |
-
import hashlib
|
| 20 |
-
import inspect
|
| 21 |
-
import numpy as np
|
| 22 |
-
from typing import Optional, Dict, List, Any, Tuple
|
| 23 |
-
import logging
|
| 24 |
-
|
| 25 |
-
from tensegrity.legacy.v1.morton import MortonEncoder
|
| 26 |
-
from tensegrity.legacy.v1.blanket import MarkovBlanket
|
| 27 |
-
from tensegrity.memory.epistemic import EpistemicMemory
|
| 28 |
-
from tensegrity.memory.episodic import EpisodicMemory
|
| 29 |
-
from tensegrity.memory.associative import AssociativeMemory
|
| 30 |
-
from tensegrity.causal.arena import CausalArena
|
| 31 |
-
from tensegrity.causal.scm import StructuralCausalModel
|
| 32 |
-
from tensegrity.inference.free_energy import FreeEnergyEngine
|
| 33 |
-
from tensegrity.engine.unified_field import UnifiedField
|
| 34 |
-
|
| 35 |
-
logger = logging.getLogger(__name__)
|
| 36 |
-
|
| 37 |
-
# Default SCM registered in ``_init_default_models`` whose observation vector includes ``cause``.
|
| 38 |
-
DEFAULT_MEDIATED_SCM_NAME = "mediated_causal"
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
class TensegrityAgent:
|
| 42 |
-
"""
|
| 43 |
-
A non-gradient cognitive agent.
|
| 44 |
-
|
| 45 |
-
The agent perceives the world through Morton-coded observations,
|
| 46 |
-
maintains beliefs via Bayesian updates, resolves competing causal
|
| 47 |
-
explanations in an adversarial arena, and acts to minimize
|
| 48 |
-
expected free energy.
|
| 49 |
-
|
| 50 |
-
No backpropagation. No gradient descent. No optimizer state.
|
| 51 |
-
|
| 52 |
-
All learning is:
|
| 53 |
-
- Dirichlet counting (epistemic memory)
|
| 54 |
-
- Context drift (episodic memory)
|
| 55 |
-
- Energy minimization via Hopfield dynamics (associative memory)
|
| 56 |
-
- Bayesian model comparison (causal arena)
|
| 57 |
-
- Fixed-point iteration (belief propagation)
|
| 58 |
-
"""
|
| 59 |
-
|
| 60 |
-
def __init__(self,
|
| 61 |
-
n_states: int = 16,
|
| 62 |
-
n_observations: int = 32,
|
| 63 |
-
n_actions: int = 4,
|
| 64 |
-
sensory_dims: int = 4,
|
| 65 |
-
sensory_bits: int = 8,
|
| 66 |
-
context_dim: int = 64,
|
| 67 |
-
associative_dim: int = 128,
|
| 68 |
-
planning_horizon: int = 3,
|
| 69 |
-
precision: float = 4.0,
|
| 70 |
-
zipf_exponent: float = 1.0,
|
| 71 |
-
unified_obs_dim: int = 256,
|
| 72 |
-
unified_hidden_dims: Optional[List[int]] = None,
|
| 73 |
-
unified_fhrr_dim: int = 2048,
|
| 74 |
-
unified_hopfield_beta: float = 0.01,
|
| 75 |
-
unified_ngc_settle_steps: int = 20,
|
| 76 |
-
unified_ngc_learning_rate: float = 0.005,
|
| 77 |
-
epistemic_tension_threshold: float = 0.5,
|
| 78 |
-
epistemic_info_gain_threshold: float = 0.1):
|
| 79 |
-
"""
|
| 80 |
-
Args:
|
| 81 |
-
n_states: Number of hidden states in the generative model
|
| 82 |
-
n_observations: Number of observation categories
|
| 83 |
-
n_actions: Number of possible actions
|
| 84 |
-
sensory_dims: Dimensionality of raw sensory input
|
| 85 |
-
sensory_bits: Bits per dimension for Morton encoding
|
| 86 |
-
context_dim: Dimensionality of episodic context vectors
|
| 87 |
-
associative_dim: Dimensionality of associative memory patterns
|
| 88 |
-
planning_horizon: How far ahead to plan
|
| 89 |
-
precision: Inverse temperature for policy selection
|
| 90 |
-
zipf_exponent: Controls power-law memory access
|
| 91 |
-
unified_obs_dim: Observation layer width for UnifiedField (default matches prior hardcoded wiring)
|
| 92 |
-
unified_hidden_dims: NGC hidden layer sizes; defaults to ``[128, 32]`` when None
|
| 93 |
-
unified_fhrr_dim: FHRR encoder dimensionality
|
| 94 |
-
unified_hopfield_beta: Hopfield inverse temperature in UnifiedField
|
| 95 |
-
unified_ngc_settle_steps: NGC settling iterations
|
| 96 |
-
unified_ngc_learning_rate: Hebbian learning rate inside UnifiedField
|
| 97 |
-
epistemic_tension_threshold: Only run costly intervention search when causal tension exceeds this level
|
| 98 |
-
epistemic_info_gain_threshold: Minimum estimated information gain required for epistemic actions
|
| 99 |
-
"""
|
| 100 |
-
def _req_pos_int(name: str, v: Any) -> int:
|
| 101 |
-
if not isinstance(v, int) or int(v) < 1:
|
| 102 |
-
raise ValueError(f"{name} must be a positive integer")
|
| 103 |
-
return int(v)
|
| 104 |
-
|
| 105 |
-
n_states = _req_pos_int("n_states", n_states)
|
| 106 |
-
n_observations = _req_pos_int("n_observations", n_observations)
|
| 107 |
-
n_actions = _req_pos_int("n_actions", n_actions)
|
| 108 |
-
sensory_dims = _req_pos_int("sensory_dims", sensory_dims)
|
| 109 |
-
sensory_bits = _req_pos_int("sensory_bits", sensory_bits)
|
| 110 |
-
context_dim = _req_pos_int("context_dim", context_dim)
|
| 111 |
-
associative_dim = _req_pos_int("associative_dim", associative_dim)
|
| 112 |
-
if not isinstance(planning_horizon, int) or planning_horizon < 1:
|
| 113 |
-
raise ValueError("planning_horizon must be a positive integer")
|
| 114 |
-
if precision < 0.0:
|
| 115 |
-
raise ValueError("precision must be non-negative")
|
| 116 |
-
if zipf_exponent < 0.0:
|
| 117 |
-
raise ValueError("zipf_exponent must be non-negative")
|
| 118 |
-
unified_obs_dim = _req_pos_int("unified_obs_dim", unified_obs_dim)
|
| 119 |
-
if unified_hidden_dims is not None:
|
| 120 |
-
if not isinstance(unified_hidden_dims, list) or any(
|
| 121 |
-
not isinstance(x, int) or x < 1 for x in unified_hidden_dims
|
| 122 |
-
):
|
| 123 |
-
raise ValueError("unified_hidden_dims must be a list of positive integers")
|
| 124 |
-
unified_fhrr_dim = _req_pos_int("unified_fhrr_dim", unified_fhrr_dim)
|
| 125 |
-
if unified_hopfield_beta < 0.0:
|
| 126 |
-
raise ValueError("unified_hopfield_beta must be non-negative")
|
| 127 |
-
unified_ngc_settle_steps = _req_pos_int("unified_ngc_settle_steps", unified_ngc_settle_steps)
|
| 128 |
-
if unified_ngc_learning_rate < 0.0:
|
| 129 |
-
raise ValueError("unified_ngc_learning_rate must be non-negative")
|
| 130 |
-
if not (0.0 <= float(epistemic_tension_threshold) <= 1.0):
|
| 131 |
-
raise ValueError("epistemic_tension_threshold must be in [0, 1]")
|
| 132 |
-
if not (0.0 <= float(epistemic_info_gain_threshold) <= 1.0):
|
| 133 |
-
raise ValueError("epistemic_info_gain_threshold must be in [0, 1]")
|
| 134 |
-
|
| 135 |
-
self.n_states = n_states
|
| 136 |
-
self.n_obs = n_observations
|
| 137 |
-
self.n_actions = n_actions
|
| 138 |
-
|
| 139 |
-
# === SENSORY INTERFACE (Markov Blanket) ===
|
| 140 |
-
self.encoder = MortonEncoder(n_dims=sensory_dims, bits_per_dim=sensory_bits)
|
| 141 |
-
self.blanket = MarkovBlanket(
|
| 142 |
-
encoder=self.encoder,
|
| 143 |
-
n_sensory_channels=1,
|
| 144 |
-
n_active_channels=1,
|
| 145 |
-
observation_buffer_size=256
|
| 146 |
-
)
|
| 147 |
-
|
| 148 |
-
# === MEMORY SYSTEMS ===
|
| 149 |
-
self.epistemic = EpistemicMemory(
|
| 150 |
-
n_states=n_states,
|
| 151 |
-
n_observations=n_observations,
|
| 152 |
-
n_actions=n_actions,
|
| 153 |
-
zipf_exponent=zipf_exponent
|
| 154 |
-
)
|
| 155 |
-
|
| 156 |
-
self.episodic = EpisodicMemory(
|
| 157 |
-
context_dim=context_dim,
|
| 158 |
-
capacity=10000,
|
| 159 |
-
drift_rate=0.95,
|
| 160 |
-
encoding_strength=0.3,
|
| 161 |
-
zipf_exponent=zipf_exponent
|
| 162 |
-
)
|
| 163 |
-
|
| 164 |
-
self.associative = AssociativeMemory(
|
| 165 |
-
pattern_dim=associative_dim,
|
| 166 |
-
beta=precision,
|
| 167 |
-
max_patterns=5000,
|
| 168 |
-
zipf_exponent=zipf_exponent
|
| 169 |
-
)
|
| 170 |
-
|
| 171 |
-
# === INFERENCE ENGINE ===
|
| 172 |
-
self.engine = FreeEnergyEngine(
|
| 173 |
-
n_states=n_states,
|
| 174 |
-
n_observations=n_observations,
|
| 175 |
-
n_actions=n_actions,
|
| 176 |
-
planning_horizon=planning_horizon,
|
| 177 |
-
precision=precision,
|
| 178 |
-
policy_depth=min(planning_horizon, 3)
|
| 179 |
-
)
|
| 180 |
-
|
| 181 |
-
# === CAUSAL ARENA ===
|
| 182 |
-
self.arena = CausalArena(
|
| 183 |
-
prior_concentration=1.0,
|
| 184 |
-
falsification_threshold=-100.0,
|
| 185 |
-
min_models=2
|
| 186 |
-
)
|
| 187 |
-
|
| 188 |
-
# === AGENT STATE ===
|
| 189 |
-
self._step_count = 0
|
| 190 |
-
self._total_surprise = 0.0
|
| 191 |
-
self._total_free_energy = 0.0
|
| 192 |
-
self._prev_belief_for_transition: Optional[np.ndarray] = None
|
| 193 |
-
self._pending_action: Optional[int] = None
|
| 194 |
-
self._pending_action_confidence: float = 0.0
|
| 195 |
-
self._last_action_distribution: Optional[np.ndarray] = None
|
| 196 |
-
self.epistemic_tension_threshold = float(epistemic_tension_threshold)
|
| 197 |
-
self.epistemic_info_gain_threshold = float(epistemic_info_gain_threshold)
|
| 198 |
-
|
| 199 |
-
# Initialize with default competing models
|
| 200 |
-
self._init_default_models()
|
| 201 |
-
|
| 202 |
-
u_hidden = unified_hidden_dims if unified_hidden_dims is not None else [128, 32]
|
| 203 |
-
# Single perceptual substrate: FHRR → NGC → Hopfield (replaces parallel Morton-sense path).
|
| 204 |
-
self.field = UnifiedField(
|
| 205 |
-
obs_dim=unified_obs_dim,
|
| 206 |
-
hidden_dims=u_hidden,
|
| 207 |
-
fhrr_dim=unified_fhrr_dim,
|
| 208 |
-
hopfield_beta=unified_hopfield_beta,
|
| 209 |
-
ngc_settle_steps=unified_ngc_settle_steps,
|
| 210 |
-
ngc_learning_rate=unified_ngc_learning_rate,
|
| 211 |
-
)
|
| 212 |
-
|
| 213 |
-
def _init_default_models(self):
    """Seed the causal arena with two rival hypotheses.

    Model A ("direct_causal"): the hidden state generates the observation
    directly. Model B (mediated): an upstream cause drives the state, which
    in turn generates the observation. Registering both lets the arena hold
    tension between the simple and the complex explanation.
    """
    # Hypothesis A: observation depends only on the hidden state.
    direct = StructuralCausalModel(name="direct_causal")
    direct.add_variable("state", n_values=self.n_states)
    direct.add_variable("observation", n_values=self.n_obs,
                        parents=["state"])

    # Hypothesis B: cause -> state -> observation chain.
    mediated = StructuralCausalModel(name=DEFAULT_MEDIATED_SCM_NAME)
    mediated.add_variable("cause", n_values=self.n_states)
    mediated.add_variable("state", n_values=self.n_states,
                          parents=["cause"])
    mediated.add_variable("observation", n_values=self.n_obs,
                          parents=["state"])

    for candidate in (direct, mediated):
        self.arena.register_model(candidate)
|
| 238 |
-
|
| 239 |
-
def _morton_to_obs_index(self, morton_codes: np.ndarray) -> int:
|
| 240 |
-
"""Map Morton codes to a discrete observation index (legacy hashing).
|
| 241 |
-
|
| 242 |
-
The main ``perceive`` path fingerprints the unified observation vector
|
| 243 |
-
with SHA-256 modulo ``n_obs``; use this routine only where an explicit
|
| 244 |
-
Morton-code → observation-bin mapping is intentional.
|
| 245 |
-
"""
|
| 246 |
-
if self.n_obs <= 0:
|
| 247 |
-
raise ValueError(
|
| 248 |
-
"n_observations must be a positive integer for _morton_to_obs_index mapping"
|
| 249 |
-
)
|
| 250 |
-
if isinstance(morton_codes, (int, np.integer)):
|
| 251 |
-
return int(morton_codes) % self.n_obs
|
| 252 |
-
# For multiple codes, hash the combination
|
| 253 |
-
combined = 0
|
| 254 |
-
for code in morton_codes:
|
| 255 |
-
combined ^= int(code)
|
| 256 |
-
return combined % self.n_obs
|
| 257 |
-
|
| 258 |
-
def _obs_to_associative_pattern(self, observation: int,
|
| 259 |
-
belief_state: np.ndarray) -> np.ndarray:
|
| 260 |
-
"""Project observation + belief into associative memory space."""
|
| 261 |
-
rng = np.random.RandomState(observation)
|
| 262 |
-
|
| 263 |
-
# Combine observation (one-hot) and belief state
|
| 264 |
-
obs_vec = np.zeros(self.n_obs)
|
| 265 |
-
obs_vec[observation] = 1.0
|
| 266 |
-
combined = np.concatenate([obs_vec, belief_state])
|
| 267 |
-
|
| 268 |
-
# Random projection to associative_dim
|
| 269 |
-
W = rng.randn(self.associative.dim, len(combined)) / np.sqrt(len(combined))
|
| 270 |
-
pattern = W @ combined
|
| 271 |
-
norm = np.linalg.norm(pattern)
|
| 272 |
-
if norm > 0:
|
| 273 |
-
pattern /= norm
|
| 274 |
-
return pattern
|
| 275 |
-
|
| 276 |
-
def perceive(self, raw_observation: np.ndarray) -> Dict[str, Any]:
    """
    One perception path: numeric vector → UnifiedField (FHRR / NGC / Hopfield)
    → discrete observation index → active inference engine → causal arena.

    Episodic and classical Hopfield associative traces are not written here;
    memory consolidation for this path lives inside UnifiedField.

    Args:
        raw_observation: numeric array; flattened to 1-D float64 before use.

    Returns:
        Dict with the step counter, discrete observation index, belief state,
        free energy, surprise, sampled action + confidence, arena competition
        result, and the raw UnifiedField cycle output.
    """
    self._step_count += 1
    raw = np.asarray(raw_observation, dtype=np.float64).ravel()

    # Run one full field cycle; 'energy' carries the decomposed free-energy
    # terms (surprise, memory, total) used below.
    cycle = self.field.observe(raw, input_type="numeric")
    obs_vec = cycle["observation"]
    decomp = cycle["energy"]
    surprise = float(decomp.surprise)

    # Integer-safe deterministic index from observation vector (avoid float dot overflow)
    h = hashlib.sha256(obs_vec.astype(np.float64, copy=False).tobytes()).digest()
    obs_idx = int.from_bytes(h[:8], byteorder="big", signed=False) % max(self.n_obs, 1)

    # Generative-model matrices from the epistemic memory (active-inference
    # convention: A likelihood, B transitions, C preferences, D prior).
    A = self.epistemic.A
    B = self.epistemic.B
    C = self.epistemic.C
    D = self.epistemic.D
    log_A = self.epistemic.log_A

    # Capture the action that actually led into this transition before
    # ``engine.step`` samples the next action for the current state.
    previous_action = self.engine.prev_action
    inference_result = self.engine.step(obs_idx, A, B, C, D, log_A)
    q_states = inference_result["belief_state"]
    F = float(inference_result["free_energy"])

    # Stash the freshly sampled action; ``act()`` consumes and clears it.
    self._pending_action = int(inference_result["action"])
    self._pending_action_confidence = float(inference_result["action_confidence"])

    # Online Dirichlet learning: likelihood always, transition only once a
    # (previous belief, previous action) pair exists.
    self.epistemic.update_likelihood(obs_idx, q_states)
    if (previous_action is not None
            and self._prev_belief_for_transition is not None):
        self.epistemic.update_transition(
            self._prev_belief_for_transition, q_states,
            previous_action)
    self._prev_belief_for_transition = q_states.copy()

    # MAP state feeds the causal competition; the mediated model also
    # receives a "cause" variable when it is registered.
    causal_obs = {
        "state": int(np.argmax(q_states)),
        "observation": obs_idx,
    }
    if DEFAULT_MEDIATED_SCM_NAME in self.arena.models:
        causal_obs["cause"] = int(np.argmax(q_states))

    arena_result = self.arena.compete(causal_obs)

    obs_codes = np.array([obs_idx], dtype=np.int64)
    self.blanket.surprise = surprise

    # Keep all memory systems live on the unified perception path. Earlier
    # versions updated only the UnifiedField's internal Hopfield bank, which
    # left experience replay and agent introspection effectively empty.
    assoc_pattern = self._obs_to_associative_pattern(obs_idx, q_states)
    self.associative.store(
        assoc_pattern,
        metadata={"step": self._step_count, "obs_idx": obs_idx, "free_energy": F},
    )
    self.episodic.encode(
        observation=raw,
        morton_code=obs_codes,
        belief_state=q_states,
        action=self._pending_action,
        surprise=surprise,
        free_energy=F,
        metadata={
            "obs_idx": obs_idx,
            "field_energy": float(decomp.total),
            "memory_similarity": float(cycle.get("memory_similarity", 0.0)),
        },
    )

    # Running totals back the averages reported by ``introspect``.
    self._total_surprise += surprise
    self._total_free_energy += F

    return {
        "step": self._step_count,
        "obs_codes": obs_codes,
        "observation_index": obs_idx,
        "belief_state": q_states,
        "free_energy": F,
        "surprise": surprise,
        "action": inference_result["action"],
        "action_confidence": inference_result["action_confidence"],
        "arena": arena_result,
        "associative_energy": float(decomp.memory),
        "epistemic_value": self.engine.epistemic_value,
        "pragmatic_value": self.engine.pragmatic_value,
        "field_cycle": cycle,
    }
|
| 372 |
-
|
| 373 |
-
def act(self) -> Dict[str, Any]:
    """
    Select and emit an action through the active boundary.

    Uses the policy posterior from the last perception step.
    Also checks if an epistemic action (experiment) would be more valuable.

    Returns:
        Either an 'epistemic' dict (a suggested experiment plus the tension
        that triggered it) or a 'pragmatic' dict (selected action index,
        confidence, marginal action distribution, last free energy).
        Consumes and clears the pending action set by ``perceive``.
    """
    # Check if an experiment would help resolve causal tension. Intervention
    # search is intentionally gated because it performs model rollouts; when
    # the model posterior is already sharp, this was the dominant runtime cost.
    current_tension = self.arena.current_tension
    experiment = None
    if current_tension >= self.epistemic_tension_threshold:
        experiment = self.arena.suggest_experiment()

    # Compare epistemic value of experiment vs pragmatic action
    if (experiment is not None and
            experiment["expected_info_gain"] > self.epistemic_info_gain_threshold):
        # Epistemic action: run an experiment to resolve tension
        return {
            'type': 'epistemic',
            'experiment': experiment,
            'reason': 'High causal tension — exploring to resolve',
            'tension': current_tension,
        }

    # Pragmatic action: act to achieve preferences.
    # Marginalize the policy posterior over first actions; each policy's
    # probability mass is credited to its initial action.
    action_dist = np.zeros(self.n_actions)
    for pi_idx, policy in enumerate(self.engine.policies):
        if len(policy) > 0:
            action_dist[policy[0]] += self.engine.q_policies[pi_idx]
    if action_dist.sum() > 0:
        action_dist /= action_dist.sum()
    else:
        # No mass at all (e.g. all policies empty): fall back to uniform.
        action_dist[:] = 1.0 / self.n_actions
    self._last_action_distribution = action_dist.copy()

    if self._pending_action is None:
        # Allows act() to be called before the first perceive().
        action, confidence = self.engine.select_action()
        self._pending_action = int(action)
        self._pending_action_confidence = float(confidence)
    selected = int(self._pending_action)
    confidence = float(self._pending_action_confidence)
    self.blanket.active_state = np.array([selected])
    # One-shot semantics: the pending action is cleared once emitted.
    self._pending_action = None
    self._pending_action_confidence = 0.0

    return {
        'type': 'pragmatic',
        'action': selected,
        'confidence': confidence,
        'action_distribution': action_dist,
        'free_energy': self.engine.F_history[-1] if self.engine.F_history else None,
    }
|
| 428 |
-
|
| 429 |
-
def experience_replay(self, n_episodes: int = 10) -> Dict[str, Any]:
    """Offline consolidation: re-learn from sampled past episodes.

    Each replayed episode's (observation index, belief) pair is pushed back
    through the epistemic memory's likelihood update, so experiences the
    episodic store samples (weighted by surprise) keep shaping the Dirichlet
    parameters after the fact.

    Returns:
        Dict with the replay count, mean surprise of the sampled episodes
        (0 when nothing was replayed), and the post-update epistemic entropy.
    """
    sampled = self.episodic.replay(n_episodes)

    for episode in sampled:
        self.epistemic.update_likelihood(
            episode.metadata.get('obs_idx', 0),
            episode.belief_state,
        )

    mean_surprise = np.mean([e.surprise for e in sampled]) if sampled else 0
    return {
        'episodes_replayed': len(sampled),
        'mean_surprise': mean_surprise,
        'epistemic_entropy': self.epistemic.entropy(),
    }
|
| 448 |
-
|
| 449 |
-
def introspect(self) -> Dict[str, Any]:
    """
    Full introspection: report on all system components.

    Returns a snapshot dict covering running averages (surprise, free
    energy), per-subsystem statistics (engine, arena, memories), the current
    blanket state, and the last 20 entries of the tension and free-energy
    trajectories.
    """
    return {
        'step': self._step_count,
        # max(..., 1) guards the averages before the first perceive() call.
        'average_surprise': self._total_surprise / max(self._step_count, 1),
        'average_free_energy': self._total_free_energy / max(self._step_count, 1),
        'inference': self.engine.statistics,
        'arena': self.arena.statistics,
        'epistemic_memory': {
            'entropy': self.epistemic.entropy(),
            'access_distribution': self.epistemic.get_access_distribution(),
        },
        'episodic_memory': self.episodic.statistics,
        'associative_memory': self.associative.statistics,
        'blanket': self.blanket.state,
        # Only the recent tail is exposed to keep the report lightweight.
        'tension_trajectory': self.arena.tension_history[-20:],
        'free_energy_trajectory': self.engine.F_history[-20:],
    }
|
| 469 |
-
|
| 470 |
-
def add_causal_model(self, model: StructuralCausalModel):
    """Register an additional competing causal model with the arena."""
    self.arena.register_model(model)
|
| 473 |
-
|
| 474 |
-
def counterfactual(self, evidence: Dict[str, int],
                   intervention: Dict[str, int],
                   query: List[str]) -> Dict[str, Any]:
    """Pose "what would have happened if we had done X?" to every model.

    Delegates to the arena, where each registered model answers the
    counterfactual independently; disagreement between their answers is
    what surfaces as tension.
    """
    comparison = self.arena.counterfactual_comparison(
        evidence, intervention, query)
    return comparison
|
| 483 |
-
|
| 484 |
-
@classmethod
def from_config(cls, config: Dict[str, Any]) -> 'TensegrityAgent':
    """Build an agent from a configuration mapping, dropping unknown keys.

    Only keys matching ``__init__`` parameter names (other than ``self``)
    are forwarded, so stale or extra config entries are silently ignored.
    """
    accepted = set(inspect.signature(cls.__init__).parameters) - {"self"}
    return cls(**{key: val for key, val in config.items() if key in accepted})
|
| 491 |
-
|
| 492 |
-
def __repr__(self):
|
| 493 |
-
return (f"TensegrityAgent(states={self.n_states}, obs={self.n_obs}, "
|
| 494 |
-
f"actions={self.n_actions}, step={self._step_count}, "
|
| 495 |
-
f"tension={self.arena.current_tension:.3f})")
|
| 496 |
-
|
| 497 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/legacy/v1/blanket.py
DELETED
|
@@ -1,218 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Markov Blanket: The computational boundary of the agent.
|
| 3 |
-
|
| 4 |
-
In Friston's formalism, the Markov blanket separates internal states (beliefs)
|
| 5 |
-
from external states (world). It consists of:
|
| 6 |
-
- Sensory states (S): what flows IN from the world (observations)
|
| 7 |
-
- Active states (A): what flows OUT to the world (actions)
|
| 8 |
-
|
| 9 |
-
The blanket enforces conditional independence:
|
| 10 |
-
Internal ⊥ External | Blanket
|
| 11 |
-
|
| 12 |
-
This is not a metaphor. It's the literal statistical boundary that defines
|
| 13 |
-
where the agent ends and the world begins. The blanket nodes are the ONLY
|
| 14 |
-
points of contact between the agent's belief states and external reality.
|
| 15 |
-
|
| 16 |
-
Implementation: The blanket manages the flow of Morton-coded observations
|
| 17 |
-
in and action selections out. It also maintains the observation buffer
|
| 18 |
-
that feeds into the free energy engine.
|
| 19 |
-
"""
|
| 20 |
-
|
| 21 |
-
import numpy as np
|
| 22 |
-
from typing import Optional, Dict, Any, List, Tuple
|
| 23 |
-
from collections import deque
|
| 24 |
-
|
| 25 |
-
from tensegrity.legacy.v1.morton import MortonEncoder
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
class MarkovBlanket:
    """
    The agent's interface with the world.

    Sensory states receive Morton-coded observations.
    Active states emit discrete actions.

    The blanket enforces the Markov property: internal states
    are conditionally independent of external states given the blanket.

    ``n_sensory`` / ``n_active`` mirror constructor channel counts and are
    reserved for future multi-channel I/O; ``sense`` still ingests vectors
    shaped for ``encoder.n_dims``, and ``act`` consumes the full softmax over
    actions passed in.
    """

    def __init__(self,
                 encoder: MortonEncoder,
                 n_sensory_channels: int = 1,
                 n_active_channels: int = 1,
                 observation_buffer_size: int = 64):
        """
        Args:
            encoder: MortonEncoder for sensory preprocessing
            n_sensory_channels: Number of parallel sensory channels
            n_active_channels: Number of action dimensions
            observation_buffer_size: How many past observations to retain
        """
        self.encoder = encoder
        self.n_sensory = n_sensory_channels
        self.n_active = n_active_channels

        # Current blanket state
        self.sensory_state: Optional[np.ndarray] = None  # Morton codes
        self.active_state: Optional[np.ndarray] = None  # Action indices

        # Observation buffer — recent history for temporal inference
        self.observation_buffer: deque = deque(maxlen=observation_buffer_size)

        # Running stats for surprise — per-coordinate counts (variable-length obs).
        # Sums/counts are grown lazily in _update_statistics as longer
        # observations arrive, so early short vectors don't fix the size.
        self._sense_timestep = 0
        self._obs_sum: Optional[np.ndarray] = None
        self._obs_sq_sum: Optional[np.ndarray] = None
        self._obs_elem_count: Optional[np.ndarray] = None

        # Blanket surprise (how unexpected was the last observation?)
        self.surprise: float = 0.0

    def sense(self, raw_observation: np.ndarray, *, allow_multi_point_1d: bool = False) -> np.ndarray:
        """
        Process a raw observation through the sensory boundary.

        1. Morton-encode the raw data
        2. Update the observation buffer
        3. Compute surprise (deviation from running statistics)

        Args:
            raw_observation: Array shaped ``(n_points, encoder.n_dims)``, or ``(n_dims,)``
                for one point. One-dimensional vectors whose length is not ``n_dims``
                are rejected unless ``allow_multi_point_1d=True`` is set, which treats
                the vector as a column (``reshape(-1, 1)``) of scalar observations —
                callers should prefer supplying an explicit `(n_points, n_dims)` array.

        Returns:
            Morton-coded observation as integer array
        """
        # Ensure proper shape for Morton encoding
        if raw_observation.ndim == 1:
            if len(raw_observation) == self.encoder.n_dims:
                raw_observation = raw_observation.reshape(1, -1)
            elif allow_multi_point_1d:
                raw_observation = raw_observation.reshape(-1, 1)
            else:
                raise ValueError(
                    f"One-dimensional sensory input length {len(raw_observation)} does not match "
                    f"encoder.n_dims ({self.encoder.n_dims}). Pass shape "
                    "(n_points, n_dims), a length-n_dims vector for one observation, "
                    "or opt in with allow_multi_point_1d=True for reshape(-1, 1)."
                )

        # Morton encode; normalize a scalar return into a 1-element array
        # so downstream .copy()/indexing is uniform.
        morton_codes = self.encoder.encode_continuous(raw_observation)
        if isinstance(morton_codes, (int, np.integer)):
            morton_codes = np.array([morton_codes])

        self._sense_timestep += 1

        # Update running statistics for surprise computation
        self._update_statistics(raw_observation)

        # Compute surprise: -log P(observation) under running model.
        # Note the current observation is included in the statistics first.
        self.surprise = self._compute_surprise(raw_observation)

        # Store in buffer
        self.sensory_state = morton_codes
        self.observation_buffer.append({
            'morton': morton_codes.copy(),
            'raw': raw_observation.copy(),
            'surprise': self.surprise,
            'timestamp': self._sense_timestep
        })

        return morton_codes

    def act(self, action_distribution: np.ndarray) -> int:
        """
        Select an action through the active boundary.

        The action is sampled from the distribution provided by the
        inference engine (policy = softmax over expected free energies).

        Args:
            action_distribution: Probability distribution over actions.

        Returns:
            Selected action index.
        """
        # Ensure valid distribution: clamp away zeros, then renormalize.
        action_distribution = np.asarray(action_distribution, dtype=np.float64)
        action_distribution = np.maximum(action_distribution, 1e-16)
        action_distribution /= action_distribution.sum()

        # Sample action (uses NumPy's global RNG; not seeded here)
        action = np.random.choice(len(action_distribution), p=action_distribution)
        self.active_state = np.array([action])
        return int(action)

    def _update_statistics(self, observation: np.ndarray):
        """Update running statistics for surprise computation."""
        flat = np.asarray(observation, dtype=np.float64).flatten()

        # Lazily allocate per-coordinate accumulators on first observation.
        if self._obs_sum is None:
            self._obs_sum = np.zeros(len(flat), dtype=np.float64)
            self._obs_sq_sum = np.zeros(len(flat), dtype=np.float64)
            self._obs_elem_count = np.zeros(len(flat), dtype=np.float64)

        # Grow the accumulators if this observation is longer than any seen
        # before; new coordinates start with zero counts.
        lf, ls = len(flat), len(self._obs_sum)
        if lf > ls:
            self._obs_sum = np.pad(self._obs_sum, (0, lf - ls), mode='constant')
            self._obs_sq_sum = np.pad(self._obs_sq_sum, (0, lf - ls), mode='constant')
            self._obs_elem_count = np.pad(self._obs_elem_count, (0, lf - ls), mode='constant')

        # Only the overlapping prefix is updated for shorter observations.
        n = min(lf, len(self._obs_sum))
        self._obs_sum[:n] += flat[:n]
        self._obs_sq_sum[:n] += flat[:n] ** 2
        self._obs_elem_count[:n] += 1.0

    def _compute_surprise(self, observation: np.ndarray) -> float:
        """
        Compute Bayesian surprise: -log P(o) under running Gaussian model.

        This is a simple proxy — the full surprise comes from the
        free energy engine. But this gives a fast heuristic at the boundary.
        """
        flat = np.asarray(observation, dtype=np.float64).flatten()
        assert self._obs_sum is not None and self._obs_elem_count is not None
        n = min(len(flat), len(self._obs_sum))
        cnt = self._obs_elem_count[:n]
        # Need at least two samples per coordinate for a usable variance.
        if n < 1 or float(np.min(cnt)) < 2.0:
            return 0.0

        mean = self._obs_sum[:n] / np.maximum(cnt, 1e-12)
        var = self._obs_sq_sum[:n] / np.maximum(cnt, 1e-12) - mean ** 2
        var = np.maximum(var, 1e-8)  # Prevent division by zero

        # Gaussian log-likelihood (negative = surprise)
        log_prob = -0.5 * np.sum(((flat[:n] - mean) ** 2) / var + np.log(2 * np.pi * var))
        return float(-log_prob)  # Higher = more surprising

    def get_observation_history(self, n: Optional[int] = None) -> List[Dict[str, Any]]:
        """Get the last n observations from the buffer."""
        if n is None:
            return list(self.observation_buffer)
        return list(self.observation_buffer)[-n:]

    def get_surprise_trajectory(self) -> np.ndarray:
        """Get the surprise values over time."""
        return np.array([obs['surprise'] for obs in self.observation_buffer])

    @property
    def state(self) -> Dict[str, Any]:
        """Current blanket state summary."""
        return {
            'sensory': self.sensory_state,
            'active': self.active_state,
            'surprise': self.surprise,
            'sense_timestep': self._sense_timestep,
            'buffer_size': len(self.observation_buffer)
        }
|
| 217 |
-
|
| 218 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/legacy/v1/morton.py
DELETED
|
@@ -1,308 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Morton (Z-order) Encoder: Modality-Agnostic Sensory Frontend
|
| 3 |
-
|
| 4 |
-
Morton codes interleave bits from multiple dimensions into a single integer,
|
| 5 |
-
preserving spatial locality: points close in N-dimensional space map to
|
| 6 |
-
nearby Morton codes. This gives us a UNIVERSAL encoding for any modality.
|
| 7 |
-
|
| 8 |
-
The key insight: every sensory modality is ultimately a set of measurements
|
| 9 |
-
across dimensions. An image is (x, y, channel). Audio is (time, frequency).
|
| 10 |
-
Text is (position, embedding_dim). Sensor data is (sensor_id, time, value).
|
| 11 |
-
|
| 12 |
-
By Morton-encoding any of these, we get a single integer that:
|
| 13 |
-
1. Preserves neighborhood structure (similar inputs → similar codes)
|
| 14 |
-
2. Is modality-agnostic (the system doesn't "know" what modality it is)
|
| 15 |
-
3. Enables efficient range queries via bit-prefix matching
|
| 16 |
-
4. Maps naturally to Bayesian state spaces (discretize → categorize)
|
| 17 |
-
|
| 18 |
-
Mathematical basis:
|
| 19 |
-
For k dimensions with coordinates (c₁, c₂, ..., cₖ):
|
| 20 |
-
Morton(c₁, c₂, ..., cₖ) = interleave_bits(c₁, c₂, ..., cₖ)
|
| 21 |
-
|
| 22 |
-
Where interleave_bits takes bit i of dimension j and places it at
|
| 23 |
-
position i*k + j in the output. This creates a Z-order space-filling curve.
|
| 24 |
-
"""
|
| 25 |
-
|
| 26 |
-
import numpy as np
|
| 27 |
-
from itertools import product
|
| 28 |
-
from typing import Union, List, Tuple, Optional
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
# Guard against exponential neighborhood enumeration when radius × dims is large.
|
| 32 |
-
MAX_NEIGHBORHOOD_COMBINATIONS = 50_000
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
class MortonEncoder:
|
| 36 |
-
"""
|
| 37 |
-
Encodes arbitrary-dimensional data into Morton codes (Z-order curve indices).
|
| 38 |
-
|
| 39 |
-
This is the Markov blanket's sensory interface — it transforms raw modality
|
| 40 |
-
data into a unified discrete state space that the inference engine operates on.
|
| 41 |
-
|
| 42 |
-
The encoding is information-preserving (invertible) and locality-preserving
|
| 43 |
-
(nearby points in input space → nearby Morton codes).
|
| 44 |
-
"""
|
| 45 |
-
|
| 46 |
-
def __init__(self, n_dims: int, bits_per_dim: int = 10,
|
| 47 |
-
ranges: Optional[List[Tuple[float, float]]] = None):
|
| 48 |
-
"""
|
| 49 |
-
Args:
|
| 50 |
-
n_dims: Number of input dimensions (e.g., 2 for image patches,
|
| 51 |
-
3 for volumetric, N for embeddings)
|
| 52 |
-
bits_per_dim: Resolution per dimension. 10 bits = 1024 levels per dim.
|
| 53 |
-
Total Morton code space = 2^(n_dims * bits_per_dim)
|
| 54 |
-
Must satisfy n_dims * bits_per_dim <= 63 so codes fit np.int64.
|
| 55 |
-
ranges: Min/max per dimension for quantization. If None, auto-calibrated.
|
| 56 |
-
"""
|
| 57 |
-
self.n_dims = n_dims
|
| 58 |
-
self.bits_per_dim = bits_per_dim
|
| 59 |
-
total_bits = n_dims * bits_per_dim
|
| 60 |
-
if total_bits > 63:
|
| 61 |
-
raise ValueError(
|
| 62 |
-
f"total_bits (n_dims * bits_per_dim) must be <= 63 to fit in np.int64; "
|
| 63 |
-
f"got total_bits={total_bits}"
|
| 64 |
-
)
|
| 65 |
-
self.total_bits = total_bits
|
| 66 |
-
self.levels = 2 ** bits_per_dim
|
| 67 |
-
|
| 68 |
-
# Quantization ranges per dimension
|
| 69 |
-
if ranges is not None:
|
| 70 |
-
self.ranges = np.asarray(ranges, dtype=np.float64)
|
| 71 |
-
if self.ranges.ndim != 2 or self.ranges.shape[1] != 2:
|
| 72 |
-
raise ValueError("ranges must be a sequence of (min, max) tuples per dimension.")
|
| 73 |
-
spans = self.ranges[:, 1] - self.ranges[:, 0]
|
| 74 |
-
flat_spans = np.asarray(spans).flatten()
|
| 75 |
-
bad = np.where(np.abs(flat_spans) < 1e-15)[0]
|
| 76 |
-
if len(bad):
|
| 77 |
-
dims_list = [int(i) for i in bad.tolist()]
|
| 78 |
-
raise ValueError(
|
| 79 |
-
"Quantization ranges have zero span on dimension index(es) "
|
| 80 |
-
f"{dims_list}; ensure max > min for each dimension "
|
| 81 |
-
"(or omit ranges to auto-calibrate from data)."
|
| 82 |
-
)
|
| 83 |
-
if int(self.ranges.shape[0]) != int(n_dims):
|
| 84 |
-
raise ValueError(
|
| 85 |
-
f"ranges must have length n_dims ({n_dims}), got shape {self.ranges.shape}."
|
| 86 |
-
)
|
| 87 |
-
else:
|
| 88 |
-
self.ranges = None # Will be set on first encode (auto-calibrate)
|
| 89 |
-
|
| 90 |
-
# Precompute bit interleaving masks for fast encoding
|
| 91 |
-
# For k dims, bit i of dim j goes to position i*k + j
|
| 92 |
-
self._build_interleave_tables()
|
| 93 |
-
|
| 94 |
-
def _build_interleave_tables(self):
|
| 95 |
-
"""Precompute lookup tables for fast bit interleaving."""
|
| 96 |
-
# For each dimension, build a mask that spreads its bits
|
| 97 |
-
# across the interleaved positions
|
| 98 |
-
self._spread_masks = []
|
| 99 |
-
for dim in range(self.n_dims):
|
| 100 |
-
# For dimension `dim`, bit position `b` in the input
|
| 101 |
-
# maps to bit position `b * n_dims + dim` in the output
|
| 102 |
-
mask_positions = [b * self.n_dims + dim for b in range(self.bits_per_dim)]
|
| 103 |
-
self._spread_masks.append(mask_positions)
|
| 104 |
-
|
| 105 |
-
def _spread_bits(self, value: int, dim: int) -> int:
|
| 106 |
-
"""Spread bits of a single value according to its dimension's interleave pattern."""
|
| 107 |
-
result = 0
|
| 108 |
-
for b in range(self.bits_per_dim):
|
| 109 |
-
if value & (1 << b):
|
| 110 |
-
result |= (1 << self._spread_masks[dim][b])
|
| 111 |
-
return result
|
| 112 |
-
|
| 113 |
-
def _compact_bits(self, morton: int, dim: int) -> int:
|
| 114 |
-
"""Extract and compact bits for a single dimension from a Morton code."""
|
| 115 |
-
result = 0
|
| 116 |
-
for b in range(self.bits_per_dim):
|
| 117 |
-
if morton & (1 << self._spread_masks[dim][b]):
|
| 118 |
-
result |= (1 << b)
|
| 119 |
-
return result
|
| 120 |
-
|
| 121 |
-
def quantize(self, values: np.ndarray) -> np.ndarray:
    """
    Map continuous coordinates onto the integer grid [0, levels - 1].

    This is the analog-to-digital conversion at the sensory boundary:
    each dimension's range is mapped uniformly onto its discrete levels.
    On first use with ``ranges is None``, ranges are auto-calibrated from
    the data; degenerate (zero-span) dimensions get their max bumped by
    1.0 so the later division is safe.
    """
    if self.ranges is None:
        # Auto-calibrate per-dimension (min, max) from the incoming batch.
        sample = values.reshape(1, -1) if values.ndim == 1 else values
        self.ranges = np.stack([sample.min(axis=0), sample.max(axis=0)], axis=1)
        degenerate = self.ranges[:, 0] == self.ranges[:, 1]
        self.ranges[degenerate, 1] = self.ranges[degenerate, 0] + 1.0

    pts = values.reshape(1, -1) if values.ndim == 1 else values

    # Normalize to [0, 1], clip, then scale onto [0, levels - 1].
    lo = self.ranges[:, 0]
    span = np.maximum(self.ranges[:, 1] - lo, 1e-15)
    unit = np.clip((pts - lo) / span, 0.0, 1.0)
    return (unit * (self.levels - 1)).astype(np.int64)
|
| 148 |
-
|
| 149 |
-
def dequantize(self, quantized: np.ndarray) -> np.ndarray:
    """Inverse of ``quantize`` — map grid coordinates back to a continuous
    approximation within the calibrated per-dimension ranges.

    Raises:
        ValueError: if ranges were never initialized (nothing encoded yet).
    """
    if self.ranges is None:
        raise ValueError(
            "ranges not initialized: call encode (or compute_ranges) "
            "before MortonEncoder.dequantize"
        )
    lo = self.ranges[:, 0]
    span = np.maximum(self.ranges[:, 1] - lo, 1e-15)
    unit = quantized.astype(np.float64) / (self.levels - 1)
    return unit * span + lo
|
| 161 |
-
|
| 162 |
-
def encode(self, values: np.ndarray) -> np.ndarray:
    """
    Encode N-dimensional data points into Morton codes.

    Args:
        values: Shape (n_points, n_dims) or (n_dims,) for a single point.
            Floating-point input is quantized first; integer input must
            already lie in [0, levels - 1].

    Returns:
        Morton codes as an np.int64 array of shape (n_points,), or a
        single scalar code for 1-D input.

    Raises:
        ValueError: if the dimensionality is wrong, or integer coords fall
            outside the representable range.
    """
    single = values.ndim == 1
    if single:
        values = values.reshape(1, -1)

    # Explicit validation instead of `assert` — asserts are stripped
    # under `python -O`, silently disabling this check.
    if values.shape[1] != self.n_dims:
        raise ValueError(f"Expected {self.n_dims} dims, got {values.shape[1]}")

    # Quantize if continuous. issubdtype covers every float dtype
    # (float16, longdouble, ...); the old `in (float32, float64)` tuple
    # check sent other float dtypes down the integer path, truncating them.
    if np.issubdtype(values.dtype, np.floating):
        quantized = self.quantize(values)
    else:
        quantized = np.asarray(values, dtype=np.int64)
        qmin = int(np.min(quantized))
        qmax = int(np.max(quantized))
        lo, hi = 0, int(self.levels - 1)
        if qmin < lo or qmax > hi:
            raise ValueError(
                f"MortonEncoder.encode expects integer coords in [{lo}, {hi}] "
                f"(levels={self.levels}); got range [{qmin}, {qmax}]"
            )

    # Interleave bits for each point.
    n_points = quantized.shape[0]
    codes = np.zeros(n_points, dtype=np.int64)
    for i in range(n_points):
        morton = 0
        for d in range(self.n_dims):
            morton |= self._spread_bits(int(quantized[i, d]), d)
        codes[i] = morton

    return codes[0] if single else codes
|
| 206 |
-
|
| 207 |
-
def decode(self, codes: Union[int, np.ndarray]) -> np.ndarray:
    """
    Invert Morton codes back into quantized N-dimensional coordinates.

    Args:
        codes: A single integer Morton code, or an array of codes.

    Returns:
        np.int64 coordinates of shape (n_points, n_dims); shape (n_dims,)
        when a single scalar code was supplied.
    """
    scalar_input = isinstance(codes, (int, np.integer))
    if scalar_input:
        codes = np.array([codes], dtype=np.int64)

    out = np.zeros((len(codes), self.n_dims), dtype=np.int64)
    for row in range(len(codes)):
        code = int(codes[row])
        for axis in range(self.n_dims):
            out[row, axis] = self._compact_bits(code, axis)

    return out[0] if scalar_input else out
|
| 229 |
-
|
| 230 |
-
def encode_continuous(self, values: np.ndarray) -> np.ndarray:
    """Encode continuous data; casting to float64 forces the quantize path."""
    as_float = values.astype(np.float64)
    return self.encode(as_float)
|
| 233 |
-
|
| 234 |
-
def decode_continuous(self, codes: Union[int, np.ndarray]) -> np.ndarray:
    """Decode Morton codes all the way back to continuous approximations."""
    grid = self.decode(codes)
    if grid.ndim == 1:
        grid = grid.reshape(1, -1)
    return self.dequantize(grid).squeeze()
|
| 240 |
-
|
| 241 |
-
def proximity(self, code_a: int, code_b: int) -> float:
    """
    Similarity of two Morton codes, in [0, 1] with 1 = identical.

    XOR distance: codes differing only in low-order bits (fine spatial
    difference) score higher than codes differing in high-order bits
    (coarse spatial difference), so the highest differing bit position
    alone determines the score.
    """
    diff = code_a ^ code_b
    if not diff:
        return 1.0
    return 1.0 - int(diff).bit_length() / self.total_bits
|
| 257 |
-
|
| 258 |
-
def neighborhood(self, code: int, radius: int = 1) -> List[int]:
    """
    Enumerate Morton codes whose quantized coordinates lie within a
    Chebyshev ``radius`` of ``code``, clipped to the valid grid
    ``[0, levels)``. Implemented as decode → offset enumeration → encode.

    Raises:
        ValueError: if the offset enumeration would exceed
            MAX_NEIGHBORHOOD_COMBINATIONS (guards combinatorial blowup).
    """
    raw = self.decode(code)
    if isinstance(raw, np.ndarray):
        center = raw.reshape(-1).astype(np.int64)
    else:
        center = np.asarray([raw], dtype=np.int64)

    n_combo = int((2 * radius + 1) ** self.n_dims)
    if n_combo > MAX_NEIGHBORHOOD_COMBINATIONS:
        raise ValueError(
            f"MortonEncoder.neighborhood would enumerate {n_combo} quantized offset "
            f"combinations (n_dims={self.n_dims}, radius={radius}, levels={self.levels}), "
            f"which exceeds MAX_NEIGHBORHOOD_COMBINATIONS={MAX_NEIGHBORHOOD_COMBINATIONS}; "
            "reduce radius or n_dims."
        )

    found = set()
    for delta in product(range(-radius, radius + 1), repeat=self.n_dims):
        candidate = center + np.asarray(delta, dtype=np.int64)
        # Keep only offsets that stay inside the quantized grid.
        if ((candidate >= 0) & (candidate < self.levels)).all():
            found.add(int(self.encode(candidate.reshape(1, -1))))
    return sorted(found)
|
| 287 |
-
|
| 288 |
-
@staticmethod
def from_modality(modality: str, **kwargs) -> 'MortonEncoder':
    """
    Factory for common modality configurations.

    Args:
        modality: One of 'image', 'audio', 'text', 'timeseries', 'generic'.
            Unknown names fall back to 'generic'. Any keyword argument
            overrides the preset (and is forwarded to the constructor).
    """
    presets = {
        'image': {'n_dims': 3, 'bits_per_dim': 8},        # x, y, channel
        'audio': {'n_dims': 2, 'bits_per_dim': 12},       # time, frequency
        'text': {'n_dims': 2, 'bits_per_dim': 10},        # position, feature
        'timeseries': {'n_dims': 2, 'bits_per_dim': 14},  # time, value
        'generic': {'n_dims': kwargs.get('n_dims', 4), 'bits_per_dim': 8},
    }
    chosen = dict(presets.get(modality, presets['generic']))
    chosen.update(kwargs)
    return MortonEncoder(**chosen)
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tensegrity/pipeline/canonical.py
CHANGED
|
@@ -226,16 +226,6 @@ class CanonicalPipeline:
|
|
| 226 |
self._choice_model_names: List[str] = []
|
| 227 |
self._last_derived_obs: List[Dict[str, int]] = []
|
| 228 |
|
| 229 |
-
# --- Persistent causal knowledge ---
|
| 230 |
-
# Domain-level SCMs persist across items within a task. Instead of
|
| 231 |
-
# rebuilding every SCM from scratch per item (which gives uniform CPTs
|
| 232 |
-
# that contribute noise), we maintain a library of domain SCMs keyed
|
| 233 |
-
# by task domain. When a new item arrives, we look up existing SCMs
|
| 234 |
-
# for that domain and re-register them with accumulated experience.
|
| 235 |
-
# Per-choice ephemeral SCMs are still created, but the domain SCM
|
| 236 |
-
# provides a prior that shapes the per-choice energy competition.
|
| 237 |
-
self._domain_scm_library: Dict[str, StructuralCausalModel] = {}
|
| 238 |
-
|
| 239 |
if self.persistent_state_path:
|
| 240 |
self.load_state(self.persistent_state_path)
|
| 241 |
|
|
@@ -296,15 +286,14 @@ class CanonicalPipeline:
|
|
| 296 |
self._scm_topologies = {}
|
| 297 |
self._choice_model_names = []
|
| 298 |
self._last_derived_obs = []
|
| 299 |
-
|
| 300 |
-
# Determine domain for persistent SCM lookup
|
| 301 |
-
domain = sample.metadata.get("domain", "general")
|
| 302 |
-
|
| 303 |
for i, label in enumerate(labels[:len(sample.choices)]):
|
| 304 |
-
scm = self._build_choice_scm(i, label
|
| 305 |
try:
|
| 306 |
self.energy_arena.register(scm)
|
| 307 |
self._choice_model_names.append(scm.name)
|
|
|
|
|
|
|
|
|
|
| 308 |
n_ngc_layers = len(self.controller.agent.field.ngc.layer_sizes)
|
| 309 |
topology = self._topology_mapper.from_scm(scm, n_layers=n_ngc_layers)
|
| 310 |
self._scm_topologies[scm.name] = topology
|
|
@@ -356,46 +345,23 @@ class CanonicalPipeline:
|
|
| 356 |
|
| 357 |
# ---------- per-choice SCM (used by EnergyCausalArena) ----------
|
| 358 |
|
| 359 |
-
def _build_choice_scm(self, choice_idx: int, label: str
|
| 360 |
-
domain: str = "general") -> StructuralCausalModel:
|
| 361 |
"""
|
| 362 |
-
Build a
|
| 363 |
|
| 364 |
-
The structure is always:
|
| 365 |
prompt_feature ──▶ choice_match ──▶ observation
|
| 366 |
▲
|
| 367 |
│ (lateral) coherence
|
| 368 |
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
uniform Dirichlet priors. The domain model is the persistent
|
| 373 |
-
causal knowledge that survives across items.
|
| 374 |
"""
|
| 375 |
scm = StructuralCausalModel(name=f"choice_{choice_idx}_{label}")
|
| 376 |
scm.add_variable("prompt_feature", n_values=4, parents=[])
|
| 377 |
scm.add_variable("coherence", n_values=4, parents=[])
|
| 378 |
scm.add_variable("choice_match", n_values=4, parents=["prompt_feature"])
|
| 379 |
scm.add_variable("observation", n_values=4, parents=["choice_match", "coherence"])
|
| 380 |
-
|
| 381 |
-
# Seed from domain library if available
|
| 382 |
-
domain_key = f"domain_{domain}"
|
| 383 |
-
if domain_key in self._domain_scm_library:
|
| 384 |
-
domain_scm = self._domain_scm_library[domain_key]
|
| 385 |
-
# Copy accumulated CPTs from the domain model
|
| 386 |
-
for var_name, mech in scm.mechanisms.items():
|
| 387 |
-
domain_mech = domain_scm.mechanisms.get(var_name)
|
| 388 |
-
if domain_mech is not None and mech.cpt.shape == domain_mech.cpt.shape:
|
| 389 |
-
mech.cpt[:] = domain_mech.cpt
|
| 390 |
-
else:
|
| 391 |
-
# Create a new domain SCM for future seeding
|
| 392 |
-
domain_scm = StructuralCausalModel(name=domain_key)
|
| 393 |
-
domain_scm.add_variable("prompt_feature", n_values=4, parents=[])
|
| 394 |
-
domain_scm.add_variable("coherence", n_values=4, parents=[])
|
| 395 |
-
domain_scm.add_variable("choice_match", n_values=4, parents=["prompt_feature"])
|
| 396 |
-
domain_scm.add_variable("observation", n_values=4, parents=["choice_match", "coherence"])
|
| 397 |
-
self._domain_scm_library[domain_key] = domain_scm
|
| 398 |
-
|
| 399 |
return scm
|
| 400 |
|
| 401 |
# ---------- one-shot ingest (delegates to controller) ----------
|
|
@@ -421,61 +387,73 @@ class CanonicalPipeline:
|
|
| 421 |
self, prompt: str, choices: List[str]
|
| 422 |
) -> Tuple[np.ndarray, List[Dict[str, int]]]:
|
| 423 |
"""
|
| 424 |
-
|
| 425 |
-
1. Save NGC base state (prompt-grounded after perceive).
|
| 426 |
-
2. Encode c_i alone, settle NGC under it.
|
| 427 |
-
3. Ask the field to top-down predict the prompt observation.
|
| 428 |
-
4. score_i = -prediction_error.
|
| 429 |
-
5. Discretize the obs/pred for use as energy-arena observations.
|
| 430 |
|
| 431 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 432 |
"""
|
| 433 |
field = self.controller.agent.field
|
| 434 |
-
prompt_tokens = _alphanum_tokens(prompt, max_tokens=64)
|
| 435 |
-
prompt_obs = field._fhrr_to_obs(field.encoder.encode_sequence(prompt_tokens))
|
| 436 |
|
| 437 |
-
#
|
|
|
|
|
|
|
|
|
|
| 438 |
try:
|
|
|
|
| 439 |
base_state = field.ngc.save_state()
|
| 440 |
except Exception:
|
| 441 |
base_state = None
|
| 442 |
|
| 443 |
scores = np.zeros(len(choices), dtype=np.float64)
|
| 444 |
derived_obs: List[Dict[str, int]] = []
|
|
|
|
| 445 |
for i, c in enumerate(choices):
|
| 446 |
if base_state is not None:
|
| 447 |
try:
|
| 448 |
field.ngc.restore_state(base_state)
|
| 449 |
except Exception:
|
| 450 |
pass
|
| 451 |
-
|
| 452 |
-
|
|
|
|
|
|
|
| 453 |
try:
|
| 454 |
field.ngc.settle(choice_obs, steps=self.falsify_settle_steps)
|
| 455 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 456 |
except Exception as e:
|
| 457 |
-
logger.error(
|
| 458 |
-
"NGC falsification failed for choice %d: %s",
|
| 459 |
-
i, e, exc_info=True,
|
| 460 |
-
)
|
| 461 |
pe = float(1e9)
|
| 462 |
scores[i] = -pe
|
| 463 |
|
| 464 |
-
# Derive
|
| 465 |
-
# Each variable is bucketed into 4 levels to match the per-choice
|
| 466 |
-
# SCM cardinality. The buckets are deterministic from the field
|
| 467 |
-
# state, not random.
|
| 468 |
try:
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
|
|
|
|
|
|
| 474 |
derived_obs.append({
|
| 475 |
-
"prompt_feature": pf,
|
| 476 |
-
"
|
| 477 |
-
"coherence": co,
|
| 478 |
-
"observation": ob,
|
| 479 |
})
|
| 480 |
except Exception:
|
| 481 |
derived_obs.append({
|
|
@@ -483,8 +461,7 @@ class CanonicalPipeline:
|
|
| 483 |
"coherence": 0, "observation": 0,
|
| 484 |
})
|
| 485 |
|
| 486 |
-
# Restore
|
| 487 |
-
# contaminated by the last falsification settle.
|
| 488 |
if base_state is not None:
|
| 489 |
try:
|
| 490 |
field.ngc.restore_state(base_state)
|
|
@@ -853,11 +830,9 @@ class CanonicalPipeline:
|
|
| 853 |
def _sbert_choice_scores(self, sample: TaskSample) -> np.ndarray:
|
| 854 |
"""Score choices by SBERT sentence-level cosine similarity.
|
| 855 |
|
| 856 |
-
|
| 857 |
-
|
| 858 |
-
|
| 859 |
-
by the random FHRR→obs projection and directly measures semantic
|
| 860 |
-
relatedness in the original embedding space.
|
| 861 |
"""
|
| 862 |
n = len(sample.choices)
|
| 863 |
scores = np.zeros(n, dtype=np.float64)
|
|
@@ -865,83 +840,66 @@ class CanonicalPipeline:
|
|
| 865 |
return scores
|
| 866 |
|
| 867 |
field = self.controller.agent.field
|
| 868 |
-
|
| 869 |
-
|
| 870 |
-
|
| 871 |
-
|
| 872 |
-
|
|
|
|
| 873 |
return scores
|
| 874 |
|
| 875 |
try:
|
| 876 |
-
|
| 877 |
-
f"{sample.prompt} {c}"
|
| 878 |
-
|
| 879 |
-
|
| 880 |
-
|
| 881 |
-
|
| 882 |
-
if pn < 1e-8:
|
| 883 |
-
return scores
|
| 884 |
-
for i in range(n):
|
| 885 |
-
ce = embs[i + 1]
|
| 886 |
-
cn = float(np.linalg.norm(ce))
|
| 887 |
-
if cn > 1e-8:
|
| 888 |
-
scores[i] = float(np.dot(pe, ce) / (pn * cn))
|
| 889 |
except Exception as e:
|
| 890 |
logger.debug("SBERT choice scoring failed: %s", e)
|
| 891 |
|
| 892 |
return scores
|
| 893 |
|
| 894 |
def _memory_choice_scores(self, sample: TaskSample) -> np.ndarray:
|
| 895 |
-
"""
|
| 896 |
|
| 897 |
-
|
| 898 |
-
|
|
|
|
|
|
|
|
|
|
| 899 |
"""
|
| 900 |
n = len(sample.choices)
|
| 901 |
scores = np.zeros(n, dtype=np.float64)
|
| 902 |
if n == 0:
|
| 903 |
return scores
|
| 904 |
|
| 905 |
-
|
| 906 |
-
if
|
| 907 |
return scores
|
| 908 |
|
| 909 |
-
|
| 910 |
-
|
| 911 |
-
|
| 912 |
-
query_belief = np.full(n, 1.0 / n, dtype=np.float64)
|
| 913 |
|
|
|
|
| 914 |
try:
|
| 915 |
-
|
| 916 |
-
retrieved = episodic.retrieve_by_context(query_context=query_ctx, k=8)
|
| 917 |
except Exception as e:
|
| 918 |
-
logger.debug("
|
| 919 |
return scores
|
| 920 |
|
| 921 |
-
|
|
|
|
| 922 |
return scores
|
| 923 |
|
| 924 |
-
|
| 925 |
-
|
| 926 |
-
|
| 927 |
-
|
| 928 |
-
|
| 929 |
-
|
| 930 |
-
|
| 931 |
-
if correct_vec is None:
|
| 932 |
-
continue
|
| 933 |
-
correct_vec = np.asarray(correct_vec, dtype=np.float64)
|
| 934 |
-
cn = np.linalg.norm(correct_vec)
|
| 935 |
-
if cn <= 1e-10:
|
| 936 |
-
continue
|
| 937 |
-
correct_vec = correct_vec / cn
|
| 938 |
-
ctx_sim = float(np.dot(query_ctx, ep.context_vector))
|
| 939 |
-
if ctx_sim <= 0.0:
|
| 940 |
-
continue
|
| 941 |
-
confidence = 1.0 - float(ep.surprise)
|
| 942 |
-
weight = ctx_sim * max(0.05, confidence)
|
| 943 |
-
for i, choice_vec in enumerate(choice_vecs):
|
| 944 |
-
scores[i] += weight * float(np.dot(choice_vec, correct_vec))
|
| 945 |
|
| 946 |
return scores
|
| 947 |
|
|
@@ -977,18 +935,23 @@ class CanonicalPipeline:
|
|
| 977 |
gold_rank_score = 1.0 / n # no discrimination
|
| 978 |
self._channel_alpha[name] += gold_rank_score * 0.5
|
| 979 |
field = self.controller.agent.field
|
| 980 |
-
|
| 981 |
-
|
| 982 |
-
|
| 983 |
-
|
| 984 |
-
|
| 985 |
-
|
| 986 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 987 |
|
| 988 |
try:
|
| 989 |
field.ngc.settle(correct_obs, steps=max(1, self.falsify_settle_steps))
|
| 990 |
field.ngc.learn(modulation=max(0.0, self.feedback_learning_rate))
|
| 991 |
-
field.memory.store(field.ngc.get_abstract_state(level=-1))
|
| 992 |
except Exception as e:
|
| 993 |
logger.debug("feedback NGC learning skipped: %s", e)
|
| 994 |
|
|
@@ -1039,19 +1002,6 @@ class CanonicalPipeline:
|
|
| 1039 |
except Exception as e:
|
| 1040 |
logger.debug("feedback SCM update skipped: %s", e)
|
| 1041 |
|
| 1042 |
-
# Update the persistent domain SCM with the gold-label observation.
|
| 1043 |
-
# This is what makes the causal arena accumulate experience: the
|
| 1044 |
-
# domain SCM's CPTs evolve with each feedback signal, and future
|
| 1045 |
-
# items in the same domain start with this accumulated knowledge.
|
| 1046 |
-
domain = sample.metadata.get("domain", "general")
|
| 1047 |
-
domain_key = f"domain_{domain}"
|
| 1048 |
-
domain_scm = self._domain_scm_library.get(domain_key)
|
| 1049 |
-
if domain_scm is not None and self._last_derived_obs:
|
| 1050 |
-
try:
|
| 1051 |
-
domain_scm.update_from_data([self._last_derived_obs[sample.gold]])
|
| 1052 |
-
except Exception as e:
|
| 1053 |
-
logger.debug("domain SCM update skipped: %s", e)
|
| 1054 |
-
|
| 1055 |
try:
|
| 1056 |
self.controller.agent.experience_replay(n_episodes=3)
|
| 1057 |
except Exception as e:
|
|
|
|
| 226 |
self._choice_model_names: List[str] = []
|
| 227 |
self._last_derived_obs: List[Dict[str, int]] = []
|
| 228 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 229 |
if self.persistent_state_path:
|
| 230 |
self.load_state(self.persistent_state_path)
|
| 231 |
|
|
|
|
| 286 |
self._scm_topologies = {}
|
| 287 |
self._choice_model_names = []
|
| 288 |
self._last_derived_obs = []
|
|
|
|
|
|
|
|
|
|
|
|
|
| 289 |
for i, label in enumerate(labels[:len(sample.choices)]):
|
| 290 |
+
scm = self._build_choice_scm(i, label)
|
| 291 |
try:
|
| 292 |
self.energy_arena.register(scm)
|
| 293 |
self._choice_model_names.append(scm.name)
|
| 294 |
+
# Project this SCM's DAG into the NGC layer hierarchy via
|
| 295 |
+
# TopologyMapper. Horizontal causal edges are resolved through
|
| 296 |
+
# virtual parents at higher levels (the "elevator shaft" fix).
|
| 297 |
n_ngc_layers = len(self.controller.agent.field.ngc.layer_sizes)
|
| 298 |
topology = self._topology_mapper.from_scm(scm, n_layers=n_ngc_layers)
|
| 299 |
self._scm_topologies[scm.name] = topology
|
|
|
|
| 345 |
|
| 346 |
# ---------- per-choice SCM (used by EnergyCausalArena) ----------
|
| 347 |
|
| 348 |
+
def _build_choice_scm(self, choice_idx: int, label: str) -> StructuralCausalModel:
    """
    Build a tiny per-choice SCM:

        prompt_feature ──▶ choice_match ──▶ observation
                                ▲
                                │ (lateral) coherence

    The DAG mixes vertical and horizontal edges; the TopologyMapper later
    converts the lateral coherence link into a virtual parent in the NGC
    hierarchy (the "elevator shaft" fix for the topological mismatch).
    """
    scm = StructuralCausalModel(name=f"choice_{choice_idx}_{label}")
    # (variable, parents) — all variables share cardinality 4.
    structure = (
        ("prompt_feature", []),
        ("coherence", []),
        ("choice_match", ["prompt_feature"]),
        ("observation", ["choice_match", "coherence"]),
    )
    for var_name, parent_names in structure:
        scm.add_variable(var_name, n_values=4, parents=parent_names)
    return scm
|
| 366 |
|
| 367 |
# ---------- one-shot ingest (delegates to controller) ----------
|
|
|
|
| 387 |
self, prompt: str, choices: List[str]
|
| 388 |
) -> Tuple[np.ndarray, List[Dict[str, int]]]:
|
| 389 |
"""
|
| 390 |
+
Real falsification in SBERT embedding space.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 391 |
|
| 392 |
+
For each choice c_i:
|
| 393 |
+
1. Save NGC state after settling on the prompt.
|
| 394 |
+
2. Get SBERT embedding of choice c_i.
|
| 395 |
+
3. Settle NGC on the choice embedding.
|
| 396 |
+
4. Ask NGC to predict what layer 0 should look like (top-down).
|
| 397 |
+
5. score_i = -||prediction - prompt_embedding||²
|
| 398 |
+
|
| 399 |
+
This is genuine falsification: "if the answer is c_i, and the NGC
|
| 400 |
+
has learned the structure of how answers relate to questions in
|
| 401 |
+
embedding space, does c_i's abstract state predict the prompt?"
|
| 402 |
+
|
| 403 |
+
The NGC W matrices learn across items. After 50+ items, they encode
|
| 404 |
+
real structure: questions in domain X tend to have answers with
|
| 405 |
+
embedding pattern Y. This is knowledge the LLM doesn't have —
|
| 406 |
+
it's cross-item structural knowledge accumulated by the cognitive layer.
|
| 407 |
"""
|
| 408 |
field = self.controller.agent.field
|
|
|
|
|
|
|
| 409 |
|
| 410 |
+
# Get SBERT embeddings directly
|
| 411 |
+
prompt_obs = field.text_to_obs(prompt)
|
| 412 |
+
|
| 413 |
+
# Settle on prompt first to ground the NGC
|
| 414 |
try:
|
| 415 |
+
field.ngc.settle(prompt_obs)
|
| 416 |
base_state = field.ngc.save_state()
|
| 417 |
except Exception:
|
| 418 |
base_state = None
|
| 419 |
|
| 420 |
scores = np.zeros(len(choices), dtype=np.float64)
|
| 421 |
derived_obs: List[Dict[str, int]] = []
|
| 422 |
+
|
| 423 |
for i, c in enumerate(choices):
|
| 424 |
if base_state is not None:
|
| 425 |
try:
|
| 426 |
field.ngc.restore_state(base_state)
|
| 427 |
except Exception:
|
| 428 |
pass
|
| 429 |
+
|
| 430 |
+
# Get SBERT embedding of the choice (or prompt+choice for context)
|
| 431 |
+
choice_obs = field.text_to_obs(f"{prompt} {c}")
|
| 432 |
+
|
| 433 |
try:
|
| 434 |
field.ngc.settle(choice_obs, steps=self.falsify_settle_steps)
|
| 435 |
+
# Prediction: what does the NGC think layer 0 should look like
|
| 436 |
+
# given the abstract state it settled into for this choice?
|
| 437 |
+
predicted = field.ngc.predict_observation()
|
| 438 |
+
# Score: how well does this prediction match the prompt embedding?
|
| 439 |
+
pe = float(np.sum((prompt_obs - predicted) ** 2))
|
| 440 |
except Exception as e:
|
| 441 |
+
logger.error("NGC falsification failed for choice %d: %s", i, e)
|
|
|
|
|
|
|
|
|
|
| 442 |
pe = float(1e9)
|
| 443 |
scores[i] = -pe
|
| 444 |
|
| 445 |
+
# Derive discrete observations for the energy arena
|
|
|
|
|
|
|
|
|
|
| 446 |
try:
|
| 447 |
+
pf = self._bucket_4(float(np.linalg.norm(prompt_obs)))
|
| 448 |
+
cm = self._bucket_4(-pe / max(float(np.linalg.norm(prompt_obs)) ** 2, 1.0))
|
| 449 |
+
co = self._bucket_4(float(
|
| 450 |
+
np.dot(predicted, prompt_obs) /
|
| 451 |
+
(np.linalg.norm(predicted) * np.linalg.norm(prompt_obs) + 1e-10)
|
| 452 |
+
))
|
| 453 |
+
ob = self._bucket_4(float(np.linalg.norm(predicted)))
|
| 454 |
derived_obs.append({
|
| 455 |
+
"prompt_feature": pf, "choice_match": cm,
|
| 456 |
+
"coherence": co, "observation": ob,
|
|
|
|
|
|
|
| 457 |
})
|
| 458 |
except Exception:
|
| 459 |
derived_obs.append({
|
|
|
|
| 461 |
"coherence": 0, "observation": 0,
|
| 462 |
})
|
| 463 |
|
| 464 |
+
# Restore prompt-grounded state
|
|
|
|
| 465 |
if base_state is not None:
|
| 466 |
try:
|
| 467 |
field.ngc.restore_state(base_state)
|
|
|
|
| 830 |
def _sbert_choice_scores(self, sample: TaskSample) -> np.ndarray:
|
| 831 |
"""Score choices by SBERT sentence-level cosine similarity.
|
| 832 |
|
| 833 |
+
Uses field.text_to_obs() which goes directly to SBERT embeddings
|
| 834 |
+
when available, giving the cognitive layer the same semantic signal
|
| 835 |
+
it uses for NGC falsification and Hopfield memory.
|
|
|
|
|
|
|
| 836 |
"""
|
| 837 |
n = len(sample.choices)
|
| 838 |
scores = np.zeros(n, dtype=np.float64)
|
|
|
|
| 840 |
return scores
|
| 841 |
|
| 842 |
field = self.controller.agent.field
|
| 843 |
+
prompt_emb = field.get_sbert_embedding(sample.prompt)
|
| 844 |
+
if prompt_emb is None:
|
| 845 |
+
return scores
|
| 846 |
+
|
| 847 |
+
pn = float(np.linalg.norm(prompt_emb))
|
| 848 |
+
if pn < 1e-8:
|
| 849 |
return scores
|
| 850 |
|
| 851 |
try:
|
| 852 |
+
for i, c in enumerate(sample.choices):
|
| 853 |
+
choice_emb = field.get_sbert_embedding(f"{sample.prompt} {c}")
|
| 854 |
+
if choice_emb is not None:
|
| 855 |
+
cn = float(np.linalg.norm(choice_emb))
|
| 856 |
+
if cn > 1e-8:
|
| 857 |
+
scores[i] = float(np.dot(prompt_emb, choice_emb) / (pn * cn))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 858 |
except Exception as e:
|
| 859 |
logger.debug("SBERT choice scoring failed: %s", e)
|
| 860 |
|
| 861 |
return scores
|
| 862 |
|
| 863 |
def _memory_choice_scores(self, sample: TaskSample) -> np.ndarray:
    """Score choices by Hopfield memory retrieval in SBERT space.

    The Hopfield bank stores full SBERT embeddings. We query it with the
    prompt's embedding and rank each choice by cosine similarity between
    the retrieved memory and the choice's embedding — cross-item transfer:
    "past prompts similar to this one had answers similar to choice X."
    Every failure mode (no choices, empty bank, missing embedding,
    retrieval error, degenerate norms) degrades to an all-zero vector.
    """
    n_choices = len(sample.choices)
    scores = np.zeros(n_choices, dtype=np.float64)
    if n_choices == 0:
        return scores

    field = self.controller.agent.field
    if field.memory.n_patterns == 0:
        return scores

    prompt_emb = field.get_sbert_embedding(sample.prompt)
    if prompt_emb is None:
        return scores

    # Retrieve from Hopfield memory using the prompt's SBERT embedding.
    try:
        retrieved, _energy = field.memory.retrieve(prompt_emb)
    except Exception as e:
        logger.debug("memory retrieval failed: %s", e)
        return scores

    ret_norm = np.linalg.norm(retrieved)
    if ret_norm < 1e-8:
        return scores

    # Cosine similarity of each (prompt + choice) embedding to the memory.
    for idx, choice in enumerate(sample.choices):
        choice_emb = field.get_sbert_embedding(f"{sample.prompt} {choice}")
        if choice_emb is None:
            continue
        choice_norm = float(np.linalg.norm(choice_emb))
        if choice_norm <= 1e-8:
            continue
        scores[idx] = float(np.dot(retrieved, choice_emb) / (ret_norm * choice_norm))

    return scores
|
| 905 |
|
|
|
|
| 935 |
gold_rank_score = 1.0 / n # no discrimination
|
| 936 |
self._channel_alpha[name] += gold_rank_score * 0.5
|
| 937 |
field = self.controller.agent.field
|
| 938 |
+
|
| 939 |
+
# Store the correct answer's SBERT embedding in Hopfield memory.
|
| 940 |
+
# This is the cross-item learning signal: future prompts will
|
| 941 |
+
# retrieve this embedding and use it to score choices.
|
| 942 |
+
correct_text = f"{sample.prompt} {sample.choices[sample.gold]}"
|
| 943 |
+
correct_emb = field.get_sbert_embedding(correct_text)
|
| 944 |
+
if correct_emb is not None:
|
| 945 |
+
field.memory.store(correct_emb)
|
| 946 |
+
|
| 947 |
+
# Settle NGC on the correct answer's SBERT embedding and learn.
|
| 948 |
+
# This teaches the W matrices the structure of correct Q→A mappings.
|
| 949 |
+
correct_obs = field.text_to_obs(correct_text)
|
| 950 |
+
prompt_obs = field.text_to_obs(sample.prompt)
|
| 951 |
|
| 952 |
try:
|
| 953 |
field.ngc.settle(correct_obs, steps=max(1, self.falsify_settle_steps))
|
| 954 |
field.ngc.learn(modulation=max(0.0, self.feedback_learning_rate))
|
|
|
|
| 955 |
except Exception as e:
|
| 956 |
logger.debug("feedback NGC learning skipped: %s", e)
|
| 957 |
|
|
|
|
| 1002 |
except Exception as e:
|
| 1003 |
logger.debug("feedback SCM update skipped: %s", e)
|
| 1004 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1005 |
try:
|
| 1006 |
self.controller.agent.experience_replay(n_episodes=3)
|
| 1007 |
except Exception as e:
|
tensegrity/pipeline/iterative.py
DELETED
|
@@ -1,466 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Iterative cognitive scorer — LLM-free multi-pass settling over choices.
|
| 3 |
-
|
| 4 |
-
Single-shot ScoringBridge encodes prompt once, settles NGC once per choice,
|
| 5 |
-
fuses sentence + FHRR + NGC scores in one shot. The graft results show this
|
| 6 |
-
behaves like an undifferentiated bias field.
|
| 7 |
-
|
| 8 |
-
This iterative scorer instead runs an active-inference loop:
|
| 9 |
-
1. Encode prompt context, settle NGC, learn (ground the field).
|
| 10 |
-
2. Initialize uniform belief over choices.
|
| 11 |
-
3. For each iteration up to a budget:
|
| 12 |
-
a. Score each choice via NGC free-energy under the *current* field state.
|
| 13 |
-
b. Update beliefs by accumulating evidence (Bayesian-style log-odds).
|
| 14 |
-
c. Take the leading choice's encoding, learn a small Hebbian step under
|
| 15 |
-
it (modulation = belief mass), shaping the field toward that
|
| 16 |
-
interpretation.
|
| 17 |
-
d. Optionally retrieve from Hopfield with the leading encoding to inject
|
| 18 |
-
memory pressure.
|
| 19 |
-
e. Check convergence: top-1 belief mass > τ, or marginal change < ε.
|
| 20 |
-
4. Commit argmax.
|
| 21 |
-
|
| 22 |
-
The LLM is absent. The cognitive layer alone resolves the choice.
|
| 23 |
-
"""
|
| 24 |
-
from __future__ import annotations
|
| 25 |
-
|
| 26 |
-
import re
|
| 27 |
-
import logging
|
| 28 |
-
from dataclasses import dataclass, field
|
| 29 |
-
from typing import Any, Dict, List, Optional, Tuple
|
| 30 |
-
|
| 31 |
-
import numpy as np
|
| 32 |
-
|
| 33 |
-
logger = logging.getLogger(__name__)
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
@dataclass
|
| 37 |
-
class IterationTrace:
|
| 38 |
-
iteration: int
|
| 39 |
-
energies: List[float]
|
| 40 |
-
sentence_sims: List[float]
|
| 41 |
-
fhrr_sims: List[float]
|
| 42 |
-
log_belief: List[float]
|
| 43 |
-
belief: List[float]
|
| 44 |
-
top_idx: int
|
| 45 |
-
top_p: float
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
@dataclass
|
| 49 |
-
class IterativeResult:
|
| 50 |
-
scores: List[float] # final fused scores per choice
|
| 51 |
-
belief: List[float] # final belief vector per choice
|
| 52 |
-
committed_idx: int
|
| 53 |
-
iterations_used: int
|
| 54 |
-
converged: bool
|
| 55 |
-
trace: List[IterationTrace] = field(default_factory=list)
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
class IterativeCognitiveScorer:
|
| 59 |
-
"""
|
| 60 |
-
Multi-pass cognitive scorer over a UnifiedField.
|
| 61 |
-
|
| 62 |
-
No LLM in the loop. Operates on prompt+choices via:
|
| 63 |
-
- sbert sentence similarity (one-shot, doesn't change across iterations)
|
| 64 |
-
- FHRR similarity (one-shot)
|
| 65 |
-
- NGC free energy (recomputed each iteration as the field is shaped)
|
| 66 |
-
- Hopfield retrieval (cumulative memory pressure across iterations)
|
| 67 |
-
"""
|
| 68 |
-
|
| 69 |
-
def __init__(
|
| 70 |
-
self,
|
| 71 |
-
field=None,
|
| 72 |
-
*,
|
| 73 |
-
obs_dim: int = 256,
|
| 74 |
-
hidden_dims: Optional[List[int]] = None,
|
| 75 |
-
fhrr_dim: int = 2048,
|
| 76 |
-
ngc_settle_steps: int = 30,
|
| 77 |
-
ngc_learning_rate: float = 0.01,
|
| 78 |
-
hopfield_beta: float = 0.05,
|
| 79 |
-
# iteration controls
|
| 80 |
-
max_iterations: int = 6,
|
| 81 |
-
convergence_top_p: float = 0.75,
|
| 82 |
-
convergence_delta: float = 1e-3,
|
| 83 |
-
# context settling
|
| 84 |
-
context_settle_steps: int = 40,
|
| 85 |
-
choice_settle_steps: int = 25,
|
| 86 |
-
context_learning_epochs: int = 3,
|
| 87 |
-
# fusion weights (z-scored)
|
| 88 |
-
w_sbert: float = 0.5,
|
| 89 |
-
w_fhrr: float = 0.3,
|
| 90 |
-
w_ngc: float = 1.0,
|
| 91 |
-
w_falsify: float = 0.7,
|
| 92 |
-
# belief update step
|
| 93 |
-
belief_step: float = 0.6,
|
| 94 |
-
# Hebbian shaping is now under the prompt context (not the leading choice),
|
| 95 |
-
# so iteration deepens the prompt model rather than reinforcing the leader.
|
| 96 |
-
shaping_lr_scale: float = 0.5,
|
| 97 |
-
# Hopfield: store leading encoding each iteration; query each iteration
|
| 98 |
-
use_hopfield: bool = True,
|
| 99 |
-
hopfield_steps: int = 2,
|
| 100 |
-
# Episodic memory persists across items in a session. At the start of
|
| 101 |
-
# each item we retrieve past episodes whose context matches the current
|
| 102 |
-
# prompt and use their stored chosen-answer FHRR vectors to bias the
|
| 103 |
-
# current choices — the cross-item learning channel.
|
| 104 |
-
use_episodic: bool = True,
|
| 105 |
-
episodic_context_dim: int = 64,
|
| 106 |
-
episodic_capacity: int = 4096,
|
| 107 |
-
episodic_top_k: int = 8,
|
| 108 |
-
# Default off: the simple "past-answer FHRR similarity" signal is too
|
| 109 |
-
# noisy to help. The wiring (encode/retrieve) stays so smarter signals
|
| 110 |
-
# can be plugged in here without re-plumbing.
|
| 111 |
-
w_episodic: float = 0.0,
|
| 112 |
-
# Minimum cosine match between query and episodic context to trust retrieval.
|
| 113 |
-
episodic_ctx_sim_threshold: float = 0.5,
|
| 114 |
-
# Seed for NGC `reinitialize` on `reset`; None chooses a random seed each time.
|
| 115 |
-
reset_seed: Optional[int] = 12345,
|
| 116 |
-
):
|
| 117 |
-
from tensegrity.engine.unified_field import UnifiedField
|
| 118 |
-
from tensegrity.memory.episodic import EpisodicMemory
|
| 119 |
-
self.field = field or UnifiedField(
|
| 120 |
-
obs_dim=obs_dim,
|
| 121 |
-
hidden_dims=hidden_dims or [128, 32],
|
| 122 |
-
fhrr_dim=fhrr_dim,
|
| 123 |
-
hopfield_beta=hopfield_beta,
|
| 124 |
-
ngc_settle_steps=ngc_settle_steps,
|
| 125 |
-
ngc_learning_rate=ngc_learning_rate,
|
| 126 |
-
)
|
| 127 |
-
self.max_iterations = max_iterations
|
| 128 |
-
self.convergence_top_p = convergence_top_p
|
| 129 |
-
self.convergence_delta = convergence_delta
|
| 130 |
-
self.context_settle_steps = context_settle_steps
|
| 131 |
-
self.choice_settle_steps = choice_settle_steps
|
| 132 |
-
self.context_learning_epochs = context_learning_epochs
|
| 133 |
-
self.w_sbert = w_sbert
|
| 134 |
-
self.w_fhrr = w_fhrr
|
| 135 |
-
self.w_ngc = w_ngc
|
| 136 |
-
self.w_falsify = w_falsify
|
| 137 |
-
self.belief_step = belief_step
|
| 138 |
-
self.shaping_lr_scale = shaping_lr_scale
|
| 139 |
-
self.use_hopfield = use_hopfield
|
| 140 |
-
self.hopfield_steps = hopfield_steps
|
| 141 |
-
self.use_episodic = use_episodic
|
| 142 |
-
self.episodic_top_k = episodic_top_k
|
| 143 |
-
self.w_episodic = w_episodic
|
| 144 |
-
self.episodic_ctx_sim_threshold = episodic_ctx_sim_threshold
|
| 145 |
-
self.reset_seed = reset_seed
|
| 146 |
-
# Dirichlet-style per-channel reliability. Each channel accumulates a
|
| 147 |
-
# pseudocount that grows when the channel's top-ranked choice matches
|
| 148 |
-
# the committed belief on an item. Fusion weights = normalized counts.
|
| 149 |
-
# Uniform prior of 1.0 means we start with equal trust; the system
|
| 150 |
-
# discovers which channels are reliable for the current task on its own.
|
| 151 |
-
self._channels = ["sbert", "fhrr", "ngc", "falsify", "hop", "episodic"]
|
| 152 |
-
self._channel_counts: Dict[str, float] = {c: 1.0 for c in self._channels}
|
| 153 |
-
self.episodic = EpisodicMemory(
|
| 154 |
-
context_dim=episodic_context_dim,
|
| 155 |
-
capacity=episodic_capacity,
|
| 156 |
-
drift_rate=0.95,
|
| 157 |
-
encoding_strength=0.3,
|
| 158 |
-
) if use_episodic else None
|
| 159 |
-
|
| 160 |
-
# ---------- text helpers ----------
|
| 161 |
-
|
| 162 |
-
def _tokenize(self, text: str, max_tokens: int = 48) -> List[str]:
|
| 163 |
-
return re.findall(r"[a-zA-Z]+(?:'[a-z]+)?|[0-9]+(?:\.[0-9]+)?", text.lower())[-max_tokens:]
|
| 164 |
-
|
| 165 |
-
def _encode(self, tokens: List[str]) -> np.ndarray:
|
| 166 |
-
if not tokens:
|
| 167 |
-
return np.ones(self.field.fhrr_dim, dtype=np.complex64)
|
| 168 |
-
return self.field.encoder.encode_sequence(tokens)
|
| 169 |
-
|
| 170 |
-
# ---------- one-shot signals (computed once per item) ----------
|
| 171 |
-
|
| 172 |
-
def _sbert_similarities(self, prompt: str, choices: List[str]) -> List[float]:
|
| 173 |
-
features = self.field.encoder.features
|
| 174 |
-
getter = getattr(features, "get_sbert_model", None)
|
| 175 |
-
sbert = getter() if callable(getter) else None
|
| 176 |
-
if sbert is not None:
|
| 177 |
-
embs = sbert.encode([prompt] + choices, show_progress_bar=False)
|
| 178 |
-
pe = embs[0]
|
| 179 |
-
pn = float(np.linalg.norm(pe))
|
| 180 |
-
out = []
|
| 181 |
-
for i in range(len(choices)):
|
| 182 |
-
ce = embs[i + 1]
|
| 183 |
-
cn = float(np.linalg.norm(ce))
|
| 184 |
-
out.append(float(np.dot(pe, ce) / (pn * cn)) if pn > 1e-8 and cn > 1e-8 else 0.0)
|
| 185 |
-
return out
|
| 186 |
-
if self.field.encoder.semantic and callable(getter) and not getattr(
|
| 187 |
-
self, "_sbert_unavailable_logged", False
|
| 188 |
-
):
|
| 189 |
-
logger.warning("SBERT sentence similarity unavailable; using FHRR cosine similarity.")
|
| 190 |
-
setattr(self, "_sbert_unavailable_logged", True)
|
| 191 |
-
pf = self._encode(self._tokenize(prompt, 64))
|
| 192 |
-
return [
|
| 193 |
-
self.field.encoder.similarity(pf, self._encode(self._tokenize(c, 32)))
|
| 194 |
-
for c in choices
|
| 195 |
-
]
|
| 196 |
-
|
| 197 |
-
def _fhrr_similarities(self, prompt: str, choices: List[str]) -> List[float]:
|
| 198 |
-
pf = self._encode(self._tokenize(prompt, 64))
|
| 199 |
-
return [
|
| 200 |
-
self.field.encoder.similarity(pf, self._encode(self._tokenize(c, 32)))
|
| 201 |
-
for c in choices
|
| 202 |
-
]
|
| 203 |
-
|
| 204 |
-
# ---------- iterative loop ----------
|
| 205 |
-
|
| 206 |
-
def score(self, prompt: str, choices: List[str]) -> IterativeResult:
|
| 207 |
-
n = len(choices)
|
| 208 |
-
if n == 0:
|
| 209 |
-
return IterativeResult(scores=[], belief=[], committed_idx=-1,
|
| 210 |
-
iterations_used=0, converged=False)
|
| 211 |
-
|
| 212 |
-
# 1. One-shot signals
|
| 213 |
-
sbert_sims = np.asarray(self._sbert_similarities(prompt, choices), dtype=np.float64)
|
| 214 |
-
fhrr_sims = np.asarray(self._fhrr_similarities(prompt, choices), dtype=np.float64)
|
| 215 |
-
|
| 216 |
-
# 2. Encode + settle prompt context, learn it
|
| 217 |
-
prompt_tokens = self._tokenize(prompt, max_tokens=64)
|
| 218 |
-
for _ in range(max(1, self.context_learning_epochs)):
|
| 219 |
-
ctx_obs = self.field._fhrr_to_obs(self._encode(prompt_tokens))
|
| 220 |
-
self.field.ngc.settle(ctx_obs, steps=self.context_settle_steps)
|
| 221 |
-
self.field.ngc.learn(modulation=1.0)
|
| 222 |
-
base_state = self.field.ngc.save_state()
|
| 223 |
-
|
| 224 |
-
# Pre-tokenize choice contexts (prompt+choice for joint settling)
|
| 225 |
-
choice_token_lists = [self._tokenize(prompt + " " + c, 64) for c in choices]
|
| 226 |
-
choice_obs = [self.field._fhrr_to_obs(self._encode(t)) for t in choice_token_lists]
|
| 227 |
-
# Choice-only obs (for falsification: settle under choice alone, then predict prompt)
|
| 228 |
-
choice_only_obs = [
|
| 229 |
-
self.field._fhrr_to_obs(self._encode(self._tokenize(c, 32))) for c in choices
|
| 230 |
-
]
|
| 231 |
-
choice_fhrr = [self._encode(self._tokenize(c, 32)) for c in choices]
|
| 232 |
-
# Cache prompt observation vector for falsification target
|
| 233 |
-
prompt_obs_vec = self.field._fhrr_to_obs(self._encode(prompt_tokens))
|
| 234 |
-
|
| 235 |
-
# Episodic retrieval: project current prompt into context space and ask
|
| 236 |
-
# the episodic store for similar past episodes. Each retrieved episode
|
| 237 |
-
# carries the FHRR of the answer that won there. We bias current
|
| 238 |
-
# choices by their similarity to those past winners, weighted by the
|
| 239 |
-
# context match. This is the cross-item memory channel.
|
| 240 |
-
episodic_bias = np.zeros(n, dtype=np.float64)
|
| 241 |
-
if self.use_episodic and self.episodic is not None and len(self.episodic.episodes) > 0:
|
| 242 |
-
uniform_belief = np.full(n, 1.0 / n, dtype=np.float64)
|
| 243 |
-
try:
|
| 244 |
-
query_ctx = self.episodic.compute_item_representation(
|
| 245 |
-
prompt_obs_vec, uniform_belief
|
| 246 |
-
)
|
| 247 |
-
retrieved = self.episodic.retrieve_by_context(
|
| 248 |
-
query_context=query_ctx, k=self.episodic_top_k
|
| 249 |
-
)
|
| 250 |
-
except Exception as e:
|
| 251 |
-
logger.debug("episodic retrieval skipped: %s", e)
|
| 252 |
-
retrieved = []
|
| 253 |
-
if retrieved:
|
| 254 |
-
# Real-valued unit-norm choice vectors (cached for reuse)
|
| 255 |
-
ch_real = []
|
| 256 |
-
for f in choice_fhrr:
|
| 257 |
-
v = np.real(f).astype(np.float64)
|
| 258 |
-
nrm = np.linalg.norm(v)
|
| 259 |
-
ch_real.append(v / nrm if nrm > 1e-10 else v)
|
| 260 |
-
# Only trust episodes whose prompt context strongly matches.
|
| 261 |
-
# Below this threshold, "similar past answer" is noise, not signal.
|
| 262 |
-
for ep in retrieved:
|
| 263 |
-
ans_vec = ep.metadata.get("chosen_fhrr_real") if ep.metadata else None
|
| 264 |
-
if ans_vec is None:
|
| 265 |
-
continue
|
| 266 |
-
ctx_sim = float(np.dot(query_ctx, ep.context_vector))
|
| 267 |
-
if ctx_sim < self.episodic_ctx_sim_threshold:
|
| 268 |
-
continue
|
| 269 |
-
# Also discount by past surprise: episodes the agent struggled
|
| 270 |
-
# with (low committed confidence) carry less authority.
|
| 271 |
-
confidence = max(0.0, 1.0 - float(ep.surprise))
|
| 272 |
-
weight = ctx_sim * confidence
|
| 273 |
-
if weight <= 0:
|
| 274 |
-
continue
|
| 275 |
-
for i in range(n):
|
| 276 |
-
episodic_bias[i] += weight * float(np.dot(ch_real[i], ans_vec))
|
| 277 |
-
|
| 278 |
-
# 3. Initialize belief uniformly in log space
|
| 279 |
-
log_belief = np.zeros(n, dtype=np.float64)
|
| 280 |
-
|
| 281 |
-
trace: List[IterationTrace] = []
|
| 282 |
-
prev_belief = np.ones(n) / n
|
| 283 |
-
converged = False
|
| 284 |
-
iterations_used = 0
|
| 285 |
-
last_channel_scores: Dict[str, np.ndarray] = {}
|
| 286 |
-
|
| 287 |
-
def znorm(a: np.ndarray) -> np.ndarray:
|
| 288 |
-
s = a.std()
|
| 289 |
-
return (a - a.mean()) / s if s > 1e-10 else np.zeros_like(a)
|
| 290 |
-
|
| 291 |
-
for it in range(self.max_iterations):
|
| 292 |
-
iterations_used = it + 1
|
| 293 |
-
|
| 294 |
-
# 3a. Score each choice under current field state.
|
| 295 |
-
# Two NGC signals:
|
| 296 |
-
# energies: free energy of settling on (prompt+choice) jointly.
|
| 297 |
-
# falsify: -prediction_error of (prompt | settled-on-choice-alone).
|
| 298 |
-
# This asks "does this choice's state predict the prompt?"
|
| 299 |
-
# — a real falsification operation, not a fit score.
|
| 300 |
-
energies = np.zeros(n, dtype=np.float64)
|
| 301 |
-
falsify = np.zeros(n, dtype=np.float64)
|
| 302 |
-
for i in range(n):
|
| 303 |
-
self.field.ngc.restore_state(base_state)
|
| 304 |
-
r = self.field.ngc.settle(choice_obs[i], steps=self.choice_settle_steps)
|
| 305 |
-
energies[i] = float(r["final_energy"])
|
| 306 |
-
|
| 307 |
-
# Falsification: settle under choice-only, then ask the field
|
| 308 |
-
# to predict the prompt observation. Higher prediction error =
|
| 309 |
-
# this choice does a worse job of explaining the prompt.
|
| 310 |
-
self.field.ngc.restore_state(base_state)
|
| 311 |
-
self.field.ngc.settle(choice_only_obs[i], steps=self.choice_settle_steps)
|
| 312 |
-
pe = self.field.ngc.prediction_error(prompt_obs_vec)
|
| 313 |
-
falsify[i] = -float(pe)
|
| 314 |
-
ngc_score = -energies
|
| 315 |
-
|
| 316 |
-
# Hopfield bonus: similarity of choice FHRR to retrieved memory
|
| 317 |
-
hop_bonus = np.zeros(n, dtype=np.float64)
|
| 318 |
-
if self.use_hopfield and self.field.memory.n_patterns > 0:
|
| 319 |
-
for i in range(n):
|
| 320 |
-
q = np.real(choice_fhrr[i]).astype(np.float64)
|
| 321 |
-
qn = np.linalg.norm(q)
|
| 322 |
-
if qn < 1e-8:
|
| 323 |
-
continue
|
| 324 |
-
q = q / qn
|
| 325 |
-
retrieved, _e = self.field.memory.retrieve(q, steps=self.hopfield_steps)
|
| 326 |
-
rn = np.linalg.norm(retrieved)
|
| 327 |
-
if rn > 1e-8:
|
| 328 |
-
hop_bonus[i] = float(np.dot(q, retrieved / rn))
|
| 329 |
-
|
| 330 |
-
# 3b. Fuse z-normalized
|
| 331 |
-
# Normalized channel weights from accumulated reliability counts.
|
| 332 |
-
total = sum(self._channel_counts.values())
|
| 333 |
-
w = {c: self._channel_counts[c] / total for c in self._channels}
|
| 334 |
-
|
| 335 |
-
channel_scores = {
|
| 336 |
-
"sbert": znorm(sbert_sims),
|
| 337 |
-
"fhrr": znorm(fhrr_sims),
|
| 338 |
-
"ngc": znorm(ngc_score),
|
| 339 |
-
"falsify": znorm(falsify),
|
| 340 |
-
"hop": znorm(hop_bonus) if self.use_hopfield else np.zeros(n),
|
| 341 |
-
"episodic": znorm(episodic_bias) if self.use_episodic else np.zeros(n),
|
| 342 |
-
}
|
| 343 |
-
fused = sum(w[c] * channel_scores[c] for c in self._channels)
|
| 344 |
-
last_channel_scores = channel_scores
|
| 345 |
-
|
| 346 |
-
# 3c. Accumulate evidence into log-belief
|
| 347 |
-
log_belief = log_belief + self.belief_step * fused
|
| 348 |
-
shifted = log_belief - log_belief.max()
|
| 349 |
-
belief = np.exp(shifted)
|
| 350 |
-
belief = belief / belief.sum() if belief.sum() > 0 else np.ones(n) / n
|
| 351 |
-
|
| 352 |
-
top_idx = int(np.argmax(belief))
|
| 353 |
-
top_p = float(belief[top_idx])
|
| 354 |
-
|
| 355 |
-
trace.append(IterationTrace(
|
| 356 |
-
iteration=it,
|
| 357 |
-
energies=energies.tolist(),
|
| 358 |
-
sentence_sims=sbert_sims.tolist(),
|
| 359 |
-
fhrr_sims=fhrr_sims.tolist(),
|
| 360 |
-
log_belief=log_belief.tolist(),
|
| 361 |
-
belief=belief.tolist(),
|
| 362 |
-
top_idx=top_idx,
|
| 363 |
-
top_p=top_p,
|
| 364 |
-
))
|
| 365 |
-
|
| 366 |
-
# 3d. Hebbian shaping under the PROMPT (not the leading choice).
|
| 367 |
-
# This deepens the field's model of the question over iterations
|
| 368 |
-
# without injecting a positive-feedback loop on the leader.
|
| 369 |
-
self.field.ngc.restore_state(base_state)
|
| 370 |
-
self.field.ngc.settle(prompt_obs_vec, steps=self.context_settle_steps)
|
| 371 |
-
self.field.ngc.learn(modulation=self.shaping_lr_scale)
|
| 372 |
-
|
| 373 |
-
# Re-base on the prompt-grounded state for next iteration's scoring
|
| 374 |
-
base_state = self.field.ngc.save_state()
|
| 375 |
-
|
| 376 |
-
# 3f. Convergence checks
|
| 377 |
-
db = float(np.max(np.abs(belief - prev_belief)))
|
| 378 |
-
prev_belief = belief
|
| 379 |
-
if top_p >= self.convergence_top_p or db < self.convergence_delta:
|
| 380 |
-
converged = True
|
| 381 |
-
break
|
| 382 |
-
|
| 383 |
-
# Store prompt encoding once for Hopfield cross-item memory (not each iteration).
|
| 384 |
-
if self.use_hopfield:
|
| 385 |
-
self.field.memory.store(self._encode(prompt_tokens))
|
| 386 |
-
|
| 387 |
-
committed_idx = int(np.argmax(prev_belief))
|
| 388 |
-
|
| 389 |
-
# Reliability update via *cross-channel agreement* (not agreement with
|
| 390 |
-
# the committed belief — that would be self-fulfilling). Each channel
|
| 391 |
-
# earns one pseudocount per OTHER active channel that picked the same
|
| 392 |
-
# top choice. The consensus structure is the anchor; no single
|
| 393 |
-
# channel is privileged. Channels tracking signal grow together;
|
| 394 |
-
# noisy outliers don't.
|
| 395 |
-
if last_channel_scores and n > 1:
|
| 396 |
-
active = []
|
| 397 |
-
for c in self._channels:
|
| 398 |
-
cs = last_channel_scores.get(c)
|
| 399 |
-
if cs is None:
|
| 400 |
-
continue
|
| 401 |
-
if not np.any(np.abs(cs) > 1e-12):
|
| 402 |
-
continue
|
| 403 |
-
active.append((c, int(np.argmax(cs))))
|
| 404 |
-
for i, (c_i, top_i) in enumerate(active):
|
| 405 |
-
agreements = sum(
|
| 406 |
-
1 for j, (_, top_j) in enumerate(active) if j != i and top_j == top_i
|
| 407 |
-
)
|
| 408 |
-
if agreements > 0:
|
| 409 |
-
self._channel_counts[c_i] += float(agreements) / max(len(active) - 1, 1)
|
| 410 |
-
|
| 411 |
-
# Episodic encoding: store the prompt context together with the FHRR
|
| 412 |
-
# of the chosen answer, so future items can retrieve "what worked
|
| 413 |
-
# last time on a similar prompt."
|
| 414 |
-
if self.use_episodic and self.episodic is not None:
|
| 415 |
-
top_p_final = float(prev_belief[committed_idx]) if n > 0 else 0.0
|
| 416 |
-
chosen_real = np.real(choice_fhrr[committed_idx]).astype(np.float64)
|
| 417 |
-
chosen_norm = np.linalg.norm(chosen_real)
|
| 418 |
-
if chosen_norm > 1e-10:
|
| 419 |
-
chosen_real = chosen_real / chosen_norm
|
| 420 |
-
try:
|
| 421 |
-
self.episodic.encode(
|
| 422 |
-
observation=prompt_obs_vec,
|
| 423 |
-
morton_code=np.zeros(1, dtype=np.int64),
|
| 424 |
-
belief_state=np.asarray(prev_belief, dtype=np.float64),
|
| 425 |
-
action=committed_idx,
|
| 426 |
-
surprise=float(1.0 - top_p_final),
|
| 427 |
-
free_energy=float(np.mean(energies) if n > 0 else 0.0),
|
| 428 |
-
metadata={"chosen_fhrr_real": chosen_real},
|
| 429 |
-
)
|
| 430 |
-
except Exception as e:
|
| 431 |
-
logger.debug("episodic encode skipped: %s", e)
|
| 432 |
-
|
| 433 |
-
return IterativeResult(
|
| 434 |
-
scores=log_belief.tolist(),
|
| 435 |
-
belief=prev_belief.tolist(),
|
| 436 |
-
committed_idx=committed_idx,
|
| 437 |
-
iterations_used=iterations_used,
|
| 438 |
-
converged=converged,
|
| 439 |
-
trace=trace,
|
| 440 |
-
)
|
| 441 |
-
|
| 442 |
-
def reset(self):
|
| 443 |
-
"""Per-item reset. Clears NGC working state but PRESERVES Hopfield
|
| 444 |
-
patterns and episodic memory — those carry across items in a session
|
| 445 |
-
and provide cross-item learning.
|
| 446 |
-
|
| 447 |
-
NGC weights are reinitialized using ``reset_seed``: default ``12345``
|
| 448 |
-
matches legacy behavior for reproducibility; pass ``None`` for a random
|
| 449 |
-
seed each reset, or any other integer to pin runs.
|
| 450 |
-
"""
|
| 451 |
-
seed = self.reset_seed
|
| 452 |
-
if seed is None:
|
| 453 |
-
seed = int(np.random.randint(0, 2 ** 31))
|
| 454 |
-
self.field.ngc.reinitialize(seed)
|
| 455 |
-
self.field.energy_history.clear()
|
| 456 |
-
self.field._step_count = 0
|
| 457 |
-
|
| 458 |
-
def reset_session(self):
|
| 459 |
-
"""Full reset. Use at task / session boundaries to clear all memory
|
| 460 |
-
and per-channel reliability priors (which are task-specific)."""
|
| 461 |
-
self.reset()
|
| 462 |
-
self.field.memory.clear()
|
| 463 |
-
if self.episodic is not None:
|
| 464 |
-
self.episodic.clear()
|
| 465 |
-
for c in self._channels:
|
| 466 |
-
self._channel_counts[c] = 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tests/.DS_Store
DELETED
|
Binary file (6.15 kB)
|
|
|