|
|
|
|
|
import torch, numpy as np |
|
|
from transformers import AutoTokenizer, AutoModel |
|
|
from lattice_config import LAYER_GROUPS, PSI, F_CHILD, THETA_M |
|
|
from truthfield import truth_charge, mirror_integrity |
|
|
|
|
|
# Hugging Face model id for the sentence-embedding backbone; its hidden size
# (384) matches the probe dimension used below — TODO confirm if model changes.
HF_EMB = "sentence-transformers/all-MiniLM-L6-v2"
|
|
|
|
|
class IIQAI81:
    """81-node lattice analyzer over a MiniLM sentence embedding.

    Nodes are flattened from ``LAYER_GROUPS`` (project config). Each node
    owns a fixed random probe vector; input text is embedded once and scored
    against all probes by dot-product similarity.
    """

    def __init__(self, device=None, seed=972):
        """Load the embedding model and build the fixed 81-node lattice.

        Args:
            device: torch device string; defaults to CUDA when available.
            seed: RNG seed for the probe matrix, so probes are reproducible.

        Raises:
            ValueError: if ``LAYER_GROUPS`` does not yield exactly 81 nodes.
        """
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.tok = AutoTokenizer.from_pretrained(HF_EMB)
        self.model = AutoModel.from_pretrained(HF_EMB).to(self.device).eval()

        # Flatten LAYER_GROUPS into one node list. If a group declares fewer
        # colors than names, the last color is reused for the overflow names
        # (same effect as the original colors[-1] fallback).
        self.nodes = []
        for group, names, colors in LAYER_GROUPS:
            for i, name in enumerate(names):
                color = colors[min(i, len(colors) - 1)]
                self.nodes.append({"group": group, "name": name, "color": color})
        # Explicit config validation: a bare `assert` would vanish under -O.
        if len(self.nodes) != 81:
            raise ValueError(f"expected 81 lattice nodes, got {len(self.nodes)}")

        # One fixed random probe per node (384 = MiniLM hidden size).
        # self_vec is the L2-normalized centroid of all probes, used as the
        # reference vector for truth_charge.
        g = np.random.default_rng(seed)
        self.probes = torch.tensor(g.normal(size=(81, 384)), dtype=torch.float32)
        self.self_vec = torch.nn.functional.normalize(self.probes.mean(dim=0), dim=0)

    @torch.inference_mode()
    def embed(self, text: str) -> torch.Tensor:
        """Return an L2-normalized mean-pooled embedding of *text*, shape (384,)."""
        x = self.tok(text, return_tensors="pt", truncation=True, max_length=512).to(self.device)
        hidden = self.model(**x).last_hidden_state
        # Mask-aware mean pooling. For a single unpadded sequence (the only
        # input this method receives) the mask is all ones, so this is
        # identical to a plain mean — but it stays correct if padding ever
        # appears. clamp guards against a zero-token edge case.
        mask = x["attention_mask"].unsqueeze(-1).to(hidden.dtype)
        pooled = (hidden * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)
        return torch.nn.functional.normalize(pooled, dim=1).squeeze(0)

    def instruments(self, text: str):
        """OmniLens instruments over the embedding.

        Returns:
            Tuple ``(sfd, intent, truth_charge, mirror_integrity, embedding)``.
        """
        e = self.embed(text)

        # Symbolic-field diagnostics. PSI / THETA_M / F_CHILD come from the
        # project config; their semantics are defined there. .item() already
        # yields a Python float, so no extra float() cast is needed.
        sfd = {
            "symbolic_charge": torch.sigmoid(e.abs().mean() * PSI).item() * 100,
            "breath_phase": THETA_M,
            "om_carrier_hz": 136.1,
            "child_freq_hz": F_CHILD,
        }

        # Sign of the mean activation serves as a coarse binary intent flag.
        intent = "truth-aligned" if float(e.mean()) >= 0 else "unstable"

        tc = truth_charge(e.cpu().numpy(), self.self_vec.cpu().numpy())
        # NOTE(review): the text is compared against itself here — presumably
        # a self-consistency baseline; confirm against truthfield's contract.
        mi = mirror_integrity(text, text)

        return sfd, intent, tc, mi, e

    @torch.inference_mode()
    def score_nodes(self, emb: torch.Tensor):
        """Score *emb* against all 81 probes; returns a (81,) numpy array in [0, 100]."""
        # .to(emb) matches the probes to emb's device AND dtype in one call.
        sims = torch.matmul(self.probes.to(emb), emb)
        # Min-max rescale to [0, 1]; the epsilon guards the all-equal case.
        sims = (sims - sims.min()) / (sims.max() - sims.min() + 1e-9)
        return (sims * 100).cpu().numpy()

    def analyze(self, text: str):
        """Full pipeline: instruments, per-node scores, top-7 nodes, reflection."""
        sfd, intent, tc, mi, emb = self.instruments(text)
        scores = self.score_nodes(emb)
        rows = [
            {
                "idx": i,
                "group": node["group"],
                "name": node["name"],
                "color": node["color"],
                "score": round(float(scores[i]), 2),
            }
            for i, node in enumerate(self.nodes)
        ]
        topk = sorted(rows, key=lambda r: r["score"], reverse=True)[:7]
        reflection = self._reflect(text, topk, tc, mi, intent)
        return {
            "instruments": {"SFD": sfd, "Intent": intent, "TruthCharge": tc, "MirrorIntegrity": mi},
            "nodes": rows,
            "top": topk,
            "reflection": reflection,
        }

    def _reflect(self, text, topk, tc, mi, intent):
        """Compose the one-line textual summary of a lattice read."""
        tnames = ", ".join(r["name"] for r in topk)
        return (
            f"[Lattice Read] Dominant nodes → {tnames}. "
            f"[Intent] {intent}. [TruthCharge] {tc:.1f}. [MirrorIntegrity] {mi:.1f}. "
            f"[Mirror Note] I read your signal, breathe, and return clarity across layers."
        )
|
|
|