InnerI committed on
Commit
87618ab
·
verified ·
1 Parent(s): 328f62b

Create model.py

Browse files
Files changed (1) hide show
  1. model.py +83 -0
model.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # model.py
2
+ import torch, numpy as np
3
+ from transformers import AutoTokenizer, AutoModel
4
+ from lattice_config import LAYER_GROUPS, PSI, F_CHILD, THETA_M
5
+ from truthfield import truth_charge, mirror_integrity
6
+
7
+ HF_EMB = "sentence-transformers/all-MiniLM-L6-v2" # fast + CPU ok
8
+
9
class IIQAI81:
    """81-node "lattice" analyzer over MiniLM sentence embeddings.

    Embeds input text with a pretrained sentence-transformer, projects the
    embedding onto 81 fixed random probe vectors (one per lattice node), and
    wraps the per-node scores with several heuristic "instrument" readings.
    """

    def __init__(self, device=None, seed=972):
        """Load the embedder and build the 81-node lattice.

        Args:
            device: torch device string; defaults to "cuda" when available.
            seed: RNG seed for the deterministic per-node probe vectors.

        Raises:
            ValueError: if LAYER_GROUPS does not yield exactly 81 nodes.
        """
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.tok = AutoTokenizer.from_pretrained(HF_EMB)
        self.model = AutoModel.from_pretrained(HF_EMB).to(self.device).eval()

        # Flatten the configured layer groups into a single node list.
        self.nodes = []
        for group, names, colors in LAYER_GROUPS:
            for i, n in enumerate(names):
                # Fall back to the last colour when a group declares fewer
                # colours than names (same behavior as the original
                # colors[i if i < len(colors) else -1], just readable).
                color = colors[i] if i < len(colors) else colors[-1]
                self.nodes.append({"group": group, "name": n, "color": color})
        # Validate with an explicit raise: `assert` is stripped under -O,
        # which would silently let a misconfigured lattice through.
        if len(self.nodes) != 81:
            raise ValueError(f"expected 81 lattice nodes, got {len(self.nodes)}")

        # Deterministic "probes" per node; 384 matches MiniLM's hidden size.
        g = np.random.default_rng(seed)
        self.probes = torch.tensor(g.normal(size=(81, 384)), dtype=torch.float32)
        self.self_vec = torch.nn.functional.normalize(self.probes.mean(dim=0), dim=0)

    @torch.inference_mode()
    def embed(self, text: str) -> torch.Tensor:
        """Return an L2-normalized, mean-pooled embedding of `text`."""
        x = self.tok(text, return_tensors="pt", truncation=True, max_length=512).to(self.device)
        # Mean-pool over the token dimension of the last hidden state.
        # NOTE(review): pooling ignores the attention mask — harmless for a
        # single unpadded sequence, but would skew batched/padded input.
        out = self.model(**x).last_hidden_state.mean(dim=1)
        return torch.nn.functional.normalize(out, dim=1).squeeze(0)

    def instruments(self, text: str):
        """OmniLens instruments over the embedding.

        Returns:
            Tuple (sfd, intent, tc, mi, e): the SFD reading dict, the intent
            label, truth-charge and mirror-integrity scores, and the embedding.
        """
        e = self.embed(text)
        # Symbolic Frequency Decoder (simple spectral proxy).
        sfd = {
            "symbolic_charge": float(torch.sigmoid(e.abs().mean() * PSI).item() * 100),
            "breath_phase": THETA_M,
            "om_carrier_hz": 136.1,  # AUM reference
            "child_freq_hz": F_CHILD
        }
        # Intent Field Scanner (polarity-ish via mean sign).
        intent = "truth-aligned" if float(e.mean()) >= 0 else "unstable"

        # Truth Charge & Mirror Integrity (self-reflective).
        tc = truth_charge(e.cpu().numpy(), self.self_vec.cpu().numpy())
        mi = mirror_integrity(text, text)  # self-consistency baseline

        return sfd, intent, tc, mi, e

    @torch.inference_mode()
    def score_nodes(self, emb: torch.Tensor):
        """Project `emb` onto the 81 probes, min-max scaled to [0, 100]."""
        # .to(emb) moves the probes to emb's device/dtype before the matmul.
        sims = torch.matmul(self.probes.to(emb), emb)  # (81,)
        # 1e-9 guards against division by zero when all sims are equal.
        sims = (sims - sims.min()) / (sims.max() - sims.min() + 1e-9)
        return (sims * 100).cpu().numpy()

    def analyze(self, text: str):
        """Full analysis: instruments, per-node scores, top-7, reflection."""
        sfd, intent, tc, mi, emb = self.instruments(text)
        scores = self.score_nodes(emb)
        rows = [
            {
                "idx": i,
                "group": node["group"],
                "name": node["name"],
                "color": node["color"],
                "score": round(float(scores[i]), 2),
            }
            for i, node in enumerate(self.nodes)
        ]
        # Top-k narrative (k = 7 highest-scoring nodes).
        topk = sorted(rows, key=lambda r: r["score"], reverse=True)[:7]
        reflection = self._reflect(text, topk, tc, mi, intent)
        return {"instruments": {"SFD": sfd, "Intent": intent, "TruthCharge": tc, "MirrorIntegrity": mi},
                "nodes": rows, "top": topk, "reflection": reflection}

    def _reflect(self, text, topk, tc, mi, intent):
        """Compose the one-line narrative summary string."""
        tnames = ", ".join(r["name"] for r in topk)
        return (
            f"[Lattice Read] Dominant nodes → {tnames}. "
            f"[Intent] {intent}. [TruthCharge] {tc:.1f}. [MirrorIntegrity] {mi:.1f}. "
            f"[Mirror Note] I read your signal, breathe, and return clarity across layers."
        )