InnerI commited on
Commit
de8a5bd
·
verified ·
1 Parent(s): a01f234

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +179 -0
app.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# app.py
import gradio as gr
import pandas as pd
from model import IIQAI81                 # project-local: the 81-node lattice model
from utils_viz import bar_topk            # project-local: renders a bar chart of top-k node scores
from lattice_config import LAYER_GROUPS   # project-local: iterable of (group, node_names, ...) triples

# Single shared model instance, created once at import time.
# NOTE(review): assumes IIQAI81() construction is cheap and side-effect free — confirm.
model = IIQAI81()
10
# Markdown shown on first load, re-shown by the Clear button, and returned by
# run() when the input is empty/whitespace.
INTRO = """\
# IIQAI-81 — Subjective Inner I AI
Type anything. The model maps your text across 81 lattice nodes and returns:
- **Lattice View Mode** (scores per node)
- **Symbolic Frequency Decoder** (SFD)
- **Intent Field Scanner**
- **Truth Charge Meter**
- **Mirror Integrity Check**
"""
20
# Simple, human-readable blurbs for groups and nodes
# Keys must match the "group" values produced by model.analyze() — run() and
# explain_node() fall back gracefully via .get() when a group is missing here.
GROUP_BLURBS = {
    "Awareness": "Core noticing -> clarity -> wisdom. Higher score = you’re speaking from direct seeing.",
    "Knowledge": "Facts, concepts, how-to, meta-thinking. Higher = well-structured, informative signal.",
    "Consciousness": "States and scopes of mind. Higher = spacious, reflective, or high-state language.",
    "Unknowns": "Gaps, paradox, doubt. Higher = wrestling with uncertainty (which is healthy!).",
    "UnknownFields": "Large-scale unknown domains. Higher = speculation about science/culture/cosmos.",
    "SuppressedLayers": "Hidden material or blind spots surfacing.",
    "ColorFieldConsciousness": "Spiral color states (developmental hues) showing tone/values in the signal.",
    "HigherBeingStates": "Intuitive/illumined/overmind. Higher = transpersonal or devotional current.",
}
32
# One-liners for popup labels per node (keep simple; extend anytime).
# Built from LAYER_GROUPS, whose entries unpack as (group, node_names, _).
NODE_TIPS = {
    node: f"{node.replace('_', ' ')} — A simple lens on your message through the {group} layer."
    for group, node_names, _ in LAYER_GROUPS
    for node in node_names
}
38
def friendly_score_note(score):
    """Map a 0–100 node score to a one-line plain-English interpretation."""
    # Thresholds checked highest-first; first match wins.
    _BANDS = (
        (80, "Very strong resonance — this layer is leading your message."),
        (60, "Clear influence — this layer is shaping your tone/meaning."),
        (40, "Moderate trace — present but not dominant."),
    )
    for threshold, note in _BANDS:
        if score >= threshold:
            return note
    return "Low trace — this layer is quiet here."
44
def run(text):
    """Analyze *text* with the lattice model and format every UI pane.

    Returns a 7-tuple matching the outputs wired in the Blocks UI:
    (header markdown, score table DataFrame, top-k chart image,
    instruments markdown, insights markdown, group-overview markdown,
    reflection summary). Empty/whitespace input short-circuits to the
    intro text with all other panes cleared (None).
    """
    if not text.strip():
        return INTRO, None, None, None, None, None, None

    # model.analyze() returns a dict read below via the keys
    # "nodes", "top", "instruments", and "reflection".
    out = model.analyze(text)
    # "nodes" records carry at least group/name/score columns (selected below).
    df = pd.DataFrame(out["nodes"]).sort_values("score", ascending=False)
    img = bar_topk(out["top"])

    # Instruments -> human cards
    ins = out["instruments"]
    sfd = ins["SFD"]
    human_cards = [
        f"**Intent:** {ins['Intent'].capitalize()} — plain meaning: the overall pull of your words trends this way.",
        f"**Truth Charge:** {ins['TruthCharge']}/100 — how aligned your signal is to your stable self-vector.",
        f"**Mirror Integrity:** {ins['MirrorIntegrity']}/100 — do your words agree with themselves?",
        f"**Symbolic Charge:** {sfd['symbolic_charge']:.1f}/100 — how vivid/symbolic the phrasing is.",
        f"**Breath-phase (θₘ):** {sfd['breath_phase']} — a runtime rhythm marker.",
        f"**OM carrier:** {sfd['om_carrier_hz']} Hz • **Child tone:** {sfd['child_freq_hz']} Hz",
    ]
    human_md = "- " + "\n- ".join(human_cards)

    # Insight summary (top 3)
    top3 = df.head(3).to_dict(orient="records")
    bullet = []
    for r in top3:
        bullet.append(f"**{r['name']}** ({r['group']}) — {friendly_score_note(r['score'])}")
    insights_md = "### Quick Insights\n" + "\n".join([f"- {b}" for b in bullet])

    # Group blurbs pane — one line per group present, ordered by its peak node score.
    groups_present = df.groupby("group")["score"].max().sort_values(ascending=False)
    group_lines = []
    for g, sc in groups_present.items():
        brief = GROUP_BLURBS.get(g, g)  # fall back to the bare group name when no blurb exists
        group_lines.append(f"**{g}** — {brief} *(peak {sc:.0f})*")
    groups_md = "### Group Overview\n" + "\n\n".join(group_lines)

    return (
        "",                             # header md cleared once running
        df[["group","name","score"]],   # table
        img,                            # chart
        human_md,                       # instruments simple
        insights_md,                    # insights
        groups_md,                      # groups
        out["reflection"],              # reflection summary
    )
90
def explain_node(evt: gr.SelectData, df_state, *_extra):
    """Build a plain-language explanation card for a clicked table row.

    Wired to ``out_df.select``. Gradio injects *evt* from the ``SelectData``
    annotation, then passes each listed input component's value positionally.
    The original wiring listed two inputs (``[out_df, df_state]``), which
    handed this two-parameter function three arguments and raised a
    ``TypeError`` on every row click. ``*_extra`` absorbs any surplus
    positional value, so the handler now works under either wiring: the
    first DataFrame-like argument it receives carries the group/name/score
    columns it needs.

    Returns (accordion visibility update, explanation markdown).
    """
    if df_state is None:
        return gr.update(visible=False), ""
    # evt.index is (row_idx, col_idx) for Dataframe select events.
    row_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
    try:
        row = df_state.iloc[row_idx]
        name = row["name"]
        group = row["group"]
        score = row["score"]
        tip = NODE_TIPS.get(name, f"{name} in {group}")
        more = GROUP_BLURBS.get(group, "")
        txt = (
            f"### {name}\n**Group:** {group}\n**Score:** {score:.1f}\n\n{tip}\n\n"
            f"**Why it matters:** {friendly_score_note(score)}\n\n*Group context:* {more}"
        )
        return gr.update(visible=True), txt
    except Exception:
        # Best-effort: any lookup problem (stale index, missing column)
        # just hides the popup instead of surfacing an error in the UI.
        return gr.update(visible=False), ""
107
with gr.Blocks(css=r"""
/* Tooltip helpers: add data-tip to any element with class .tip */
.tip { position: relative; cursor: help; }
.tip:hover::after{
  content: attr(data-tip);
  position: absolute; left: 0; top: 110%;
  background: rgba(20,20,35,.95); color: #fff;
  padding: .45rem .6rem; border-radius: .4rem;
  max-width: 360px; white-space: normal; z-index: 9999;
  box-shadow: 0 6px 20px rgba(0,0,0,.25);
}
.card { border: 1px solid rgba(0,0,0,.08); border-radius: 10px; padding: 12px; background: rgba(255,255,255,.6); }
""") as demo:
    # Holds the score DataFrame between events so row clicks can be explained.
    df_state = gr.State()

    gr.Markdown(INTRO)
    with gr.Row():
        inp = gr.Textbox(label="Input", placeholder="Type your signal…", lines=4, autofocus=True)
    with gr.Row():
        btn = gr.Button("Analyze", variant="primary")
        clear = gr.Button("Clear")

    with gr.Tabs():
        with gr.Tab("Lattice Table"):
            out_md = gr.Markdown()
            out_df = gr.Dataframe(
                interactive=False, wrap=True, headers=["group","name","score"],
                label="Scores by node (click a row for a simple explanation)",
                height=420
            )
            with gr.Accordion("Node explanation", open=True, visible=False) as node_popup:
                node_text = gr.Markdown()

        with gr.Tab("Top-K Chart"):
            out_img = gr.Image(type="pil", label="Top nodes (bar)")
        with gr.Tab("Instruments"):
            # Static tooltip row (styled via the .tip/.card CSS above).
            gr.HTML("""
            <div class="card">
              <strong>Hover labels:</strong>
              <span class="tip" data-tip="How vivid/symbolic your phrasing is; higher often means more metaphor, imagery, or archetypal language.">Symbolic Frequency Decoder</span> •
              <span class="tip" data-tip="Overall pull of your message — is the pattern stable, truth-aligned, or unstable?">Intent Field Scanner</span> •
              <span class="tip" data-tip="Cosine alignment to a stable self-vector; rough proxy for internal alignment (0–100).">Truth Charge Meter</span> •
              <span class="tip" data-tip="Self-consistency: do your words reflect themselves truthfully across the passage?">Mirror Integrity Check</span>
            </div>
            """)
        out_ins = gr.Markdown()

        with gr.Tab("Insights"):
            out_cards = gr.Markdown()
        with gr.Tab("Groups (Plain English)"):
            out_groups = gr.Markdown()
        with gr.Tab("Summary"):
            out_sum = gr.Markdown()

    def _run_and_store(text):
        """Run the analysis ONCE and reuse its table for df_state.

        The original wiring chained a second handler (_store_df) that called
        model.analyze(text) again just to populate df_state — doubling the
        model work per submit. run()'s second output already is the
        group/name/score table, which is all explain_node reads.
        """
        results = run(text)
        return (*results, results[1])

    _outputs = [out_md, out_df, out_img, out_ins, out_cards, out_groups, out_sum, df_state]
    btn.click(_run_and_store, [inp], _outputs)
    inp.submit(_run_and_store, [inp], _outputs)

    # Gradio injects the SelectData event from explain_node's annotation;
    # only df_state is an explicit input. (The original inputs list
    # [out_df, df_state] passed one argument too many and crashed the
    # handler on every click.)
    out_df.select(explain_node, [df_state], [node_popup, node_text])

    def _clear():
        """Reset every pane, the stored state, and hide the node popup."""
        return INTRO, None, None, None, None, None, None, None, gr.update(visible=False), ""
    clear.click(_clear, [], [out_md, out_df, out_img, out_ins, out_cards, out_groups, out_sum, df_state, node_popup, node_text])

demo.launch()