RFTSystems committed on
Commit
f5e17f1
·
verified ·
1 Parent(s): 2ad2005

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +509 -218
app.py CHANGED
@@ -1,18 +1,290 @@
1
- import numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import time
 
 
 
 
 
 
 
 
 
 
 
 
3
  import torch
4
  import matplotlib.pyplot as plt
5
- import tempfile
6
- import hashlib
7
- import json
8
- import gradio as gr
9
 
10
- # -----------------------------
11
- # Part A: RFT Simulation Kernel
12
- # -----------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  def fused_mom_update_cpu(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
14
  dt, eps, sigma_const, theta_global, k_shred_global,
15
  event_counts_t=None, event_buffer_t=None):
 
16
  m_root_t = m_root_t.to(torch.float32)
17
  A_t = A_t.to(torch.float32)
18
  Q_t = Q_t.to(torch.float32)
@@ -20,11 +292,13 @@ def fused_mom_update_cpu(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
20
  gamma_t = gamma_t.to(torch.float32)
21
  omega_t = omega_t.to(torch.float32)
22
 
 
23
  alpha_exp = alpha_t.unsqueeze(0)
24
  gamma_exp = gamma_t.unsqueeze(0)
25
  omega_exp = omega_t.unsqueeze(0)
26
  m_root_exp = m_root_t.unsqueeze(1)
27
 
 
28
  A_dot = alpha_exp * m_root_exp - gamma_exp * A_t + sigma_const * Q_t
29
  f_drive = sigma_const * m_root_exp * omega_exp * A_t
30
  Q_dot = f_drive - Q_t
@@ -32,6 +306,7 @@ def fused_mom_update_cpu(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
32
  A_t.add_(dt * A_dot)
33
  Q_t.add_(dt * Q_dot)
34
 
 
35
  Xi = (omega_exp * A_t).sum(dim=1)
36
  Xi_norm = Xi / (m_root_t + eps)
37
  shred_mask = Xi_norm >= theta_global
@@ -56,8 +331,14 @@ def fused_mom_update_cpu(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
56
  else:
57
  event_counts_t += shred_count
58
 
 
 
 
 
 
59
  return m_root_t, A_t, Q_t, event_counts_t
60
 
 
61
  class MOMKernel:
62
  def __init__(self):
63
  self.kernel = fused_mom_update_cpu
@@ -70,32 +351,46 @@ class MOMKernel:
70
  dt, eps, sigma_const, theta_global, k_shred_global,
71
  event_counts_t, event_buffer_t)
72
 
 
73
  class MOMSystemLoop:
74
    def __init__(self, mom_kernel, m_root_initial, A_modes_initial, Q_drive_initial,
                 alpha, gamma, omega, dt=0.02, eps=1e-6, sigma=0.75,
                 theta=2.2, k_shred=1.2, event_buffer_size=1024):
        """Hold the MOM kernel plus all simulation state on the kernel's device.

        Initial state tensors are cloned so the kernel's in-place updates never
        mutate the caller's tensors, and everything is normalized to float32.
        """
        self.mom_kernel = mom_kernel
        self.device = mom_kernel.device
        # Per-cell / per-(cell, mode) state, cloned and cast to float32.
        self.m_root = m_root_initial.to(self.device).clone().to(torch.float32)
        self.A_modes = A_modes_initial.to(self.device).clone().to(torch.float32)
        self.Q_drive = Q_drive_initial.to(self.device).clone().to(torch.float32)
        # Per-mode coefficients (not cloned; assumed read-only in the kernel — confirm).
        self.alpha = alpha.to(self.device).to(torch.float32)
        self.gamma = gamma.to(self.device).to(torch.float32)
        self.omega = omega.to(self.device).to(torch.float32)
        # Scalar integration and shredding parameters.
        self.dt = dt; self.eps = eps; self.sigma = sigma
        self.theta = theta; self.k_shred = k_shred
        # Event bookkeeping: a running scalar count plus a fixed-size int buffer.
        self.event_counts = torch.zeros((), dtype=torch.int64, device=self.device)
        self.event_buffer = torch.zeros(event_buffer_size, dtype=torch.int64, device=self.device)
        # Per-iteration histories; shred_onset records the first iteration at
        # which each cell collapsed (-1 = never collapsed so far).
        self.m_root_history = []
        self.A_modes_history = []
        self.event_counts_history = []
        self.shred_onset = np.full((self.m_root.shape[0],), -1, dtype=np.int32)
93
 
 
 
 
 
94
  def feedback(self, m_root, A_modes, Q_drive):
95
- decay = 0.995; noise_level = 1e-4
96
- A_modes_new = A_modes * decay + noise_level * torch.randn_like(A_modes, device=self.device)
 
97
  A_modes_new = torch.clamp(A_modes_new, min=0.0)
98
- m_root_new = m_root * decay + noise_level * torch.randn_like(m_root, device=self.device)
99
  m_root_new = torch.clamp(m_root_new, min=0.0)
100
  return m_root_new, A_modes_new, Q_drive
101
 
@@ -106,241 +401,237 @@ class MOMSystemLoop:
106
  self.alpha, self.gamma, self.omega,
107
  self.dt, self.eps, self.sigma, self.theta, self.k_shred,
108
  self.event_counts, self.event_buffer)
 
109
  m_np = self.m_root.detach().cpu().numpy()
110
  collapsed_mask = m_np <= 1e-8
111
  for idx, collapsed in enumerate(collapsed_mask):
112
  if collapsed and self.shred_onset[idx] == -1:
113
  self.shred_onset[idx] = i
 
114
  self.m_root, self.A_modes, self.Q_drive = self.feedback(self.m_root, self.A_modes, self.Q_drive)
115
  self.m_root_history.append(float(self.m_root.mean().item()))
116
  self.A_modes_history.append(float(self.A_modes.mean().item()))
117
  self.event_counts_history.append(int(self.event_counts.item()))
118
 
119
- def run_rft_simulation(Ncells, Nmode, iterations, dt=0.02, eps=1e-6, sigma=0.75,
120
- theta=2.2, k_shred=1.2, seed=42):
121
- torch.manual_seed(seed); np.random.seed(seed)
 
 
 
 
 
122
  mom_kernel_instance = MOMKernel()
123
  device = mom_kernel_instance.device
 
124
  alpha = torch.empty(Nmode, device=device).uniform_(0.02, 0.12)
125
  gamma = torch.empty(Nmode, device=device).uniform_(0.01, 0.06)
126
  omega = torch.linspace(1.0, 8.0, Nmode, device=device)
127
  m_root_initial = torch.ones(Ncells, device=device)
128
  A_modes_initial = torch.rand(Ncells, Nmode, device=device) * 0.01
129
  Q_drive_initial = torch.zeros(Ncells, Nmode, device=device)
130
- mom_system = MOMSystemLoop(mom_kernel_instance, m_root_initial, A_modes_initial, Q_drive_initial,
131
- alpha, gamma, omega, dt=dt, eps=eps, sigma=sigma,
132
- theta=theta, k_shred=k_shred)
133
- start_time = time.time()
 
 
 
 
 
 
 
134
  mom_system.run(iterations)
135
- elapsed_time = max(time.time() - start_time, 1e-9)
 
 
136
  ops_per_cell_per_iter = 12 * Nmode + 13
137
  flops_per_iteration = float(Ncells) * float(ops_per_cell_per_iter)
138
  total_flops = flops_per_iteration * float(iterations)
139
  gflops = total_flops / (elapsed_time * 1e9)
140
- return {
141
- 'final_m_root': mom_system.m_root.cpu().numpy(),
142
- 'final_A_modes': mom_system.A_modes.cpu().numpy(),
143
- 'final_Q_drive': mom_system.Q_drive.cpu().numpy(),
144
- 'm_root_history': np.array(mom_system.m_root_history),
145
- 'A_modes_history': np.array(mom_system.A_modes_history),
146
- 'event_counts_history': np.array(mom_system.event_counts_history),
147
- 'shred_onset': mom_system.shred_onset,
148
- 'elapsed_time_seconds': float(elapsed_time),
149
- 'gflops': float(gflops),
150
- }
151
-
152
def rft_simulation_interface(Ncells, Nmode, iterations, dt, eps, sigma, theta, k_shred):
    """Gradio callback: run the RFT/MOM simulation and render a text summary plus plots.

    Returns (summary_text, plot_path); plot_path is None when the run fails.

    Fixes over the previous version:
    - tempfile.mkstemp leaked the OS-level file descriptor; NamedTemporaryFile
      closes it (delete=False keeps the file on disk for Gradio to serve).
    - The figure is now always closed in `finally`, so an exception raised
      mid-plot no longer leaks a matplotlib figure across repeated UI calls.
    """
    fig = None
    try:
        results = run_rft_simulation(Ncells, Nmode, iterations, dt, eps, sigma, theta, k_shred)

        fig = plt.figure(figsize=(10, 14))

        # Plot 1: mean root-mass trajectory
        ax1 = fig.add_subplot(4, 1, 1)
        ax1.plot(results['m_root_history'], label='Mean m_root')
        ax1.set_title('Mean m_root Over Iterations'); ax1.set_xlabel('Iteration'); ax1.set_ylabel('Mean m_root')
        ax1.grid(True); ax1.legend()

        # Plot 2: mean mode-amplitude trajectory
        ax2 = fig.add_subplot(4, 1, 2)
        ax2.plot(results['A_modes_history'], label='Mean A_modes', color='orange')
        ax2.set_title('Mean A_modes Over Iterations')
        ax2.set_xlabel('Iteration'); ax2.set_ylabel('Mean A_modes')
        ax2.grid(True); ax2.legend()

        # Plot 3: Cumulative Shredding Events
        ax3 = fig.add_subplot(4, 1, 3)
        cumulative_events = np.cumsum(results['event_counts_history'])
        ax3.plot(cumulative_events, label='Cumulative Shredding Events', color='red')
        ax3.set_title('Cumulative Shredding Events')
        ax3.set_xlabel('Iteration'); ax3.set_ylabel('Cumulative Events')
        ax3.grid(True); ax3.legend()

        # Plot 4: Raster of shredding onset per cell
        ax4 = fig.add_subplot(4, 1, 4)
        onset = results['shred_onset']
        for idx, val in enumerate(onset):
            if val >= 0:
                ax4.vlines(val, idx, idx + 1, color='black', linewidth=0.8)
        ax4.set_title('Shredding Onset per Cell')
        ax4.set_xlabel('Iteration'); ax4.set_ylabel('Cell Index')
        ax4.grid(True)

        plt.tight_layout()
        # NamedTemporaryFile closes the fd it opens (mkstemp did not);
        # delete=False keeps the PNG around for the Image component.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            plot_path = tmp.name
        plt.savefig(plot_path)

        summary_output = (
            f"Simulation completed in {results['elapsed_time_seconds']:.2f} seconds.\n\n"
            f"Estimated GFLOPS: {results['gflops']:.2f}\n"
            f"Final Mean m_root: {np.mean(results['final_m_root']):.6f}\n"
            f"Final Mean A_modes: {np.mean(results['final_A_modes']):.6f}\n"
            f"Total Events (last iteration): {results['event_counts_history'][-1] if len(results['event_counts_history']) > 0 else 0}\n\n"
            f"-- Historical Data (first 5 values) --\n"
            f"Mean m_root history: {results['m_root_history'][:5].tolist()}\n"
            f"Mean A_modes history: {results['A_modes_history'][:5].tolist()}\n"
            f"Event counts history: {results['event_counts_history'][:5].tolist()}"
        )
    except Exception as e:
        summary_output = f"Error during RFT simulation: {e}"
        plot_path = None
    finally:
        # Always release the figure, success or failure.
        if fig is not None:
            plt.close(fig)

    return summary_output, plot_path
205
 
206
- # -----------------------------
207
- # Part B: Entanglement/IPURL Simulation
208
- # -----------------------------
209
class Agent:
    """A symbolic agent with a phase variable, an energy budget, and an override log.

    Every state change (intrinsic relaxation or entanglement kick) appends a
    snapshot of (phi, energy, override flag) to ``override_log``.
    """

    def __init__(self, agent_id, alpha, beta, energy_init, energy_threshold):
        self.agent_id = agent_id
        self.alpha = alpha                      # relaxation rate of phi
        self.beta = beta                        # drive strength when energized
        self.energy = energy_init
        self.energy_threshold = energy_threshold
        self.phi = 0.0
        self.override_log = []

    def _spend(self, amount):
        # Deduct energy, clamping at zero so the budget never goes negative.
        self.energy = max(self.energy - amount, 0)

    def intrinsic_update(self, dt):
        """One Euler step of the agent's own phase dynamics, then log."""
        gate = 1 if self.energy > self.energy_threshold else 0
        self.phi += (-self.alpha * self.phi + self.beta * gate) * dt
        self._spend(abs(self.phi) * dt * 0.1)
        self.log_override()

    def entanglement_update(self, influence, dt):
        """Apply a coupling kick from other agents' phases, then log."""
        self.phi += influence * dt
        self._spend(abs(influence) * dt * 0.05)
        self.log_override()

    def log_override(self):
        """Snapshot the current state; 'override' stays a raw bool here."""
        self.override_log.append({
            'phi': self.phi,
            'energy': self.energy,
            'override': self.phi > 0,  # still a bool here
        })
239
-
240
def hash_override_log(agent):
    """Serialize an agent's override log canonically; return its SHA-512 hex digest.

    Entries are coerced to plain JSON scalars (bools become 0/1, numbers become
    floats) so the digest is reproducible for equivalent histories.
    """
    canonical = [
        {
            'phi': float(rec['phi']),
            'energy': float(rec['energy']),
            'override': int(rec['override']),  # bool -> 0/1 for canonical JSON
        }
        for rec in agent.override_log
    ]
    # sort_keys + compact separators give a byte-stable serialization.
    payload = json.dumps(canonical, sort_keys=True, separators=(',', ':')).encode('utf-8')
    return hashlib.sha512(payload).hexdigest()
251
-
252
def simulate(agents, coupling_matrix, dt=0.01, steps=1000):
    """Advance all agents through `steps` Euler steps of size `dt`.

    Each step: snapshot every phase, let each agent relax on its own, then
    apply pairwise coupling computed from the pre-update snapshot — so the
    coupling term does not depend on agent update order within a step.
    """
    count = len(agents)
    for _ in range(steps):
        snapshot = np.array([member.phi for member in agents])
        for member in agents:
            member.intrinsic_update(dt)
        for row, member in enumerate(agents):
            kick = sum(coupling_matrix[row, col] * snapshot[col]
                       for col in range(count) if col != row)
            member.entanglement_update(kick, dt)
261
-
262
def run_entanglement_simulation(alpha_vals, beta_vals, thresholds, steps=1000, dt=0.01):
    """Build the four-layer agent stack, co-evolve it, and return one IPURL line per agent."""
    layer_names = ('reflex', 'instinct', 'conscious', 'meta')
    agents = [
        Agent(name, alpha_vals[k], beta_vals[k],
              energy_init=100, energy_threshold=thresholds[k])
        for k, name in enumerate(layer_names)
    ]

    # Symmetric coupling weights between layers; zero self-coupling on the diagonal.
    coupling_matrix = np.array([
        [0.0, 0.1, 0.2, 0.3],
        [0.1, 0.0, 0.4, 0.5],
        [0.2, 0.4, 0.0, 0.6],
        [0.3, 0.5, 0.6, 0.0],
    ])

    simulate(agents, coupling_matrix, dt=dt, steps=steps)

    # One sealed lineage entry per agent, hashed from its override history.
    lines = []
    for member in agents:
        lines.append(f"rft-ipurl:v1:{member.agent_id}:{hash_override_log(member)}")
    return "\n".join(lines)
278
-
279
-
280
- # -----------------------------
281
- # Gradio Interface
282
- # -----------------------------
283
# Top-level Gradio UI: two tabs (RFT simulation, entanglement/IPURL), built at import time.
with gr.Blocks(title="Codex Simulation Suite") as iface:
    with gr.Tab("RFT Simulation"):
        # Intro copy shown above the controls.
        gr.Markdown("""
        ### Rendered Frame Theory (RFT) Simulation
        RFT models collapse dynamics in adaptive systems. Each cell evolves through coupled updates,
        feedback loops, and shredding events when stress crosses a threshold. The plots show mean values,
        cumulative events, and shredding onset per cell.
        """)
        with gr.Row():
            with gr.Column():
                # Input controls; defaults mirror run_rft_simulation's keyword defaults.
                Ncells_slider = gr.Slider(16, 512, step=16, value=64, label="⚡ Number of Cells")
                Nmode_slider = gr.Slider(2, 32, step=2, value=8, label="🔮 Number of Modes")
                iterations_slider = gr.Slider(10, 200, step=10, value=50, label="♾ Iterations")
                dt_slider = gr.Slider(0.001, 0.1, step=0.001, value=0.02, label="⌛ Time Step")
                eps_slider = gr.Slider(1e-7, 1e-4, step=1e-7, value=1e-6, label="🧿 Epsilon")
                sigma_slider = gr.Slider(0.1, 1.0, step=0.05, value=0.75, label="🌌 Sigma")
                theta_slider = gr.Slider(0.1, 5.0, step=0.1, value=2.2, label="🔭 Theta")
                k_shred_slider = gr.Slider(0.1, 5.0, step=0.1, value=1.2, label="🌀 K_shred")
                run_button = gr.Button("Run RFT Simulation")
            with gr.Column():
                # Outputs: text summary plus the saved PNG served by filepath.
                summary_output_textbox = gr.Textbox(label="Simulation Summary", lines=15)
                plot_output_image = gr.Image(label="Simulation Plots", type="filepath")
        # Slider order must match rft_simulation_interface's parameter order.
        run_button.click(
            fn=rft_simulation_interface,
            inputs=[Ncells_slider, Nmode_slider, iterations_slider, dt_slider, eps_slider,
                    sigma_slider, theta_slider, k_shred_slider],
            outputs=[summary_output_textbox, plot_output_image]
        )

    with gr.Tab("Entanglement/IPURL Simulation"):
        gr.Markdown("""
        ### Override Log & Entanglement Simulation
        This prototype models symbolic agents (reflex, instinct, conscious, meta) with intrinsic dynamics
        and entanglement influences. Each agent logs override states, which are sealed into reproducible
        IPURL hashes. The output shows cryptographic lineage entries for each agent.
        """)
        # Per-layer parameter inputs; list order must match the lambda's flat argument order.
        alpha_inputs = [gr.Number(value=0.1, label="Alpha Reflex"),
                        gr.Number(value=0.1, label="Alpha Instinct"),
                        gr.Number(value=0.2, label="Alpha Conscious"),
                        gr.Number(value=0.3, label="Alpha Meta")]
        beta_inputs = [gr.Number(value=0.0, label="Beta Reflex"),
                       gr.Number(value=0.5, label="Beta Instinct"),
                       gr.Number(value=1.0, label="Beta Conscious"),
                       gr.Number(value=1.5, label="Beta Meta")]
        thresholds = [gr.Number(value=10, label="Threshold Reflex"),
                      gr.Number(value=20, label="Threshold Instinct"),
                      gr.Number(value=30, label="Threshold Conscious"),
                      gr.Number(value=40, label="Threshold Meta")]
        steps_slider = gr.Slider(minimum=100, maximum=10000, step=100, value=5000, label="♾ Steps")
        run_button2 = gr.Button("Run Entanglement Simulation")
        ipurl_output = gr.Textbox(label="IPURL Entries", lines=10)

        # The lambda regroups the 13 flat inputs into the three list parameters + steps.
        run_button2.click(
            fn=lambda a1,a2,a3,a4,b1,b2,b3,b4,t1,t2,t3,t4,steps: run_entanglement_simulation(
                [a1,a2,a3,a4],[b1,b2,b3,b4],[t1,t2,t3,t4],steps),
            inputs=alpha_inputs+beta_inputs+thresholds+[steps_slider],
            outputs=ipurl_output
        )

# -----------------------------
# Launch
# -----------------------------
if __name__ == "__main__":
    iface.launch()
 
1
+ # app.py
2
+ # Unified Codex Artifact: Numba RFT Hardware-Scale Demo + PyTorch MOM Kernel + Lineage Hashing
3
+ # Author: Liam Grinstead — NexFrame AI, RFT, MOM, Codex
4
+ #
5
+ # This file combines:
6
+ # 1) A Numba-accelerated RFT simulation over hardware scales (Ψ_r, energy, ledger) with honest timing.
7
+ # 2) A PyTorch MOM collapse kernel with feedback, event histories, raster onset, and manifest sealing.
8
+ # 3) Gradio Blocks UI with two tabs, codex-ready summaries, and optional snapshot hashes.
9
+ #
10
+ # Notes:
11
+ # - GFLOPS in both paths are labeled as “estimated” unless you replace with exact op counts.
12
+ # - Timing excludes plotting and hashing.
13
+ # - Deterministic seeds are used for reproducibility.
14
+
15
+ import json
16
+ import hashlib
17
+ import tempfile
18
  import time
19
+
20
+ import numpy as np
21
+ import gradio as gr
22
+
23
+ # --- Numba (optional import with graceful fallback for CPU-only runs) ---
24
+ try:
25
+ import numba as nb
26
+ NUMBA_AVAILABLE = True
27
+ except Exception:
28
+ NUMBA_AVAILABLE = False
29
+
30
+ # --- PyTorch for MOM kernel ---
31
  import torch
32
  import matplotlib.pyplot as plt
 
 
 
 
33
 
34
+ # =========================================================================================
35
+ # Part A: RFT (Numba) — Hardware-scale Ψ_r simulation
36
+ # =========================================================================================
37
+
38
if NUMBA_AVAILABLE:
    # JIT-compiled hot path: parallel across hardware scales, sequential over steps.
    # fastmath=True relaxes strict IEEE ordering, acceptable for this synthetic model.
    # NOTE(review): kappa_nonlin and flops_base are accepted but never used — kept
    # for signature stability with the caller; confirm whether they should couple in.
    @nb.njit(parallel=True, fastmath=True)
    def rft_simulation_numba(
        hardware_scale_input, steps_input,
        Psi_r_init, tau_eff, delta_tau_pred, epsilon_c,
        phi_loop, eta_sync, omega_n, lambda_m,
        alpha_nonlin, beta_nonlin, gamma_nonlin, delta_nonlin, epsilon_nonlin, zeta_nonlin, kappa_nonlin, mu_nonlin, n_nonlin,
        ops_per_sec_base, flops_base
    ):
        # Full (scale, step) trajectories; column 0 holds the initial condition.
        num_scales = hardware_scale_input.shape[0]
        Psi_r = np.full((num_scales, steps_input), Psi_r_init, dtype=np.float64)
        energy = np.full((num_scales, steps_input), 100.0, dtype=np.float64)
        ledger_size = np.zeros((num_scales, steps_input), dtype=np.int64)

        prod = omega_n * lambda_m
        if prod <= 0.0:
            prod = 1e-12  # guard: log of a non-positive product would be NaN/-inf
        log_term = np.log(prod)

        # Each scale's trajectory is independent of the others, so prange is race-free.
        for scale_idx in nb.prange(num_scales):
            scale = hardware_scale_input[scale_idx]
            for step in range(1, steps_input):
                # Efficiency degrades linearly with scale, floored at 50%.
                scale_efficiency = 1 - 0.0002 * (scale - 1)
                if scale_efficiency < 0.5:
                    scale_efficiency = 0.5

                current_psi_r = Psi_r[scale_idx, step - 1]

                ops_per_sec = ops_per_sec_base * scale * scale_efficiency * (1 + 0.05 * current_psi_r)

                phase_entropy = current_psi_r * epsilon_c

                # Nonlinear term (unused in state update but computed for conceptual completeness)
                numerator = (current_psi_r ** alpha_nonlin) * (tau_eff ** beta_nonlin) * (phi_loop ** delta_nonlin) * (log_term ** zeta_nonlin)
                denominator = ((delta_tau_pred + epsilon_c) ** gamma_nonlin) * (eta_sync ** epsilon_nonlin) * (1 + mu_nonlin * (phase_entropy ** n_nonlin))
                _ = numerator / denominator  # kept as a placeholder for future coupling

                # Energy loss if the phase exceeds baseline
                energy_loss = 0.01 * (current_psi_r - Psi_r_init) if current_psi_r > Psi_r_init else 0.0
                energy_val = energy[scale_idx, step - 1] - energy_loss
                if energy_val < 0.0:
                    energy_val = 0.0
                energy[scale_idx, step] = energy_val

                # Ledger growth (synthetic, narratable)
                ledger_growth = int(scale * 5 + (step % 10))
                ledger_size[scale_idx, step] = ledger_size[scale_idx, step - 1] + ledger_growth

                # Phase update
                Psi_r[scale_idx, step] = current_psi_r + 0.0005 * energy_loss + 0.00001 * ops_per_sec / 1e6

        return Psi_r, energy, ledger_size
90
+
91
+
92
def run_rft_hardware_scale(
    num_scales_to_simulate: int,
    simulation_steps: int,
    psi_r_init_val: float,
    tau_eff_val: float,
    delta_tau_pred_val: float,
    epsilon_c_val: float,
    phi_loop_val: float,
    eta_sync_val: float,
    omega_n_val: float,
    lambda_m_val: float,
    alpha_exp: float,
    beta_exp: float,
    gamma_exp: float,
    delta_exp: float,
    epsilon_exp: float,
    zeta_exp: float,
    kappa_exp: float,
    mu_exp: float,
    n_exp: int,
    seed: int = 42,
    include_hash: bool = True
):
    """Run the hardware-scale RFT simulation and return (summary_text, plot_path).

    Uses the Numba kernel when available (with a small warmup so JIT compilation
    does not pollute the timing) and a pure-NumPy fallback otherwise. Optionally
    seals the run parameters and key results into a SHA-512 "IPURL" manifest hash.

    Fixes over the previous version:
    - tempfile.mkstemp leaked the OS-level file descriptor; NamedTemporaryFile
      closes it (delete=False keeps the PNG for the UI).
    - The long kernel argument list was duplicated verbatim between the warmup
      call and the timed call; it is now built once.
    """
    np.random.seed(seed)
    hardware_scale = np.linspace(1, 1000, num_scales_to_simulate)
    ops_per_sec_base = 1e6
    flops_base = 2e6

    # Shared kernel parameters, built once for both warmup and the timed run.
    kernel_kwargs = dict(
        Psi_r_init=psi_r_init_val,
        tau_eff=tau_eff_val,
        delta_tau_pred=delta_tau_pred_val,
        epsilon_c=epsilon_c_val,
        phi_loop=phi_loop_val,
        eta_sync=eta_sync_val,
        omega_n=omega_n_val,
        lambda_m=lambda_m_val,
        alpha_nonlin=alpha_exp,
        beta_nonlin=beta_exp,
        gamma_nonlin=gamma_exp,
        delta_nonlin=delta_exp,
        epsilon_nonlin=epsilon_exp,
        zeta_nonlin=zeta_exp,
        kappa_nonlin=kappa_exp,
        mu_nonlin=mu_exp,
        n_nonlin=n_exp,
        ops_per_sec_base=ops_per_sec_base,
        flops_base=flops_base,
    )

    # Optional warmup to avoid JIT timing skew
    if NUMBA_AVAILABLE:
        _ = rft_simulation_numba(
            hardware_scale_input=hardware_scale[:2],
            steps_input=5,
            **kernel_kwargs
        )

    start = time.perf_counter()
    if NUMBA_AVAILABLE:
        Psi_r_res, energy_res, ledger_res = rft_simulation_numba(
            hardware_scale_input=hardware_scale,
            steps_input=simulation_steps,
            **kernel_kwargs
        )
    else:
        # Fallback pure NumPy implementation (slower, but keeps app functional)
        num_scales = hardware_scale.shape[0]
        Psi_r_res = np.full((num_scales, simulation_steps), psi_r_init_val, dtype=np.float64)
        energy_res = np.full((num_scales, simulation_steps), 100.0, dtype=np.float64)
        ledger_res = np.zeros((num_scales, simulation_steps), dtype=np.int64)
        prod = omega_n_val * lambda_m_val
        if prod <= 0.0:
            prod = 1e-12  # guard against log() of a non-positive product
        log_term = np.log(prod)
        for scale_idx in range(num_scales):
            scale = hardware_scale[scale_idx]
            for step in range(1, simulation_steps):
                # Efficiency degrades linearly with scale, floored at 50%.
                scale_eff = 1 - 0.0002 * (scale - 1)
                if scale_eff < 0.5:
                    scale_eff = 0.5
                current_psi = Psi_r_res[scale_idx, step - 1]
                ops_per_sec = ops_per_sec_base * scale * scale_eff * (1 + 0.05 * current_psi)
                phase_entropy = current_psi * epsilon_c_val
                # Nonlinear term mirrors the Numba kernel; result intentionally unused.
                numerator = (current_psi ** alpha_exp) * (tau_eff_val ** beta_exp) * (phi_loop_val ** delta_exp) * (log_term ** zeta_exp)
                denominator = ((delta_tau_pred_val + epsilon_c_val) ** gamma_exp) * (eta_sync_val ** epsilon_exp) * (1 + mu_exp * (phase_entropy ** n_exp))
                _ = numerator / denominator
                energy_loss = 0.01 * (current_psi - psi_r_init_val) if current_psi > psi_r_init_val else 0.0
                energy_val = energy_res[scale_idx, step - 1] - energy_loss
                if energy_val < 0.0:
                    energy_val = 0.0
                energy_res[scale_idx, step] = energy_val
                ledger_growth = int(scale * 5 + (step % 10))
                ledger_res[scale_idx, step] = ledger_res[scale_idx, step - 1] + ledger_growth
                Psi_r_res[scale_idx, step] = current_psi + 0.0005 * energy_loss + 0.00001 * ops_per_sec / 1e6

    elapsed = max(time.perf_counter() - start, 1e-9)

    # Final metrics at max scale
    max_scale_idx = num_scales_to_simulate - 1
    final_step = simulation_steps - 1
    psi_r_final = float(Psi_r_res[max_scale_idx, final_step])
    energy_final = float(energy_res[max_scale_idx, final_step])
    ledger_final = int(ledger_res[max_scale_idx, final_step])

    scale = hardware_scale[max_scale_idx]
    scale_efficiency = max(1 - 0.0002 * (scale - 1), 0.5)
    ops_per_sec_final = ops_per_sec_base * scale * scale_efficiency * (1 + 0.05 * psi_r_final)
    flops_final = flops_base * scale * scale_efficiency * (1 + 0.05 * psi_r_final)
    total_estimated_flops = flops_final * simulation_steps * num_scales_to_simulate
    avg_gflops_per_sec = total_estimated_flops / (elapsed * 1e9)

    # Plot (excluded from the timed section above)
    fig, ax = plt.subplots(figsize=(7, 4))
    ax.plot(hardware_scale, Psi_r_res[:, -1], color="#3b82f6")
    ax.set_title("Final Ψ_r vs Hardware Scale")
    ax.set_xlabel("Hardware Scale (SPUs)")
    ax.set_ylabel("Final Ψ_r")
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    # NamedTemporaryFile closes the fd it opens (mkstemp did not);
    # delete=False keeps the PNG on disk for Gradio to serve.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        plot_path = tmp.name
    plt.savefig(plot_path)
    plt.close(fig)

    # Manifest + hash
    run_ipurl = None
    if include_hash:
        manifest = {
            "num_scales": int(num_scales_to_simulate),
            "steps": int(simulation_steps),
            "Psi_r_init": float(psi_r_init_val),
            "tau_eff": float(tau_eff_val),
            "delta_tau_pred": float(delta_tau_pred_val),
            "epsilon_c": float(epsilon_c_val),
            "phi_loop": float(phi_loop_val),
            "eta_sync": float(eta_sync_val),
            "omega_n": float(omega_n_val),
            "lambda_m": float(lambda_m_val),
            "alpha": float(alpha_exp),
            "beta": float(beta_exp),
            "gamma": float(gamma_exp),
            "delta": float(delta_exp),
            "epsilon": float(epsilon_exp),
            "zeta": float(zeta_exp),
            "kappa": float(kappa_exp),
            "mu": float(mu_exp),
            "n": int(n_exp),
            "seed": int(seed),
            "elapsed_seconds": float(elapsed),
            "avg_gflops_per_sec_est": float(avg_gflops_per_sec),
            "psi_r_final": float(psi_r_final),
            "energy_final": float(energy_final),
            "ledger_final": int(ledger_final),
            "psi_r_head": [float(x) for x in Psi_r_res[max_scale_idx, :10]]
        }
        serialized = json.dumps(manifest, sort_keys=True, separators=(",", ":")).encode("utf-8")
        run_ipurl = f"rft-numba:v1:{hashlib.sha512(serialized).hexdigest()}"

    summary = (
        f"RFT Hardware-Scale Simulation\n"
        f"- Simulation Time: {elapsed:.6f} s\n"
        f"- Max Scale: {hardware_scale[max_scale_idx]:.1f} SPUs\n"
        f"- Final Ψ_r: {psi_r_final:.6f}\n"
        f"- Final Energy (%): {energy_final:.6f}\n"
        f"- Final Ledger Size: {ledger_final}\n"
        f"- Estimated Peak Ops/sec: {ops_per_sec_final:.2e}\n"
        f"- Estimated Peak FLOPS: {flops_final:.2e}\n"
        f"- Naive Average GFLOPS/sec: {avg_gflops_per_sec:.2f}\n"
        + (f"- Run IPURL: {run_ipurl}\n" if run_ipurl else "")
    )

    return summary, plot_path
278
+
279
+
280
+ # =========================================================================================
281
+ # Part B: PyTorch MOM kernel — Collapse dynamics + histories + raster onset
282
+ # =========================================================================================
283
+
284
  def fused_mom_update_cpu(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
285
  dt, eps, sigma_const, theta_global, k_shred_global,
286
  event_counts_t=None, event_buffer_t=None):
287
+ # Types
288
  m_root_t = m_root_t.to(torch.float32)
289
  A_t = A_t.to(torch.float32)
290
  Q_t = Q_t.to(torch.float32)
 
292
  gamma_t = gamma_t.to(torch.float32)
293
  omega_t = omega_t.to(torch.float32)
294
 
295
+ # Expand
296
  alpha_exp = alpha_t.unsqueeze(0)
297
  gamma_exp = gamma_t.unsqueeze(0)
298
  omega_exp = omega_t.unsqueeze(0)
299
  m_root_exp = m_root_t.unsqueeze(1)
300
 
301
+ # Dynamics
302
  A_dot = alpha_exp * m_root_exp - gamma_exp * A_t + sigma_const * Q_t
303
  f_drive = sigma_const * m_root_exp * omega_exp * A_t
304
  Q_dot = f_drive - Q_t
 
306
  A_t.add_(dt * A_dot)
307
  Q_t.add_(dt * Q_dot)
308
 
309
+ # Shred trigger
310
  Xi = (omega_exp * A_t).sum(dim=1)
311
  Xi_norm = Xi / (m_root_t + eps)
312
  shred_mask = Xi_norm >= theta_global
 
331
  else:
332
  event_counts_t += shred_count
333
 
334
+ # Optional: write indices into event buffer (pack iteration externally)
335
+ if event_buffer_t is not None and isinstance(event_buffer_t, torch.Tensor):
336
+ # This is a simple increment-only counter; you can replace with actual raster indexing scheme.
337
+ pass
338
+
339
  return m_root_t, A_t, Q_t, event_counts_t
340
 
341
+
342
  class MOMKernel:
343
  def __init__(self):
344
  self.kernel = fused_mom_update_cpu
 
351
  dt, eps, sigma_const, theta_global, k_shred_global,
352
  event_counts_t, event_buffer_t)
353
 
354
+
355
  class MOMSystemLoop:
356
    def __init__(self, mom_kernel, m_root_initial, A_modes_initial, Q_drive_initial,
                 alpha, gamma, omega, dt=0.02, eps=1e-6, sigma=0.75,
                 theta=2.2, k_shred=1.2, event_buffer_size=1024, rng_seed=42):
        """Hold the MOM kernel plus all simulation state on the kernel's device.

        Initial state tensors are cloned so the kernel's in-place updates never
        mutate the caller's tensors; a private torch.Generator seeded with
        `rng_seed` makes the feedback noise reproducible.
        """
        self.mom_kernel = mom_kernel
        self.device = mom_kernel.device

        # State
        # Per-cell / per-(cell, mode) tensors, cloned and cast to float32.
        self.m_root = m_root_initial.to(self.device).clone().to(torch.float32)
        self.A_modes = A_modes_initial.to(self.device).clone().to(torch.float32)
        self.Q_drive = Q_drive_initial.to(self.device).clone().to(torch.float32)
        # Per-mode coefficients (not cloned; assumed read-only in the kernel — confirm).
        self.alpha = alpha.to(self.device).to(torch.float32)
        self.gamma = gamma.to(self.device).to(torch.float32)
        self.omega = omega.to(self.device).to(torch.float32)

        # Params
        self.dt = dt; self.eps = eps; self.sigma = sigma
        self.theta = theta; self.k_shred = k_shred

        # Events
        # Running scalar count plus a fixed-size int buffer handed to the kernel.
        self.event_counts = torch.zeros((), dtype=torch.int64, device=self.device)
        self.event_buffer = torch.zeros(event_buffer_size, dtype=torch.int64, device=self.device)

        # Histories
        # Appended once per run() iteration; shred_onset holds the first
        # iteration at which each cell collapsed (-1 = never so far).
        self.m_root_history = []
        self.A_modes_history = []
        self.event_counts_history = []
        self.shred_onset = np.full((self.m_root.shape[0],), -1, dtype=np.int32)

        # RNG for deterministic noise
        self.gen = torch.Generator(device=self.device)
        self.gen.manual_seed(int(rng_seed))
387
+
388
  def feedback(self, m_root, A_modes, Q_drive):
389
+ decay = 0.995
390
+ noise_level = 1e-4
391
+ A_modes_new = A_modes * decay + noise_level * torch.randn_like(A_modes, generator=self.gen, device=self.device)
392
  A_modes_new = torch.clamp(A_modes_new, min=0.0)
393
+ m_root_new = m_root * decay + noise_level * torch.randn_like(m_root, generator=self.gen, device=self.device)
394
  m_root_new = torch.clamp(m_root_new, min=0.0)
395
  return m_root_new, A_modes_new, Q_drive
396
 
 
401
  self.alpha, self.gamma, self.omega,
402
  self.dt, self.eps, self.sigma, self.theta, self.k_shred,
403
  self.event_counts, self.event_buffer)
404
+
405
  m_np = self.m_root.detach().cpu().numpy()
406
  collapsed_mask = m_np <= 1e-8
407
  for idx, collapsed in enumerate(collapsed_mask):
408
  if collapsed and self.shred_onset[idx] == -1:
409
  self.shred_onset[idx] = i
410
+
411
  self.m_root, self.A_modes, self.Q_drive = self.feedback(self.m_root, self.A_modes, self.Q_drive)
412
  self.m_root_history.append(float(self.m_root.mean().item()))
413
  self.A_modes_history.append(float(self.A_modes.mean().item()))
414
  self.event_counts_history.append(int(self.event_counts.item()))
415
 
416
+
417
def run_mom_simulation(
    Ncells, Nmode, iterations, dt=0.02, eps=1e-6, sigma=0.75,
    theta=2.2, k_shred=1.2, seed=42, include_hash=True
):
    """Run the MOM collapse-kernel simulation and return (summary_text, plot_path).

    Parameters
    ----------
    Ncells : int
        Number of simulated cells (rows of the state tensors).
    Nmode : int
        Number of oscillation modes per cell.
    iterations : int
        Number of timed simulation steps (one extra warmup step runs first).
    dt, eps, sigma, theta, k_shred : float
        Integrator step and kernel coefficients, forwarded to MOMSystemLoop.
    seed : int
        Seed for the torch / numpy RNGs and the loop's internal generator.
    include_hash : bool
        When True, append a SHA-512 lineage hash ("IPURL") of the run
        manifest to the summary text.

    Returns
    -------
    tuple[str, str]
        A human-readable summary and the path to a PNG with four diagnostic
        plots (mean m_root, mean A_modes, cumulative events, shredding onsets).
    """
    # Seed both frameworks so repeated runs with identical arguments match.
    torch.manual_seed(seed)
    np.random.seed(seed)

    mom_kernel_instance = MOMKernel()
    device = mom_kernel_instance.device

    # Random per-mode growth/damping rates plus a linear frequency ramp.
    alpha = torch.empty(Nmode, device=device).uniform_(0.02, 0.12)
    gamma = torch.empty(Nmode, device=device).uniform_(0.01, 0.06)
    omega = torch.linspace(1.0, 8.0, Nmode, device=device)
    m_root_initial = torch.ones(Ncells, device=device)
    A_modes_initial = torch.rand(Ncells, Nmode, device=device) * 0.01
    Q_drive_initial = torch.zeros(Ncells, Nmode, device=device)

    mom_system = MOMSystemLoop(
        mom_kernel_instance, m_root_initial, A_modes_initial, Q_drive_initial,
        alpha, gamma, omega, dt=dt, eps=eps, sigma=sigma,
        theta=theta, k_shred=k_shred, rng_seed=seed
    )

    # Warmup (excluded from timing).
    # NOTE(review): the warmup step still appends to the history buffers, so
    # the plots below include one untimed iteration — confirm this is intended.
    mom_system.run(1)

    start_time = time.perf_counter()
    mom_system.run(iterations)
    # Clamp so the GFLOPS division below can never hit zero.
    elapsed_time = max(time.perf_counter() - start_time, 1e-9)

    # Estimated FLOPs (placeholder estimate)
    ops_per_cell_per_iter = 12 * Nmode + 13
    flops_per_iteration = float(Ncells) * float(ops_per_cell_per_iter)
    total_flops = flops_per_iteration * float(iterations)
    gflops = total_flops / (elapsed_time * 1e9)

    # Build plots (excluded from elapsed_time)
    fig = plt.figure(figsize=(10, 14))
    ax1 = fig.add_subplot(4, 1, 1)
    ax1.plot(mom_system.m_root_history, label='Mean m_root')
    ax1.set_title('Mean m_root Over Iterations'); ax1.set_xlabel('Iteration'); ax1.set_ylabel('Mean m_root')
    ax1.grid(True); ax1.legend()

    ax2 = fig.add_subplot(4, 1, 2)
    ax2.plot(mom_system.A_modes_history, label='Mean A_modes', color='orange')
    ax2.set_title('Mean A_modes Over Iterations')
    ax2.set_xlabel('Iteration'); ax2.set_ylabel('Mean A_modes')
    ax2.grid(True); ax2.legend()

    ax3 = fig.add_subplot(4, 1, 3)
    # NOTE(review): event_counts_history is appended from a counter tensor each
    # iteration and may already be cumulative; cumsum of a cumulative series
    # would over-count. Verify against MOMSystemLoop before trusting this plot.
    cumulative_events = np.cumsum(np.array(mom_system.event_counts_history))
    ax3.plot(cumulative_events, label='Cumulative Shredding Events', color='red')
    ax3.set_title('Cumulative Shredding Events')
    ax3.set_xlabel('Iteration'); ax3.set_ylabel('Cumulative Events')
    ax3.grid(True); ax3.legend()

    ax4 = fig.add_subplot(4, 1, 4)
    onset = mom_system.shred_onset
    for idx, val in enumerate(onset):
        if val >= 0:  # -1 marks "this cell never collapsed"
            ax4.vlines(val, idx, idx + 1, color='black', linewidth=0.8)
    ax4.set_title('Shredding Onset per Cell')
    ax4.set_xlabel('Iteration'); ax4.set_ylabel('Cell Index')
    ax4.grid(True)

    plt.tight_layout()
    # FIX: tempfile.mkstemp() returns an *open* OS-level file descriptor; the
    # previous `_, plot_path = tempfile.mkstemp(...)` leaked one fd per run.
    # NamedTemporaryFile(delete=False) yields the same persistent path and
    # lets us close the handle before matplotlib writes to the path.
    tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    plot_path = tmp.name
    tmp.close()
    plt.savefig(plot_path)
    plt.close(fig)

    # Manifest + hash: canonical JSON (sorted keys, no whitespace) is hashed
    # so identical runs always produce the identical IPURL.
    run_ipurl = None
    if include_hash:
        manifest = {
            "Ncells": int(Ncells), "Nmode": int(Nmode), "iterations": int(iterations),
            "dt": float(dt), "eps": float(eps), "sigma": float(sigma),
            "theta": float(theta), "k_shred": float(k_shred), "seed": int(seed),
            "elapsed_time_seconds": float(elapsed_time),
            "gflops_estimated": float(gflops),
            "m_root_head": [float(x) for x in mom_system.m_root_history[:10]],
            "A_modes_head": [float(x) for x in mom_system.A_modes_history[:10]],
            "event_counts_head": [int(x) for x in mom_system.event_counts_history[:10]],
        }
        serialized = json.dumps(manifest, sort_keys=True, separators=(",", ":")).encode("utf-8")
        run_ipurl = f"mom-kernel:v1:{hashlib.sha512(serialized).hexdigest()}"

    summary_output = (
        f"MOM Kernel Simulation\n"
        f"- Simulation Time: {elapsed_time:.6f} s\n"
        f"- Estimated GFLOPS (per fused step): {gflops:.4f}\n"
        f"- Final Mean m_root: {float(torch.mean(mom_system.m_root).item()):.6f}\n"
        f"- Final Mean A_modes: {float(torch.mean(mom_system.A_modes).item()):.6f}\n"
        f"- Total Events (last iter): {mom_system.event_counts_history[-1] if len(mom_system.event_counts_history) > 0 else 0}\n"
        + (f"- Run IPURL: {run_ipurl}\n" if run_ipurl else "")
    )

    return summary_output, plot_path
514
 
515
+
516
# =========================================================================================
# Part C: Gradio UI — Two tabs for unified artifact
# =========================================================================================

# Top-level Blocks context: builds the whole app at import time and binds it
# to `demo`, which the __main__ guard below launches.
with gr.Blocks(title="NexFrame RFT + MOM Unified Artifact") as demo:
    gr.Markdown("# NexFrame Codex: RFT Hardware Scaling + MOM Collapse Kernel")
    gr.Markdown("This artifact combines a Numba-accelerated RFT simulation across hardware scales with a PyTorch MOM kernel for collapse dynamics. Each run can be sealed with a lineage hash (IPURL).")

    # --- Tab 1: RFT hardware-scaling simulation (backed by run_rft_hardware_scale) ---
    with gr.Tab("RFT Hardware Scaling"):
        gr.Markdown("### RFT Simulation (Numba/NumPy)\nAdjust parameters to explore Ψ_r, energy, and ledger dynamics across hardware scales.")
        # Run-size / reproducibility controls.
        with gr.Row():
            num_scales = gr.Slider(5, 100, step=5, value=50, label="Number of Hardware Scales")
            steps = gr.Slider(100, 5000, step=100, value=2000, label="Simulation Steps per Scale")
            seed_rft = gr.Number(value=42, label="Seed", precision=0)
            include_hash_rft = gr.Checkbox(value=True, label="Seal run with hash (IPURL)")
        # Primary model coefficients.
        with gr.Row():
            psi_r_init = gr.Slider(0.1, 2.0, step=0.1, value=1.0, label="Psi_r_init")
            tau_eff = gr.Slider(0.01, 1.0, step=0.01, value=0.05, label="tau_eff")
            delta_tau_pred = gr.Slider(0.001, 0.1, step=0.001, value=0.01, label="delta_tau_pred")
            epsilon_c = gr.Slider(0.001, 0.1, step=0.001, value=0.005, label="epsilon_c")
        with gr.Row():
            phi_loop = gr.Slider(0.1, 2.0, step=0.1, value=1.0, label="phi_loop")
            eta_sync = gr.Slider(0.001, 0.1, step=0.001, value=0.01, label="eta_sync")
            omega_n = gr.Slider(10, 100, step=1, value=50, label="omega_n")
            lambda_m = gr.Slider(10, 200, step=10, value=100, label="lambda_m")
        gr.Markdown("#### Nonlinear exponents")
        # NOTE: these module-level names (alpha, gamma, ...) are Gradio
        # components, distinct from the same-named tensors inside the
        # simulation functions, which are local to those functions.
        with gr.Row():
            alpha = gr.Slider(0.5, 3.0, step=0.1, value=1.1, label="alpha")
            beta = gr.Slider(0.5, 3.0, step=0.1, value=2.0, label="beta")
            gamma = gr.Slider(0.5, 3.0, step=0.1, value=1.2, label="gamma")
            delta = gr.Slider(0.5, 3.0, step=0.1, value=1.0, label="delta")
        with gr.Row():
            epsilon = gr.Slider(0.5, 3.0, step=0.1, value=1.0, label="epsilon")
            zeta = gr.Slider(0.5, 3.0, step=0.1, value=1.0, label="zeta")
            kappa = gr.Slider(1.0, 10.0, step=0.1, value=5.0, label="kappa")
            mu = gr.Slider(1.0, 20.0, step=1.0, value=10.0, label="mu")
            n = gr.Slider(1, 5, step=1, value=2, label="n")

        rft_run = gr.Button("Run RFT Simulation")
        rft_summary = gr.Markdown(label="RFT Summary")
        rft_plot = gr.Image(label="Final Ψ_r across Hardware Scales", type="filepath")

        # Adapter: coerces raw widget values to the types the backend expects
        # before delegating to run_rft_hardware_scale.
        def _rft_ui(num_scales_to_simulate, simulation_steps, psi_r_init_val, tau_eff_val,
                    delta_tau_pred_val, epsilon_c_val, phi_loop_val, eta_sync_val,
                    omega_n_val, lambda_m_val, alpha_exp, beta_exp, gamma_exp,
                    delta_exp, epsilon_exp, zeta_exp, kappa_exp, mu_exp, n_exp,
                    seed, include_hash):
            return run_rft_hardware_scale(
                num_scales_to_simulate=int(num_scales_to_simulate),
                simulation_steps=int(simulation_steps),
                psi_r_init_val=float(psi_r_init_val),
                tau_eff_val=float(tau_eff_val),
                delta_tau_pred_val=float(delta_tau_pred_val),
                epsilon_c_val=float(epsilon_c_val),
                phi_loop_val=float(phi_loop_val),
                eta_sync_val=float(eta_sync_val),
                omega_n_val=float(omega_n_val),
                lambda_m_val=float(lambda_m_val),
                alpha_exp=float(alpha_exp),
                beta_exp=float(beta_exp),
                gamma_exp=float(gamma_exp),
                delta_exp=float(delta_exp),
                epsilon_exp=float(epsilon_exp),
                zeta_exp=float(zeta_exp),
                kappa_exp=float(kappa_exp),
                mu_exp=float(mu_exp),
                n_exp=int(n_exp),
                seed=int(seed),
                include_hash=bool(include_hash)
            )

        # Input order here must match the parameter order of _rft_ui exactly.
        rft_run.click(
            _rft_ui,
            inputs=[
                num_scales, steps, psi_r_init, tau_eff, delta_tau_pred, epsilon_c,
                phi_loop, eta_sync, omega_n, lambda_m, alpha, beta, gamma, delta,
                epsilon, zeta, kappa, mu, n, seed_rft, include_hash_rft
            ],
            outputs=[rft_summary, rft_plot]
        )

    # --- Tab 2: MOM collapse kernel (backed by run_mom_simulation) ---
    with gr.Tab("MOM Collapse Kernel"):
        gr.Markdown("### MOM Kernel (PyTorch)\nSimulate collapse dynamics with shredding onset and event histories.")
        with gr.Row():
            Ncells = gr.Slider(8, 4096, step=8, value=256, label="Cells")
            Nmode = gr.Slider(4, 512, step=4, value=64, label="Modes per Cell")
            iterations = gr.Slider(10, 5000, step=10, value=500, label="Iterations")
            seed_mom = gr.Number(value=42, label="Seed", precision=0)
            include_hash_mom = gr.Checkbox(value=True, label="Seal run with hash (IPURL)")
        with gr.Row():
            dt = gr.Slider(0.001, 0.1, step=0.001, value=0.02, label="dt")
            eps = gr.Slider(1e-8, 1e-4, step=1e-8, value=1e-6, label="eps")
            sigma = gr.Slider(0.1, 1.5, step=0.01, value=0.75, label="sigma")
            theta = gr.Slider(0.5, 5.0, step=0.1, value=2.2, label="theta")
            k_shred = gr.Slider(0.1, 5.0, step=0.1, value=1.2, label="k_shred")

        mom_run = gr.Button("Run MOM Simulation")
        mom_summary = gr.Markdown(label="MOM Summary")
        mom_plot = gr.Image(label="MOM Plots", type="filepath")

        # Adapter: type-coerces widget values, then delegates to run_mom_simulation.
        def _mom_ui(Ncells_val, Nmode_val, iterations_val, dt_val, eps_val, sigma_val, theta_val, k_shred_val, seed_val, include_hash_val):
            return run_mom_simulation(
                Ncells=int(Ncells_val),
                Nmode=int(Nmode_val),
                iterations=int(iterations_val),
                dt=float(dt_val),
                eps=float(eps_val),
                sigma=float(sigma_val),
                theta=float(theta_val),
                k_shred=float(k_shred_val),
                seed=int(seed_val),
                include_hash=bool(include_hash_val)
            )

        # Input order here must match the parameter order of _mom_ui exactly.
        mom_run.click(
            _mom_ui,
            inputs=[Ncells, Nmode, iterations, dt, eps, sigma, theta, k_shred, seed_mom, include_hash_mom],
            outputs=[mom_summary, mom_plot]
        )
635
 
 
 
 
636
# Launch the Gradio server only when this file is executed directly
# (an importing host, e.g. Spaces tooling, can launch `demo` itself).
if __name__ == "__main__":
    demo.launch()