# Source: Hugging Face Space "RFTSystems" — commit 25c59a3 (web-page header lines removed; they were not valid Python)
# app.py
# Unified Codex Artifact: Numba RFT Hardware-Scale Demo + PyTorch MOM Kernel + Lineage Hashing
# Author: Liam Grinstead — NexFrame AI, RFT, MOM, Codex
#
# This file combines:
# 1) A Numba-accelerated RFT simulation over hardware scales (Ψ_r, energy, ledger) with honest timing.
# 2) A PyTorch MOM collapse kernel with feedback, event histories, raster onset, and manifest sealing.
# 3) Gradio Blocks UI with two tabs, codex-ready summaries, and optional snapshot hashes.
#
# Notes:
# - GFLOPS in both paths are labeled as “estimated” unless you replace with exact op counts.
# - Timing excludes plotting and hashing.
# - Deterministic seeds are used for reproducibility.
import hashlib
import json
import os
import tempfile
import time

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch

# --- Numba (optional import with graceful fallback for CPU-only runs) ---
try:
    import numba as nb
    NUMBA_AVAILABLE = True
except Exception:
    NUMBA_AVAILABLE = False
# =========================================================================================
# Part A: RFT (Numba) — Hardware-scale Ψ_r simulation
# =========================================================================================
if NUMBA_AVAILABLE:
    # Numba-compiled core: simulates Psi_r phase, energy, and ledger growth for
    # every hardware scale over `steps_input` time steps. Scales run in parallel
    # via prange; each scale's time loop is sequential (step t depends on t-1).
    # NOTE(review): kappa_nonlin and flops_base are accepted but never read in
    # this kernel — flops_base is only used by the caller for FLOPS estimates;
    # confirm whether kappa_nonlin was meant to enter the nonlinear term.
    @nb.njit(parallel=True, fastmath=True)
    def rft_simulation_numba(
        hardware_scale_input, steps_input,
        Psi_r_init, tau_eff, delta_tau_pred, epsilon_c,
        phi_loop, eta_sync, omega_n, lambda_m,
        alpha_nonlin, beta_nonlin, gamma_nonlin, delta_nonlin, epsilon_nonlin, zeta_nonlin, kappa_nonlin, mu_nonlin, n_nonlin,
        ops_per_sec_base, flops_base
    ):
        num_scales = hardware_scale_input.shape[0]
        # Row = one hardware scale, column = one time step.
        Psi_r = np.full((num_scales, steps_input), Psi_r_init, dtype=np.float64)
        energy = np.full((num_scales, steps_input), 100.0, dtype=np.float64)  # energy as a percentage, starts at 100
        ledger_size = np.zeros((num_scales, steps_input), dtype=np.int64)
        prod = omega_n * lambda_m
        if prod <= 0.0:
            prod = 1e-12  # guard log() against a non-positive argument
        log_term = np.log(prod)
        for scale_idx in nb.prange(num_scales):
            scale = hardware_scale_input[scale_idx]
            for step in range(1, steps_input):
                # Efficiency degrades linearly with scale and is floored at 0.5.
                scale_efficiency = 1 - 0.0002 * (scale - 1)
                if scale_efficiency < 0.5:
                    scale_efficiency = 0.5
                current_psi_r = Psi_r[scale_idx, step - 1]
                # Throughput grows with scale, efficiency, and the current phase.
                ops_per_sec = ops_per_sec_base * scale * scale_efficiency * (1 + 0.05 * current_psi_r)
                phase_entropy = current_psi_r * epsilon_c
                # Nonlinear term (unused in state update but computed for conceptual completeness)
                numerator = (current_psi_r ** alpha_nonlin) * (tau_eff ** beta_nonlin) * (phi_loop ** delta_nonlin) * (log_term ** zeta_nonlin)
                denominator = ((delta_tau_pred + epsilon_c) ** gamma_nonlin) * (eta_sync ** epsilon_nonlin) * (1 + mu_nonlin * (phase_entropy ** n_nonlin))
                _ = numerator / denominator  # kept as a placeholder for future coupling
                # Energy loss if the phase exceeds baseline
                energy_loss = 0.01 * (current_psi_r - Psi_r_init) if current_psi_r > Psi_r_init else 0.0
                energy_val = energy[scale_idx, step - 1] - energy_loss
                if energy_val < 0.0:
                    energy_val = 0.0  # energy cannot go negative
                energy[scale_idx, step] = energy_val
                # Ledger growth (synthetic, narratable)
                ledger_growth = int(scale * 5 + (step % 10))
                ledger_size[scale_idx, step] = ledger_size[scale_idx, step - 1] + ledger_growth
                # Phase update: small positive drift from energy loss and throughput
                Psi_r[scale_idx, step] = current_psi_r + 0.0005 * energy_loss + 0.00001 * ops_per_sec / 1e6
        return Psi_r, energy, ledger_size
def run_rft_hardware_scale(
    num_scales_to_simulate: int,
    simulation_steps: int,
    psi_r_init_val: float,
    tau_eff_val: float,
    delta_tau_pred_val: float,
    epsilon_c_val: float,
    phi_loop_val: float,
    eta_sync_val: float,
    omega_n_val: float,
    lambda_m_val: float,
    alpha_exp: float,
    beta_exp: float,
    gamma_exp: float,
    delta_exp: float,
    epsilon_exp: float,
    zeta_exp: float,
    kappa_exp: float,
    mu_exp: float,
    n_exp: int,
    seed: int = 42,
    include_hash: bool = True
):
    """Run the RFT hardware-scale simulation and return (summary, plot_path).

    Uses the Numba kernel when available (preceded by a tiny warmup call so JIT
    compilation does not skew the timed run), otherwise falls back to a pure
    Python/NumPy loop with identical dynamics. Timing covers only the
    simulation itself; plotting and hashing are excluded.

    Returns:
        summary (str): markdown-ready run summary, optionally including an
            IPURL lineage hash sealed over the run manifest.
        plot_path (str): path to a PNG of final Psi_r vs hardware scale.
    """
    np.random.seed(seed)  # determinism for any future stochastic extensions
    hardware_scale = np.linspace(1, 1000, num_scales_to_simulate)
    ops_per_sec_base = 1e6
    flops_base = 2e6
    # Optional warmup to avoid JIT compilation time leaking into the measurement.
    if NUMBA_AVAILABLE:
        _ = rft_simulation_numba(
            hardware_scale_input=hardware_scale[:2],
            steps_input=5,
            Psi_r_init=psi_r_init_val,
            tau_eff=tau_eff_val,
            delta_tau_pred=delta_tau_pred_val,
            epsilon_c=epsilon_c_val,
            phi_loop=phi_loop_val,
            eta_sync=eta_sync_val,
            omega_n=omega_n_val,
            lambda_m=lambda_m_val,
            alpha_nonlin=alpha_exp,
            beta_nonlin=beta_exp,
            gamma_nonlin=gamma_exp,
            delta_nonlin=delta_exp,
            epsilon_nonlin=epsilon_exp,
            zeta_nonlin=zeta_exp,
            kappa_nonlin=kappa_exp,
            mu_nonlin=mu_exp,
            n_nonlin=n_exp,
            ops_per_sec_base=ops_per_sec_base,
            flops_base=flops_base
        )
    start = time.perf_counter()
    if NUMBA_AVAILABLE:
        Psi_r_res, energy_res, ledger_res = rft_simulation_numba(
            hardware_scale_input=hardware_scale,
            steps_input=simulation_steps,
            Psi_r_init=psi_r_init_val,
            tau_eff=tau_eff_val,
            delta_tau_pred=delta_tau_pred_val,
            epsilon_c=epsilon_c_val,
            phi_loop=phi_loop_val,
            eta_sync=eta_sync_val,
            omega_n=omega_n_val,
            lambda_m=lambda_m_val,
            alpha_nonlin=alpha_exp,
            beta_nonlin=beta_exp,
            gamma_nonlin=gamma_exp,
            delta_nonlin=delta_exp,
            epsilon_nonlin=epsilon_exp,
            zeta_nonlin=zeta_exp,
            kappa_nonlin=kappa_exp,
            mu_nonlin=mu_exp,
            n_nonlin=n_exp,
            ops_per_sec_base=ops_per_sec_base,
            flops_base=flops_base
        )
    else:
        # Fallback pure NumPy implementation (slower, but keeps app functional).
        # The dynamics mirror rft_simulation_numba line-for-line.
        num_scales = hardware_scale.shape[0]
        Psi_r_res = np.full((num_scales, simulation_steps), psi_r_init_val, dtype=np.float64)
        energy_res = np.full((num_scales, simulation_steps), 100.0, dtype=np.float64)
        ledger_res = np.zeros((num_scales, simulation_steps), dtype=np.int64)
        prod = omega_n_val * lambda_m_val
        if prod <= 0.0:
            prod = 1e-12  # guard log() against a non-positive argument
        log_term = np.log(prod)
        for scale_idx in range(num_scales):
            scale = hardware_scale[scale_idx]
            for step in range(1, simulation_steps):
                scale_eff = 1 - 0.0002 * (scale - 1)
                if scale_eff < 0.5:
                    scale_eff = 0.5
                current_psi = Psi_r_res[scale_idx, step - 1]
                ops_per_sec = ops_per_sec_base * scale * scale_eff * (1 + 0.05 * current_psi)
                phase_entropy = current_psi * epsilon_c_val
                # Nonlinear term computed but not coupled into the state update (placeholder).
                numerator = (current_psi ** alpha_exp) * (tau_eff_val ** beta_exp) * (phi_loop_val ** delta_exp) * (log_term ** zeta_exp)
                denominator = ((delta_tau_pred_val + epsilon_c_val) ** gamma_exp) * (eta_sync_val ** epsilon_exp) * (1 + mu_exp * (phase_entropy ** n_exp))
                _ = numerator / denominator
                energy_loss = 0.01 * (current_psi - psi_r_init_val) if current_psi > psi_r_init_val else 0.0
                energy_val = energy_res[scale_idx, step - 1] - energy_loss
                if energy_val < 0.0:
                    energy_val = 0.0
                energy_res[scale_idx, step] = energy_val
                ledger_growth = int(scale * 5 + (step % 10))
                ledger_res[scale_idx, step] = ledger_res[scale_idx, step - 1] + ledger_growth
                Psi_r_res[scale_idx, step] = current_psi + 0.0005 * energy_loss + 0.00001 * ops_per_sec / 1e6
    elapsed = max(time.perf_counter() - start, 1e-9)  # avoid div-by-zero in GFLOPS
    # Final metrics at the largest simulated scale.
    max_scale_idx = num_scales_to_simulate - 1
    final_step = simulation_steps - 1
    psi_r_final = float(Psi_r_res[max_scale_idx, final_step])
    energy_final = float(energy_res[max_scale_idx, final_step])
    ledger_final = int(ledger_res[max_scale_idx, final_step])
    scale = hardware_scale[max_scale_idx]
    scale_efficiency = max(1 - 0.0002 * (scale - 1), 0.5)
    ops_per_sec_final = ops_per_sec_base * scale * scale_efficiency * (1 + 0.05 * psi_r_final)
    flops_final = flops_base * scale * scale_efficiency * (1 + 0.05 * psi_r_final)
    # Estimated (not exact) FLOP totals — see file header note about op counts.
    total_estimated_flops = flops_final * simulation_steps * num_scales_to_simulate
    avg_gflops_per_sec = total_estimated_flops / (elapsed * 1e9)
    # Plot (excluded from timing).
    fig, ax = plt.subplots(figsize=(7, 4))
    ax.plot(hardware_scale, Psi_r_res[:, -1], color="#3b82f6")
    ax.set_title("Final Ψ_r vs Hardware Scale")
    ax.set_xlabel("Hardware Scale (SPUs)")
    ax.set_ylabel("Final Ψ_r")
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fd, plot_path = tempfile.mkstemp(suffix=".png")
    # BUGFIX: mkstemp returns an OPEN file descriptor; the original discarded it,
    # leaking one fd per run. Close it — savefig opens its own handle by path.
    os.close(fd)
    plt.savefig(plot_path)
    plt.close(fig)
    # Manifest + hash: seal the run parameters and headline results into an IPURL.
    run_ipurl = None
    if include_hash:
        manifest = {
            "num_scales": int(num_scales_to_simulate),
            "steps": int(simulation_steps),
            "Psi_r_init": float(psi_r_init_val),
            "tau_eff": float(tau_eff_val),
            "delta_tau_pred": float(delta_tau_pred_val),
            "epsilon_c": float(epsilon_c_val),
            "phi_loop": float(phi_loop_val),
            "eta_sync": float(eta_sync_val),
            "omega_n": float(omega_n_val),
            "lambda_m": float(lambda_m_val),
            "alpha": float(alpha_exp),
            "beta": float(beta_exp),
            "gamma": float(gamma_exp),
            "delta": float(delta_exp),
            "epsilon": float(epsilon_exp),
            "zeta": float(zeta_exp),
            "kappa": float(kappa_exp),
            "mu": float(mu_exp),
            "n": int(n_exp),
            "seed": int(seed),
            "elapsed_seconds": float(elapsed),
            "avg_gflops_per_sec_est": float(avg_gflops_per_sec),
            "psi_r_final": float(psi_r_final),
            "energy_final": float(energy_final),
            "ledger_final": int(ledger_final),
            "psi_r_head": [float(x) for x in Psi_r_res[max_scale_idx, :10]]
        }
        # Canonical serialization (sorted keys, no whitespace) so the hash is stable.
        serialized = json.dumps(manifest, sort_keys=True, separators=(",", ":")).encode("utf-8")
        run_ipurl = f"rft-numba:v1:{hashlib.sha512(serialized).hexdigest()}"
    summary = (
        f"RFT Hardware-Scale Simulation\n"
        f"- Simulation Time: {elapsed:.6f} s\n"
        f"- Max Scale: {hardware_scale[max_scale_idx]:.1f} SPUs\n"
        f"- Final Ψ_r: {psi_r_final:.6f}\n"
        f"- Final Energy (%): {energy_final:.6f}\n"
        f"- Final Ledger Size: {ledger_final}\n"
        f"- Estimated Peak Ops/sec: {ops_per_sec_final:.2e}\n"
        f"- Estimated Peak FLOPS: {flops_final:.2e}\n"
        f"- Naive Average GFLOPS/sec: {avg_gflops_per_sec:.2f}\n"
        + (f"- Run IPURL: {run_ipurl}\n" if run_ipurl else "")
    )
    return summary, plot_path
# =========================================================================================
# Part B: PyTorch MOM kernel — Collapse dynamics + histories + raster onset
# =========================================================================================
def fused_mom_update_cpu(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
                         dt, eps, sigma_const, theta_global, k_shred_global,
                         event_counts_t=None, event_buffer_t=None):
    """One fused MOM step: Euler-integrate mode/drive dynamics, then apply shredding.

    Shapes: m_root_t is (Ncells,); A_t and Q_t are (Ncells, Nmode); alpha_t,
    gamma_t, omega_t are (Nmode,). A_t, Q_t, and m_root_t are updated in place
    (when already float32) and also returned. event_counts_t, if given, is
    incremented by the number of cells that shredded this step.
    """
    # Normalize everything to float32 up front (no-op copies when already float32).
    m_root_t, A_t, Q_t = (t.to(torch.float32) for t in (m_root_t, A_t, Q_t))
    alpha_t, gamma_t, omega_t = (t.to(torch.float32) for t in (alpha_t, gamma_t, omega_t))
    # Broadcasting views: per-mode params -> (1, Nmode); per-cell mass -> (Ncells, 1).
    alpha_b = alpha_t.unsqueeze(0)
    gamma_b = gamma_t.unsqueeze(0)
    omega_b = omega_t.unsqueeze(0)
    mass_col = m_root_t.unsqueeze(1)
    # Derivatives are evaluated on the PRE-update state, then both fields step together.
    dA = alpha_b * mass_col - gamma_b * A_t + sigma_const * Q_t
    drive = sigma_const * mass_col * omega_b * A_t
    dQ = drive - Q_t
    A_t.add_(dt * dA)
    Q_t.add_(dt * dQ)
    # Shred trigger: omega-weighted mode sum, normalized by cell mass.
    xi_norm = (omega_b * A_t).sum(dim=1) / (m_root_t + eps)
    fired = xi_norm >= theta_global
    if torch.any(fired):
        # Shred fraction saturates toward 1 as xi_norm exceeds the threshold.
        eta = torch.zeros_like(xi_norm)
        eta[fired] = torch.clamp(
            1.0 - torch.exp(-k_shred_global * (xi_norm[fired] - theta_global)), 0.0, 1.0
        )
        # Remaining mass after shredding plus a small dissipation tax; never negative.
        dissipation = 0.01 * m_root_t * eta
        surviving = torch.clamp((1.0 - eta) * m_root_t - dissipation, min=0.0)
        m_root_t[fired] = surviving[fired]
        n_fired = int(torch.sum(fired).item())
        if event_counts_t is not None:
            if isinstance(event_counts_t, torch.Tensor):
                # Keep the counter integral before the in-place add.
                if event_counts_t.dtype not in (torch.int64, torch.int32):
                    event_counts_t = event_counts_t.to(torch.int64)
                event_counts_t.add_(n_fired)
            else:
                event_counts_t += n_fired
        # Optional: write indices into event buffer (pack iteration externally)
        if event_buffer_t is not None and isinstance(event_buffer_t, torch.Tensor):
            # This is a simple increment-only counter; you can replace with actual raster indexing scheme.
            pass
    return m_root_t, A_t, Q_t, event_counts_t
class MOMKernel:
    """Dispatch wrapper for the fused MOM update step.

    Currently CPU-only; a CUDA/compiled implementation could be swapped into
    ``self.kernel`` without touching callers.
    """

    def __init__(self):
        # Bind the concrete implementation and its target device once.
        self.kernel = fused_mom_update_cpu
        self.device = torch.device('cpu')

    def __call__(self, m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
                 dt, eps, sigma_const, theta_global, k_shred_global,
                 event_counts_t=None, event_buffer_t=None):
        """Forward all arguments unchanged to the bound kernel implementation."""
        step = self.kernel
        return step(m_root_t, A_t, Q_t, alpha_t, gamma_t, omega_t,
                    dt, eps, sigma_const, theta_global, k_shred_global,
                    event_counts_t, event_buffer_t)
class MOMSystemLoop:
    """Driver that iterates the MOM kernel and records per-iteration histories.

    Each iteration: run one fused kernel step, record which cells collapsed
    (mass <= 1e-8) for the raster plot, apply a decay+noise feedback, and
    append mean-state and event-count histories.
    """

    def __init__(self, mom_kernel, m_root_initial, A_modes_initial, Q_drive_initial,
                 alpha, gamma, omega, dt=0.02, eps=1e-6, sigma=0.75,
                 theta=2.2, k_shred=1.2, event_buffer_size=1024, rng_seed=42):
        self.mom_kernel = mom_kernel
        self.device = mom_kernel.device
        # State (cloned so the caller's tensors are never mutated).
        self.m_root = m_root_initial.to(self.device).clone().to(torch.float32)
        self.A_modes = A_modes_initial.to(self.device).clone().to(torch.float32)
        self.Q_drive = Q_drive_initial.to(self.device).clone().to(torch.float32)
        self.alpha = alpha.to(self.device).to(torch.float32)
        self.gamma = gamma.to(self.device).to(torch.float32)
        self.omega = omega.to(self.device).to(torch.float32)
        # Integration / shredding parameters.
        self.dt = dt; self.eps = eps; self.sigma = sigma
        self.theta = theta; self.k_shred = k_shred
        # Events: per-iteration counter (zeroed each step) + placeholder buffer.
        self.event_counts = torch.zeros((), dtype=torch.int64, device=self.device)
        self.event_buffer = torch.zeros(event_buffer_size, dtype=torch.int64, device=self.device)
        # Histories (one entry appended per run() iteration).
        self.m_root_history = []
        self.A_modes_history = []
        self.event_counts_history = []
        # -1 marks "not yet collapsed"; otherwise the iteration index of onset.
        self.shred_onset = np.full((self.m_root.shape[0],), -1, dtype=np.int32)
        # Dedicated RNG so feedback noise is reproducible and independent of global state.
        self.gen = torch.Generator(device=self.device)
        self.gen.manual_seed(int(rng_seed))

    def feedback(self, m_root, A_modes, Q_drive):
        """Apply decay plus small seeded Gaussian noise; clamp state non-negative."""
        decay = 0.995
        noise_level = 1e-4
        # BUGFIX: the original called torch.random_like (which does not exist) and
        # torch.randn_like with a generator= kwarg (which randn_like does not accept).
        # torch.randn with an explicit shape supports the seeded generator directly.
        noise_A = torch.randn(A_modes.shape, generator=self.gen,
                              device=self.device, dtype=A_modes.dtype)
        A_modes_new = torch.clamp(A_modes * decay + noise_level * noise_A, min=0.0)
        noise_m = torch.randn(m_root.shape, generator=self.gen,
                              device=self.device, dtype=m_root.dtype)
        m_root_new = torch.clamp(m_root * decay + noise_level * noise_m, min=0.0)
        return m_root_new, A_modes_new, Q_drive

    def run(self, iterations):
        """Advance the system `iterations` steps, recording histories and onsets."""
        for i in range(iterations):
            # Reset so event_counts_history holds per-iteration (not cumulative) counts.
            self.event_counts.zero_()
            self.mom_kernel(self.m_root, self.A_modes, self.Q_drive,
                            self.alpha, self.gamma, self.omega,
                            self.dt, self.eps, self.sigma, self.theta, self.k_shred,
                            self.event_counts, self.event_buffer)
            # Record first iteration at which each cell's mass collapses.
            m_np = self.m_root.detach().cpu().numpy()
            collapsed_mask = m_np <= 1e-8
            for idx, collapsed in enumerate(collapsed_mask):
                if collapsed and self.shred_onset[idx] == -1:
                    self.shred_onset[idx] = i
            self.m_root, self.A_modes, self.Q_drive = self.feedback(self.m_root, self.A_modes, self.Q_drive)
            self.m_root_history.append(float(self.m_root.mean().item()))
            self.A_modes_history.append(float(self.A_modes.mean().item()))
            self.event_counts_history.append(int(self.event_counts.item()))
def run_mom_simulation(
    Ncells, Nmode, iterations, dt=0.02, eps=1e-6, sigma=0.75,
    theta=2.2, k_shred=1.2, seed=42, include_hash=True
):
    """Run the MOM collapse simulation and return (summary, plot_path).

    Builds the kernel and system loop, does a 1-iteration warmup (excluded from
    timing), runs `iterations` timed steps, renders a 4-panel diagnostic figure,
    and optionally seals the run manifest into an IPURL hash.

    Returns:
        summary_output (str): markdown-ready run summary.
        plot_path (str): path to the rendered PNG figure.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    mom_kernel_instance = MOMKernel()
    device = mom_kernel_instance.device
    # Per-mode coefficients drawn once under the seed above, fixed for the run.
    alpha = torch.empty(Nmode, device=device).uniform_(0.02, 0.12)
    gamma = torch.empty(Nmode, device=device).uniform_(0.01, 0.06)
    omega = torch.linspace(1.0, 8.0, Nmode, device=device)
    m_root_initial = torch.ones(Ncells, device=device)
    A_modes_initial = torch.rand(Ncells, Nmode, device=device) * 0.01
    Q_drive_initial = torch.zeros(Ncells, Nmode, device=device)
    mom_system = MOMSystemLoop(
        mom_kernel_instance, m_root_initial, A_modes_initial, Q_drive_initial,
        alpha, gamma, omega, dt=dt, eps=eps, sigma=sigma,
        theta=theta, k_shred=k_shred, rng_seed=seed
    )
    # Warmup (excluded from timing).
    # NOTE(review): the warmup iteration still lands in the histories/plots, so
    # plotted series have iterations+1 points — confirm this is intended.
    mom_system.run(1)
    start_time = time.perf_counter()
    mom_system.run(iterations)
    elapsed_time = max(time.perf_counter() - start_time, 1e-9)  # avoid div-by-zero
    # Estimated FLOPs (placeholder estimate; see file header note).
    ops_per_cell_per_iter = 12 * Nmode + 13
    flops_per_iteration = float(Ncells) * float(ops_per_cell_per_iter)
    total_flops = flops_per_iteration * float(iterations)
    gflops = total_flops / (elapsed_time * 1e9)
    # Build plots (excluded from elapsed_time).
    fig = plt.figure(figsize=(10, 14))
    ax1 = fig.add_subplot(4, 1, 1)
    ax1.plot(mom_system.m_root_history, label='Mean m_root')
    ax1.set_title('Mean m_root Over Iterations'); ax1.set_xlabel('Iteration'); ax1.set_ylabel('Mean m_root')
    ax1.grid(True); ax1.legend()
    ax2 = fig.add_subplot(4, 1, 2)
    ax2.plot(mom_system.A_modes_history, label='Mean A_modes', color='orange')
    ax2.set_title('Mean A_modes Over Iterations')
    ax2.set_xlabel('Iteration'); ax2.set_ylabel('Mean A_modes')
    ax2.grid(True); ax2.legend()
    ax3 = fig.add_subplot(4, 1, 3)
    # Histories store per-iteration counts; accumulate for the cumulative view.
    cumulative_events = np.cumsum(np.array(mom_system.event_counts_history))
    ax3.plot(cumulative_events, label='Cumulative Shredding Events', color='red')
    ax3.set_title('Cumulative Shredding Events')
    ax3.set_xlabel('Iteration'); ax3.set_ylabel('Cumulative Events')
    ax3.grid(True); ax3.legend()
    ax4 = fig.add_subplot(4, 1, 4)
    # Raster: one tick per cell at the iteration its mass first collapsed.
    onset = mom_system.shred_onset
    for idx, val in enumerate(onset):
        if val >= 0:
            ax4.vlines(val, idx, idx + 1, color='black', linewidth=0.8)
    ax4.set_title('Shredding Onset per Cell')
    ax4.set_xlabel('Iteration'); ax4.set_ylabel('Cell Index')
    ax4.grid(True)
    plt.tight_layout()
    fd, plot_path = tempfile.mkstemp(suffix=".png")
    # BUGFIX: mkstemp returns an OPEN file descriptor; the original discarded it,
    # leaking one fd per run. Close it — savefig opens its own handle by path.
    os.close(fd)
    plt.savefig(plot_path)
    plt.close(fig)
    # Manifest + hash: seal parameters and leading history values into an IPURL.
    run_ipurl = None
    if include_hash:
        manifest = {
            "Ncells": int(Ncells), "Nmode": int(Nmode), "iterations": int(iterations),
            "dt": float(dt), "eps": float(eps), "sigma": float(sigma),
            "theta": float(theta), "k_shred": float(k_shred), "seed": int(seed),
            "elapsed_time_seconds": float(elapsed_time),
            "gflops_estimated": float(gflops),
            "m_root_head": [float(x) for x in mom_system.m_root_history[:10]],
            "A_modes_head": [float(x) for x in mom_system.A_modes_history[:10]],
            "event_counts_head": [int(x) for x in mom_system.event_counts_history[:10]],
        }
        # Canonical serialization (sorted keys, no whitespace) so the hash is stable.
        serialized = json.dumps(manifest, sort_keys=True, separators=(",", ":")).encode("utf-8")
        run_ipurl = f"mom-kernel:v1:{hashlib.sha512(serialized).hexdigest()}"
    summary_output = (
        f"MOM Kernel Simulation\n"
        f"- Simulation Time: {elapsed_time:.6f} s\n"
        f"- Estimated GFLOPS (per fused step): {gflops:.4f}\n"
        f"- Final Mean m_root: {float(torch.mean(mom_system.m_root).item()):.6f}\n"
        f"- Final Mean A_modes: {float(torch.mean(mom_system.A_modes).item()):.6f}\n"
        f"- Total Events (last iter): {mom_system.event_counts_history[-1] if len(mom_system.event_counts_history) > 0 else 0}\n"
        + (f"- Run IPURL: {run_ipurl}\n" if run_ipurl else "")
    )
    return summary_output, plot_path
# =========================================================================================
# Part C: Gradio UI — Two tabs for unified artifact
# =========================================================================================
# Gradio application: two tabs (RFT hardware scaling, MOM collapse kernel),
# each wiring sliders/inputs through a small adapter into its simulation function.
with gr.Blocks(title="NexFrame RFT + MOM Unified Artifact") as demo:
    gr.Markdown("# NexFrame Codex: RFT Hardware Scaling + MOM Collapse Kernel")
    gr.Markdown("This artifact combines a Numba-accelerated RFT simulation across hardware scales with a PyTorch MOM kernel for collapse dynamics. Each run can be sealed with a lineage hash (IPURL).")
    # ---- Tab 1: RFT hardware-scale simulation ----
    with gr.Tab("RFT Hardware Scaling"):
        gr.Markdown("### RFT Simulation (Numba/NumPy)\nAdjust parameters to explore Ψ_r, energy, and ledger dynamics across hardware scales.")
        with gr.Row():
            num_scales = gr.Slider(5, 100, step=5, value=50, label="Number of Hardware Scales")
            steps = gr.Slider(100, 5000, step=100, value=2000, label="Simulation Steps per Scale")
            seed_rft = gr.Number(value=42, label="Seed", precision=0)
            include_hash_rft = gr.Checkbox(value=True, label="Seal run with hash (IPURL)")
        # Core RFT parameters.
        with gr.Row():
            psi_r_init = gr.Slider(0.1, 2.0, step=0.1, value=1.0, label="Psi_r_init")
            tau_eff = gr.Slider(0.01, 1.0, step=0.01, value=0.05, label="tau_eff")
            delta_tau_pred = gr.Slider(0.001, 0.1, step=0.001, value=0.01, label="delta_tau_pred")
            epsilon_c = gr.Slider(0.001, 0.1, step=0.001, value=0.005, label="epsilon_c")
        with gr.Row():
            phi_loop = gr.Slider(0.1, 2.0, step=0.1, value=1.0, label="phi_loop")
            eta_sync = gr.Slider(0.001, 0.1, step=0.001, value=0.01, label="eta_sync")
            omega_n = gr.Slider(10, 100, step=1, value=50, label="omega_n")
            lambda_m = gr.Slider(10, 200, step=10, value=100, label="lambda_m")
        gr.Markdown("#### Nonlinear exponents")
        with gr.Row():
            alpha = gr.Slider(0.5, 3.0, step=0.1, value=1.1, label="alpha")
            beta = gr.Slider(0.5, 3.0, step=0.1, value=2.0, label="beta")
            gamma = gr.Slider(0.5, 3.0, step=0.1, value=1.2, label="gamma")
            delta = gr.Slider(0.5, 3.0, step=0.1, value=1.0, label="delta")
        with gr.Row():
            epsilon = gr.Slider(0.5, 3.0, step=0.1, value=1.0, label="epsilon")
            zeta = gr.Slider(0.5, 3.0, step=0.1, value=1.0, label="zeta")
            kappa = gr.Slider(1.0, 10.0, step=0.1, value=5.0, label="kappa")
            mu = gr.Slider(1.0, 20.0, step=1.0, value=10.0, label="mu")
            n = gr.Slider(1, 5, step=1, value=2, label="n")
        rft_run = gr.Button("Run RFT Simulation")
        rft_summary = gr.Markdown(label="RFT Summary")
        rft_plot = gr.Image(label="Final Ψ_r across Hardware Scales", type="filepath")

        # Adapter: coerce Gradio's raw widget values to the exact types
        # run_rft_hardware_scale expects before dispatching.
        def _rft_ui(num_scales_to_simulate, simulation_steps, psi_r_init_val, tau_eff_val,
                    delta_tau_pred_val, epsilon_c_val, phi_loop_val, eta_sync_val,
                    omega_n_val, lambda_m_val, alpha_exp, beta_exp, gamma_exp,
                    delta_exp, epsilon_exp, zeta_exp, kappa_exp, mu_exp, n_exp,
                    seed, include_hash):
            return run_rft_hardware_scale(
                num_scales_to_simulate=int(num_scales_to_simulate),
                simulation_steps=int(simulation_steps),
                psi_r_init_val=float(psi_r_init_val),
                tau_eff_val=float(tau_eff_val),
                delta_tau_pred_val=float(delta_tau_pred_val),
                epsilon_c_val=float(epsilon_c_val),
                phi_loop_val=float(phi_loop_val),
                eta_sync_val=float(eta_sync_val),
                omega_n_val=float(omega_n_val),
                lambda_m_val=float(lambda_m_val),
                alpha_exp=float(alpha_exp),
                beta_exp=float(beta_exp),
                gamma_exp=float(gamma_exp),
                delta_exp=float(delta_exp),
                epsilon_exp=float(epsilon_exp),
                zeta_exp=float(zeta_exp),
                kappa_exp=float(kappa_exp),
                mu_exp=float(mu_exp),
                n_exp=int(n_exp),
                seed=int(seed),
                include_hash=bool(include_hash)
            )

        # Input order here must match _rft_ui's parameter order.
        rft_run.click(
            _rft_ui,
            inputs=[
                num_scales, steps, psi_r_init, tau_eff, delta_tau_pred, epsilon_c,
                phi_loop, eta_sync, omega_n, lambda_m, alpha, beta, gamma, delta,
                epsilon, zeta, kappa, mu, n, seed_rft, include_hash_rft
            ],
            outputs=[rft_summary, rft_plot]
        )
    # ---- Tab 2: MOM collapse kernel ----
    with gr.Tab("MOM Collapse Kernel"):
        gr.Markdown("### MOM Kernel (PyTorch)\nSimulate collapse dynamics with shredding onset and event histories.")
        with gr.Row():
            Ncells = gr.Slider(8, 4096, step=8, value=256, label="Cells")
            Nmode = gr.Slider(4, 512, step=4, value=64, label="Modes per Cell")
            iterations = gr.Slider(10, 5000, step=10, value=500, label="Iterations")
            seed_mom = gr.Number(value=42, label="Seed", precision=0)
            include_hash_mom = gr.Checkbox(value=True, label="Seal run with hash (IPURL)")
        with gr.Row():
            dt = gr.Slider(0.001, 0.1, step=0.001, value=0.02, label="dt")
            eps = gr.Slider(1e-8, 1e-4, step=1e-8, value=1e-6, label="eps")
            sigma = gr.Slider(0.1, 1.5, step=0.01, value=0.75, label="sigma")
            theta = gr.Slider(0.5, 5.0, step=0.1, value=2.2, label="theta")
            k_shred = gr.Slider(0.1, 5.0, step=0.1, value=1.2, label="k_shred")
        mom_run = gr.Button("Run MOM Simulation")
        mom_summary = gr.Markdown(label="MOM Summary")
        mom_plot = gr.Image(label="MOM Plots", type="filepath")

        # Adapter: coerce widget values to the types run_mom_simulation expects.
        def _mom_ui(Ncells_val, Nmode_val, iterations_val, dt_val, eps_val, sigma_val, theta_val, k_shred_val, seed_val, include_hash_val):
            return run_mom_simulation(
                Ncells=int(Ncells_val),
                Nmode=int(Nmode_val),
                iterations=int(iterations_val),
                dt=float(dt_val),
                eps=float(eps_val),
                sigma=float(sigma_val),
                theta=float(theta_val),
                k_shred=float(k_shred_val),
                seed=int(seed_val),
                include_hash=bool(include_hash_val)
            )

        mom_run.click(
            _mom_ui,
            inputs=[Ncells, Nmode, iterations, dt, eps, sigma, theta, k_shred, seed_mom, include_hash_mom],
            outputs=[mom_summary, mom_plot]
        )
# Launch the web UI only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()