Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,46 +1,100 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
import numpy as np
|
| 3 |
-
|
| 4 |
# ============================================================
|
| 5 |
-
#
|
| 6 |
-
#
|
| 7 |
# ============================================================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
"
|
| 12 |
-
"
|
| 13 |
-
"
|
| 14 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
}
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
#
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
z_noise = np.random.normal(0, noise_scale * 0.8)
|
| 38 |
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
variance = abs(q_noise) + abs(z_noise)
|
| 45 |
if variance > 0.15:
|
| 46 |
status = "critical"
|
|
@@ -49,47 +103,267 @@ def run_simulation(profile, noise_scale):
|
|
| 49 |
else:
|
| 50 |
status = "nominal"
|
| 51 |
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
"
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
}
|
| 64 |
|
| 65 |
-
#
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
title="RFT-Ω Harmonic Validation Interface v3",
|
| 82 |
-
description=(
|
| 83 |
-
"Simulate harmonic stability (QΩ) and coherence (ζ_sync) "
|
| 84 |
-
"with adaptive baselines and domain-specific weighting. "
|
| 85 |
-
"Adjust noise to test system resilience. Typical stable range: "
|
| 86 |
-
"QΩ 0.82–0.89 | ζ_sync 0.75–0.88"
|
| 87 |
-
)
|
| 88 |
)
|
| 89 |
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
|
|
|
| 94 |
if __name__ == "__main__":
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# ============================================================
|
| 2 |
+
# RFT-Ω FRAMEWORK — TOTAL-PROOF API (Sprint 1)
|
| 3 |
+
# Author: Liam Grinstead (RFT Systems) | All Rights Reserved
|
| 4 |
# ============================================================
|
| 5 |
+
"""
|
| 6 |
+
This Space is a deterministic, signed validation harness for the
|
| 7 |
+
Rendered Frame Theory (RFT-Ω) harmonic stability kernel.
|
| 8 |
+
|
| 9 |
+
Core guarantees:
|
| 10 |
+
- Deterministic replay via seed + config.
|
| 11 |
+
- Multiple schedules (single/ramp/random/impulse/step).
|
| 12 |
+
- Multiple noise distributions (gauss/uniform).
|
| 13 |
+
- Signed outputs: SHA-512 over (config+results).
|
| 14 |
+
- Downloadable bundle: run.json, run.sha512, ABOUT.json, NOTICE.txt.
|
| 15 |
+
- Health & About endpoints for ops integration.
|
| 16 |
+
|
| 17 |
+
Legal:
|
| 18 |
+
All Rights Reserved — RFT-IPURL v1.0 (UK / Berne).
|
| 19 |
+
Research validation use only. No reverse-engineering or derivative
|
| 20 |
+
kernels without written consent from the Author.
|
| 21 |
+
Contact: liamgrinstead2@gmail.com
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import hashlib
import io
import json
import math
import os
import random
import tempfile
import time
import zipfile
from datetime import datetime, timezone
from typing import Any, Dict, List, Tuple

import numpy as np

import gradio as gr
|
| 30 |
+
|
| 31 |
+
# Optional FastAPI for the /healthz and /about ops endpoints; when it is
# missing the Space degrades gracefully to a plain Gradio app.
try:
    from fastapi import FastAPI
except Exception:
    HAVE_FASTAPI = False
else:
    HAVE_FASTAPI = True
|
| 37 |
+
|
| 38 |
+
# ------------------ Version / About -------------------------
RFT_VERSION = "v4.0-total-proof-sprint1"
RFT_DOI = "https://doi.org/10.5281/zenodo.17466722"
HF_URL = "https://rftsystems-rft-omega-api.hf.space"

LEGAL_NOTICE = (
    "All Rights Reserved — RFT-IPURL v1.0 (UK / Berne). "
    "Research validation use only. No reverse-engineering or derivative kernels without written consent."
)

# Static metadata: served by the /about endpoint and written into every
# downloadable bundle as ABOUT.json.
ABOUT_BLOCK = dict(
    name="RFT-Ω Framework — Total-Proof API",
    version=RFT_VERSION,
    doi=RFT_DOI,
    space=HF_URL,
    profiles=["AI / Neural", "SpaceX / Aerospace", "Energy / RHES", "Extreme Perturbation"],
    noise_distributions=["gauss", "uniform"],
    schedules=["single", "ramp", "random", "impulse", "step"],
    metrics=["QΩ", "ζ_sync", "status"],
    integrity=["run_id", "sha512(config+results)", "bundle(zip)"],
    legal=LEGAL_NOTICE,
)
|
| 59 |
|
| 60 |
+
# ------------------ Soft Rate Limit -------------------------
|
| 61 |
+
RUN_HISTORY_TS: List[float] = [] # timestamps of recent runs
|
| 62 |
+
MAX_RUNS_PER_MINUTE = 60 # global fairness guard
|
| 63 |
+
|
| 64 |
+
def _rate_limit_ok() -> Tuple[bool, str]:
|
| 65 |
+
now = time.time()
|
| 66 |
+
# prune >60s
|
| 67 |
+
while RUN_HISTORY_TS and now - RUN_HISTORY_TS[0] > 60.0:
|
| 68 |
+
RUN_HISTORY_TS.pop(0)
|
| 69 |
+
if len(RUN_HISTORY_TS) >= MAX_RUNS_PER_MINUTE:
|
| 70 |
+
return False, "Rate limit exceeded (demo fairness). Please retry shortly."
|
| 71 |
+
RUN_HISTORY_TS.append(now)
|
| 72 |
+
return True, "ok"
|
| 73 |
+
|
| 74 |
+
# ------------------ Profiles & Simulator --------------------
# Per-domain tuples: (name, (baseline QΩ, baseline ζ_sync), (QΩ noise weight,
# ζ_sync noise weight)) — consumed by simulate_step as base + w * noise.
_PROFILE_TABLE = [
    ("AI / Neural", (0.86, 0.80), (0.65, 0.35)),
    ("SpaceX / Aerospace", (0.84, 0.79), (0.60, 0.40)),
    ("Energy / RHES", (0.83, 0.78), (0.55, 0.45)),
    ("Extreme Perturbation", (0.82, 0.77), (0.50, 0.50)),
]
PROFILES = {name: {"base": base, "w": w} for name, base, w in _PROFILE_TABLE}
|
| 81 |
|
| 82 |
+
def _rng(seed: int) -> np.random.RandomState:
|
| 83 |
+
return np.random.RandomState(seed)
|
|
|
|
| 84 |
|
| 85 |
+
def simulate_step(rng: np.random.RandomState, profile: str, sigma: float, noise_dist: str) -> Dict[str, Any]:
|
| 86 |
+
base_q, base_z = PROFILES[profile]["base"]
|
| 87 |
+
wq, wz = PROFILES[profile]["w"]
|
| 88 |
|
| 89 |
+
if noise_dist == "uniform":
|
| 90 |
+
q_noise = rng.uniform(-sigma, sigma)
|
| 91 |
+
z_noise = rng.uniform(-sigma * 0.8, sigma * 0.8)
|
| 92 |
+
else: # "gauss"
|
| 93 |
+
q_noise = rng.normal(0, sigma)
|
| 94 |
+
z_noise = rng.normal(0, sigma * 0.8)
|
| 95 |
+
|
| 96 |
+
q = float(np.clip(base_q + wq * q_noise, 0.0, 0.99))
|
| 97 |
+
z = float(np.clip(base_z + wz * z_noise, 0.0, 0.99))
|
| 98 |
variance = abs(q_noise) + abs(z_noise)
|
| 99 |
if variance > 0.15:
|
| 100 |
status = "critical"
|
|
|
|
| 103 |
else:
|
| 104 |
status = "nominal"
|
| 105 |
|
| 106 |
+
return {"sigma": float(round(sigma, 6)), "QΩ": q, "ζ_sync": z, "status": status}
|
| 107 |
+
|
| 108 |
+
# ------------------ Schedules -------------------------------
def build_sigma_series(schedule_type: str, params: Dict[str, Any]) -> List[float]:
    """Expand a schedule description into a per-step sigma series.

    Every parameter has a robust default, so an empty dict is always valid;
    unknown schedule types fall back to a one-step "single" series.
    """
    if schedule_type == "ramp":
        lo = float(params.get("start", 0.0))
        hi = float(params.get("stop", 0.3))
        n = max(1, int(params.get("steps", 10)))
        return list(np.linspace(lo, hi, n))

    if schedule_type == "random":
        lo = float(params.get("min", 0.0))
        hi = float(params.get("max", 0.3))
        n = max(1, int(params.get("steps", 10)))
        gen = random.Random(int(params.get("seed", 0)))
        return [gen.uniform(lo, hi) for _ in range(n)]

    if schedule_type == "impulse":
        n = max(1, int(params.get("steps", 10)))
        series = [float(params.get("base", 0.05))] * n
        at = int(params.get("at", 5))
        # An out-of-range spike index is silently ignored (robust default).
        if 0 <= at < n:
            series[at] = float(params.get("spike", 0.25))
        return series

    if schedule_type == "step":
        n = max(1, int(params.get("steps", 10)))
        before = float(params.get("before", 0.05))
        after = float(params.get("after", 0.20))
        at = int(params.get("at", 5))
        return [after if i >= at else before for i in range(n)]

    # "single" and any unrecognized schedule type: one-step series.
    return [float(params.get("sigma", 0.05))]
|
| 154 |
+
|
| 155 |
+
# ------------------ Integrity / Bundling --------------------
def sha512_hex(s: str) -> str:
    """Hex-encoded SHA-512 digest of *s* (UTF-8 encoded)."""
    digest = hashlib.sha512()
    digest.update(s.encode("utf-8"))
    return digest.hexdigest()
|
| 158 |
+
|
| 159 |
+
def make_run_id(seed: int, profile: str) -> str:
    """Generate a short (16 hex chars) run identifier.

    Mixes wall-clock nanoseconds, the seed, the profile name and a random
    value so ids stay unique even for identical configurations.
    """
    entropy = "::".join([str(time.time_ns()), str(seed), profile, str(random.random())])
    return hashlib.sha256(entropy.encode("utf-8")).hexdigest()[:16]
|
| 163 |
+
|
| 164 |
+
def write_bundle(run_dir: str, config: Dict[str, Any], results: Dict[str, Any]) -> Tuple[str, str]:
    """Write the signed run bundle and return (zip_path, sha512_digest).

    Bundle contents under *run_dir*: run.json (config+results), run.sha512
    (digest of the canonical sorted-key JSON), ABOUT.json, NOTICE.txt, and
    rft_run_bundle.zip containing all four.
    """
    os.makedirs(run_dir, exist_ok=True)

    # Explicit UTF-8 on every text write: LEGAL_NOTICE and ABOUT_BLOCK
    # contain non-ASCII characters (em dashes, Ω/ζ), so relying on the
    # platform default encoding can raise UnicodeEncodeError.
    run_json_path = os.path.join(run_dir, "run.json")
    with open(run_json_path, "w", encoding="utf-8") as f:
        json.dump({"config": config, "results": results}, f, indent=2)

    # SHA-512 over canonical (sorted-key) JSON so the digest does not
    # depend on dict insertion order.
    canonical = json.dumps({"config": config, "results": results}, sort_keys=True)
    digest = sha512_hex(canonical)
    sha_path = os.path.join(run_dir, "run.sha512")
    with open(sha_path, "w", encoding="utf-8") as f:
        f.write(digest + "\n")

    about_path = os.path.join(run_dir, "ABOUT.json")
    with open(about_path, "w", encoding="utf-8") as f:
        json.dump(ABOUT_BLOCK, f, indent=2)

    notice_path = os.path.join(run_dir, "NOTICE.txt")
    with open(notice_path, "w", encoding="utf-8") as f:
        f.write(LEGAL_NOTICE + "\n")

    # Zip everything for a single-file download.
    zip_path = os.path.join(run_dir, "rft_run_bundle.zip")
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as z:
        z.write(run_json_path, arcname="run.json")
        z.write(sha_path, arcname="run.sha512")
        z.write(about_path, arcname="ABOUT.json")
        z.write(notice_path, arcname="NOTICE.txt")

    return zip_path, digest
|
| 194 |
+
|
| 195 |
+
# ------------------ Core Runner -----------------------------
def run_total_proof(
    profile: str,
    noise_dist: str,
    schedule_type: str,
    schedule_params_text: str,
    seed: int,
    samples: int,
):
    """Run a deterministic, signed simulation sweep.

    Returns (payload, zip_path). On rate-limit or validation failure the
    payload carries an "error" key and zip_path is None; otherwise the
    payload holds "config", "head" and "results" and zip_path points to
    the signed bundle written by write_bundle().
    """
    ok, msg = _rate_limit_ok()
    if not ok:
        return {"error": msg, "rft_notice": LEGAL_NOTICE}, None

    # Parse schedule params: empty text means "use schedule defaults".
    try:
        params = json.loads(schedule_params_text) if schedule_params_text.strip() else {}
        if not isinstance(params, dict):
            raise ValueError("schedule_params must be a JSON object.")
    except Exception as e:
        return {"error": f"Invalid schedule_params JSON: {e}", "rft_notice": LEGAL_NOTICE}, None

    # Expand the schedule into a per-step sigma series.
    sigma_series = build_sigma_series(schedule_type, params)

    # Deterministic replay: each step gets a sub-seed drawn from the
    # master RNG, so noise is step-unique yet fully reproducible.
    rng_master = _rng(seed)
    results_steps = []
    trigger_count = 0

    for i, sigma in enumerate(sigma_series):
        # Average over 'samples' repeats for this step.
        qs, zs, step_statuses = [], [], []
        sub_seed = int(rng_master.randint(0, 2**31 - 1))
        step_rng = _rng(sub_seed)
        for _ in range(max(1, samples)):
            out = simulate_step(step_rng, profile, float(sigma), noise_dist)
            qs.append(out["QΩ"])
            zs.append(out["ζ_sync"])
            step_statuses.append(out["status"])

        q_mean = float(np.mean(qs))
        z_mean = float(np.mean(zs))
        # Majority status; the secondary key orders
        # critical > perturbed > nominal, so ties resolve to the worst.
        status_counts = {"nominal": 0, "perturbed": 0, "critical": 0}
        for s in step_statuses:
            status_counts[s] += 1
        severity = ["nominal", "perturbed", "critical"]
        majority = max(status_counts.items(), key=lambda kv: (kv[1], severity.index(kv[0])))
        status_final = majority[0]
        if status_final != "nominal":
            trigger_count += 1

        results_steps.append({
            "index": i,
            "sigma": float(round(float(sigma), 6)),
            "QΩ_mean": float(round(q_mean, 6)),
            "ζ_sync_mean": float(round(z_mean, 6)),
            "status_majority": status_final,
            "samples": int(max(1, samples))
        })

    # Summary across all steps.
    statuses = [s["status_majority"] for s in results_steps]
    summary = {
        "steps": len(results_steps),
        "nominal_count": int(sum(1 for s in statuses if s == "nominal")),
        "perturbed_count": int(sum(1 for s in statuses if s == "perturbed")),
        "critical_count": int(sum(1 for s in statuses if s == "critical")),
        "triggers_non_nominal": int(trigger_count)
    }

    # Build config and results blocks.
    run_id = make_run_id(seed, profile)
    config = {
        "run_id": run_id,
        # Timezone-aware "now" (datetime.utcnow() is deprecated); stripping
        # tzinfo keeps the previous naive-ISO + "Z" string format.
        "timestamp_utc": datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z",
        "profile": profile,
        "noise_distribution": noise_dist,
        "schedule_type": schedule_type,
        "schedule_params": params,
        "seed": int(seed),
        "samples_per_step": int(max(1, samples)),
        "about_version": RFT_VERSION,
        "about_doi": RFT_DOI,
    }
    results = {
        "series": results_steps,
        "summary": summary,
        "rft_notice": LEGAL_NOTICE
    }

    # Write the signed bundle; tempfile.gettempdir() instead of a
    # hard-coded "/tmp" keeps this portable (identical path on Linux).
    run_dir = os.path.join(tempfile.gettempdir(), f"rft_run_{run_id}")
    zip_path, digest = write_bundle(run_dir, config, results)

    # Minimal top-level response for the UI.
    head = {
        "run_id": run_id,
        "sha512": digest,
        "sha512_short": digest[:16] + "…",
        "steps": summary["steps"],
        "counts": {
            "nominal": summary["nominal_count"],
            "perturbed": summary["perturbed_count"],
            "critical": summary["critical_count"],
        },
        "rft_notice": LEGAL_NOTICE
    }

    return {"config": config, "head": head, "results": results}, zip_path
|
| 304 |
|
| 305 |
+
# ------------------ Gradio UI -------------------------------
# Default schedule-parameter JSON shown in the editor (valid for "single").
DEFAULT_PARAMS = json.dumps({"sigma": 0.05}, indent=2)

# One worked example per schedule type, shown under the JSON editor.
HELP_TEXT = (
    "Schedule JSON examples:\n"
    '- single: {"sigma": 0.05}\n'
    '- ramp: {"start": 0.0, "stop": 0.30, "steps": 12}\n'
    '- random: {"min": 0.01, "max": 0.30, "steps": 10, "seed": 0}\n'
    '- impulse:{"base": 0.05, "spike": 0.25, "at": 5, "steps": 12}\n'
    '- step: {"before": 0.05, "after": 0.20, "at": 6, "steps": 12}\n'
)
|
| 318 |
|
| 319 |
+
# Interactive demo: choose profile/noise/schedule, run the deterministic
# sweep, inspect the signed summary and download the bundle.
with gr.Blocks(title="RFT-Ω Total-Proof API") as demo:
    gr.Markdown(
        f"### RFT-Ω Total-Proof API ({RFT_VERSION}) \n"
        f"A deterministic, signed validation harness for harmonic stability (QΩ) and coherence (ζ_sync). \n"
        f"**DOI:** {RFT_DOI} \n"
        f"**Legal:** {LEGAL_NOTICE}"
    )

    with gr.Row():
        profile = gr.Radio(list(PROFILES.keys()), label="System Profile", value="AI / Neural")
        noise_dist = gr.Radio(["gauss", "uniform"], label="Noise Distribution", value="gauss")

    with gr.Row():
        schedule_type = gr.Radio(["single", "ramp", "random", "impulse", "step"], label="Schedule Type", value="single")
        seed_in = gr.Number(value=123, precision=0, label="Seed (int)")
        samples_in = gr.Slider(1, 20, value=5, step=1, label="Samples per Step")

    schedule_params = gr.Code(value=DEFAULT_PARAMS, language="json", label="Schedule Parameters (JSON)")
    gr.Markdown(HELP_TEXT)

    run_btn = gr.Button("Run Deterministic Simulation & Sign Results")
    out_json = gr.JSON(label="Signed Run Summary (config, head, results)")
    out_bundle = gr.File(label="Download Signed Bundle (zip)")

    def _on_run(p, nd, st, sp_json, seed, samples):
        # Thin adapter: coerce the numeric widgets to int, then delegate.
        return run_total_proof(p, nd, st, sp_json, int(seed), int(samples))

    run_btn.click(
        _on_run,
        inputs=[profile, noise_dist, schedule_type, schedule_params, seed_in, samples_in],
        outputs=[out_json, out_bundle],
    )

    gr.Markdown("**Ops endpoints:** `/healthz`, `/about` (FastAPI mounted if available).")
|
| 349 |
+
|
| 350 |
+
# ------------------ FastAPI Mount (optional) ----------------
if HAVE_FASTAPI:
    api = FastAPI(title="RFT-Ω Total-Proof Ops")

    @api.get("/healthz")
    def healthz():
        """Liveness probe for ops tooling."""
        return {"ok": True, "service": "RFT-Ω Total-Proof", "version": RFT_VERSION}

    @api.get("/about")
    def about():
        """Static metadata block (version, DOI, profiles, legal notice)."""
        return ABOUT_BLOCK

    # Serve the Gradio UI at the root of the FastAPI app.
    app = gr.mount_gradio_app(api, demo, path="/")
else:
    # No FastAPI available: expose the bare Gradio app instead.
    app = demo
|
| 365 |
|
| 366 |
+
# ------------------ Main -----------------------------------
if __name__ == "__main__":
    # On Hugging Face Spaces the platform launches the app itself; this
    # branch only fires during local development.
    demo.launch(server_name="0.0.0.0", server_port=7860)
|