RFTSystems commited on
Commit
6fa522a
·
verified ·
1 Parent(s): d3b805d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +135 -273
app.py CHANGED
@@ -1,24 +1,10 @@
1
  # ============================================================
2
- # RFT-Ω FRAMEWORK — API (Sprint 1)
3
  # Author: Liam Grinstead (RFT Systems) | All Rights Reserved
4
  # ============================================================
5
  """
6
- This Space is a deterministic, signed validation harness for the
7
  Rendered Frame Theory (RFT-Ω) harmonic stability kernel.
8
-
9
- Core guarantees:
10
- - Deterministic replay via seed + config.
11
- - Multiple schedules (single/ramp/random/impulse/step).
12
- - Multiple noise distributions (gauss/uniform).
13
- - Signed outputs: SHA-512 over (config+results).
14
- - Downloadable bundle: run.json, run.sha512, ABOUT.json, NOTICE.txt.
15
- - Health & About endpoints for ops integration.
16
-
17
- Legal:
18
- All Rights Reserved — RFT-IPURL v1.0 (UK / Berne).
19
- Research validation use only. No reverse-engineering or derivative
20
- kernels without written consent from the Author.
21
- Contact: liamgrinstead2@gmail.com
22
  """
23
 
24
  import os, io, json, time, math, hashlib, zipfile, random
@@ -28,15 +14,14 @@ from typing import Dict, Any, List, Tuple
28
  import numpy as np
29
  import gradio as gr
30
 
31
- # Optional FastAPI for /healthz and /about
32
  try:
33
  from fastapi import FastAPI
34
  HAVE_FASTAPI = True
35
  except Exception:
36
  HAVE_FASTAPI = False
37
 
38
- # ------------------ Version / About -------------------------
39
- RFT_VERSION = "v4.0-total-proof-sprint1"
40
  RFT_DOI = "https://doi.org/10.5281/zenodo.17466722"
41
  HF_URL = "https://rftsystems-rft-omega-api.hf.space"
42
  LEGAL_NOTICE = (
@@ -57,21 +42,20 @@ ABOUT_BLOCK = {
57
  "legal": LEGAL_NOTICE,
58
  }
59
 
60
- # ------------------ Soft Rate Limit -------------------------
61
- RUN_HISTORY_TS: List[float] = [] # timestamps of recent runs
62
- MAX_RUNS_PER_MINUTE = 60 # global fairness guard
63
 
64
  def _rate_limit_ok() -> Tuple[bool, str]:
65
  now = time.time()
66
- # prune >60s
67
- while RUN_HISTORY_TS and now - RUN_HISTORY_TS[0] > 60.0:
68
  RUN_HISTORY_TS.pop(0)
69
  if len(RUN_HISTORY_TS) >= MAX_RUNS_PER_MINUTE:
70
  return False, "Rate limit exceeded (demo fairness). Please retry shortly."
71
  RUN_HISTORY_TS.append(now)
72
  return True, "ok"
73
 
74
- # ------------------ Profiles & Simulator --------------------
75
  PROFILES = {
76
  "AI / Neural": {"base": (0.86, 0.80), "w": (0.65, 0.35)},
77
  "SpaceX / Aerospace": {"base": (0.84, 0.79), "w": (0.60, 0.40)},
@@ -85,16 +69,14 @@ def _rng(seed: int) -> np.random.RandomState:
85
  def simulate_step(rng: np.random.RandomState, profile: str, sigma: float, noise_dist: str) -> Dict[str, Any]:
86
  base_q, base_z = PROFILES[profile]["base"]
87
  wq, wz = PROFILES[profile]["w"]
88
-
89
  if noise_dist == "uniform":
90
  q_noise = rng.uniform(-sigma, sigma)
91
  z_noise = rng.uniform(-sigma * 0.8, sigma * 0.8)
92
- else: # "gauss"
93
  q_noise = rng.normal(0, sigma)
94
  z_noise = rng.normal(0, sigma * 0.8)
95
-
96
- q = float(np.clip(base_q + wq * q_noise, 0.0, 0.99))
97
- z = float(np.clip(base_z + wz * z_noise, 0.0, 0.99))
98
  variance = abs(q_noise) + abs(z_noise)
99
  if variance > 0.15:
100
  status = "critical"
@@ -102,268 +84,148 @@ def simulate_step(rng: np.random.RandomState, profile: str, sigma: float, noise_
102
  status = "perturbed"
103
  else:
104
  status = "nominal"
105
-
106
- return {"sigma": float(round(sigma, 6)), "QΩ": q, "ζ_sync": z, "status": status}
107
 
108
  # ------------------ Schedules -------------------------------
109
  def build_sigma_series(schedule_type: str, params: Dict[str, Any]) -> List[float]:
110
- # robust defaults
111
  if schedule_type == "single":
112
- sigma = float(params.get("sigma", 0.05))
113
- return [sigma]
114
-
115
  elif schedule_type == "ramp":
116
- start = float(params.get("start", 0.0))
117
- stop = float(params.get("stop", 0.3))
118
- steps = int(params.get("steps", 10))
119
- steps = max(1, steps)
120
- return list(np.linspace(start, stop, steps))
121
-
122
  elif schedule_type == "random":
123
- smin = float(params.get("min", 0.0))
124
- smax = float(params.get("max", 0.3))
125
- steps = int(params.get("steps", 10))
126
- seed = int(params.get("seed", 0))
127
- r = random.Random(seed)
128
- return [r.uniform(smin, smax) for _ in range(max(1, steps))]
129
-
130
  elif schedule_type == "impulse":
131
- base = float(params.get("base", 0.05))
132
- spike = float(params.get("spike", 0.25))
133
- at = int(params.get("at", 5))
134
- steps = int(params.get("steps", 10))
135
- series = [base] * max(1, steps)
136
- if 0 <= at < len(series):
137
- series[at] = spike
138
- return series
139
-
140
  elif schedule_type == "step":
141
- before = float(params.get("before", 0.05))
142
- after = float(params.get("after", 0.20))
143
- at = int(params.get("at", 5))
144
- steps = int(params.get("steps", 10))
145
- series = [before] * max(1, steps)
146
- for i in range(len(series)):
147
- if i >= at:
148
- series[i] = after
149
- return series
150
-
151
- else:
152
- # fallback
153
- return [float(params.get("sigma", 0.05))]
154
-
155
- # ------------------ Integrity / Bundling --------------------
156
- def sha512_hex(s: str) -> str:
157
- return hashlib.sha512(s.encode("utf-8")).hexdigest()
158
-
159
- def make_run_id(seed: int, profile: str) -> str:
160
- # Mix timestamp + seed + profile to generate a short run id
161
- raw = f"{time.time_ns()}::{seed}::{profile}::{random.random()}"
162
- return hashlib.sha256(raw.encode("utf-8")).hexdigest()[:16]
163
-
164
- def write_bundle(run_dir: str, config: Dict[str, Any], results: Dict[str, Any]) -> Tuple[str, str]:
165
- os.makedirs(run_dir, exist_ok=True)
166
- run_json_path = os.path.join(run_dir, "run.json")
167
- with open(run_json_path, "w") as f:
168
- json.dump({"config": config, "results": results}, f, indent=2)
169
-
170
- # SHA-512 over canonical (sorted) JSON string
171
- canonical = json.dumps({"config": config, "results": results}, sort_keys=True)
172
- digest = sha512_hex(canonical)
173
- sha_path = os.path.join(run_dir, "run.sha512")
174
- with open(sha_path, "w") as f:
175
- f.write(digest + "\n")
176
-
177
- about_path = os.path.join(run_dir, "ABOUT.json")
178
- with open(about_path, "w") as f:
179
- json.dump(ABOUT_BLOCK, f, indent=2)
180
-
181
- notice_path = os.path.join(run_dir, "NOTICE.txt")
182
- with open(notice_path, "w") as f:
183
- f.write(LEGAL_NOTICE + "\n")
184
-
185
- # zip it
186
- zip_path = os.path.join(run_dir, "rft_run_bundle.zip")
187
- with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as z:
188
- z.write(run_json_path, arcname="run.json")
189
- z.write(sha_path, arcname="run.sha512")
190
- z.write(about_path, arcname="ABOUT.json")
191
- z.write(notice_path, arcname="NOTICE.txt")
192
-
193
- return zip_path, digest
194
-
195
- # ------------------ Core Runner -----------------------------
196
- def run_total_proof(
197
- profile: str,
198
- noise_dist: str,
199
- schedule_type: str,
200
- schedule_params_text: str,
201
- seed: int,
202
- samples: int,
203
- ):
204
- ok, msg = _rate_limit_ok()
205
- if not ok:
206
- return {"error": msg, "rft_notice": LEGAL_NOTICE}, None
207
-
208
- # parse schedule params
209
  try:
210
- params = json.loads(schedule_params_text) if schedule_params_text.strip() else {}
211
- if not isinstance(params, dict):
212
- raise ValueError("schedule_params must be a JSON object.")
213
  except Exception as e:
214
- return {"error": f"Invalid schedule_params JSON: {e}", "rft_notice": LEGAL_NOTICE}, None
215
-
216
- # build sigma series
217
- sigma_series = build_sigma_series(schedule_type, params)
218
-
219
- # deterministic RNG per step (derived from master seed + index)
220
- rng_master = _rng(seed)
221
- results_steps = []
222
- trigger_count = 0
223
-
224
- for i, sigma in enumerate(sigma_series):
225
- # average over 'samples' repeats for this step
226
- qs, zs, statuses = [], [], []
227
- # derive a sub-seed for determinism but step-unique noise
228
- sub_seed = int(rng_master.randint(0, 2**31 - 1))
229
- step_rng = _rng(sub_seed)
230
- for _ in range(max(1, samples)):
231
- out = simulate_step(step_rng, profile, float(sigma), noise_dist)
232
- qs.append(out[""]); zs.append(out["ζ_sync"]); statuses.append(out["status"])
233
-
234
- q_mean = float(np.mean(qs))
235
- z_mean = float(np.mean(zs))
236
- # majority status (ties → worst)
237
- status_counts = {"nominal": 0, "perturbed": 0, "critical": 0}
238
- for s in statuses:
239
- status_counts[s] += 1
240
- # order critical > perturbed > nominal
241
- majority = max(status_counts.items(), key=lambda kv: (kv[1], ["nominal","perturbed","critical"].index(kv[0])))
242
- status_final = majority[0]
243
- if status_final != "nominal":
244
- trigger_count += 1
245
-
246
- results_steps.append({
247
- "index": i,
248
- "sigma": float(round(float(sigma), 6)),
249
- "QΩ_mean": float(round(q_mean, 6)),
250
- "ζ_sync_mean": float(round(z_mean, 6)),
251
- "status_majority": status_final,
252
- "samples": int(max(1, samples))
253
- })
254
-
255
- # summary
256
- statuses = [s["status_majority"] for s in results_steps]
257
- summary = {
258
- "steps": len(results_steps),
259
- "nominal_count": int(sum(1 for s in statuses if s == "nominal")),
260
- "perturbed_count": int(sum(1 for s in statuses if s == "perturbed")),
261
- "critical_count": int(sum(1 for s in statuses if s == "critical")),
262
- "triggers_non_nominal": int(trigger_count)
263
- }
264
-
265
- # build config and results blocks
266
- run_id = make_run_id(seed, profile)
267
- config = {
268
- "run_id": run_id,
269
- "timestamp_utc": datetime.utcnow().isoformat() + "Z",
270
- "profile": profile,
271
- "noise_distribution": noise_dist,
272
- "schedule_type": schedule_type,
273
- "schedule_params": params,
274
- "seed": int(seed),
275
- "samples_per_step": int(max(1, samples)),
276
- "about_version": RFT_VERSION,
277
- "about_doi": RFT_DOI,
278
- }
279
- results = {
280
- "series": results_steps,
281
- "summary": summary,
282
- "rft_notice": LEGAL_NOTICE
283
- }
284
-
285
- # write bundle
286
- run_dir = f"/tmp/rft_run_{run_id}"
287
- zip_path, digest = write_bundle(run_dir, config, results)
288
-
289
- # minimal top-level response
290
- head = {
291
- "run_id": run_id,
292
- "sha512": digest,
293
- "sha512_short": digest[:16] + "…",
294
- "steps": summary["steps"],
295
- "counts": {
296
- "nominal": summary["nominal_count"],
297
- "perturbed": summary["perturbed_count"],
298
- "critical": summary["critical_count"],
299
- },
300
- "rft_notice": LEGAL_NOTICE
301
- }
302
-
303
- return {"config": config, "head": head, "results": results}, zip_path
304
 
305
  # ------------------ Gradio UI -------------------------------
306
- DEFAULT_PARAMS = json.dumps({
307
- "sigma": 0.05 # valid for "single"
308
- }, indent=2)
309
-
310
- HELP_TEXT = (
311
- "Schedule JSON examples:\n"
312
- "- single: {\"sigma\": 0.05}\n"
313
- "- ramp: {\"start\": 0.0, \"stop\": 0.30, \"steps\": 12}\n"
314
- "- random: {\"min\": 0.01, \"max\": 0.30, \"steps\": 10, \"seed\": 0}\n"
315
- "- impulse:{\"base\": 0.05, \"spike\": 0.25, \"at\": 5, \"steps\": 12}\n"
316
- "- step: {\"before\": 0.05, \"after\": 0.20, \"at\": 6, \"steps\": 12}\n"
317
- )
318
 
319
  with gr.Blocks(title="RFT-Ω Total-Proof API") as demo:
320
  gr.Markdown(f"### RFT-Ω Total-Proof API ({RFT_VERSION}) \n"
321
- f"A deterministic, signed validation harness for harmonic stability (QΩ) and coherence (ζ_sync). \n"
322
- f"**DOI:** {RFT_DOI} \n"
323
- f"**Legal:** {LEGAL_NOTICE}")
324
-
325
  with gr.Row():
326
- profile = gr.Radio(list(PROFILES.keys()), label="System Profile", value="AI / Neural")
327
- noise_dist = gr.Radio(["gauss", "uniform"], label="Noise Distribution", value="gauss")
328
-
329
  with gr.Row():
330
- schedule_type = gr.Radio(["single", "ramp", "random", "impulse", "step"], label="Schedule Type", value="single")
331
- seed_in = gr.Number(value=123, precision=0, label="Seed (int)")
332
- samples_in = gr.Slider(1, 20, value=5, step=1, label="Samples per Step")
333
-
334
- schedule_params = gr.Code(value=DEFAULT_PARAMS, language="json", label="Schedule Parameters (JSON)")
335
  gr.Markdown(HELP_TEXT)
336
-
337
- run_btn = gr.Button("Run Deterministic Simulation & Sign Results")
338
- out_json = gr.JSON(label="Signed Run Summary (config, head, results)")
339
- out_bundle = gr.File(label="Download Signed Bundle (zip)")
340
-
341
- def _on_run(p, nd, st, sp_json, seed, samples):
342
- data, zip_path = run_total_proof(p, nd, st, sp_json, int(seed), int(samples))
343
- return data, zip_path
344
-
345
- run_btn.click(_on_run, inputs=[profile, noise_dist, schedule_type, schedule_params, seed_in, samples_in],
346
- outputs=[out_json, out_bundle])
347
-
348
- gr.Markdown("**Ops endpoints:** `/healthz`, `/about` (FastAPI mounted if available).")
349
-
350
- # ------------------ FastAPI Mount (optional) ----------------
351
  if HAVE_FASTAPI:
352
- api = FastAPI(title="RFT-Ω Total-Proof Ops")
353
  @api.get("/healthz")
354
- def healthz():
355
- return {"ok": True, "service": "RFT-Ω Total-Proof", "version": RFT_VERSION}
356
-
357
  @api.get("/about")
358
- def about():
359
- return ABOUT_BLOCK
360
-
361
- app = gr.mount_gradio_app(api, demo, path="/")
362
  else:
363
- # Gradio app only
364
- app = demo
365
-
366
- # ------------------ Main -----------------------------------
367
- if __name__ == "__main__":
368
- # Gradio will handle launch in HF; local dev:
369
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
1
  # ============================================================
2
+ # RFT-Ω FRAMEWORK — TOTAL-PROOF API (Sprint 1, stable UI)
3
  # Author: Liam Grinstead (RFT Systems) | All Rights Reserved
4
  # ============================================================
5
  """
6
+ Deterministic, signed validation harness for the
7
  Rendered Frame Theory (RFT-Ω) harmonic stability kernel.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  """
9
 
10
  import os, io, json, time, math, hashlib, zipfile, random
 
14
  import numpy as np
15
  import gradio as gr
16
 
 
17
  try:
18
  from fastapi import FastAPI
19
  HAVE_FASTAPI = True
20
  except Exception:
21
  HAVE_FASTAPI = False
22
 
23
+ # ------------------ About / Legal ---------------------------
24
+ RFT_VERSION = "v4.0-total-proof-stable"
25
  RFT_DOI = "https://doi.org/10.5281/zenodo.17466722"
26
  HF_URL = "https://rftsystems-rft-omega-api.hf.space"
27
  LEGAL_NOTICE = (
 
42
  "legal": LEGAL_NOTICE,
43
  }
44
 
45
+ # ------------------ Rate limit ------------------------------
46
+ RUN_HISTORY_TS: List[float] = []
47
+ MAX_RUNS_PER_MINUTE = 60
48
 
49
  def _rate_limit_ok() -> Tuple[bool, str]:
50
  now = time.time()
51
+ while RUN_HISTORY_TS and now - RUN_HISTORY_TS[0] > 60:
 
52
  RUN_HISTORY_TS.pop(0)
53
  if len(RUN_HISTORY_TS) >= MAX_RUNS_PER_MINUTE:
54
  return False, "Rate limit exceeded (demo fairness). Please retry shortly."
55
  RUN_HISTORY_TS.append(now)
56
  return True, "ok"
57
 
58
+ # ------------------ Profiles / Simulation -------------------
59
  PROFILES = {
60
  "AI / Neural": {"base": (0.86, 0.80), "w": (0.65, 0.35)},
61
  "SpaceX / Aerospace": {"base": (0.84, 0.79), "w": (0.60, 0.40)},
 
69
  def simulate_step(rng: np.random.RandomState, profile: str, sigma: float, noise_dist: str) -> Dict[str, Any]:
70
  base_q, base_z = PROFILES[profile]["base"]
71
  wq, wz = PROFILES[profile]["w"]
 
72
  if noise_dist == "uniform":
73
  q_noise = rng.uniform(-sigma, sigma)
74
  z_noise = rng.uniform(-sigma * 0.8, sigma * 0.8)
75
+ else:
76
  q_noise = rng.normal(0, sigma)
77
  z_noise = rng.normal(0, sigma * 0.8)
78
+ q = float(np.clip(base_q + wq*q_noise, 0.0, 0.99))
79
+ z = float(np.clip(base_z + wz*z_noise, 0.0, 0.99))
 
80
  variance = abs(q_noise) + abs(z_noise)
81
  if variance > 0.15:
82
  status = "critical"
 
84
  status = "perturbed"
85
  else:
86
  status = "nominal"
87
+ return {"sigma": float(round(sigma,6)), "QΩ": q, "ζ_sync": z, "status": status}
 
88
 
89
# ------------------ Schedules -------------------------------
def build_sigma_series(schedule_type: str, params: Dict[str, Any]) -> List[float]:
    """Build the per-step noise amplitude (sigma) series for a run.

    schedule_type:
        "single"  -> one step at params["sigma"] (default 0.05)
        "ramp"    -> linear ramp from params["start"] to params["stop"]
                     over params["steps"] steps
        "random"  -> params["steps"] uniform draws in [params["min"],
                     params["max"]], seeded by params["seed"] for
                     deterministic replay
        "impulse" -> constant params["base"] with one params["spike"] at
                     index params["at"] (spike skipped if "at" is out of range)
        "step"    -> params["before"] until index params["at"], then
                     params["after"]
    Unknown schedule types fall back to a single step at params["sigma"].

    Step counts are clamped to >= 1 so the returned series is never empty
    (restores the guarantee the original harness documented).
    """
    if schedule_type == "single":
        return [float(params.get("sigma", 0.05))]

    if schedule_type == "ramp":
        steps = max(1, int(params.get("steps", 10)))
        start = float(params.get("start", 0.0))
        stop = float(params.get("stop", 0.3))
        # Plain floats (not np.float64) keep the series JSON-friendly.
        return [float(v) for v in np.linspace(start, stop, steps)]

    if schedule_type == "random":
        steps = max(1, int(params.get("steps", 10)))
        r = random.Random(int(params.get("seed", 0)))  # seeded -> replayable
        lo = float(params.get("min", 0.0))
        hi = float(params.get("max", 0.3))
        return [r.uniform(lo, hi) for _ in range(steps)]

    if schedule_type == "impulse":
        steps = max(1, int(params.get("steps", 10)))
        series = [float(params.get("base", 0.05))] * steps
        at = int(params.get("at", 5))
        if 0 <= at < steps:
            series[at] = float(params.get("spike", 0.25))
        return series

    if schedule_type == "step":
        steps = max(1, int(params.get("steps", 10)))
        at = int(params.get("at", 5))
        before = float(params.get("before", 0.05))
        after = float(params.get("after", 0.2))
        return [after if i >= at else before for i in range(steps)]

    # Unknown schedule type: degrade gracefully to a single-step run.
    return [float(params.get("sigma", 0.05))]
114
+
115
# ------------------ Integrity helpers -----------------------
def sha512_hex(s: str) -> str:
    """Return the SHA-512 digest of *s* (UTF-8 encoded) as a lowercase hex string."""
    return hashlib.sha512(s.encode("utf-8")).hexdigest()
117
def make_run_id(seed: int, profile: str) -> str:
    """Derive a short run identifier (16 hex characters).

    Mixes wall-clock nanoseconds, the user seed, the profile name and a
    random draw, then takes the SHA-256 prefix — effectively unique per run.
    """
    raw = f"{time.time_ns()}::{seed}::{profile}::{random.random()}"
    return hashlib.sha256(raw.encode("utf-8")).hexdigest()[:16]
119
+
120
def write_bundle(run_dir: str, config: Dict[str, Any], results: Dict[str, Any]) -> Tuple[str, str]:
    """Write the signed run bundle into *run_dir* and zip it.

    Produces: run.json (config + results), run.sha512 (SHA-512 over the
    canonical sorted-key JSON of the same payload), ABOUT.json, NOTICE.txt,
    and rft_run_bundle.zip containing all four files.

    Returns (zip_path, sha512_hex_digest).
    """
    os.makedirs(run_dir, exist_ok=True)
    payload = {"config": config, "results": results}

    with open(os.path.join(run_dir, "run.json"), "w") as f:
        json.dump(payload, f, indent=2)

    # Sign the canonical (sorted-key) JSON so the digest is key-order independent.
    digest = sha512_hex(json.dumps(payload, sort_keys=True))
    with open(os.path.join(run_dir, "run.sha512"), "w") as f:
        f.write(digest + "\n")

    with open(os.path.join(run_dir, "ABOUT.json"), "w") as f:
        json.dump(ABOUT_BLOCK, f, indent=2)
    with open(os.path.join(run_dir, "NOTICE.txt"), "w") as f:
        f.write(LEGAL_NOTICE + "\n")

    zip_path = os.path.join(run_dir, "rft_run_bundle.zip")
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as z:
        for name in ("run.json", "run.sha512", "ABOUT.json", "NOTICE.txt"):
            z.write(os.path.join(run_dir, name), arcname=name)
    return zip_path, digest
134
+
135
# ------------------ Core runner -----------------------------
def run_total_proof(profile, noise_dist, schedule_type, schedule_params_text, seed, samples):
    """Run a deterministic, signed simulation sweep.

    Args:
        profile: key into PROFILES.
        noise_dist: "gauss" or "uniform".
        schedule_type: one of the build_sigma_series schedule names.
        schedule_params_text: JSON object text with schedule parameters
            (empty/whitespace -> all defaults).
        seed: master RNG seed for deterministic replay.
        samples: repeats per sigma step (clamped to >= 1).

    Returns:
        (payload_dict, zip_path). On rate-limit or bad input the payload
        carries an "error" key and zip_path is None.
    """
    from datetime import timezone  # local import: module-level import brings in `datetime` only

    ok, msg = _rate_limit_ok()
    if not ok:
        return {"error": msg, "rft_notice": LEGAL_NOTICE}, None

    # Parse schedule parameters; reject anything that isn't a JSON object.
    try:
        params = json.loads(schedule_params_text) if schedule_params_text.strip() else {}
        if not isinstance(params, dict):
            # Message restored so the user-facing error explains the problem.
            raise ValueError("schedule_params must be a JSON object.")
    except Exception as e:
        return {"error": f"Invalid schedule_params JSON: {e}", "rft_notice": LEGAL_NOTICE}, None

    # Clamp once so the recorded and executed sample counts agree.
    samples = max(1, int(samples))
    sigma_series = build_sigma_series(schedule_type, params)

    # Master RNG yields per-step sub-seeds: deterministic overall, unique per step.
    rng_master = _rng(seed)
    results_steps = []
    trigger_count = 0

    for i, sigma in enumerate(sigma_series):
        qs, zs, statuses = [], [], []
        sub_seed = int(rng_master.randint(0, 2**31 - 1))
        step_rng = _rng(sub_seed)
        for _ in range(samples):
            out = simulate_step(step_rng, profile, float(sigma), noise_dist)
            qs.append(out["QΩ"])
            zs.append(out["ζ_sync"])
            statuses.append(out["status"])

        q_mean = float(np.mean(qs))
        z_mean = float(np.mean(zs))

        # Majority status; ties resolve to the worst (critical > perturbed > nominal).
        counts = {"nominal": 0, "perturbed": 0, "critical": 0}
        for s in statuses:
            counts[s] += 1
        severity = ["nominal", "perturbed", "critical"]
        majority = max(counts.items(), key=lambda kv: (kv[1], severity.index(kv[0])))[0]
        if majority != "nominal":
            trigger_count += 1

        results_steps.append({
            "index": i,
            "sigma": round(float(sigma), 6),
            "QΩ_mean": round(q_mean, 6),
            "ζ_sync_mean": round(z_mean, 6),
            "status_majority": majority,
            "samples": samples,
        })

    step_statuses = [r["status_majority"] for r in results_steps]
    summary = {
        "steps": len(results_steps),
        "nominal_count": step_statuses.count("nominal"),
        "perturbed_count": step_statuses.count("perturbed"),
        "critical_count": step_statuses.count("critical"),
        "triggers_non_nominal": trigger_count,
    }

    run_id = make_run_id(seed, profile)
    config = {
        "run_id": run_id,
        # Timezone-aware replacement for deprecated datetime.utcnow();
        # produces the same "YYYY-MM-DDTHH:MM:SS.ffffffZ" string as before.
        "timestamp_utc": datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z",
        "profile": profile,
        "noise_distribution": noise_dist,
        "schedule_type": schedule_type,
        "schedule_params": params,
        "seed": int(seed),
        "samples_per_step": samples,
        "about_version": RFT_VERSION,
        "about_doi": RFT_DOI,
    }
    results = {"series": results_steps, "summary": summary, "rft_notice": LEGAL_NOTICE}

    run_dir = f"/tmp/rft_run_{run_id}"
    zip_path, digest = write_bundle(run_dir, config, results)

    head = {
        "run_id": run_id,
        "sha512": digest,
        "sha512_short": digest[:16] + "…",
        "steps": summary["steps"],
        "counts": {
            "nominal": summary["nominal_count"],
            "perturbed": summary["perturbed_count"],
            "critical": summary["critical_count"],
        },
        "rft_notice": LEGAL_NOTICE,
    }
    return {"config": config, "head": head, "results": results}, zip_path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
 
185
# ------------------ Gradio UI -------------------------------
# Default schedule-parameter JSON shown in the editor ("single" schedule).
DEFAULT_PARAMS = json.dumps({"sigma": 0.05}, indent=2)

# Quick-reference examples rendered under the schedule JSON editor.
HELP_TEXT = "\n".join([
    "Schedule JSON examples:",
    '- single: {"sigma": 0.05}',
    '- ramp: {"start": 0.0, "stop": 0.3, "steps": 12}',
    '- random: {"min": 0.01, "max": 0.3, "steps": 10, "seed": 0}',
    '- impulse: {"base":0.05,"spike":0.25,"at":5,"steps":12}',
    '- step: {"before":0.05,"after":0.2,"at":6,"steps":12}',
]) + "\n"
 
 
 
 
 
193
 
194
# UI definition: component creation order fixes the on-page layout, so the
# statement order below is load-bearing.
with gr.Blocks(title="RFT-Ω Total-Proof API") as demo:
    # Header: version, DOI and legal notice.
    gr.Markdown(f"### RFT-Ω Total-Proof API ({RFT_VERSION}) \n"
                f"Stable display mode — deterministic, signed validation harness. \n"
                f"**DOI:** {RFT_DOI} \n**Legal:** {LEGAL_NOTICE}")
    # Row 1: system profile and noise model.
    with gr.Row():
        profile=gr.Radio(list(PROFILES.keys()),label="System Profile",value="AI / Neural")
        noise_dist=gr.Radio(["gauss","uniform"],label="Noise Distribution",value="gauss")
    # Row 2: schedule shape, seed and per-step sample count.
    with gr.Row():
        schedule_type=gr.Radio(["single","ramp","random","impulse","step"],label="Schedule Type",value="single")
        seed_in=gr.Number(value=123,precision=0,label="Seed (int)")
        samples_in=gr.Slider(1,20,value=5,step=1,label="Samples per Step")
    # Free-form JSON editor for the schedule parameters, plus examples.
    schedule_params=gr.Code(value=DEFAULT_PARAMS,language="json",label="Schedule Parameters (JSON)")
    gr.Markdown(HELP_TEXT)
    run_btn=gr.Button("Run Deterministic Simulation & Sign Results")
    out_json=gr.JSON(label="Signed Run Summary")
    out_bundle=gr.File(label="Download Signed Bundle (zip)")
    # Thin adapter: coerce seed/samples to int before calling the runner,
    # which returns (payload_dict, zip_path) mapped onto the two outputs.
    def _on_run(p,nd,st,sp_json,seed,samples): return run_total_proof(p,nd,st,sp_json,int(seed),int(samples))
    run_btn.click(_on_run,inputs=[profile,noise_dist,schedule_type,schedule_params,seed_in,samples_in],
                  outputs=[out_json,out_bundle])
    gr.Markdown("Ops endpoints: `/healthz`, `/about` (FastAPI mounted if available).")
214
+
215
# ------------------ FastAPI Mount ---------------------------
# When FastAPI is importable (HAVE_FASTAPI set during imports), wrap the
# Gradio app in a FastAPI shell so lightweight ops endpoints live next to
# the UI; otherwise `app` is the bare Gradio Blocks object.
if HAVE_FASTAPI:
    api=FastAPI(title="RFT-Ω Total-Proof Ops")
    # Liveness probe for deploy/monitoring integration.
    @api.get("/healthz")
    def healthz(): return {"ok":True,"service":"RFT-Ω Total-Proof","version":RFT_VERSION}
    # Static metadata (version, DOI, legal) for programmatic consumers.
    @api.get("/about")
    def about(): return ABOUT_BLOCK
    # Gradio UI is served at the root path of the FastAPI app.
    app=gr.mount_gradio_app(api,demo,path="/")
else:
    app=demo
225
+
226
# ------------------ Launch (stable queue) -------------------
# Local/dev entry point; on hosted Spaces the `app` object above is served.
if __name__=="__main__":
    # NOTE(review): queue(concurrency_count=...) is the Gradio 3.x signature;
    # Gradio 4.x removed this parameter (use default_concurrency_limit
    # instead) — confirm the pinned gradio version before upgrading.
    demo.queue(concurrency_count=1,max_size=5).launch(server_name="0.0.0.0",
                                                      server_port=7860,
                                                      show_error=True,
                                                      debug=False)