RFTSystems committed on
Commit
b684a6d
·
verified ·
1 Parent(s): 295b4c9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +119 -304
app.py CHANGED
@@ -1,353 +1,168 @@
1
- # app.py — Coherent_Compute_Engine (RFTSystems)
2
- # Live, on-machine benchmarking + receipt download (SHA-256)
3
- # - Clarifies units: "B items/s" (billions of items per second) + raw items/s
4
- # - Baselines are optional + clearly "context only"
5
- # - Adds a Trust KPI badge: receipt SHA-256 generated from THIS run
6
-
7
- import os
8
- import json
9
  import time
10
- import math
11
  import hashlib
12
  import platform
13
- import datetime as dt
 
14
 
15
  import numpy as np
16
  import gradio as gr
17
 
18
- # ----------------------------
19
- # Optional Numba acceleration
20
- # ----------------------------
21
- NUMBA_OK = False
22
- try:
23
- import numba as nb
24
- NUMBA_OK = True
25
- except Exception:
26
- nb = None
27
- NUMBA_OK = False
28
-
29
  APP_TITLE = "Coherent Compute Engine"
30
- OUT_DIR = "/tmp/receipts"
31
- os.makedirs(OUT_DIR, exist_ok=True)
32
-
33
- # ----------------------------
34
- # "Item" definition
35
- # ----------------------------
36
- # One coherent state update of [Ψ, E, L] per oscillator per step.
37
- # items = n_oscillators * steps
38
-
39
- # ----------------------------
40
- # Core update rules (safe + stable)
41
- # ----------------------------
42
- def _np_step(Psi, E, L):
43
- # Branchless, numerically tame, vectorised.
44
  phase = 0.997 * Psi + 0.003 * E
45
  drive = np.tanh(phase)
46
- Psi_n = 0.999 * Psi + 0.001 * drive
47
- E_n = 0.995 * E + 0.004 * Psi_n
48
- L_n = 0.998 * L + 0.001 * (Psi_n * E_n)
49
- return Psi_n, E_n, L_n
50
-
51
-
52
- if NUMBA_OK:
53
- @nb.njit(fastmath=True, parallel=True)
54
- def _nb_step(Psi, E, L):
55
- # same math as numpy path, but parallel
56
- n = Psi.shape[0]
57
- for i in nb.prange(n):
58
- phase = 0.997 * Psi[i] + 0.003 * E[i]
59
- drive = math.tanh(phase)
60
- Psi_n = 0.999 * Psi[i] + 0.001 * drive
61
- E_n = 0.995 * E[i] + 0.004 * Psi_n
62
- L_n = 0.998 * L[i] + 0.001 * (Psi_n * E_n)
63
- Psi[i] = Psi_n
64
- E[i] = E_n
65
- L[i] = L_n
66
-
67
-
68
- # ----------------------------
69
- # Metrics (simple + honest)
70
- # ----------------------------
71
- def coherence_abs_from_final(Psi):
72
- # Coherence proxy: |corr(Psi[i], Psi[i+1])|
73
- # (stable, cheap, avoids extra state storage)
74
- if Psi.shape[0] < 3:
75
- return 0.0
76
- a = Psi[:-1]
77
- b = Psi[1:]
78
- num = float(np.dot(a, b)) + 1e-12
79
- den = float(np.linalg.norm(a) * np.linalg.norm(b)) + 1e-12
80
  return float(abs(num / den))
81
 
82
 
83
- def mean_energy(E):
84
- return float(np.mean(np.clip(E, 0.0, 1.5)))
85
-
86
-
87
- # ----------------------------
88
- # Canonical JSON + receipt hashing
89
- # ----------------------------
90
- def canon_json_bytes(obj) -> bytes:
91
- return json.dumps(obj, ensure_ascii=False, sort_keys=True, separators=(",", ":")).encode("utf-8")
92
-
93
-
94
- def sha256_hex(data: bytes) -> str:
95
- return hashlib.sha256(data).hexdigest()
96
-
97
-
98
- def write_receipt(payload: dict) -> str:
99
- # Hash the receipt WITHOUT the hash field, then embed it.
100
- payload_nohash = dict(payload)
101
- payload_nohash.pop("receipt_sha256", None)
102
-
103
- h = sha256_hex(canon_json_bytes(payload_nohash))
104
- payload["receipt_sha256"] = h
105
 
106
- fname = f"receipt_{dt.datetime.utcnow().strftime('%Y-%m-%dT%H-%M-%SZ')}_{h[:10]}.json"
107
- path = os.path.join(OUT_DIR, fname)
108
 
109
- with open(path, "wb") as f:
110
- f.write(canon_json_bytes(payload))
 
 
 
 
111
 
112
- return path, h
 
 
113
 
 
114
 
115
- # ----------------------------
116
- # Baselines (optional, live)
117
- # ----------------------------
118
- def baseline_numpy(Psi, E, L, steps):
119
- t0 = time.perf_counter()
120
  for _ in range(steps):
121
- Psi, E, L = _np_step(Psi, E, L)
122
- t1 = time.perf_counter()
123
- return (t1 - t0)
 
124
 
 
 
125
 
126
- def baseline_python_loop(n, steps, seed=7, cap_items=200_000):
127
- # Safety-capped pure-Python loop so it doesn't stall the Space.
128
- # It measures real work, but only for a small capped subset.
129
  items = n * steps
130
- if items > cap_items:
131
- # reduce n first, then steps, to keep a meaningful kernel shape
132
- scale = cap_items / max(1, items)
133
- n2 = max(256, int(n * scale))
134
- steps2 = max(5, int(steps * 0.25))
135
- else:
136
- n2, steps2 = n, steps
137
-
138
- rng = np.random.default_rng(seed)
139
- Psi = rng.random(n2).astype(np.float32)
140
- E = rng.random(n2).astype(np.float32)
141
- L = rng.random(n2).astype(np.float32)
142
-
143
- t0 = time.perf_counter()
144
- for _ in range(steps2):
145
- for i in range(n2):
146
- phase = 0.997 * float(Psi[i]) + 0.003 * float(E[i])
147
- drive = math.tanh(phase)
148
- Psi_n = 0.999 * float(Psi[i]) + 0.001 * drive
149
- E_n = 0.995 * float(E[i]) + 0.004 * Psi_n
150
- L_n = 0.998 * float(L[i]) + 0.001 * (Psi_n * E_n)
151
- Psi[i] = Psi_n
152
- E[i] = E_n
153
- L[i] = L_n
154
- t1 = time.perf_counter()
155
-
156
- # Compute throughput for the *capped* run
157
- elapsed = (t1 - t0)
158
- items_done = int(n2) * int(steps2)
159
- thr = (items_done / max(1e-9, elapsed)) # items/sec (raw)
160
- return elapsed, items_done, thr, (n2, steps2)
161
-
162
-
163
- # ----------------------------
164
- # Main benchmark
165
- # ----------------------------
166
- def run_engine(n_oscillators: int, steps: int, include_baselines: bool):
167
- # Hard safety guards (Spaces can be shared + unpredictable)
168
- n_oscillators = int(max(50_000, min(int(n_oscillators), 25_000_000)))
169
- steps = int(max(25, min(int(steps), 2_000)))
170
-
171
- seed = 7
172
- rng = np.random.default_rng(seed)
173
- Psi = rng.random(n_oscillators, dtype=np.float32)
174
- E = rng.random(n_oscillators, dtype=np.float32)
175
- L = rng.random(n_oscillators, dtype=np.float32)
176
-
177
- engine = "numba" if NUMBA_OK else "numpy"
178
-
179
- # Warmup (Numba compiles on first call; keep it honest but amortise compile)
180
- if NUMBA_OK:
181
- _nb_step(Psi[:10_000].copy(), E[:10_000].copy(), L[:10_000].copy())
182
-
183
- t0 = time.perf_counter()
184
- if NUMBA_OK:
185
- for _ in range(steps):
186
- _nb_step(Psi, E, L)
187
- else:
188
- for _ in range(steps):
189
- Psi, E, L = _np_step(Psi, E, L)
190
- t1 = time.perf_counter()
191
-
192
- elapsed = float(t1 - t0)
193
- items = int(n_oscillators) * int(steps)
194
-
195
- thr_items_per_s = items / max(1e-12, elapsed)
196
- thr_B_items_per_s = thr_items_per_s / 1e9
197
-
198
- coh = coherence_abs_from_final(Psi)
199
- eng = mean_energy(E)
200
-
201
- # Metadata (keep it factual; no guessing)
202
- cpu = platform.processor() or ""
203
- cores = os.cpu_count() or 1
204
- pyver = platform.python_version()
205
- plat = platform.platform()
206
-
207
- results = {
208
- "Throughput (B items/s)": f"{thr_B_items_per_s:.3f}",
209
- "Throughput (items/s)": f"{thr_items_per_s:,.0f}",
210
- "Coherence (|C|)": f"{coh:.5f}",
211
- "Mean Energy": f"{eng:.5f}",
212
  "Elapsed Time (s)": f"{elapsed:.2f}",
213
- "Oscillators": f"{n_oscillators:,}",
214
- "Steps": f"{steps:,}",
215
- "Engine": engine,
216
- "CPU": cpu,
217
- "Cores Available": int(cores),
218
  }
219
 
220
- # Optional baselines (context only)
221
- baseline_info = {}
222
  if include_baselines:
223
- # NumPy baseline measured live (same maths), but smaller copy for fairness on memory
224
- Psi_b = rng.random(n_oscillators, dtype=np.float32)
225
- E_b = rng.random(n_oscillators, dtype=np.float32)
226
- L_b = rng.random(n_oscillators, dtype=np.float32)
227
-
228
- np_elapsed = baseline_numpy(Psi_b, E_b, L_b, steps)
229
- np_thr = items / max(1e-12, np_elapsed)
230
- np_thr_B = np_thr / 1e9
231
-
232
- py_elapsed, py_items, py_thr, (n2, s2) = baseline_python_loop(n_oscillators, steps)
233
-
234
- baseline_info = {
235
- "Baseline: numpy (B items/s)": f"{np_thr_B:.3f}",
236
- "Baseline: numpy Engine": "numpy",
237
- "Baseline: python_loop (B items/s)": f"{(py_thr/1e9):.3f}",
238
- "Baseline: python_loop Engine": "python_loop (safety-capped)",
239
- "Baseline: python_loop items measured": f"{py_items:,} (cap run n={n2:,}, steps={s2:,})",
240
- "Speedup vs python_loop (x)": f"{(thr_items_per_s / max(1e-9, py_thr)):.1f}",
241
- "Speedup vs numpy (x)": f"{(thr_items_per_s / max(1e-9, np_thr)):.2f}",
242
- "Note": "Baselines are for context only; all are measured live on this machine.",
243
- }
244
-
245
- results.update(baseline_info)
246
-
247
- # Receipt payload (full fidelity + reproducible hash)
248
- receipt_payload = {
249
- "app": APP_TITLE,
250
- "timestamp_utc": dt.datetime.utcnow().isoformat() + "Z",
251
- "definition_of_item": "One coherent state update of [Psi, E, L] per oscillator per step",
252
- "inputs": {
253
- "oscillators": n_oscillators,
254
- "steps": steps,
255
- "include_baselines": bool(include_baselines),
256
- "seed": seed,
257
- },
258
- "runtime": {
259
- "engine": engine,
260
- "python": pyver,
261
- "platform": plat,
262
- "cpu": cpu,
263
- "cores_available": int(cores),
264
- "numba_available": bool(NUMBA_OK),
265
- },
266
- "outputs": {
267
- "throughput_items_per_s": float(thr_items_per_s),
268
- "throughput_B_items_per_s": float(thr_B_items_per_s),
269
- "coherence_abs": float(coh),
270
- "mean_energy": float(eng),
271
- "elapsed_s": float(elapsed),
272
- },
273
- "baselines": baseline_info if include_baselines else None,
274
  }
275
 
276
- receipt_path, receipt_sha = write_receipt(receipt_payload)
 
 
277
 
278
- # Trust KPI line + include hash in results
279
- results["Receipt SHA-256 (in file)"] = receipt_sha
 
 
280
 
281
- trust_badge = f"**Receipt verified:** SHA-256 generated from this run → `{receipt_sha}`"
282
 
283
- # Pretty JSON for the UI
284
- pretty = json.dumps(results, indent=2)
285
 
286
- return trust_badge, pretty, receipt_path
287
 
288
-
289
- # ----------------------------
290
  # UI
291
- # ----------------------------
292
- CUSTOM_CSS = """
293
- #app-wrap {max-width: 980px; margin: 0 auto;}
294
- .kpi {padding: 10px 12px; border-radius: 12px; background: rgba(120,120,255,0.08); border: 1px solid rgba(120,120,255,0.18);}
295
- """
296
 
297
- with gr.Blocks(title=APP_TITLE, css=CUSTOM_CSS) as demo:
298
- gr.Markdown(f"# {APP_TITLE}", elem_id="app-wrap")
299
 
300
- gr.Markdown(
301
- "Everything you see below is computed **right now, on this machine**.\n\n"
302
- "**What an “item” is**\n"
303
- "• One coherent state update of `[Ψ, E, L]` per oscillator per step\n\n"
304
- "**What you get**\n"
305
- "Real throughput (items/sec), stability proxy (|C|), energy behaviour\n"
306
- "A downloadable receipt with a SHA-256 hash (verification-first)\n"
307
- "Optional baselines (context only), measured live too\n"
 
 
 
 
308
  )
309
 
310
- with gr.Row():
311
- n_slider = gr.Slider(
312
- minimum=50_000,
313
- maximum=25_000_000,
314
- value=6_400_000,
315
- step=50_000,
316
- label="Number of Oscillators",
317
- )
318
- s_slider = gr.Slider(
319
- minimum=25,
320
- maximum=2000,
321
- value=650,
322
- step=1,
323
- label="Simulation Steps",
324
- )
325
-
326
- include_baselines = gr.Checkbox(
327
- value=True,
328
  label="Include baselines (context only)",
329
- info="Baselines are measured live too. Python loop is safety-capped.",
330
  )
331
 
332
- run_btn = gr.Button("Run Engine", variant="primary")
333
 
334
- trust_md = gr.Markdown("", elem_classes=["kpi"])
335
- results_box = gr.Code(label="Results", language="json")
336
  receipt_file = gr.File(label="Receipt (download)")
337
 
338
- gr.Markdown(
339
- "### Notes\n"
340
- "• This runs on the Hugging Face Space runtime machine (not your phone UI).\n"
341
- "• If the Space is under load, throughput will move — that’s real behaviour.\n"
342
- "• **B items/s** means “billions of items per second”, not bytes.\n"
343
- )
344
-
345
  run_btn.click(
346
  fn=run_engine,
347
- inputs=[n_slider, s_slider, include_baselines],
348
- outputs=[trust_md, results_box, receipt_file],
349
  )
350
 
351
- # Queue without version-fragile kwargs (Gradio 6 changed queue args)
352
- demo.queue()
353
  demo.launch()
 
 
 
 
 
 
 
 
 
1
import hashlib
import json
import os
import platform
import time
from datetime import datetime, timezone

import numpy as np
import gradio as gr
10
 
 
 
 
 
 
 
 
 
 
 
 
11
  APP_TITLE = "Coherent Compute Engine"
12
+
13
# -----------------------------
# Core coherent update (NumPy)
# -----------------------------
def coherent_step(Psi, E, L):
    """Advance the coherent state [Psi, E, L] by one vectorised step.

    One call updates every oscillator once; the engine counts each such
    per-oscillator update as one "item".
    """
    drive = np.tanh(0.997 * Psi + 0.003 * E)
    Psi_next = 0.999 * Psi + 0.001 * drive
    E_next = 0.995 * E + 0.004 * Psi_next
    L_next = 0.998 * L + 0.001 * (Psi_next * E_next)
    return Psi_next, E_next, L_next
23
+
24
+
25
# -----------------------------
# Metrics
# -----------------------------
def compute_coherence(a, b):
    """Return the cosine-similarity magnitude between vectors *a* and *b*.

    A small epsilon keeps the denominator non-zero for degenerate
    (all-zero) input, so the result is always finite.
    """
    dot = np.dot(a, b)
    scale = np.linalg.norm(a) * np.linalg.norm(b) + 1e-9
    return float(abs(dot / scale))
32
 
33
 
34
# -----------------------------
# Optional baseline (context only)
# -----------------------------
def python_loop_baseline(n, steps, cap=50_000):
    """Time a pure-Python update loop and return throughput in B items/s.

    The loop size is capped at *cap* oscillators so a shared host is
    never stalled; the figure is context only, not a like-for-like run.

    Args:
        n: requested oscillator count (clamped to *cap*).
        steps: number of update sweeps to time.
        cap: safety cap on the Python-loop size.

    Returns:
        Throughput in billions of items per second (float).
    """
    n_cap = min(n, cap)
    x = [0.5] * n_cap
    # perf_counter is monotonic and high-resolution; time.time() can
    # step (NTP) and may report 0 elapsed for tiny runs on coarse clocks.
    t0 = time.perf_counter()
    for _ in range(steps):
        for i in range(n_cap):
            x[i] = 0.999 * x[i] + 0.001
    elapsed = time.perf_counter() - t0
    items = n_cap * steps
    # Guard the division: a zero clock reading must not crash the app.
    return items / max(elapsed, 1e-9) / 1e9
 
 
 
 
 
 
 
 
 
47
 
 
 
48
 
49
# -----------------------------
# Main engine
# -----------------------------
def run_engine(n_osc, steps, include_baselines):
    """Run the live benchmark, write a hashed receipt, and report results.

    Args:
        n_osc: oscillator count (items = n_osc * steps).
        steps: simulation steps.
        include_baselines: also report the context-only baselines.

    Returns:
        Tuple of (pretty-printed results JSON string, receipt file path).
    """
    n = int(n_osc)
    steps = int(steps)

    Psi = np.random.rand(n).astype(np.float32)
    E = np.random.rand(n).astype(np.float32)
    L = np.random.rand(n).astype(np.float32)

    # Snapshot a prefix so coherence compares start state vs end state.
    Psi_start = Psi[:200_000].copy()

    # perf_counter: monotonic, high-resolution — the right clock for
    # benchmarking (time.time() can step under NTP adjustment).
    t0 = time.perf_counter()
    for _ in range(steps):
        Psi, E, L = coherent_step(Psi, E, L)
    elapsed = time.perf_counter() - t0

    Psi_end = Psi[:200_000].copy()

    coherence = compute_coherence(Psi_start, Psi_end)
    mean_energy = float(np.mean(E))

    items = n * steps
    # Guard the division: a zero clock reading must not crash the run.
    throughput_items = items / max(elapsed, 1e-9)
    throughput_b = throughput_items / 1e9

    result = {
        "Throughput (B items/s)": f"{throughput_b:.3f}",
        "Throughput (items/s)": f"{int(throughput_items):,}",
        "Coherence (|C|)": f"{coherence:.5f}",
        "Mean Energy": f"{mean_energy:.5f}",
        "Elapsed Time (s)": f"{elapsed:.2f}",
        "Oscillators": f"{n:,}",
        "Steps": f"{steps}",
        "Engine": "numpy",
        "CPU": platform.processor(),
        "Cores Available": os.cpu_count(),
    }

    # Context-only baselines
    if include_baselines:
        py_b = python_loop_baseline(n, steps)
        np_b = throughput_b  # same engine, context label

        result.update({
            "Baseline: numpy (B items/s)": f"{np_b:.3f}",
            "Baseline: python_loop (B items/s)": f"{py_b:.3f}",
            "Speedup vs python_loop (x)": f"{(throughput_b / max(py_b, 1e-9)):.1f}",
            "Note": "Baselines are context-only; measured live and safety-capped."
        })

    # Receipt: the SHA-256 is computed over the payload WITHOUT the hash
    # field, then embedded — a verifier can strip "sha256" and recompute.
    # datetime.utcnow() is deprecated; use an aware UTC time and keep the
    # original "...Z" string shape.
    now_utc = datetime.now(timezone.utc)
    receipt = {
        "timestamp_utc": now_utc.replace(tzinfo=None).isoformat() + "Z",
        "results": result,
        "platform": platform.platform(),
        "python": platform.python_version(),
    }

    receipt_json = json.dumps(receipt, indent=2).encode()
    sha = hashlib.sha256(receipt_json).hexdigest()
    receipt["sha256"] = sha

    os.makedirs("receipts", exist_ok=True)
    fname = f"receipts/receipt_{now_utc.strftime('%Y-%m-%dT%H-%M-%S')}.json"
    with open(fname, "wb") as f:
        f.write(json.dumps(receipt, indent=2).encode())

    result["Receipt SHA-256 (in file)"] = sha

    return json.dumps(result, indent=2), fname
 
121
 
 
122
 
123
# -----------------------------
# UI
# -----------------------------
# Gradio front-end: two sliders and a checkbox feed run_engine; outputs
# are the pretty-printed results JSON and the downloadable receipt file.
with gr.Blocks(title=APP_TITLE) as demo:
    gr.Markdown(
        f"""
# {APP_TITLE}

Everything you see below is computed **right now**, on this machine.

**What an “item” is**
One coherent state update of **[Ψ, E, L]** per oscillator per step

**What you get**
Real throughput (items/sec)
Stability proxy (|C|)
Energy behaviour
A downloadable **receipt with SHA-256** (verification-first)
• Optional baselines (**context only**, measured live)

No precomputed results. No estimates. No GPUs required.
"""
    )

    # Benchmark size controls; items = oscillators * steps.
    n_slider = gr.Slider(250_000, 8_000_000, value=6_400_000, step=250_000,
                         label="Number of Oscillators")
    steps_slider = gr.Slider(50, 1_000, value=650, step=25,
                             label="Simulation Steps")

    # Baselines are measured live too, but labelled context-only.
    baseline_toggle = gr.Checkbox(
        label="Include baselines (context only)",
        value=True
    )

    run_btn = gr.Button("Run Engine")

    # run_engine returns (results JSON string, receipt file path),
    # mapped onto these two components in order.
    output = gr.Code(label="Results", language="json")
    receipt_file = gr.File(label="Receipt (download)")

    run_btn.click(
        fn=run_engine,
        inputs=[n_slider, steps_slider, baseline_toggle],
        outputs=[output, receipt_file]
    )

demo.launch()