RFTSystems commited on
Commit
bdc67af
·
verified ·
1 Parent(s): 627112e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +476 -333
app.py CHANGED
@@ -1,13 +1,14 @@
1
  import os
2
  import math
3
  import json
 
4
  import numpy as np
5
  import pandas as pd
6
  import matplotlib.pyplot as plt
7
  import gradio as gr
8
 
9
  # ===============================================================
10
- # Rendered Frame Theory (RFT) — Observer Agent Console (All-in-One Space)
11
  # Author: Liam Grinstead
12
  # Purpose: Transparent, reproducible, benchmarkable agent demos
13
  # Dependencies: numpy, pandas, matplotlib, gradio (NO scipy)
@@ -20,7 +21,9 @@ os.makedirs(OUTDIR, exist_ok=True)
20
  # Shared utilities
21
  # -----------------------------
22
  def set_seed(seed: int):
23
- np.random.seed(int(seed) % (2**32 - 1))
 
 
24
 
25
  def clamp(x, lo, hi):
26
  return max(lo, min(hi, x))
@@ -37,7 +40,7 @@ def df_to_csv_file(df: pd.DataFrame, name: str):
37
  return path
38
 
39
  # -----------------------------
40
- # RFT Core: τ_eff + gating (Observer-style decision timing)
41
  # -----------------------------
42
  def tau_eff_adaptive(
43
  uncertainty: float,
@@ -46,13 +49,6 @@ def tau_eff_adaptive(
46
  gain: float = 1.2,
47
  cap: float = 4.0
48
  ):
49
- """
50
- τ_eff is implemented here as a timing/decision delay modifier.
51
- - base: baseline τ_eff
52
- - slow_by: explicit slow-down term
53
- - gain: reaction strength to uncertainty
54
- - cap: prevents absurd values
55
- """
56
  u = clamp(float(uncertainty), 0.0, 1.0)
57
  tau = base + slow_by + gain * u
58
  return clamp(tau, base, cap)
@@ -61,11 +57,6 @@ def rft_confidence(uncertainty: float):
61
  return clamp(1.0 - float(uncertainty), 0.0, 1.0)
62
 
63
  def rft_gate(conf: float, tau_eff: float, threshold: float):
64
- """
65
- Decision gate (observer-style “commit” trigger):
66
- - higher τ_eff makes the gate stricter
67
- - threshold is the minimum confidence needed
68
- """
69
  conf = float(conf)
70
  tau_eff = float(tau_eff)
71
  effective = threshold + 0.08 * (tau_eff - 1.0)
@@ -164,7 +155,7 @@ def simulate_neo(
164
  ax = fig3.add_subplot(111)
165
  ax.step(df["t"], df["baseline_alert"], where="post")
166
  ax.step(df["t"], df["rft_alert"], where="post")
167
- ax.set_title("NEO: Alerts (Baseline vs Observer-gated RFT)")
168
  ax.set_xlabel("t (step)")
169
  ax.set_ylabel("alert (0/1)")
170
  p_alerts = save_plot(fig3, f"neo_alerts_seed{seed}.png")
@@ -178,7 +169,9 @@ def simulate_neo(
178
  "baseline_alerts": int(alerts_baseline),
179
  "rft_candidates": int(alerts_rft_raw),
180
  "rft_alerts_filtered": int(alerts_rft_filtered),
181
- "false_positive_proxy_reduction_%": float(100.0 * (1.0 - (alerts_rft_filtered / max(alerts_rft_raw, 1)))),
 
 
182
  "ops_proxy": int(ops_proxy),
183
  }
184
 
@@ -260,7 +253,7 @@ def simulate_jitter(
260
  fig1 = plt.figure(figsize=(10, 4))
261
  ax = fig1.add_subplot(111)
262
  ax.plot(df["t"], df["jitter"])
263
- ax.set_title("Jitter: residual vs time (running observer-gated plant)")
264
  ax.set_xlabel("t (step)")
265
  ax.set_ylabel("jitter (arb)")
266
  p_jit = save_plot(fig1, f"jitter_residual_seed{seed}.png")
@@ -269,7 +262,7 @@ def simulate_jitter(
269
  ax = fig2.add_subplot(111)
270
  ax.step(df["t"], df["baseline_active"], where="post")
271
  ax.step(df["t"], df["rft_active"], where="post")
272
- ax.set_title("Jitter: Actuation duty (Baseline vs Observer-gated RFT)")
273
  ax.set_xlabel("t (step)")
274
  ax.set_ylabel("active (0/1)")
275
  p_duty = save_plot(fig2, f"jitter_duty_seed{seed}.png")
@@ -472,216 +465,366 @@ def simulate_landing(
472
 
473
  return summary, [p_alt, p_x, p_w, p_a], csv_path
474
 
475
- # -----------------------------
476
- # Predator Avoidance (2D)
477
- # -----------------------------
478
- def simulate_predator_avoidance(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
479
  seed: int,
 
480
  steps: int,
481
- dt: float,
482
- world_size: float,
483
- predator_speed: float,
484
- agent_speed: float,
485
- catch_radius: float,
486
- sense_noise: float,
487
- rft_k_attract: float,
488
- rft_k_repulse: float,
489
- baseline_k_attract: float,
490
- baseline_k_repulse: float,
491
- gate_threshold: float,
492
- tau_gain: float,
493
- goal_radius: float,
494
- show_baseline: bool
 
495
  ):
496
- """
497
- A simple pursuit/evasion harness:
498
- - Predator chases the agent.
499
- - Agent tries to reach a goal while avoiding capture.
500
- Baseline: always-on potential field (attract goal + repulse predator).
501
- Observer-gated RFT: same control, but can "wait" unless risk is high.
502
- """
503
  set_seed(seed)
504
 
505
- # Initial positions
506
- agent = np.array([-0.55 * world_size, -0.10 * world_size], dtype=float)
507
- predator = np.array([0.35 * world_size, 0.25 * world_size], dtype=float)
508
- goal = np.array([0.60 * world_size, -0.45 * world_size], dtype=float)
509
-
510
- # Helper: normalize vector
511
- def nrm(v):
512
- s = float(np.linalg.norm(v))
513
- if s < 1e-12:
514
- return v * 0.0
515
- return v / s
 
 
 
 
 
 
 
 
 
 
 
 
516
 
517
- # Logging
518
  rows = []
519
  ops_proxy = 0
520
 
521
- caught = False
522
- reached_goal = False
523
- t_end = int(steps)
524
-
525
- # Simple "energy" proxy
526
- energy_baseline = 0.0
527
- energy_rft = 0.0
528
-
529
  for t in range(int(steps)):
530
- # Noisy sensed predator position (agent perception)
531
- pred_meas = predator + np.random.normal(0.0, sense_noise, size=2)
532
-
533
- # Distances
534
- d_pa = float(np.linalg.norm(pred_meas - agent)) # perceived predator-agent
535
- d_true = float(np.linalg.norm(predator - agent)) # true predator-agent
536
- d_ag = float(np.linalg.norm(goal - agent)) # agent-goal
537
-
538
- # Uncertainty proxy:
539
- # - higher when predator is close (risk)
540
- # - higher when sensing is noisy
541
- # - mild term for distance-to-goal (to bias to act near the end)
542
- risk = clamp((catch_radius / max(d_pa, 1e-6)) * 0.55, 0.0, 1.0)
543
- noise_term = clamp((sense_noise / max(world_size, 1e-9)) * 8.0, 0.0, 1.0)
544
- goal_term = clamp(1.0 - (d_ag / max(world_size, 1e-9)), 0.0, 1.0) * 0.15
545
- uncertainty = clamp(risk + noise_term + goal_term, 0.0, 1.0)
546
-
547
- tau = tau_eff_adaptive(uncertainty, base=1.0, slow_by=1.0, gain=tau_gain, cap=4.0)
548
- conf = rft_confidence(uncertainty)
549
-
550
- # Predator moves toward agent (pure pursuit)
551
- predator_dir = nrm(agent - predator)
552
- predator = predator + predator_dir * predator_speed * dt
553
-
554
- # Baseline control (always on)
555
- v_goal_b = nrm(goal - agent) * baseline_k_attract
556
- v_away_b = nrm(agent - pred_meas) * (baseline_k_repulse / max(d_pa, 0.25))
557
- v_base = v_goal_b + v_away_b
558
- v_base = nrm(v_base) * agent_speed
559
-
560
- # RFT (observer-gated) control
561
- # Gate can be overridden if risk is high / predator is close / goal is near.
562
- do_action = rft_gate(conf, tau, gate_threshold)
563
- must_act = (d_pa < (3.0 * catch_radius)) or (d_ag < (2.5 * goal_radius))
564
- do_action = bool(do_action or must_act)
565
-
566
- v_rft = np.array([0.0, 0.0], dtype=float)
567
- if do_action:
568
- phase = clamp(1.0 - (d_ag / max(world_size, 1e-9)), 0.0, 1.0)
569
- lookahead = 1.0 + 1.2 * phase
570
-
571
- v_goal = nrm(goal - agent) * (rft_k_attract * lookahead)
572
- v_away = nrm(agent - pred_meas) * ((rft_k_repulse * lookahead) / max(d_pa, 0.25))
573
- v_rft = v_goal + v_away
574
- v_rft = nrm(v_rft) * agent_speed
575
-
576
- # Choose which agent motion to apply to the plant
577
- if show_baseline:
578
- agent_next = agent + v_base * dt
579
- energy_baseline += float(np.linalg.norm(v_base)) * dt
580
- else:
581
- agent_next = agent + v_rft * dt
582
- energy_rft += float(np.linalg.norm(v_rft)) * dt
583
-
584
- # Clamp to bounds
585
- agent_next[0] = clamp(agent_next[0], -world_size, world_size)
586
- agent_next[1] = clamp(agent_next[1], -world_size, world_size)
587
- agent = agent_next
588
-
589
- # Termination checks
590
- if d_true <= catch_radius:
591
- caught = True
592
- t_end = t + 1
593
- if d_ag <= goal_radius:
594
- reached_goal = True
595
- t_end = t + 1
596
-
597
- ops_proxy += 14
598
 
599
  rows.append({
600
  "t": t,
601
- "agent_x": agent[0],
602
- "agent_y": agent[1],
603
- "pred_x": predator[0],
604
- "pred_y": predator[1],
605
- "goal_x": goal[0],
606
- "goal_y": goal[1],
607
- "d_pred_agent_true": d_true,
608
- "d_pred_agent_meas": d_pa,
609
- "d_agent_goal": d_ag,
610
- "sense_noise": sense_noise,
611
- "uncertainty": uncertainty,
612
- "tau_eff": tau,
613
- "confidence": conf,
614
- "action_taken": int(do_action),
615
- "mode": "baseline" if show_baseline else "observer_gated_rft",
616
- "v_base_x": v_base[0],
617
- "v_base_y": v_base[1],
618
- "v_rft_x": v_rft[0],
619
- "v_rft_y": v_rft[1],
620
- "caught": int(caught),
621
- "reached_goal": int(reached_goal),
622
  })
623
 
624
- if caught or reached_goal:
625
- break
626
-
627
  df = pd.DataFrame(rows)
 
628
 
629
- # Plots
630
- fig1 = plt.figure(figsize=(7.5, 7.0))
631
  ax = fig1.add_subplot(111)
632
- ax.plot(df["agent_x"], df["agent_y"], label="Agent")
633
- ax.plot(df["pred_x"], df["pred_y"], label="Predator")
634
- ax.scatter([df["goal_x"].iloc[0]], [df["goal_y"].iloc[0]], marker="x", s=80, label="Goal")
635
- ax.set_title("Predator Avoidance: Trajectories")
636
- ax.set_xlabel("x")
637
- ax.set_ylabel("y")
638
- ax.legend(loc="best")
639
- p_traj = save_plot(fig1, f"predator_traj_seed{seed}_{'baseline' if show_baseline else 'rft'}.png")
640
 
641
  fig2 = plt.figure(figsize=(10, 4))
642
  ax = fig2.add_subplot(111)
643
- ax.plot(df["t"], df["d_pred_agent_true"], label="True dist (pred→agent)")
644
- ax.plot(df["t"], df["d_agent_goal"], label="Dist (agent→goal)")
645
- ax.axhline(catch_radius, linestyle="--", label="Catch radius")
646
- ax.axhline(goal_radius, linestyle="--", label="Goal radius")
647
- ax.set_title("Predator Avoidance: Distances vs time")
648
  ax.set_xlabel("t (step)")
649
- ax.set_ylabel("distance")
650
- ax.legend(loc="best")
651
- p_dist = save_plot(fig2, f"predator_dist_seed{seed}_{'baseline' if show_baseline else 'rft'}.png")
652
 
653
- fig3 = plt.figure(figsize=(10, 3))
654
  ax = fig3.add_subplot(111)
655
- ax.step(df["t"], df["action_taken"], where="post")
656
- ax.set_title("Predator Avoidance: Action timeline (observer-gated)")
 
 
657
  ax.set_xlabel("t (step)")
658
- ax.set_ylabel("action (0/1)")
659
- p_act = save_plot(fig3, f"predator_action_seed{seed}_{'baseline' if show_baseline else 'rft'}.png")
660
-
661
- csv_path = df_to_csv_file(df, f"predator_log_seed{seed}_{'baseline' if show_baseline else 'rft'}.csv")
662
-
663
- min_sep = float(df["d_pred_agent_true"].min()) if len(df) else 0.0
664
- steps_ran = int(len(df))
665
- success = bool(reached_goal and not caught)
 
 
 
 
 
 
 
666
 
667
  summary = {
668
  "seed": int(seed),
669
- "mode": "baseline" if show_baseline else "observer_gated_rft",
670
- "steps_ran": steps_ran,
671
- "caught": bool(caught),
672
- "reached_goal": bool(reached_goal),
673
- "success": bool(success),
674
- "min_separation_true": float(min_sep),
675
- "final_dist_to_goal": float(df["d_agent_goal"].iloc[-1]) if len(df) else None,
676
- "energy_proxy_baseline": float(energy_baseline),
677
- "energy_proxy_rft": float(energy_rft),
678
  "ops_proxy": int(ops_proxy),
679
  }
680
 
681
- return summary, [p_traj, p_dist, p_act], csv_path
 
 
 
 
682
 
683
  # -----------------------------
684
- # Benchmarks (NEO/Jitter/Landing only to keep UI stable)
685
  # -----------------------------
686
  def run_benchmarks(
687
  seed: int,
@@ -767,42 +910,37 @@ def run_benchmarks(
767
  f"- Landing: final offset={l_sum['final_landing_offset_m']:.2f} m (goal 10 m), anomalies={l_sum['total_anomalies_detected']}, actions={l_sum['total_control_actions']}\n"
768
  )
769
 
770
- all_imgs = neo_imgs + jit_imgs + land_imgs # 3 + 3 + 4 = 10
771
  return txt, score, score_path, all_imgs, [neo_csv, jit_csv, land_csv]
772
 
773
  # -----------------------------
774
  # UI text blocks
775
  # -----------------------------
776
  HOME_MD = """
777
- # RFT — Observer Agent Console
778
-
779
- I built this Space to be transparent, reproducible, and benchmarkable.
780
 
781
- This is **not** a consciousness claim.
782
- When I say “observer” here, I mean a practical decision-timing mechanism: uncertainty → τ_eff → gate → commit or wait.
783
 
784
  Run it. Change parameters. Break it. Compare baseline vs RFT.
785
 
786
- What I’m demonstrating is a simple idea:
787
 
788
  **Decision timing matters.**
789
- RFT treats timing (τ_eff), uncertainty, and action “commit” as first-class controls.
790
 
791
- This Space contains working agent harnesses:
792
- - **NEO alerting** (filter noisy close-approach alerts)
793
- - **Satellite jitter reduction** (reduce actuator duty / chatter while keeping residual low)
794
- - **Starship-style landing harness** (simplified timing-control harness)
795
- - **Predator avoidance** (agent reaches goal while avoiding a pursuing predator)
796
 
797
- Every tab shows what it’s doing, why, and where it wins or loses.
798
-
799
- No SciPy. No hidden dependencies. No model weights. No tricks.
800
  """
801
 
802
  LIVE_MD = """
803
  # Live Console
804
 
805
- This tab is a single place to run everything quickly and export logs.
806
 
807
  - deterministic runs (seeded)
808
  - plots saved
@@ -811,102 +949,80 @@ This tab is a single place to run everything quickly and export logs.
811
  """
812
 
813
  THEORY_PRACTICE_MD = """
814
- # Theory → Practice (how I implement the RFT Observer Agent idea here)
815
-
816
- This Space uses RFT in a practical way:
817
 
818
- ## 1) Uncertainty (explicit)
819
- I compute an uncertainty proxy from noise + disturbance scale.
820
 
821
  ## 2) Confidence
822
- Confidence is the complement: confidence = 1 − uncertainty (clipped 0..1).
823
 
824
  ## 3) Adaptive τ_eff
825
- τ_eff is implemented as a timing/decision strictness modifier:
826
- - higher uncertainty → higher τ_eff
827
- - and yes, I explicitly slow τ_eff by 1.0, because this was the behaviour I wanted to test.
828
 
829
- ## 4) Decision gate
830
- I only apply “decisive actions” when the gate condition passes:
831
- - confidence must exceed a threshold
832
- - τ_eff increases strictness (makes the gate harder under uncertainty)
833
 
834
- ## 5) Why this matters
835
  Baseline controllers often act constantly.
836
- This observer-gated approach tries to act less often, but more decisively, so you waste less energy and trigger fewer junk corrections/alerts.
837
  """
838
 
839
  MATH_MD = r"""
840
  # Mathematics (minimal and implementation-linked)
841
 
842
- ## Variables (used in this Space)
843
- - u ∈ [0,1] : uncertainty proxy (dimensionless)
844
- - C ∈ [0,1] : confidence proxy (dimensionless)
845
- - τ_eff ≥ 1 : effective decision-timing factor (dimensionless)
846
-
847
- ## Definitions
848
 
849
- ### Confidence
850
  \[
851
  C = \text{clip}(1 - u, 0, 1)
852
  \]
853
 
854
- ### Adaptive τ_eff (with “slow by 1.0”)
855
  \[
856
  \tau_{\text{eff}} = \text{clip}(1 + 1.0 + g\cdot u,\; 1,\; \tau_{\max})
857
  \]
858
 
859
- ### Decision gate (concept)
860
- Higher τ_eff makes decisions stricter:
861
  \[
862
  \text{Gate} = \left[C \ge \theta + k(\tau_{\text{eff}}-1)\right]
863
  \]
864
-
865
- That is exactly what I implement here: more uncertainty → higher τ_eff → harder gate → fewer low-confidence actions.
866
  """
867
 
868
  INVESTOR_MD = """
869
- # Investor / Agency Walkthrough (plain language)
870
 
871
- ## What I’m proving inside this Space
872
- I’m demonstrating a decision-timing framework that can be applied to:
873
- - alert filtering (NEO / tracking)
874
  - stabilisation (jitter reduction)
875
- - anomaly-aware control loops (landing harness)
876
- - pursuit/evasion behaviour (predator avoidance)
877
-
878
- This is a runnable harness:
879
- - you can reproduce results with seeds
880
- - you can export logs
881
- - you can compare baseline vs RFT
882
- - you can change thresholds and see behaviour shift
883
 
884
- ## What I’m not claiming
885
- - I’m not claiming certification
886
- - I’m not claiming any company is using this
887
- - I’m not claiming this replaces validation pipelines
888
 
889
- ## What would make it production-grade
890
  - real sensor ingestion + timing constraints
891
  - hardware-in-loop testing
892
- - systematic dataset validation
893
- - integration targets (embedded, REST, batch)
894
  """
895
 
896
  REPRO_MD = """
897
  # Reproducibility & Logs
898
 
899
- Everything here is reproducible:
900
- - set the seed
901
- - run baseline vs RFT with the same seed
902
- - export the CSV
903
- - verify plots and metrics
904
-
905
- CSV schema is explicit in the exports:
906
- - time index
907
- - state values
908
- - uncertainty, confidence, τ_eff
909
- - alerts/actions flags
910
  """
911
 
912
  # -----------------------------
@@ -956,28 +1072,38 @@ def ui_run_landing(seed, steps, dt, wind_max, thrust_noise, kp_base, kp_rft, gat
956
  summary_txt = json.dumps(summary, indent=2)
957
  return summary_txt, imgs[0], imgs[1], imgs[2], imgs[3], csv_path
958
 
959
- def ui_run_predator(seed, steps, dt, world_size, predator_speed, agent_speed, catch_radius, sense_noise,
960
- rft_k_attract, rft_k_repulse, base_k_attract, base_k_repulse, gate_th, tau_gain, goal_radius, show_baseline):
961
- summary, imgs, csv_path = simulate_predator_avoidance(
 
 
 
 
962
  seed=int(seed),
 
963
  steps=int(steps),
964
- dt=float(dt),
965
- world_size=float(world_size),
966
- predator_speed=float(predator_speed),
967
- agent_speed=float(agent_speed),
968
- catch_radius=float(catch_radius),
969
- sense_noise=float(sense_noise),
970
- rft_k_attract=float(rft_k_attract),
971
- rft_k_repulse=float(rft_k_repulse),
972
- baseline_k_attract=float(base_k_attract),
973
- baseline_k_repulse=float(base_k_repulse),
974
- gate_threshold=float(gate_th),
975
- tau_gain=float(tau_gain),
976
- goal_radius=float(goal_radius),
977
- show_baseline=bool(show_baseline),
 
978
  )
979
  summary_txt = json.dumps(summary, indent=2)
980
- return summary_txt, imgs[0], imgs[1], imgs[2], csv_path
 
 
 
 
981
 
982
  def ui_run_bench(seed, neo_steps, neo_dt, neo_alert_km, neo_noise_km, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain):
983
  txt, score_df, score_csv, imgs, logs = run_benchmarks(
@@ -996,7 +1122,7 @@ def ui_run_bench(seed, neo_steps, neo_dt, neo_alert_km, neo_noise_km, jit_steps,
996
  # -----------------------------
997
  # Gradio UI
998
  # -----------------------------
999
- with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / Predator)") as demo:
1000
  gr.Markdown(HOME_MD)
1001
 
1002
  with gr.Tabs():
@@ -1025,7 +1151,7 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / P
1025
  land_wind = gr.Slider(0.0, 25.0, value=15.0, step=0.5, label="Landing wind max (m/s)")
1026
  land_thrust_noise = gr.Slider(0.0, 10.0, value=3.0, step=0.1, label="Landing thrust noise")
1027
 
1028
- run_b = gr.Button("Run Full Benchmarks (Baseline vs Observer-gated RFT)")
1029
 
1030
  bench_txt = gr.Textbox(label="Benchmark summary", lines=6)
1031
  bench_table = gr.Dataframe(label="Scorecard (CSV also exported)")
@@ -1052,14 +1178,18 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / P
1052
  run_b.click(
1053
  ui_run_bench,
1054
  inputs=[seed_live, neo_steps, neo_dt, neo_alert, neo_noise, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain_live],
1055
- outputs=[bench_txt, bench_table, bench_score_csv, img1, img2, img3, img4, img5, img6, img7, img8, img9, img10, neo_log, jit_log, land_log]
 
 
 
 
1056
  )
1057
 
1058
- with gr.Tab("NEO Observer Agent"):
1059
  gr.Markdown(
1060
- "# Near-Earth Object (NEO) Observer Agent\n"
1061
  "Baseline: distance threshold only.\n"
1062
- "Observer-gated RFT: distance threshold + confidence + τ_eff decision gate.\n"
1063
  )
1064
  with gr.Row():
1065
  seed_neo = gr.Number(value=42, precision=0, label="Seed")
@@ -1068,7 +1198,7 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / P
1068
  with gr.Row():
1069
  alert_km = gr.Slider(1000, 20000, value=5000, step=50, label="Alert threshold (km)")
1070
  noise_km = gr.Slider(0.0, 200.0, value=35.0, step=1.0, label="Measurement noise (km)")
1071
- rft_conf_th = gr.Slider(0.1, 0.95, value=0.55, step=0.01, label="Confidence threshold")
1072
  tau_gain = gr.Slider(0.0, 3.0, value=1.2, step=0.05, label="τ_eff gain")
1073
  show_debug = gr.Checkbox(value=False, label="Show debug table (first rows)")
1074
  run_neo = gr.Button("Run NEO Simulation")
@@ -1087,11 +1217,11 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / P
1087
  outputs=[out_neo_summary, out_neo_debug, out_neo_img1, out_neo_img2, out_neo_img3, out_neo_csv]
1088
  )
1089
 
1090
- with gr.Tab("Satellite Jitter Observer Agent"):
1091
  gr.Markdown(
1092
- "# Satellite Jitter Reduction (Observer-gated)\n"
1093
  "Baseline: continuous correction.\n"
1094
- "Observer-gated RFT: gated correction using confidence + τ_eff.\n"
1095
  )
1096
  with gr.Row():
1097
  seed_j = gr.Number(value=42, precision=0, label="Seed")
@@ -1120,7 +1250,7 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / P
1120
 
1121
  with gr.Tab("Starship Landing Harness"):
1122
  gr.Markdown(
1123
- "# Starship-style Landing Harness (Observer-gated decision timing)\n"
1124
  "This is not a flight model. It’s a timing-control harness.\n"
1125
  )
1126
  with gr.Row():
@@ -1154,52 +1284,65 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / P
1154
 
1155
  with gr.Tab("Predator Avoidance"):
1156
  gr.Markdown(
1157
- "# Predator Avoidance (Observer Agent)\n"
1158
- "Agent tries to reach a goal while avoiding a pursuing predator.\n"
1159
- "This is a simple pursuit/evasion harness designed to be inspectable and reproducible.\n"
1160
- "Toggle **Baseline** (always-on control) vs **Observer-gated RFT** (can wait unless risk is high).\n"
1161
  )
 
1162
  with gr.Row():
1163
- seed_p = gr.Number(value=7, precision=0, label="Seed")
1164
- steps_p = gr.Slider(50, 1500, value=450, step=1, label="Steps")
1165
- dt_p = gr.Slider(0.05, 1.0, value=0.20, step=0.01, label="dt")
1166
- with gr.Row():
1167
- world_size = gr.Slider(20.0, 300.0, value=120.0, step=5.0, label="World size (bounds)")
1168
- predator_speed = gr.Slider(0.1, 40.0, value=10.0, step=0.1, label="Predator speed")
1169
- agent_speed = gr.Slider(0.1, 40.0, value=12.0, step=0.1, label="Agent speed")
1170
- with gr.Row():
1171
- catch_radius = gr.Slider(0.5, 30.0, value=6.0, step=0.5, label="Catch radius")
1172
- goal_radius = gr.Slider(0.5, 30.0, value=6.0, step=0.5, label="Goal radius")
1173
- sense_noise = gr.Slider(0.0, 10.0, value=1.2, step=0.1, label="Sensor noise (predator)")
1174
- with gr.Row():
1175
- base_k_att = gr.Slider(0.1, 10.0, value=1.2, step=0.05, label="Baseline attract gain")
1176
- base_k_rep = gr.Slider(0.1, 30.0, value=10.0, step=0.1, label="Baseline repulse gain")
1177
- with gr.Row():
1178
- rft_k_att = gr.Slider(0.1, 10.0, value=1.2, step=0.05, label="RFT attract gain")
1179
- rft_k_rep = gr.Slider(0.1, 30.0, value=10.0, step=0.1, label="RFT repulse gain")
1180
  with gr.Row():
1181
- gate_p = gr.Slider(0.1, 0.95, value=0.55, step=0.01, label="Gate threshold")
1182
- tau_p = gr.Slider(0.0, 3.0, value=1.2, step=0.05, label="τ_eff gain")
1183
- mode_baseline = gr.Checkbox(value=False, label="Run Baseline mode (unchecked = Observer-gated RFT)")
1184
- run_p = gr.Button("Run Predator Avoidance")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1185
 
1186
  out_p_summary = gr.Textbox(label="Summary JSON", lines=12)
1187
  with gr.Row():
1188
- out_p_img1 = gr.Image(label="Trajectories")
1189
- out_p_img2 = gr.Image(label="Distances vs time")
1190
- out_p_img3 = gr.Image(label="Action timeline")
 
 
1191
  out_p_csv = gr.File(label="Download Predator CSV log")
1192
 
1193
  run_p.click(
1194
  ui_run_predator,
1195
- inputs=[seed_p, steps_p, dt_p, world_size, predator_speed, agent_speed, catch_radius, sense_noise,
1196
- rft_k_att, rft_k_rep, base_k_att, base_k_rep, gate_p, tau_p, goal_radius, mode_baseline],
1197
- outputs=[out_p_summary, out_p_img1, out_p_img2, out_p_img3, out_p_csv]
 
 
 
 
1198
  )
1199
 
1200
- with gr.Tab("Benchmarks"):
1201
- gr.Markdown("# Benchmarks\nRun full packs from the Live Console tab.\nEverything is seeded, logged, and exportable.\n")
1202
-
1203
  with gr.Tab("Theory → Practice"):
1204
  gr.Markdown(THEORY_PRACTICE_MD)
1205
 
 
1
  import os
2
  import math
3
  import json
4
+ import random
5
  import numpy as np
6
  import pandas as pd
7
  import matplotlib.pyplot as plt
8
  import gradio as gr
9
 
10
  # ===============================================================
11
+ # Rendered Frame Theory (RFT) — Agent Console (All-in-One Space)
12
  # Author: Liam Grinstead
13
  # Purpose: Transparent, reproducible, benchmarkable agent demos
14
  # Dependencies: numpy, pandas, matplotlib, gradio (NO scipy)
 
21
  # Shared utilities
22
  # -----------------------------
23
  def set_seed(seed: int):
24
+ seed = int(seed) % (2**32 - 1)
25
+ np.random.seed(seed)
26
+ random.seed(seed)
27
 
28
  def clamp(x, lo, hi):
29
  return max(lo, min(hi, x))
 
40
  return path
41
 
42
  # -----------------------------
43
+ # RFT Core: τ_eff + gating
44
  # -----------------------------
45
  def tau_eff_adaptive(
46
  uncertainty: float,
 
49
  gain: float = 1.2,
50
  cap: float = 4.0
51
  ):
 
 
 
 
 
 
 
52
  u = clamp(float(uncertainty), 0.0, 1.0)
53
  tau = base + slow_by + gain * u
54
  return clamp(tau, base, cap)
 
57
  return clamp(1.0 - float(uncertainty), 0.0, 1.0)
58
 
59
  def rft_gate(conf: float, tau_eff: float, threshold: float):
 
 
 
 
 
60
  conf = float(conf)
61
  tau_eff = float(tau_eff)
62
  effective = threshold + 0.08 * (tau_eff - 1.0)
 
155
  ax = fig3.add_subplot(111)
156
  ax.step(df["t"], df["baseline_alert"], where="post")
157
  ax.step(df["t"], df["rft_alert"], where="post")
158
+ ax.set_title("NEO: Alerts (Baseline vs RFT)")
159
  ax.set_xlabel("t (step)")
160
  ax.set_ylabel("alert (0/1)")
161
  p_alerts = save_plot(fig3, f"neo_alerts_seed{seed}.png")
 
169
  "baseline_alerts": int(alerts_baseline),
170
  "rft_candidates": int(alerts_rft_raw),
171
  "rft_alerts_filtered": int(alerts_rft_filtered),
172
+ "false_positive_proxy_reduction_%": float(
173
+ 100.0 * (1.0 - (alerts_rft_filtered / max(alerts_rft_raw, 1)))
174
+ ),
175
  "ops_proxy": int(ops_proxy),
176
  }
177
 
 
253
  fig1 = plt.figure(figsize=(10, 4))
254
  ax = fig1.add_subplot(111)
255
  ax.plot(df["t"], df["jitter"])
256
+ ax.set_title("Jitter: residual vs time (running RFT plant)")
257
  ax.set_xlabel("t (step)")
258
  ax.set_ylabel("jitter (arb)")
259
  p_jit = save_plot(fig1, f"jitter_residual_seed{seed}.png")
 
262
  ax = fig2.add_subplot(111)
263
  ax.step(df["t"], df["baseline_active"], where="post")
264
  ax.step(df["t"], df["rft_active"], where="post")
265
+ ax.set_title("Jitter: Actuation duty (Baseline vs RFT gating)")
266
  ax.set_xlabel("t (step)")
267
  ax.set_ylabel("active (0/1)")
268
  p_duty = save_plot(fig2, f"jitter_duty_seed{seed}.png")
 
465
 
466
  return summary, [p_alt, p_x, p_w, p_a], csv_path
467
 
468
+ # ===============================================================
469
+ # Predator Avoidance (Reflex vs QuantumObserver "RFT-style")
470
+ # ===============================================================
471
+ def numpy_convolve2d_toroidal(array: np.ndarray, kernel: np.ndarray) -> np.ndarray:
472
+ out = np.zeros_like(array, dtype=float)
473
+ kcx = kernel.shape[0] // 2
474
+ kcy = kernel.shape[1] // 2
475
+ rows, cols = array.shape
476
+ for i in range(rows):
477
+ for j in range(cols):
478
+ val = 0.0
479
+ for m in range(kernel.shape[0]):
480
+ for n in range(kernel.shape[1]):
481
+ x = (i + m - kcx) % rows
482
+ y = (j + n - kcy) % cols
483
+ val += array[x, y] * kernel[m, n]
484
+ out[i, j] = val
485
+ return out
486
+
487
+ class Predator:
488
+ def __init__(self, grid_size: int):
489
+ self.grid_size = grid_size
490
+ self.x = random.randint(0, grid_size - 1)
491
+ self.y = random.randint(0, grid_size - 1)
492
+
493
+ def move(self):
494
+ dx, dy = random.choice([(0,1), (0,-1), (1,0), (-1,0)])
495
+ self.x = (self.x + dx) % self.grid_size
496
+ self.y = (self.y + dy) % self.grid_size
497
+
498
+ class ReflexAgent:
499
+ def __init__(self, grid_size: int):
500
+ self.grid_size = grid_size
501
+ self.x = random.randint(0, grid_size - 1)
502
+ self.y = random.randint(0, grid_size - 1)
503
+ self.collisions = 0
504
+
505
+ def move(self):
506
+ dx, dy = random.choice([(0,1), (0,-1), (1,0), (-1,0)])
507
+ self.x = (self.x + dx) % self.grid_size
508
+ self.y = (self.y + dy) % self.grid_size
509
+
510
+ class QuantumObserverAgent:
511
+ def __init__(
512
+ self,
513
+ grid_size: int,
514
+ move_kernel: np.ndarray,
515
+ energy_max: float,
516
+ energy_regen: float,
517
+ base_override_cost: float,
518
+ quantum_boost_prob: float,
519
+ quantum_boost_amount: float,
520
+ sense_noise_prob: float,
521
+ alpha: float,
522
+ beta: float,
523
+ dt_internal: float,
524
+ override_threshold: float
525
+ ):
526
+ self.grid_size = grid_size
527
+ self.move_kernel = move_kernel.astype(float)
528
+
529
+ self.pos_prob = np.zeros((grid_size, grid_size), dtype=float)
530
+ x, y = np.random.randint(grid_size), np.random.randint(grid_size)
531
+ self.pos_prob[x, y] = 1.0
532
+ self.x, self.y = int(x), int(y)
533
+
534
+ self.energy_max = float(energy_max)
535
+ self.energy = float(energy_max)
536
+ self.energy_regen = float(energy_regen)
537
+ self.base_override_cost = float(base_override_cost)
538
+ self.quantum_boost_prob = float(quantum_boost_prob)
539
+ self.quantum_boost_amount = float(quantum_boost_amount)
540
+ self.sense_noise_prob = float(sense_noise_prob)
541
+
542
+ self.alpha = float(alpha)
543
+ self.beta = float(beta)
544
+ self.dt_internal = float(dt_internal)
545
+ self.override_threshold = float(override_threshold)
546
+
547
+ self.psi_override = (0.08 + 0j)
548
+ self.overrides = 0
549
+ self.collisions = 0
550
+
551
+ def move(self):
552
+ dx, dy = random.choice([(0,1), (0,-1), (1,0), (-1,0)])
553
+ self.x = (self.x + dx) % self.grid_size
554
+ self.y = (self.y + dy) % self.grid_size
555
+ self.pos_prob.fill(0.0)
556
+ self.pos_prob[self.x, self.y] = 1.0
557
+
558
+ def sense_predators(self, predators):
559
+ perceived = []
560
+ for p in predators:
561
+ if random.random() < self.sense_noise_prob:
562
+ continue
563
+ perceived.append((p.x, p.y))
564
+ return perceived
565
+
566
+ def compute_threat(self, perceived):
567
+ threat = 0.0
568
+ radius = 2
569
+ for (px, py) in perceived:
570
+ xs = [(px + dx) % self.grid_size for dx in range(-radius, radius + 1)]
571
+ ys = [(py + dy) % self.grid_size for dy in range(-radius, radius + 1)]
572
+ sub = self.pos_prob[np.ix_(xs, ys)]
573
+ threat += float(sub.sum())
574
+ return threat
575
+
576
+ def update_override_state(self, perceived):
577
+ T = self.compute_threat(perceived)
578
+ E = self.energy / max(self.energy_max, 1e-9)
579
+ drive = (self.alpha * T) - (self.beta * E)
580
+
581
+ exp_term = clamp(drive, -6.0, 6.0) * 0.22 * self.dt_internal
582
+ amp = math.exp(exp_term)
583
+ amp = clamp(amp, 0.75, 1.35)
584
+
585
+ H = drive + 0.01 * (abs(self.psi_override) ** 2)
586
+ self.psi_override *= amp * np.exp(-1j * H * self.dt_internal)
587
+
588
+ mag = abs(self.psi_override)
589
+ if mag > 1.0:
590
+ self.psi_override /= mag
591
+
592
+ def get_override_probability(self):
593
+ return float(min(abs(self.psi_override) ** 2, 1.0))
594
+
595
    def apply_override(self, perceived):
        """Override move: diffuse pos_prob, suppress it near perceived predators,
        then sample a new (x, y) from the resulting distribution (never onto a
        perceived predator cell)."""
        # Diffuse the position distribution one step on the torus.
        # NOTE(review): relies on numpy_convolve2d_toroidal defined elsewhere in
        # this file — presumably a wrap-around 2D convolution; confirm there.
        field = numpy_convolve2d_toroidal(self.pos_prob, self.move_kernel)
        field = np.maximum(field, 0.0)

        # Damp probability mass within a radius-2 Manhattan neighbourhood of each
        # perceived predator; damping weakens with distance (0.30 / (dist + 1)).
        for (px, py) in perceived:
            for dx in range(-2, 3):
                for dy in range(-2, 3):
                    nx = (px + dx) % self.grid_size
                    ny = (py + dy) % self.grid_size
                    dist = abs(dx) + abs(dy)
                    field[nx, ny] *= (1.0 - 0.30 / (dist + 1.0))

        # Renormalise; if everything was damped to zero, fall back to uniform.
        s = float(field.sum())
        if s <= 0:
            field[:] = 1.0 / (self.grid_size * self.grid_size)
        else:
            field /= s

        self.pos_prob = field

        # Sampling distribution: forbid landing exactly on a perceived predator.
        # Row-major flatten, so cell (px, py) is index px * grid_size + py.
        flat = self.pos_prob.flatten().copy()
        for (px, py) in perceived:
            flat[px * self.grid_size + py] = 0.0

        tot = float(flat.sum())
        if tot <= 0:
            # No admissible cell left: take an ordinary random step instead.
            self.move()
            return

        flat /= tot
        idx = np.random.choice(self.grid_size * self.grid_size, p=flat)
        # divmod inverts the row-major flatten index back to (x, y).
        self.x, self.y = divmod(int(idx), self.grid_size)
627
+
628
+ def quantum_energy_boost(self):
629
+ if random.random() < self.quantum_boost_prob:
630
+ return float(self.quantum_boost_amount)
631
+ return 0.0
632
+
633
+ def regen_energy(self):
634
+ boost = self.quantum_energy_boost()
635
+ self.energy = clamp(self.energy + self.energy_regen + boost, 0.0, self.energy_max)
636
+ if self.energy < self.energy_max and random.random() < 0.05:
637
+ self.energy = self.energy_max
638
+
639
+ def move_observer(self, predators, group_coherence: float):
640
+ if self.energy <= 0:
641
+ self.move()
642
+ return 0, 0.0, 0.0
643
+
644
+ perceived = self.sense_predators(predators)
645
+ self.update_override_state(perceived)
646
+
647
+ P_ov = self.get_override_probability()
648
+ threat = self.compute_threat(perceived)
649
+
650
+ acted = 0
651
+ if (P_ov >= self.override_threshold) and (self.energy > 0):
652
+ effective_cost = self.base_override_cost * (1.0 - float(group_coherence))
653
+ if self.energy >= effective_cost:
654
+ self.overrides += 1
655
+ self.energy -= effective_cost
656
+ self.apply_override(perceived)
657
+ self.psi_override = (0.08 + 0j)
658
+ acted = 1
659
+ else:
660
+ self.move()
661
+ else:
662
+ self.move()
663
+
664
+ return acted, P_ov, threat
665
+
666
def simulate_predator(
    seed: int,
    grid_size: int,
    steps: int,
    num_reflex: int,
    num_observer: int,
    num_predators: int,
    group_coherence: float,
    sense_noise_prob: float,
    override_threshold: float,
    alpha: float,
    beta: float,
    dt_internal: float,
    energy_max: float,
    base_override_cost: float,
    energy_regen: float,
    quantum_boost_prob: float,
    quantum_boost_amount: float,
    show_heatmap: bool
):
    """Run the predator-avoidance grid world: reflex (random-walk) agents vs
    RFT-style QuantumObserverAgents, with roaming predators.

    Returns (summary dict, list of PNG plot paths, CSV log path).
    The run is deterministic for a given seed (both `random` and NumPy are
    seeded via set_seed).
    """
    set_seed(seed)

    # Cross-shaped diffusion kernel for the observers' probability field.
    move_kernel = np.array([[0, 0.2, 0],
                            [0.2, 0.2, 0.2],
                            [0, 0.2, 0]], dtype=float)

    # NOTE(review): ReflexAgent and Predator are defined elsewhere in this
    # file; both appear to expose .x/.y, .move(), and (for agents) .collisions.
    reflex_agents = [ReflexAgent(grid_size) for _ in range(int(num_reflex))]
    observer_agents = [
        QuantumObserverAgent(
            grid_size=grid_size,
            move_kernel=move_kernel,
            energy_max=energy_max,
            energy_regen=energy_regen,
            base_override_cost=base_override_cost,
            quantum_boost_prob=quantum_boost_prob,
            quantum_boost_amount=quantum_boost_amount,
            sense_noise_prob=sense_noise_prob,
            alpha=alpha,
            beta=beta,
            dt_internal=dt_internal,
            override_threshold=override_threshold
        )
        for _ in range(int(num_observer))
    ]
    predators = [Predator(grid_size) for _ in range(int(num_predators))]

    rows = []
    ops_proxy = 0  # crude per-step operation counter for the scorecard

    for t in range(int(steps)):
        # Predators move first; collisions are checked against their new cells.
        for p in predators:
            p.move()

        # Reflex agents: random walk, then count same-cell collisions.
        for a in reflex_agents:
            a.move()
            for p in predators:
                if a.x == p.x and a.y == p.y:
                    a.collisions += 1

        # Observer agents: sense → evolve psi → maybe override, then regen.
        actions = []
        povs = []
        threats = []
        for a in observer_agents:
            acted, P_ov, threat = a.move_observer(predators, group_coherence)
            a.regen_energy()
            actions.append(acted)
            povs.append(P_ov)
            threats.append(threat)
            for p in predators:
                if a.x == p.x and a.y == p.y:
                    a.collisions += 1

        ops_proxy += 18  # fixed per-step cost proxy (not measured)

        # Per-step aggregates; collision counts are cumulative by construction.
        reflex_collisions = int(sum(a.collisions for a in reflex_agents))
        observer_collisions = int(sum(a.collisions for a in observer_agents))
        avg_overrides = float(np.mean([a.overrides for a in observer_agents])) if observer_agents else 0.0
        avg_energy = float(np.mean([a.energy for a in observer_agents])) if observer_agents else 0.0
        avg_threat = float(np.mean(threats)) if threats else 0.0
        avg_pov = float(np.mean(povs)) if povs else 0.0
        avg_act = float(np.mean(actions)) if actions else 0.0

        rows.append({
            "t": t,
            "reflex_collisions_cum": reflex_collisions,
            "observer_collisions_cum": observer_collisions,
            "avg_observer_overrides": avg_overrides,
            "avg_observer_energy": avg_energy,
            "avg_observer_threat": avg_threat,
            "avg_observer_P_override": avg_pov,
            "avg_observer_action": avg_act,
            # Pipe-separated "x,y" pairs so the CSV stays one row per step.
            "predators_positions": "|".join([f"{p.x},{p.y}" for p in predators]),
        })

    df = pd.DataFrame(rows)
    csv_path = df_to_csv_file(df, f"predator_log_seed{seed}.csv")

    # Plot 1: cumulative collisions, reflex vs observer.
    fig1 = plt.figure(figsize=(10, 4))
    ax = fig1.add_subplot(111)
    ax.plot(df["t"], df["reflex_collisions_cum"], label="Reflex collisions (cum)")
    ax.plot(df["t"], df["observer_collisions_cum"], label="Observer collisions (cum)")
    ax.set_title("Predator Avoidance: Collisions (Reflex vs RFT)")
    ax.set_xlabel("t (step)")
    ax.set_ylabel("collisions (cum)")
    ax.legend()
    p_col = save_plot(fig1, f"predator_collisions_seed{seed}.png")

    # Plot 2: observer override count and energy over time.
    fig2 = plt.figure(figsize=(10, 4))
    ax = fig2.add_subplot(111)
    ax.plot(df["t"], df["avg_observer_overrides"], label="Avg overrides (observer)")
    ax.plot(df["t"], df["avg_observer_energy"], label="Avg energy (observer)")
    ax.set_title("Predator Avoidance: Overrides + Energy (Observer)")
    ax.set_xlabel("t (step)")
    ax.set_ylabel("value")
    ax.legend()
    p_ov = save_plot(fig2, f"predator_overrides_energy_seed{seed}.png")

    # Plot 3: threat vs override probability vs action rate.
    fig3 = plt.figure(figsize=(10, 4))
    ax = fig3.add_subplot(111)
    ax.plot(df["t"], df["avg_observer_threat"], label="Avg threat")
    ax.plot(df["t"], df["avg_observer_P_override"], label="Avg P_override")
    ax.plot(df["t"], df["avg_observer_action"], label="Avg action rate")
    ax.set_title("Predator Avoidance: Threat vs Override Probability vs Action Rate")
    ax.set_xlabel("t (step)")
    ax.set_ylabel("value")
    ax.legend()
    p_thr = save_plot(fig3, f"predator_threat_seed{seed}.png")

    # Optional: final probability field of the first observer agent.
    heatmap_path = None
    if show_heatmap and len(observer_agents) > 0:
        field = observer_agents[0].pos_prob
        fig4 = plt.figure(figsize=(6, 5))
        ax = fig4.add_subplot(111)
        im = ax.imshow(field, aspect="auto")
        ax.set_title("Observer Agent[0]: Final probability field (pos_prob)")
        ax.set_xlabel("y")
        ax.set_ylabel("x")
        fig4.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
        heatmap_path = save_plot(fig4, f"predator_probfield_seed{seed}.png")

    # Scalar summary; guards handle the steps == 0 (empty df) case.
    summary = {
        "seed": int(seed),
        "grid_size": int(grid_size),
        "steps": int(steps),
        "num_reflex": int(num_reflex),
        "num_observer": int(num_observer),
        "num_predators": int(num_predators),
        "final_reflex_collisions": int(df["reflex_collisions_cum"].iloc[-1]) if len(df) else 0,
        "final_observer_collisions": int(df["observer_collisions_cum"].iloc[-1]) if len(df) else 0,
        "final_avg_observer_overrides": float(df["avg_observer_overrides"].iloc[-1]) if len(df) else 0.0,
        "final_avg_observer_energy": float(df["avg_observer_energy"].iloc[-1]) if len(df) else 0.0,
        "ops_proxy": int(ops_proxy),
    }

    # Heatmap is appended last, so imgs has 3 or 4 entries.
    imgs = [p_col, p_ov, p_thr]
    if heatmap_path is not None:
        imgs.append(heatmap_path)

    return summary, imgs, csv_path
825
 
826
  # -----------------------------
827
+ # Benchmarks (NEO/Jitter/Landing)
828
  # -----------------------------
829
  def run_benchmarks(
830
  seed: int,
 
910
  f"- Landing: final offset={l_sum['final_landing_offset_m']:.2f} m (goal 10 m), anomalies={l_sum['total_anomalies_detected']}, actions={l_sum['total_control_actions']}\n"
911
  )
912
 
913
+ all_imgs = neo_imgs + jit_imgs + land_imgs
914
  return txt, score, score_path, all_imgs, [neo_csv, jit_csv, land_csv]
915
 
916
  # -----------------------------
917
  # UI text blocks
918
  # -----------------------------
919
  HOME_MD = """
920
+ # Rendered Frame Theory (RFT) — Agent Console
 
 
921
 
922
+ This Space is meant to be transparent, reproducible, and benchmarkable.
 
923
 
924
  Run it. Change parameters. Break it. Compare baseline vs RFT.
925
 
926
+ Core idea:
927
 
928
  **Decision timing matters.**
929
+ RFT treats timing (τ_eff), uncertainty, and action “collapse” as first-class controls.
930
 
931
+ This Space contains:
932
+ - **NEO alerting**
933
+ - **Satellite jitter reduction**
934
+ - **Starship-style landing harness**
935
+ - **Predator avoidance** (Reflex vs RFT-style "QuantumObserver" agents)
936
 
937
+ No SciPy. No hidden dependencies. No model weights.
 
 
938
  """
939
 
940
  LIVE_MD = """
941
  # Live Console
942
 
943
+ Run everything quickly and export logs.
944
 
945
  - deterministic runs (seeded)
946
  - plots saved
 
949
  """
950
 
951
  THEORY_PRACTICE_MD = """
952
+ # Theory → Practice (how I implement RFT here)
 
 
953
 
954
+ ## 1) Uncertainty
955
+ Explicit uncertainty proxy from noise + disturbance scale.
956
 
957
  ## 2) Confidence
958
+ confidence = 1 − uncertainty (clipped 0..1).
959
 
960
  ## 3) Adaptive τ_eff
961
+ Higher uncertainty → higher τ_eff.
 
 
962
 
963
+ ## 4) Collapse gate
964
+ Act only when the gate passes:
965
+ - confidence exceeds a threshold
966
+ - τ_eff increases strictness under uncertainty
967
 
968
+ ## 5) Why it matters
969
  Baseline controllers often act constantly.
970
+ RFT tries to act less often, but more decisively.
971
  """
972
 
973
  MATH_MD = r"""
974
  # Mathematics (minimal and implementation-linked)
975
 
976
+ u ∈ [0,1] : uncertainty proxy
977
+ C ∈ [0,1] : confidence proxy
978
+ τ_eff ≥ 1 : effective decision timing factor
 
 
 
979
 
980
+ Confidence:
981
  \[
982
  C = \text{clip}(1 - u, 0, 1)
983
  \]
984
 
985
+ Adaptive τ_eff:
986
  \[
987
  \tau_{\text{eff}} = \text{clip}(1 + 1.0 + g\cdot u,\; 1,\; \tau_{\max})
988
  \]
989
 
990
+ Collapse gate (concept):
 
991
  \[
992
  \text{Gate} = \left[C \ge \theta + k(\tau_{\text{eff}}-1)\right]
993
  \]
 
 
994
  """
995
 
996
  INVESTOR_MD = """
997
+ # Investor / Agency Walkthrough
998
 
999
+ What this Space demonstrates:
1000
+ - alert filtering (NEO)
 
1001
  - stabilisation (jitter reduction)
1002
+ - anomaly-aware control (landing harness)
1003
+ - threat-aware avoidance (predator demo)
 
 
 
 
 
 
1004
 
1005
+ What it is not:
1006
+ - not flight-certified
1007
+ - not a production pipeline
1008
+ - not a claim that anyone is using it
1009
 
1010
+ What makes it production-grade:
1011
  - real sensor ingestion + timing constraints
1012
  - hardware-in-loop testing
1013
+ - dataset validation
 
1014
  """
1015
 
1016
  REPRO_MD = """
1017
  # Reproducibility & Logs
1018
 
1019
+ Everything is reproducible:
1020
+ - set seed
1021
+ - run
1022
+ - export CSV
1023
+ - verify plots + metrics
1024
+
1025
+ CSV schema is explicit in the exports.
 
 
 
 
1026
  """
1027
 
1028
  # -----------------------------
 
1072
  summary_txt = json.dumps(summary, indent=2)
1073
  return summary_txt, imgs[0], imgs[1], imgs[2], imgs[3], csv_path
1074
 
1075
def ui_run_predator(seed, grid_size, steps, num_reflex, num_observer, num_predators,
                    group_coherence, sense_noise_prob, override_threshold,
                    alpha, beta, dt_internal,
                    energy_max, base_override_cost, energy_regen,
                    quantum_boost_prob, quantum_boost_amount,
                    show_heatmap):
    """Gradio adapter: cast UI widget values, run the predator sim, and fan the
    results out to (summary JSON, up to four images, CSV path)."""
    summary, imgs, csv_path = simulate_predator(
        seed=int(seed),
        grid_size=int(grid_size),
        steps=int(steps),
        num_reflex=int(num_reflex),
        num_observer=int(num_observer),
        num_predators=int(num_predators),
        group_coherence=float(group_coherence),
        sense_noise_prob=float(sense_noise_prob),
        override_threshold=float(override_threshold),
        alpha=float(alpha),
        beta=float(beta),
        dt_internal=float(dt_internal),
        energy_max=float(energy_max),
        base_override_cost=float(base_override_cost),
        energy_regen=float(energy_regen),
        quantum_boost_prob=float(quantum_boost_prob),
        quantum_boost_amount=float(quantum_boost_amount),
        show_heatmap=bool(show_heatmap)
    )
    # Pad to exactly four image slots (heatmap is optional) with None.
    slots = list(imgs) + [None] * 4
    summary_txt = json.dumps(summary, indent=2)
    return summary_txt, slots[0], slots[1], slots[2], slots[3], csv_path
1107
 
1108
  def ui_run_bench(seed, neo_steps, neo_dt, neo_alert_km, neo_noise_km, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain):
1109
  txt, score_df, score_csv, imgs, logs = run_benchmarks(
 
1122
  # -----------------------------
1123
  # Gradio UI
1124
  # -----------------------------
1125
+ with gr.Blocks(title="RFT — Agent Console (NEO / Jitter / Landing / Predator)") as demo:
1126
  gr.Markdown(HOME_MD)
1127
 
1128
  with gr.Tabs():
 
1151
  land_wind = gr.Slider(0.0, 25.0, value=15.0, step=0.5, label="Landing wind max (m/s)")
1152
  land_thrust_noise = gr.Slider(0.0, 10.0, value=3.0, step=0.1, label="Landing thrust noise")
1153
 
1154
+ run_b = gr.Button("Run Full Benchmarks (Baseline vs RFT)")
1155
 
1156
  bench_txt = gr.Textbox(label="Benchmark summary", lines=6)
1157
  bench_table = gr.Dataframe(label="Scorecard (CSV also exported)")
 
1178
  run_b.click(
1179
  ui_run_bench,
1180
  inputs=[seed_live, neo_steps, neo_dt, neo_alert, neo_noise, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain_live],
1181
+ outputs=[
1182
+ bench_txt, bench_table, bench_score_csv,
1183
+ img1, img2, img3, img4, img5, img6, img7, img8, img9, img10,
1184
+ neo_log, jit_log, land_log
1185
+ ]
1186
  )
1187
 
1188
+ with gr.Tab("NEO Agent"):
1189
  gr.Markdown(
1190
+ "# Near-Earth Object (NEO) Alerting Agent\n"
1191
  "Baseline: distance threshold only.\n"
1192
+ "RFT: distance threshold + confidence + τ_eff collapse gate.\n"
1193
  )
1194
  with gr.Row():
1195
  seed_neo = gr.Number(value=42, precision=0, label="Seed")
 
1198
  with gr.Row():
1199
  alert_km = gr.Slider(1000, 20000, value=5000, step=50, label="Alert threshold (km)")
1200
  noise_km = gr.Slider(0.0, 200.0, value=35.0, step=1.0, label="Measurement noise (km)")
1201
+ rft_conf_th = gr.Slider(0.1, 0.95, value=0.55, step=0.01, label="RFT confidence threshold")
1202
  tau_gain = gr.Slider(0.0, 3.0, value=1.2, step=0.05, label="τ_eff gain")
1203
  show_debug = gr.Checkbox(value=False, label="Show debug table (first rows)")
1204
  run_neo = gr.Button("Run NEO Simulation")
 
1217
  outputs=[out_neo_summary, out_neo_debug, out_neo_img1, out_neo_img2, out_neo_img3, out_neo_csv]
1218
  )
1219
 
1220
+ with gr.Tab("Satellite Jitter Agent"):
1221
  gr.Markdown(
1222
+ "# Satellite Jitter Reduction\n"
1223
  "Baseline: continuous correction.\n"
1224
+ "RFT: gated correction using confidence + τ_eff.\n"
1225
  )
1226
  with gr.Row():
1227
  seed_j = gr.Number(value=42, precision=0, label="Seed")
 
1250
 
1251
  with gr.Tab("Starship Landing Harness"):
1252
  gr.Markdown(
1253
+ "# Starship-style Landing Harness (Simplified)\n"
1254
  "This is not a flight model. It’s a timing-control harness.\n"
1255
  )
1256
  with gr.Row():
 
1284
 
1285
  with gr.Tab("Predator Avoidance"):
1286
  gr.Markdown(
1287
+ "# Predator Avoidance (Reflex vs RFT)\n"
1288
+ "Grid world with roaming predators.\n"
1289
+ "Reflex agents: random walk.\n"
1290
+ "Observer agents: probability field + threat-weighted override.\n"
1291
  )
1292
+
1293
  with gr.Row():
1294
+ seed_p = gr.Number(value=42, precision=0, label="Seed")
1295
+ grid_size = gr.Slider(10, 60, value=20, step=1, label="Grid size")
1296
+ steps_p = gr.Slider(50, 1500, value=200, step=1, label="Steps")
1297
+
 
 
 
 
 
 
 
 
 
 
 
 
 
1298
  with gr.Row():
1299
+ num_reflex = gr.Slider(0, 50, value=10, step=1, label="Reflex agents")
1300
+ num_observer = gr.Slider(0, 20, value=3, step=1, label="Observer agents")
1301
+ num_predators = gr.Slider(1, 20, value=3, step=1, label="Predators")
1302
+
1303
+ with gr.Accordion("RFT / Agent parameters", open=True):
1304
+ with gr.Row():
1305
+ group_coherence = gr.Slider(0.0, 0.95, value=0.30, step=0.01, label="Group coherence")
1306
+ sense_noise_prob = gr.Slider(0.0, 0.9, value=0.10, step=0.01, label="Sense noise probability")
1307
+ override_threshold = gr.Slider(0.0, 1.0, value=0.02, step=0.005, label="Override threshold (P_ov)")
1308
+
1309
+ with gr.Row():
1310
+ alpha = gr.Slider(0.0, 50.0, value=15.0, step=0.5, label="alpha (threat gain)")
1311
+ beta = gr.Slider(0.0, 10.0, value=0.5, step=0.05, label="beta (energy term)")
1312
+ dt_internal = gr.Slider(0.01, 1.0, value=0.2, step=0.01, label="override dt")
1313
+
1314
+ with gr.Row():
1315
+ energy_max = gr.Slider(1.0, 300.0, value=100.0, step=1.0, label="Energy max")
1316
+ base_override_cost = gr.Slider(0.0, 10.0, value=1.0, step=0.1, label="Base override cost")
1317
+ energy_regen = gr.Slider(0.0, 1.0, value=0.05, step=0.01, label="Energy regen")
1318
+
1319
+ with gr.Row():
1320
+ quantum_boost_prob = gr.Slider(0.0, 1.0, value=0.10, step=0.01, label="Quantum boost probability")
1321
+ quantum_boost_amount = gr.Slider(0.0, 50.0, value=5.0, step=0.5, label="Quantum boost amount")
1322
+ show_heatmap = gr.Checkbox(value=True, label="Show probability field heatmap (agent[0])")
1323
+
1324
+ run_p = gr.Button("Run Predator Simulation")
1325
 
1326
  out_p_summary = gr.Textbox(label="Summary JSON", lines=12)
1327
  with gr.Row():
1328
+ out_p_img1 = gr.Image(label="Collisions (cumulative)")
1329
+ out_p_img2 = gr.Image(label="Overrides + Energy")
1330
+ with gr.Row():
1331
+ out_p_img3 = gr.Image(label="Threat / P_override / Action rate")
1332
+ out_p_img4 = gr.Image(label="Final probability field (optional)")
1333
  out_p_csv = gr.File(label="Download Predator CSV log")
1334
 
1335
  run_p.click(
1336
  ui_run_predator,
1337
+ inputs=[seed_p, grid_size, steps_p, num_reflex, num_observer, num_predators,
1338
+ group_coherence, sense_noise_prob, override_threshold,
1339
+ alpha, beta, dt_internal,
1340
+ energy_max, base_override_cost, energy_regen,
1341
+ quantum_boost_prob, quantum_boost_amount,
1342
+ show_heatmap],
1343
+ outputs=[out_p_summary, out_p_img1, out_p_img2, out_p_img3, out_p_img4, out_p_csv]
1344
  )
1345
 
 
 
 
1346
  with gr.Tab("Theory → Practice"):
1347
  gr.Markdown(THEORY_PRACTICE_MD)
1348