RFTSystems committed on
Commit
627112e
·
verified ·
1 Parent(s): 39bcd20

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +288 -22
app.py CHANGED
@@ -49,7 +49,7 @@ def tau_eff_adaptive(
49
  """
50
  τ_eff is implemented here as a timing/decision delay modifier.
51
  - base: baseline τ_eff
52
- - slow_by: explicit slow-down term (I wanted this behaviour: slow by 1.0)
53
  - gain: reaction strength to uncertainty
54
  - cap: prevents absurd values
55
  """
@@ -178,9 +178,7 @@ def simulate_neo(
178
  "baseline_alerts": int(alerts_baseline),
179
  "rft_candidates": int(alerts_rft_raw),
180
  "rft_alerts_filtered": int(alerts_rft_filtered),
181
- "false_positive_proxy_reduction_%": float(
182
- 100.0 * (1.0 - (alerts_rft_filtered / max(alerts_rft_raw, 1)))
183
- ),
184
  "ops_proxy": int(ops_proxy),
185
  }
186
 
@@ -301,7 +299,6 @@ def simulate_jitter(
301
 
302
  # -----------------------------
303
  # Starship-style Landing Harness (2D)
304
- # (Same logic you have now; only naming/wording updated)
305
  # -----------------------------
306
  def simulate_landing(
307
  seed: int,
@@ -476,7 +473,215 @@ def simulate_landing(
476
  return summary, [p_alt, p_x, p_w, p_a], csv_path
477
 
478
  # -----------------------------
479
- # Benchmarks
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
480
  # -----------------------------
481
  def run_benchmarks(
482
  seed: int,
@@ -566,7 +771,7 @@ def run_benchmarks(
566
  return txt, score, score_path, all_imgs, [neo_csv, jit_csv, land_csv]
567
 
568
  # -----------------------------
569
- # UI text blocks (your voice, clean + friendly + no consciousness claims)
570
  # -----------------------------
571
  HOME_MD = """
572
  # RFT — Observer Agent Console
@@ -583,10 +788,11 @@ What I’m demonstrating is a simple idea:
583
  **Decision timing matters.**
584
  RFT treats timing (τ_eff), uncertainty, and action “commit” as first-class controls.
585
 
586
- This Space contains three working agent harnesses:
587
  - **NEO alerting** (filter noisy close-approach alerts)
588
  - **Satellite jitter reduction** (reduce actuator duty / chatter while keeping residual low)
589
- - **Starship-style landing harness** (simplified, but structured to test decision timing under wind/thrust disturbances)
 
590
 
591
  Every tab shows what it’s doing, why, and where it wins or loses.
592
 
@@ -667,6 +873,7 @@ I’m demonstrating a decision-timing framework that can be applied to:
667
  - alert filtering (NEO / tracking)
668
  - stabilisation (jitter reduction)
669
  - anomaly-aware control loops (landing harness)
 
670
 
671
  This is a runnable harness:
672
  - you can reproduce results with seeds
@@ -675,9 +882,9 @@ This is a runnable harness:
675
  - you can change thresholds and see behaviour shift
676
 
677
  ## What I’m not claiming
678
- - I’m not claiming flight certification
679
  - I’m not claiming any company is using this
680
- - I’m not claiming this replaces aerospace validation pipelines
681
 
682
  ## What would make it production-grade
683
  - real sensor ingestion + timing constraints
@@ -749,6 +956,29 @@ def ui_run_landing(seed, steps, dt, wind_max, thrust_noise, kp_base, kp_rft, gat
749
  summary_txt = json.dumps(summary, indent=2)
750
  return summary_txt, imgs[0], imgs[1], imgs[2], imgs[3], csv_path
751
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
752
  def ui_run_bench(seed, neo_steps, neo_dt, neo_alert_km, neo_noise_km, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain):
753
  txt, score_df, score_csv, imgs, logs = run_benchmarks(
754
  seed=int(seed),
@@ -766,7 +996,7 @@ def ui_run_bench(seed, neo_steps, neo_dt, neo_alert_km, neo_noise_km, jit_steps,
766
  # -----------------------------
767
  # Gradio UI
768
  # -----------------------------
769
- with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing)") as demo:
770
  gr.Markdown(HOME_MD)
771
 
772
  with gr.Tabs():
@@ -822,17 +1052,12 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing)")
822
  run_b.click(
823
  ui_run_bench,
824
  inputs=[seed_live, neo_steps, neo_dt, neo_alert, neo_noise, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain_live],
825
- outputs=[
826
- bench_txt, bench_table, bench_score_csv,
827
- img1, img2, img3, img4, img5, img6, img7, img8, img9, img10,
828
- neo_log, jit_log, land_log
829
- ]
830
  )
831
 
832
  with gr.Tab("NEO Observer Agent"):
833
  gr.Markdown(
834
  "# Near-Earth Object (NEO) Observer Agent\n"
835
- "This is a test harness for filtering close-approach alerts under noise.\n"
836
  "Baseline: distance threshold only.\n"
837
  "Observer-gated RFT: distance threshold + confidence + τ_eff decision gate.\n"
838
  )
@@ -927,13 +1152,54 @@ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing)")
927
  outputs=[out_l_summary, out_l_img1, out_l_img2, out_l_img3, out_l_img4, out_l_csv]
928
  )
929
 
930
- with gr.Tab("Benchmarks"):
931
  gr.Markdown(
932
- "# Benchmarks\n"
933
- "Run full packs from the Live Console tab.\n"
934
- "Everything is seeded, logged, and exportable.\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
935
  )
936
 
 
 
 
937
  with gr.Tab("Theory → Practice"):
938
  gr.Markdown(THEORY_PRACTICE_MD)
939
 
 
49
  """
50
  τ_eff is implemented here as a timing/decision delay modifier.
51
  - base: baseline τ_eff
52
+ - slow_by: explicit slow-down term
53
  - gain: reaction strength to uncertainty
54
  - cap: prevents absurd values
55
  """
 
178
  "baseline_alerts": int(alerts_baseline),
179
  "rft_candidates": int(alerts_rft_raw),
180
  "rft_alerts_filtered": int(alerts_rft_filtered),
181
+ "false_positive_proxy_reduction_%": float(100.0 * (1.0 - (alerts_rft_filtered / max(alerts_rft_raw, 1)))),
 
 
182
  "ops_proxy": int(ops_proxy),
183
  }
184
 
 
299
 
300
  # -----------------------------
301
  # Starship-style Landing Harness (2D)
 
302
  # -----------------------------
303
  def simulate_landing(
304
  seed: int,
 
473
  return summary, [p_alt, p_x, p_w, p_a], csv_path
474
 
475
  # -----------------------------
476
# -----------------------------
# Predator Avoidance (2D)
# -----------------------------
def simulate_predator_avoidance(
    seed: int,
    steps: int,
    dt: float,
    world_size: float,
    predator_speed: float,
    agent_speed: float,
    catch_radius: float,
    sense_noise: float,
    rft_k_attract: float,
    rft_k_repulse: float,
    baseline_k_attract: float,
    baseline_k_repulse: float,
    gate_threshold: float,
    tau_gain: float,
    goal_radius: float,
    show_baseline: bool
):
    """
    A simple pursuit/evasion harness:
    - Predator chases the agent.
    - Agent tries to reach a goal while avoiding capture.
    Baseline: always-on potential field (attract goal + repulse predator).
    Observer-gated RFT: same control, but can "wait" unless risk is high.

    Returns (summary_dict, [traj_png, dist_png, action_png], csv_path).
    Only one mode runs per call (selected by show_baseline); the energy
    proxy for the other mode stays 0.0.
    """
    set_seed(seed)

    # Initial positions (fixed fractions of the world so runs are reproducible
    # given the seed; only the sensor noise is stochastic).
    agent = np.array([-0.55 * world_size, -0.10 * world_size], dtype=float)
    predator = np.array([0.35 * world_size, 0.25 * world_size], dtype=float)
    goal = np.array([0.60 * world_size, -0.45 * world_size], dtype=float)

    # Helper: normalize vector (returns zero vector for near-zero input to
    # avoid division blow-ups when two points coincide).
    def nrm(v):
        s = float(np.linalg.norm(v))
        if s < 1e-12:
            return v * 0.0
        return v / s

    # Logging
    rows = []
    ops_proxy = 0

    caught = False
    reached_goal = False
    t_end = int(steps)  # step index at which the episode terminated (kept for log compatibility)

    # Simple "energy" proxy: integral of commanded speed over time.
    energy_baseline = 0.0
    energy_rft = 0.0

    for t in range(int(steps)):
        # Noisy sensed predator position (agent perception)
        pred_meas = predator + np.random.normal(0.0, sense_noise, size=2)

        # Pre-move distances (used for control decisions this step)
        d_pa = float(np.linalg.norm(pred_meas - agent))   # perceived predator-agent
        d_ag = float(np.linalg.norm(goal - agent))        # agent-goal

        # Uncertainty proxy:
        # - higher when predator is close (risk)
        # - higher when sensing is noisy
        # - mild term for distance-to-goal (to bias to act near the end)
        risk = clamp((catch_radius / max(d_pa, 1e-6)) * 0.55, 0.0, 1.0)
        noise_term = clamp((sense_noise / max(world_size, 1e-9)) * 8.0, 0.0, 1.0)
        goal_term = clamp(1.0 - (d_ag / max(world_size, 1e-9)), 0.0, 1.0) * 0.15
        uncertainty = clamp(risk + noise_term + goal_term, 0.0, 1.0)

        tau = tau_eff_adaptive(uncertainty, base=1.0, slow_by=1.0, gain=tau_gain, cap=4.0)
        conf = rft_confidence(uncertainty)

        # Predator moves toward agent (pure pursuit)
        predator_dir = nrm(agent - predator)
        predator = predator + predator_dir * predator_speed * dt

        # Baseline control (always on): attract-to-goal + 1/d repulsion, then
        # renormalized to the agent's fixed speed.
        v_goal_b = nrm(goal - agent) * baseline_k_attract
        v_away_b = nrm(agent - pred_meas) * (baseline_k_repulse / max(d_pa, 0.25))
        v_base = v_goal_b + v_away_b
        v_base = nrm(v_base) * agent_speed

        # RFT (observer-gated) control.
        # Gate can be overridden if risk is high / predator is close / goal is near.
        do_action = rft_gate(conf, tau, gate_threshold)
        must_act = (d_pa < (3.0 * catch_radius)) or (d_ag < (2.5 * goal_radius))
        do_action = bool(do_action or must_act)

        v_rft = np.array([0.0, 0.0], dtype=float)
        if do_action:
            # "phase" ramps 0→1 as the agent nears the goal; lookahead scales
            # both gains up in the endgame.
            phase = clamp(1.0 - (d_ag / max(world_size, 1e-9)), 0.0, 1.0)
            lookahead = 1.0 + 1.2 * phase

            v_goal = nrm(goal - agent) * (rft_k_attract * lookahead)
            v_away = nrm(agent - pred_meas) * ((rft_k_repulse * lookahead) / max(d_pa, 0.25))
            v_rft = v_goal + v_away
            v_rft = nrm(v_rft) * agent_speed

        # Choose which agent motion to apply to the plant
        if show_baseline:
            agent_next = agent + v_base * dt
            energy_baseline += float(np.linalg.norm(v_base)) * dt
        else:
            agent_next = agent + v_rft * dt
            energy_rft += float(np.linalg.norm(v_rft)) * dt

        # Clamp to bounds
        agent_next[0] = clamp(agent_next[0], -world_size, world_size)
        agent_next[1] = clamp(agent_next[1], -world_size, world_size)
        agent = agent_next

        # Termination checks — FIX: recompute distances AFTER both bodies moved.
        # The previous version tested pre-move distances, so capture/goal
        # detection lagged one step and the logged distances disagreed with the
        # logged (post-move) positions.
        d_true = float(np.linalg.norm(predator - agent))  # true predator-agent, post-move
        d_ag = float(np.linalg.norm(goal - agent))        # agent-goal, post-move
        if d_true <= catch_radius:
            caught = True
            t_end = t + 1
        if d_ag <= goal_radius:
            reached_goal = True
            t_end = t + 1

        ops_proxy += 14

        rows.append({
            "t": t,
            "agent_x": agent[0],
            "agent_y": agent[1],
            "pred_x": predator[0],
            "pred_y": predator[1],
            "goal_x": goal[0],
            "goal_y": goal[1],
            "d_pred_agent_true": d_true,
            "d_pred_agent_meas": d_pa,
            "d_agent_goal": d_ag,
            "sense_noise": sense_noise,
            "uncertainty": uncertainty,
            "tau_eff": tau,
            "confidence": conf,
            "action_taken": int(do_action),
            "mode": "baseline" if show_baseline else "observer_gated_rft",
            "v_base_x": v_base[0],
            "v_base_y": v_base[1],
            "v_rft_x": v_rft[0],
            "v_rft_y": v_rft[1],
            "caught": int(caught),
            "reached_goal": int(reached_goal),
        })

        if caught or reached_goal:
            break

    df = pd.DataFrame(rows)

    # Plots
    fig1 = plt.figure(figsize=(7.5, 7.0))
    ax = fig1.add_subplot(111)
    ax.plot(df["agent_x"], df["agent_y"], label="Agent")
    ax.plot(df["pred_x"], df["pred_y"], label="Predator")
    ax.scatter([df["goal_x"].iloc[0]], [df["goal_y"].iloc[0]], marker="x", s=80, label="Goal")
    ax.set_title("Predator Avoidance: Trajectories")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.legend(loc="best")
    p_traj = save_plot(fig1, f"predator_traj_seed{seed}_{'baseline' if show_baseline else 'rft'}.png")

    fig2 = plt.figure(figsize=(10, 4))
    ax = fig2.add_subplot(111)
    ax.plot(df["t"], df["d_pred_agent_true"], label="True dist (pred→agent)")
    ax.plot(df["t"], df["d_agent_goal"], label="Dist (agent→goal)")
    ax.axhline(catch_radius, linestyle="--", label="Catch radius")
    ax.axhline(goal_radius, linestyle="--", label="Goal radius")
    ax.set_title("Predator Avoidance: Distances vs time")
    ax.set_xlabel("t (step)")
    ax.set_ylabel("distance")
    ax.legend(loc="best")
    p_dist = save_plot(fig2, f"predator_dist_seed{seed}_{'baseline' if show_baseline else 'rft'}.png")

    fig3 = plt.figure(figsize=(10, 3))
    ax = fig3.add_subplot(111)
    ax.step(df["t"], df["action_taken"], where="post")
    ax.set_title("Predator Avoidance: Action timeline (observer-gated)")
    ax.set_xlabel("t (step)")
    ax.set_ylabel("action (0/1)")
    p_act = save_plot(fig3, f"predator_action_seed{seed}_{'baseline' if show_baseline else 'rft'}.png")

    csv_path = df_to_csv_file(df, f"predator_log_seed{seed}_{'baseline' if show_baseline else 'rft'}.csv")

    min_sep = float(df["d_pred_agent_true"].min()) if len(df) else 0.0
    steps_ran = int(len(df))
    success = bool(reached_goal and not caught)

    summary = {
        "seed": int(seed),
        "mode": "baseline" if show_baseline else "observer_gated_rft",
        "steps_ran": steps_ran,
        "caught": bool(caught),
        "reached_goal": bool(reached_goal),
        "success": bool(success),
        "min_separation_true": float(min_sep),
        "final_dist_to_goal": float(df["d_agent_goal"].iloc[-1]) if len(df) else None,
        "energy_proxy_baseline": float(energy_baseline),
        "energy_proxy_rft": float(energy_rft),
        "ops_proxy": int(ops_proxy),
    }

    return summary, [p_traj, p_dist, p_act], csv_path
682
+
683
+ # -----------------------------
684
+ # Benchmarks (NEO/Jitter/Landing only to keep UI stable)
685
  # -----------------------------
686
  def run_benchmarks(
687
  seed: int,
 
771
  return txt, score, score_path, all_imgs, [neo_csv, jit_csv, land_csv]
772
 
773
  # -----------------------------
774
+ # UI text blocks
775
  # -----------------------------
776
  HOME_MD = """
777
  # RFT — Observer Agent Console
 
788
  **Decision timing matters.**
789
  RFT treats timing (τ_eff), uncertainty, and action “commit” as first-class controls.
790
 
791
+ This Space contains working agent harnesses:
792
  - **NEO alerting** (filter noisy close-approach alerts)
793
  - **Satellite jitter reduction** (reduce actuator duty / chatter while keeping residual low)
794
+ - **Starship-style landing harness** (simplified timing-control harness)
795
+ - **Predator avoidance** (agent reaches goal while avoiding a pursuing predator)
796
 
797
  Every tab shows what it’s doing, why, and where it wins or loses.
798
 
 
873
  - alert filtering (NEO / tracking)
874
  - stabilisation (jitter reduction)
875
  - anomaly-aware control loops (landing harness)
876
+ - pursuit/evasion behaviour (predator avoidance)
877
 
878
  This is a runnable harness:
879
  - you can reproduce results with seeds
 
882
  - you can change thresholds and see behaviour shift
883
 
884
  ## What I’m not claiming
885
+ - I’m not claiming certification
886
  - I’m not claiming any company is using this
887
+ - I’m not claiming this replaces validation pipelines
888
 
889
  ## What would make it production-grade
890
  - real sensor ingestion + timing constraints
 
956
  summary_txt = json.dumps(summary, indent=2)
957
  return summary_txt, imgs[0], imgs[1], imgs[2], imgs[3], csv_path
958
 
959
def ui_run_predator(seed, steps, dt, world_size, predator_speed, agent_speed, catch_radius, sense_noise,
                    rft_k_attract, rft_k_repulse, base_k_attract, base_k_repulse, gate_th, tau_gain, goal_radius, show_baseline):
    """Gradio adapter: coerce widget values to plain Python types, run the
    predator-avoidance simulation, and unpack its outputs for the UI."""
    params = {
        "seed": int(seed),
        "steps": int(steps),
        "dt": float(dt),
        "world_size": float(world_size),
        "predator_speed": float(predator_speed),
        "agent_speed": float(agent_speed),
        "catch_radius": float(catch_radius),
        "sense_noise": float(sense_noise),
        "rft_k_attract": float(rft_k_attract),
        "rft_k_repulse": float(rft_k_repulse),
        "baseline_k_attract": float(base_k_attract),
        "baseline_k_repulse": float(base_k_repulse),
        "gate_threshold": float(gate_th),
        "tau_gain": float(tau_gain),
        "goal_radius": float(goal_radius),
        "show_baseline": bool(show_baseline),
    }
    summary, imgs, csv_path = simulate_predator_avoidance(**params)
    return json.dumps(summary, indent=2), imgs[0], imgs[1], imgs[2], csv_path
981
+
982
  def ui_run_bench(seed, neo_steps, neo_dt, neo_alert_km, neo_noise_km, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain):
983
  txt, score_df, score_csv, imgs, logs = run_benchmarks(
984
  seed=int(seed),
 
996
  # -----------------------------
997
  # Gradio UI
998
  # -----------------------------
999
+ with gr.Blocks(title="RFT — Observer Agent Console (NEO / Jitter / Landing / Predator)") as demo:
1000
  gr.Markdown(HOME_MD)
1001
 
1002
  with gr.Tabs():
 
1052
  run_b.click(
1053
  ui_run_bench,
1054
  inputs=[seed_live, neo_steps, neo_dt, neo_alert, neo_noise, jit_steps, jit_dt, jit_noise, land_steps, land_dt, land_wind, land_thrust_noise, tau_gain_live],
1055
+ outputs=[bench_txt, bench_table, bench_score_csv, img1, img2, img3, img4, img5, img6, img7, img8, img9, img10, neo_log, jit_log, land_log]
 
 
 
 
1056
  )
1057
 
1058
  with gr.Tab("NEO Observer Agent"):
1059
  gr.Markdown(
1060
  "# Near-Earth Object (NEO) Observer Agent\n"
 
1061
  "Baseline: distance threshold only.\n"
1062
  "Observer-gated RFT: distance threshold + confidence + τ_eff decision gate.\n"
1063
  )
 
1152
  outputs=[out_l_summary, out_l_img1, out_l_img2, out_l_img3, out_l_img4, out_l_csv]
1153
  )
1154
 
1155
+ with gr.Tab("Predator Avoidance"):
1156
  gr.Markdown(
1157
+ "# Predator Avoidance (Observer Agent)\n"
1158
+ "Agent tries to reach a goal while avoiding a pursuing predator.\n"
1159
+ "This is a simple pursuit/evasion harness designed to be inspectable and reproducible.\n"
1160
+ "Toggle **Baseline** (always-on control) vs **Observer-gated RFT** (can wait unless risk is high).\n"
1161
+ )
1162
+ with gr.Row():
1163
+ seed_p = gr.Number(value=7, precision=0, label="Seed")
1164
+ steps_p = gr.Slider(50, 1500, value=450, step=1, label="Steps")
1165
+ dt_p = gr.Slider(0.05, 1.0, value=0.20, step=0.01, label="dt")
1166
+ with gr.Row():
1167
+ world_size = gr.Slider(20.0, 300.0, value=120.0, step=5.0, label="World size (bounds)")
1168
+ predator_speed = gr.Slider(0.1, 40.0, value=10.0, step=0.1, label="Predator speed")
1169
+ agent_speed = gr.Slider(0.1, 40.0, value=12.0, step=0.1, label="Agent speed")
1170
+ with gr.Row():
1171
+ catch_radius = gr.Slider(0.5, 30.0, value=6.0, step=0.5, label="Catch radius")
1172
+ goal_radius = gr.Slider(0.5, 30.0, value=6.0, step=0.5, label="Goal radius")
1173
+ sense_noise = gr.Slider(0.0, 10.0, value=1.2, step=0.1, label="Sensor noise (predator)")
1174
+ with gr.Row():
1175
+ base_k_att = gr.Slider(0.1, 10.0, value=1.2, step=0.05, label="Baseline attract gain")
1176
+ base_k_rep = gr.Slider(0.1, 30.0, value=10.0, step=0.1, label="Baseline repulse gain")
1177
+ with gr.Row():
1178
+ rft_k_att = gr.Slider(0.1, 10.0, value=1.2, step=0.05, label="RFT attract gain")
1179
+ rft_k_rep = gr.Slider(0.1, 30.0, value=10.0, step=0.1, label="RFT repulse gain")
1180
+ with gr.Row():
1181
+ gate_p = gr.Slider(0.1, 0.95, value=0.55, step=0.01, label="Gate threshold")
1182
+ tau_p = gr.Slider(0.0, 3.0, value=1.2, step=0.05, label="τ_eff gain")
1183
+ mode_baseline = gr.Checkbox(value=False, label="Run Baseline mode (unchecked = Observer-gated RFT)")
1184
+ run_p = gr.Button("Run Predator Avoidance")
1185
+
1186
+ out_p_summary = gr.Textbox(label="Summary JSON", lines=12)
1187
+ with gr.Row():
1188
+ out_p_img1 = gr.Image(label="Trajectories")
1189
+ out_p_img2 = gr.Image(label="Distances vs time")
1190
+ out_p_img3 = gr.Image(label="Action timeline")
1191
+ out_p_csv = gr.File(label="Download Predator CSV log")
1192
+
1193
+ run_p.click(
1194
+ ui_run_predator,
1195
+ inputs=[seed_p, steps_p, dt_p, world_size, predator_speed, agent_speed, catch_radius, sense_noise,
1196
+ rft_k_att, rft_k_rep, base_k_att, base_k_rep, gate_p, tau_p, goal_radius, mode_baseline],
1197
+ outputs=[out_p_summary, out_p_img1, out_p_img2, out_p_img3, out_p_csv]
1198
  )
1199
 
1200
+ with gr.Tab("Benchmarks"):
1201
+ gr.Markdown("# Benchmarks\nRun full packs from the Live Console tab.\nEverything is seeded, logged, and exportable.\n")
1202
+
1203
  with gr.Tab("Theory → Practice"):
1204
  gr.Markdown(THEORY_PRACTICE_MD)
1205