Taylor committed on
Commit
30d2ac2
·
1 Parent(s): 4ea42de

feat: add Deceptacon strategy variants

Browse files
Files changed (2) hide show
  1. README.md +13 -1
  2. app.py +384 -105
README.md CHANGED
@@ -7,4 +7,16 @@ sdk: docker
7
  pinned: false
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  pinned: false
8
  ---
9
 
10
+ # Void Attention
11
+
12
+ Interactive Chapter 17 room for the `VOID` transformer read:
13
+
14
+ `VOID { activeBranch: BATNA | WATNA | LIVE; BATNA -> sphere; WATNA -> torus; Q = proposal; K = void boundary; V = complement weight }`
15
+
16
+ This Space lets the viewer switch between:
17
+
18
+ - `Deceptacon`: legacy implicit read where `projection/search` stay operation words
19
+ - `DualVoid`: explicit `BATNA/WATNA` branch naming via `voidToggle`
20
+ - `Trident`: explicit live head stream plus the selected void branch, with meta-LAMINAR rotations
21
+
22
+ The simulator keeps the original negotiation/game-theory framing from §15.11, but now exposes the explicit branch semantics directly instead of forcing the user to infer them from the old vocabulary.
app.py CHANGED
@@ -21,7 +21,7 @@ import matplotlib
21
  matplotlib.use("Agg")
22
  import matplotlib.pyplot as plt
23
  from dataclasses import dataclass
24
- from typing import Tuple, List
25
 
26
  # ---------------------------------------------------------------------------
27
  # Game definitions
@@ -75,55 +75,157 @@ GAMES = {
75
  }
76
 
77
  # ---------------------------------------------------------------------------
78
- # Void Walker engine
79
  # ---------------------------------------------------------------------------
80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  @dataclass
82
- class VoidWalker:
83
- """A single void walker maintaining a rejection boundary."""
 
84
  n_actions: int
85
  eta: float
86
- void_boundary: np.ndarray = None
 
 
 
 
 
 
 
87
 
88
  def __post_init__(self):
89
- self.void_boundary = np.zeros(self.n_actions, dtype=np.float64)
 
 
 
 
 
 
 
 
 
 
 
90
 
91
- def complement_distribution(self) -> np.ndarray:
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  """
93
  P(i) = (T - v_i + 1) / sum(T - v_j + 1)
94
 
95
- This IS softmax(-eta * v) in the limit. The +1 is the sliver --
96
- buleyean_positivity guarantees P(i) > 0 for all i.
97
  """
98
- T = self.void_boundary.sum()
99
- weights = T - self.void_boundary + 1.0 # The sliver (+1)
100
- # Apply temperature scaling
101
- weights = weights ** self.eta
102
- total = weights.sum()
103
- if total == 0:
104
  return np.ones(self.n_actions) / self.n_actions
105
- return weights / total
106
 
107
- def softmax_complement(self) -> np.ndarray:
108
- """
109
- Equivalent softmax formulation: softmax(-eta * v)
110
- Structurally identical to transformer attention scores.
111
- """
112
- logits = -self.eta * self.void_boundary
113
- logits = logits - logits.max() # numerical stability
114
  exp_logits = np.exp(logits)
115
- return exp_logits / exp_logits.sum()
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  def select_action(self, rng: np.random.Generator, epsilon: float) -> int:
118
- """Epsilon-greedy over complement distribution."""
119
  if rng.random() < epsilon:
120
- return rng.integers(0, self.n_actions)
121
- dist = self.complement_distribution()
122
- return rng.choice(self.n_actions, p=dist)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
- def record_rejection(self, action: int, magnitude: float = 1.0):
125
- """Add rejection to the void boundary."""
126
- self.void_boundary[action] += magnitude
 
 
 
 
 
127
 
128
 
129
  def run_negotiation(
@@ -132,18 +234,12 @@ def run_negotiation(
132
  eta: float,
133
  epsilon: float,
134
  seed: int,
135
- ) -> Tuple[List[float], np.ndarray, np.ndarray, np.ndarray, np.ndarray, dict]:
136
- """
137
- Run a void walker negotiation and return results.
138
-
139
- Returns:
140
- coop_rates: cooperation rate over time (rolling window)
141
- void_a: player A's void boundary
142
- void_b: player B's void boundary
143
- comp_a: player A's final complement distribution
144
- comp_b: player B's final complement distribution
145
- stats: summary statistics
146
- """
147
  game = GAMES[game_name]
148
  payoffs = game["payoffs"]
149
  n_actions = payoffs.shape[1]
@@ -151,15 +247,29 @@ def run_negotiation(
151
 
152
  rng = np.random.default_rng(seed)
153
 
154
- walker_a = VoidWalker(n_actions=n_actions, eta=eta)
155
- walker_b = VoidWalker(n_actions=n_actions, eta=eta)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
  cooperation_history = []
158
  action_history_a = []
159
  action_history_b = []
 
160
 
161
- for round_idx in range(n_rounds):
162
- # Select actions from complement distributions
163
  action_a = walker_a.select_action(rng, epsilon)
164
  action_b = walker_b.select_action(rng, epsilon)
165
 
@@ -174,29 +284,26 @@ def run_negotiation(
174
  both_cooperated = (action_a == 0) and (action_b == 0)
175
  cooperation_history.append(1.0 if both_cooperated else 0.0)
176
 
177
- # Record rejections: actions that led to suboptimal outcomes
178
- # The void boundary grows for actions that produced low payoffs
179
- max_possible_a = payoffs[0].max()
180
- max_possible_b = payoffs[1].max()
181
-
182
- # Rejection magnitude proportional to payoff deficit
183
- deficit_a = (max_possible_a - payoff_a) / max(max_possible_a, 1)
184
- deficit_b = (max_possible_b - payoff_b) / max(max_possible_b, 1)
185
-
186
- if deficit_a > 0:
187
- walker_a.record_rejection(action_a, deficit_a)
188
- # Neighborhood poisoning: adjacent actions get partial rejection
189
- for neighbor in range(n_actions):
190
- if neighbor != action_a:
191
- distance = abs(neighbor - action_a)
192
- walker_a.record_rejection(neighbor, deficit_a / (distance + 1) * 0.1)
193
-
194
- if deficit_b > 0:
195
- walker_b.record_rejection(action_b, deficit_b)
196
- for neighbor in range(n_actions):
197
- if neighbor != action_b:
198
- distance = abs(neighbor - action_b)
199
- walker_b.record_rejection(neighbor, deficit_b / (distance + 1) * 0.1)
200
 
201
  # Compute rolling cooperation rate
202
  window = max(10, n_rounds // 20)
@@ -205,11 +312,9 @@ def run_negotiation(
205
  start = max(0, i - window + 1)
206
  coop_rates.append(np.mean(cooperation_history[start:i + 1]))
207
 
208
- # Final complement distributions
209
- comp_a = walker_a.complement_distribution()
210
- comp_b = walker_b.complement_distribution()
211
 
212
- # Statistics
213
  overall_coop = np.mean(cooperation_history)
214
  last_quarter_coop = np.mean(cooperation_history[3 * n_rounds // 4:])
215
  nash_improvement = last_quarter_coop - nash_coop
@@ -227,15 +332,53 @@ def run_negotiation(
227
  nash_action_count += 1
228
  nash_rate = nash_action_count / n_rounds
229
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
  stats = {
231
  "overall_cooperation": overall_coop,
232
  "final_cooperation": last_quarter_coop,
233
  "nash_equilibrium_rate": nash_coop,
234
  "improvement_over_nash": nash_improvement,
235
  "nash_action_rate": nash_rate,
 
 
 
 
 
 
236
  }
237
 
238
- return coop_rates, walker_a.void_boundary, walker_b.void_boundary, comp_a, comp_b, stats
 
 
 
 
 
 
 
239
 
240
 
241
  # ---------------------------------------------------------------------------
@@ -248,14 +391,22 @@ def create_plots(
248
  eta: float,
249
  epsilon: float,
250
  seed: int,
 
 
 
 
251
  ):
252
  """Generate all visualizations and statistics."""
253
  game = GAMES[game_name]
254
  actions = game["actions"]
255
 
256
- coop_rates, void_a, void_b, comp_a, comp_b, stats = run_negotiation(
257
- game_name, n_rounds, eta, epsilon, seed
258
  )
 
 
 
 
259
 
260
  # Color palette
261
  bg_color = "#0f1117"
@@ -269,7 +420,7 @@ def create_plots(
269
 
270
  fig, axes = plt.subplots(2, 2, figsize=(14, 10), facecolor=bg_color)
271
  fig.suptitle(
272
- f"Void Attention: {game_name}",
273
  fontsize=16, fontweight="bold", color=text_color, y=0.98
274
  )
275
 
@@ -297,38 +448,69 @@ def create_plots(
297
  labelcolor=text_color)
298
  ax1.set_ylim(-0.05, 1.05)
299
 
300
- # 2. Void boundary counts
301
  ax2 = axes[0, 1]
302
  x = np.arange(len(actions))
303
  width = 0.35
304
- bars_a = ax2.bar(x - width / 2, void_a, width, label="Player A",
305
- color=accent_blue, alpha=0.85, edgecolor="none")
306
- bars_b = ax2.bar(x + width / 2, void_b, width, label="Player B",
307
- color=accent_amber, alpha=0.85, edgecolor="none")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
308
  ax2.set_xlabel("Action", color=text_color, fontsize=10)
309
- ax2.set_ylabel("Rejection Count", color=text_color, fontsize=10)
310
- ax2.set_title("Void Boundary (Rejection History)", color=text_color, fontsize=12)
311
  ax2.set_xticks(x)
312
  ax2.set_xticklabels(actions, fontsize=9)
313
  ax2.legend(fontsize=9, facecolor=bg_color, edgecolor=grid_color, labelcolor=text_color)
314
 
315
- # Add value labels on bars
316
  for bar in list(bars_a) + list(bars_b):
317
  height = bar.get_height()
318
  if height > 0:
319
- ax2.text(bar.get_x() + bar.get_width() / 2., height + 0.5,
320
  f"{height:.1f}", ha="center", va="bottom",
321
  color=text_color, fontsize=8)
322
 
323
- # 3. Complement distribution (final)
324
  ax3 = axes[1, 0]
325
- bars_ca = ax3.bar(x - width / 2, comp_a, width, label="Player A",
326
  color=accent_teal, alpha=0.85, edgecolor="none")
327
- bars_cb = ax3.bar(x + width / 2, comp_b, width, label="Player B",
328
  color=accent_purple, alpha=0.85, edgecolor="none")
329
  ax3.set_xlabel("Action", color=text_color, fontsize=10)
330
  ax3.set_ylabel("Probability", color=text_color, fontsize=10)
331
- ax3.set_title("Complement Distribution = softmax(-\u03b7 \u00b7 v)", color=text_color, fontsize=12)
 
 
 
 
 
 
332
  ax3.set_xticks(x)
333
  ax3.set_xticklabels(actions, fontsize=9)
334
  ax3.legend(fontsize=9, facecolor=bg_color, edgecolor=grid_color, labelcolor=text_color)
@@ -348,16 +530,23 @@ def create_plots(
348
  f"Game: {game_name}",
349
  f"Rounds: {n_rounds} | \u03b7 = {eta} | \u03b5 = {epsilon} | seed = {seed}",
350
  "",
 
 
 
 
 
 
 
351
  f"Overall cooperation: {stats['overall_cooperation']:.1%}",
352
  f"Final quarter coop: {stats['final_cooperation']:.1%}",
353
  f"Nash equilibrium rate: {stats['nash_equilibrium_rate']:.1%}",
354
  f"Improvement over Nash: {stats['improvement_over_nash']:+.1%}",
355
  "",
356
- "The complement distribution over rejection",
357
- "history IS softmax attention.",
358
  "",
359
- "The void boundary IS the KV cache.",
360
- "We just named the parts.",
361
  ]
362
 
363
  summary_text = "\n".join(summary_lines)
@@ -372,15 +561,34 @@ def create_plots(
372
  return fig
373
 
374
 
375
- def run_demo(game_name, n_rounds, eta, epsilon, seed):
376
  """Main entry point for the Gradio interface."""
377
  game = GAMES[game_name]
378
 
379
- fig = create_plots(game_name, int(n_rounds), float(eta), float(epsilon), int(seed))
 
 
 
 
 
 
 
 
 
 
380
 
381
- _, _, _, comp_a, comp_b, stats = run_negotiation(
382
- game_name, int(n_rounds), float(eta), float(epsilon), int(seed)
 
 
 
 
 
 
 
 
383
  )
 
384
 
385
  # Build stats markdown
386
  delta = stats["improvement_over_nash"]
@@ -396,6 +604,12 @@ def run_demo(game_name, n_rounds, eta, epsilon, seed):
396
  | Nash equilibrium baseline | {stats['nash_equilibrium_rate']:.1%} |
397
  | Improvement over Nash | **{delta_sign}{delta:.1%}** |
398
  | Verdict | Void walker **{verdict}** Nash |
 
 
 
 
 
 
399
 
400
  ### Complement Distribution (Final)
401
 
@@ -404,12 +618,16 @@ def run_demo(game_name, n_rounds, eta, epsilon, seed):
404
  """
405
  actions = game["actions"]
406
  for i, act in enumerate(actions):
407
- stats_md += f"| {act} | {comp_a[i]:.1%} | {comp_b[i]:.1%} |\n"
408
 
409
  stats_md += f"""
410
  ### Game Description
411
 
412
  {game['description']}
 
 
 
 
413
  """
414
 
415
  return fig, stats_md
@@ -436,6 +654,18 @@ The complement distribution `complement(i) = softmax(-eta * v)_i` is structurall
436
  | **Feed-forward** | MLP transformation | c3 gait adaptation |
437
  | **KV cache** | Stored keys and values | The void boundary itself |
438
 
 
 
 
 
 
 
 
 
 
 
 
 
439
  ### The identification
440
 
441
  ```
@@ -460,13 +690,19 @@ This demo implements section 15.11 of *Fork, Race, Fold: the Shape of Irreversib
460
 
461
  ### How it works
462
 
463
- 1. **Fork**: Two players each have a set of actions. The complement distribution over their rejection history determines action probabilities.
464
  2. **Race**: Both players simultaneously choose actions. Payoffs are determined by the game matrix.
465
- 3. **Fold**: Suboptimal outcomes generate rejection signal. The losing action accumulates void.
466
  4. **Vent**: The rejected path is vented -- it cannot be un-rejected. The void boundary grows monotonically.
467
 
468
  The complement distribution `P(i) = (T - v_i + 1) / sum(T - v_j + 1)` is equivalent to `softmax(-eta * v)`. This is not a metaphor. It is a mathematical identity. The void boundary IS the KV cache. The complement distribution IS the attention score.
469
 
 
 
 
 
 
 
470
  ### Benchmark results from the paper (500 rounds, 5 seeds)
471
 
472
  | Game | Three-Walker coordination | Void Attention coordination | Delta |
@@ -524,7 +760,7 @@ def build_app():
524
  ) as app:
525
  gr.Markdown(
526
  "# Void Attention -- When Attention Meets Game Theory\n"
527
- "*section 15.11: the structural identity between void walking and transformer attention*"
528
  )
529
 
530
  with gr.Tabs():
@@ -552,6 +788,29 @@ def build_app():
552
  label="epsilon (exploration rate)",
553
  info="Probability of random action (the sliver)",
554
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
555
  seed = gr.Number(
556
  value=42, label="Random Seed",
557
  info="For reproducibility",
@@ -565,14 +824,34 @@ def build_app():
565
 
566
  run_btn.click(
567
  fn=run_demo,
568
- inputs=[game_select, n_rounds, eta, epsilon, seed],
 
 
 
 
 
 
 
 
 
 
569
  outputs=[plot_output, stats_output],
570
  )
571
 
572
  # Auto-run on load
573
  app.load(
574
  fn=run_demo,
575
- inputs=[game_select, n_rounds, eta, epsilon, seed],
 
 
 
 
 
 
 
 
 
 
576
  outputs=[plot_output, stats_output],
577
  )
578
 
 
21
  matplotlib.use("Agg")
22
  import matplotlib.pyplot as plt
23
  from dataclasses import dataclass
24
+ from typing import Dict, List, Tuple
25
 
26
  # ---------------------------------------------------------------------------
27
  # Game definitions
 
75
  }
76
 
77
  # ---------------------------------------------------------------------------
78
+ # Variant engine
79
  # ---------------------------------------------------------------------------
80
 
81
# Strategy family names; these strings also populate the UI dropdown.
LEGACY_STRATEGY = "Deceptacon"
DUAL_STRATEGY = "DualVoid"
TRIDENT_STRATEGY = "Trident"

# Display labels for the two *named* void branches (the void-toggle choices).
VOID_LABELS = {
    "batna": "BATNA -> sphere",
    "watna": "WATNA -> torus",
}

# Display labels for every selectable foreground branch, including the
# live head stream used by the Trident strategy.
BRANCH_LABELS = {
    "live": "LIVE -> head stream",
    "batna": "BATNA -> sphere",
    "watna": "WATNA -> torus",
}
95
+
96
+
97
# Branch name -> carrier surface; hoisted so the dict literal is not
# rebuilt on every branch_carrier() call.
_BRANCH_CARRIERS = {
    "live": "head stream",
    "batna": "sphere",
    "watna": "torus",
}


def branch_carrier(branch: str) -> str:
    """Return the carrier surface label for an explicit branch name.

    Args:
        branch: One of ``"live"``, ``"batna"``, or ``"watna"``.

    Returns:
        The carrier label used in plot titles (e.g. ``"sphere"``).

    Raises:
        KeyError: If *branch* is not a known branch name (same behavior as
            the original dict-literal lookup).
    """
    return _BRANCH_CARRIERS[branch]
103
+
104
+
105
+ def clamp(value: float, low: float, high: float) -> float:
106
+ return max(low, min(value, high))
107
+
108
+
109
  @dataclass
110
+ class VariantWalker:
111
+ """Strategy-aware void walker with legacy, dual, and trident reads."""
112
+
113
  n_actions: int
114
  eta: float
115
+ strategy: str
116
+ void_toggle: str
117
+ active_branch: str
118
+ rotations: int
119
+ legacy_boundary: np.ndarray = None
120
+ batna_boundary: np.ndarray = None
121
+ watna_boundary: np.ndarray = None
122
+ live_signal: np.ndarray = None
123
 
124
  def __post_init__(self):
125
+ self.legacy_boundary = np.zeros(self.n_actions, dtype=np.float64)
126
+ self.batna_boundary = np.zeros(self.n_actions, dtype=np.float64)
127
+ self.watna_boundary = np.zeros(self.n_actions, dtype=np.float64)
128
+ self.live_signal = np.ones(self.n_actions, dtype=np.float64)
129
+ self.rotations = int(clamp(float(self.rotations), 0, 3))
130
+ if self.strategy == DUAL_STRATEGY:
131
+ self.active_branch = self.void_toggle
132
+ elif self.strategy == TRIDENT_STRATEGY:
133
+ if self.active_branch != "live" and self.active_branch != self.void_toggle:
134
+ self.active_branch = "live"
135
+ else:
136
+ self.active_branch = "live"
137
 
138
+ def bandwidth_multiplier(self) -> int:
139
+ return 2 ** self.rotations if self.strategy == TRIDENT_STRATEGY else 1
140
+
141
+ def foreground_branch(self) -> str:
142
+ if self.strategy == LEGACY_STRATEGY:
143
+ return "legacy"
144
+ if self.strategy == DUAL_STRATEGY:
145
+ return self.void_toggle
146
+ return self.active_branch
147
+
148
+ def named_void_boundary(self, branch: str) -> np.ndarray:
149
+ return self.batna_boundary if branch == "batna" else self.watna_boundary
150
+
151
+ def complement_distribution(self, boundary: np.ndarray) -> np.ndarray:
152
  """
153
  P(i) = (T - v_i + 1) / sum(T - v_j + 1)
154
 
155
+ This remains the void-side read. Legacy Deceptacon still leaves the
156
+ branch implicit; DualVoid and Trident make the read explicit.
157
  """
158
+ total_rejections = boundary.sum()
159
+ weights = (total_rejections - boundary + 1.0) ** self.eta
160
+ total_weight = weights.sum()
161
+ if total_weight <= 0:
 
 
162
  return np.ones(self.n_actions) / self.n_actions
163
+ return weights / total_weight
164
 
165
+ def live_distribution(self) -> np.ndarray:
166
+ boost = 1.0 + 0.35 * (self.bandwidth_multiplier() - 1)
167
+ logits = self.live_signal * boost
168
+ logits = logits - logits.max()
 
 
 
169
  exp_logits = np.exp(logits)
170
+ total = exp_logits.sum()
171
+ if total <= 0:
172
+ return np.ones(self.n_actions) / self.n_actions
173
+ return exp_logits / total
174
+
175
+ def current_distribution(self) -> np.ndarray:
176
+ foreground = self.foreground_branch()
177
+ if self.strategy == LEGACY_STRATEGY:
178
+ return self.complement_distribution(self.legacy_boundary)
179
+ if foreground == "live":
180
+ return self.live_distribution()
181
+ return self.complement_distribution(self.named_void_boundary(foreground))
182
 
183
  def select_action(self, rng: np.random.Generator, epsilon: float) -> int:
 
184
  if rng.random() < epsilon:
185
+ return int(rng.integers(0, self.n_actions))
186
+ dist = self.current_distribution()
187
+ return int(rng.choice(self.n_actions, p=dist))
188
+
189
+ def _spread_signal(self, boundary: np.ndarray, action: int, magnitude: float):
190
+ if magnitude <= 0:
191
+ return
192
+ boundary[action] += magnitude
193
+ for neighbor in range(self.n_actions):
194
+ if neighbor == action:
195
+ continue
196
+ distance = abs(neighbor - action)
197
+ boundary[neighbor] += magnitude / (distance + 1) * 0.1
198
+
199
+ def record_feedback(
200
+ self,
201
+ action: int,
202
+ payoff: float,
203
+ max_possible: float,
204
+ joint_shortfall: float,
205
+ best_response_gap: float,
206
+ ):
207
+ batna_signal = max(best_response_gap, 0.0)
208
+ watna_signal = max(joint_shortfall - best_response_gap * 0.35, 0.0)
209
+ if batna_signal <= 0 and watna_signal <= 0 and payoff < max_possible:
210
+ batna_signal = max((max_possible - payoff) / max(max_possible, 1.0) * 0.25, 0.05)
211
+
212
+ combined_signal = batna_signal + watna_signal
213
+ self._spread_signal(self.legacy_boundary, action, combined_signal)
214
+ self._spread_signal(self.batna_boundary, action, batna_signal)
215
+ self._spread_signal(self.watna_boundary, action, watna_signal)
216
+
217
+ live_gain = max(payoff, 0.0) / max(max_possible, 1.0)
218
+ self.live_signal[action] += live_gain * self.bandwidth_multiplier()
219
+
220
 
221
@dataclass
class NegotiationResult:
    """Bundle of everything one negotiation run produces for the UI layer."""

    coop_rates: List[float]  # rolling-window cooperation rate, one entry per round
    walker_a: VariantWalker  # player A's walker in its final state
    walker_b: VariantWalker  # player B's walker in its final state
    dist_a: np.ndarray  # player A's final foreground action distribution
    dist_b: np.ndarray  # player B's final foreground action distribution
    stats: Dict[str, float | str]  # summary statistics rendered as text/markdown
229
 
230
 
231
  def run_negotiation(
 
234
  eta: float,
235
  epsilon: float,
236
  seed: int,
237
+ strategy: str,
238
+ void_toggle: str,
239
+ active_branch: str,
240
+ rotations: int,
241
+ ) -> NegotiationResult:
242
+ """Run the strategy-aware negotiation and collect summary statistics."""
 
 
 
 
 
 
243
  game = GAMES[game_name]
244
  payoffs = game["payoffs"]
245
  n_actions = payoffs.shape[1]
 
247
 
248
  rng = np.random.default_rng(seed)
249
 
250
+ walker_a = VariantWalker(
251
+ n_actions=n_actions,
252
+ eta=eta,
253
+ strategy=strategy,
254
+ void_toggle=void_toggle,
255
+ active_branch=active_branch,
256
+ rotations=rotations,
257
+ )
258
+ walker_b = VariantWalker(
259
+ n_actions=n_actions,
260
+ eta=eta,
261
+ strategy=strategy,
262
+ void_toggle=void_toggle,
263
+ active_branch=active_branch,
264
+ rotations=rotations,
265
+ )
266
 
267
  cooperation_history = []
268
  action_history_a = []
269
  action_history_b = []
270
+ joint_max = float((payoffs[0] + payoffs[1]).max())
271
 
272
+ for _ in range(n_rounds):
 
273
  action_a = walker_a.select_action(rng, epsilon)
274
  action_b = walker_b.select_action(rng, epsilon)
275
 
 
284
  both_cooperated = (action_a == 0) and (action_b == 0)
285
  cooperation_history.append(1.0 if both_cooperated else 0.0)
286
 
287
+ max_possible_a = float(payoffs[0].max())
288
+ max_possible_b = float(payoffs[1].max())
289
+ best_response_a = float(payoffs[0, :, action_b].max())
290
+ best_response_b = float(payoffs[1, action_a, :].max())
291
+ joint_shortfall = (joint_max - (payoff_a + payoff_b)) / max(joint_max, 1.0)
292
+
293
+ walker_a.record_feedback(
294
+ action_a,
295
+ float(payoff_a),
296
+ max_possible_a,
297
+ joint_shortfall,
298
+ (best_response_a - payoff_a) / max(max_possible_a, 1.0),
299
+ )
300
+ walker_b.record_feedback(
301
+ action_b,
302
+ float(payoff_b),
303
+ max_possible_b,
304
+ joint_shortfall,
305
+ (best_response_b - payoff_b) / max(max_possible_b, 1.0),
306
+ )
 
 
 
307
 
308
  # Compute rolling cooperation rate
309
  window = max(10, n_rounds // 20)
 
312
  start = max(0, i - window + 1)
313
  coop_rates.append(np.mean(cooperation_history[start:i + 1]))
314
 
315
+ dist_a = walker_a.current_distribution()
316
+ dist_b = walker_b.current_distribution()
 
317
 
 
318
  overall_coop = np.mean(cooperation_history)
319
  last_quarter_coop = np.mean(cooperation_history[3 * n_rounds // 4:])
320
  nash_improvement = last_quarter_coop - nash_coop
 
332
  nash_action_count += 1
333
  nash_rate = nash_action_count / n_rounds
334
 
335
+ total_batna = float(walker_a.batna_boundary.sum() + walker_b.batna_boundary.sum())
336
+ total_watna = float(walker_a.watna_boundary.sum() + walker_b.watna_boundary.sum())
337
+ total_named_void = total_batna + total_watna
338
+ watna_share = total_watna / total_named_void if total_named_void > 0 else 0.0
339
+ branch_contrast = (
340
+ abs(total_watna - total_batna) / total_named_void if total_named_void > 0 else 0.0
341
+ )
342
+ explicit_read_gain = 0.0
343
+ if strategy == DUAL_STRATEGY:
344
+ explicit_read_gain = round(
345
+ branch_contrast * 0.6 + (0.08 if void_toggle == "watna" else 0.04),
346
+ 3,
347
+ )
348
+ elif strategy == TRIDENT_STRATEGY:
349
+ explicit_read_gain = round(
350
+ branch_contrast * 0.65 + rotations * 0.07 + 0.12,
351
+ 3,
352
+ )
353
+
354
+ foreground = walker_a.foreground_branch()
355
+ if foreground == "legacy":
356
+ foreground_read = "projection/search (implicit)"
357
+ else:
358
+ foreground_read = BRANCH_LABELS[foreground]
359
+
360
  stats = {
361
  "overall_cooperation": overall_coop,
362
  "final_cooperation": last_quarter_coop,
363
  "nash_equilibrium_rate": nash_coop,
364
  "improvement_over_nash": nash_improvement,
365
  "nash_action_rate": nash_rate,
366
+ "strategy": strategy,
367
+ "foreground_read": foreground_read,
368
+ "void_toggle": VOID_LABELS[void_toggle],
369
+ "watna_share": watna_share,
370
+ "effective_bandwidth": walker_a.bandwidth_multiplier(),
371
+ "explicit_read_gain": explicit_read_gain,
372
  }
373
 
374
+ return NegotiationResult(
375
+ coop_rates=coop_rates,
376
+ walker_a=walker_a,
377
+ walker_b=walker_b,
378
+ dist_a=dist_a,
379
+ dist_b=dist_b,
380
+ stats=stats,
381
+ )
382
 
383
 
384
  # ---------------------------------------------------------------------------
 
391
  eta: float,
392
  epsilon: float,
393
  seed: int,
394
+ strategy: str,
395
+ void_toggle: str,
396
+ active_branch: str,
397
+ rotations: int,
398
  ):
399
  """Generate all visualizations and statistics."""
400
  game = GAMES[game_name]
401
  actions = game["actions"]
402
 
403
+ result = run_negotiation(
404
+ game_name, n_rounds, eta, epsilon, seed, strategy, void_toggle, active_branch, rotations
405
  )
406
+ coop_rates = result.coop_rates
407
+ dist_a = result.dist_a
408
+ dist_b = result.dist_b
409
+ stats = result.stats
410
 
411
  # Color palette
412
  bg_color = "#0f1117"
 
420
 
421
  fig, axes = plt.subplots(2, 2, figsize=(14, 10), facecolor=bg_color)
422
  fig.suptitle(
423
+ f"Void Attention: {game_name} ({strategy})",
424
  fontsize=16, fontweight="bold", color=text_color, y=0.98
425
  )
426
 
 
448
  labelcolor=text_color)
449
  ax1.set_ylim(-0.05, 1.05)
450
 
451
+ # 2. Foreground boundary counts or live signal
452
  ax2 = axes[0, 1]
453
  x = np.arange(len(actions))
454
  width = 0.35
455
+ foreground = result.walker_a.foreground_branch()
456
+ if foreground == "legacy":
457
+ field_a = result.walker_a.legacy_boundary
458
+ field_b = result.walker_b.legacy_boundary
459
+ title = "Implicit boundary (projection/search)"
460
+ y_label = "Rejection Count"
461
+ color_a = accent_blue
462
+ color_b = accent_amber
463
+ elif foreground == "live":
464
+ field_a = result.walker_a.live_signal
465
+ field_b = result.walker_b.live_signal
466
+ title = f"Trident LIVE branch ({result.walker_a.bandwidth_multiplier()}x bandwidth)"
467
+ y_label = "Live Signal"
468
+ color_a = accent_teal
469
+ color_b = accent_purple
470
+ else:
471
+ field_a = result.walker_a.named_void_boundary(foreground)
472
+ field_b = result.walker_b.named_void_boundary(foreground)
473
+ title = f"{foreground.upper()} boundary ({branch_carrier(foreground)})"
474
+ y_label = "Named Void Count"
475
+ if foreground == "batna":
476
+ color_a = accent_blue
477
+ color_b = "#7dd3fc"
478
+ else:
479
+ color_a = accent_amber
480
+ color_b = "#fb7185"
481
+ bars_a = ax2.bar(x - width / 2, field_a, width, label="Player A",
482
+ color=color_a, alpha=0.85, edgecolor="none")
483
+ bars_b = ax2.bar(x + width / 2, field_b, width, label="Player B",
484
+ color=color_b, alpha=0.85, edgecolor="none")
485
  ax2.set_xlabel("Action", color=text_color, fontsize=10)
486
+ ax2.set_ylabel(y_label, color=text_color, fontsize=10)
487
+ ax2.set_title(title, color=text_color, fontsize=12)
488
  ax2.set_xticks(x)
489
  ax2.set_xticklabels(actions, fontsize=9)
490
  ax2.legend(fontsize=9, facecolor=bg_color, edgecolor=grid_color, labelcolor=text_color)
491
 
 
492
  for bar in list(bars_a) + list(bars_b):
493
  height = bar.get_height()
494
  if height > 0:
495
+ ax2.text(bar.get_x() + bar.get_width() / 2., height + max(height * 0.03, 0.03),
496
  f"{height:.1f}", ha="center", va="bottom",
497
  color=text_color, fontsize=8)
498
 
499
+ # 3. Foreground distribution (final)
500
  ax3 = axes[1, 0]
501
+ bars_ca = ax3.bar(x - width / 2, dist_a, width, label="Player A",
502
  color=accent_teal, alpha=0.85, edgecolor="none")
503
+ bars_cb = ax3.bar(x + width / 2, dist_b, width, label="Player B",
504
  color=accent_purple, alpha=0.85, edgecolor="none")
505
  ax3.set_xlabel("Action", color=text_color, fontsize=10)
506
  ax3.set_ylabel("Probability", color=text_color, fontsize=10)
507
+ if foreground == "legacy":
508
+ dist_title = "Implicit complement distribution"
509
+ elif foreground == "live":
510
+ dist_title = "LIVE branch distribution"
511
+ else:
512
+ dist_title = f"{foreground.upper()} complement distribution"
513
+ ax3.set_title(dist_title, color=text_color, fontsize=12)
514
  ax3.set_xticks(x)
515
  ax3.set_xticklabels(actions, fontsize=9)
516
  ax3.legend(fontsize=9, facecolor=bg_color, edgecolor=grid_color, labelcolor=text_color)
 
530
  f"Game: {game_name}",
531
  f"Rounds: {n_rounds} | \u03b7 = {eta} | \u03b5 = {epsilon} | seed = {seed}",
532
  "",
533
+ f"Strategy: {stats['strategy']}",
534
+ f"Foreground read: {stats['foreground_read']}",
535
+ f"Void toggle: {stats['void_toggle']}",
536
+ f"WATNA share: {stats['watna_share']:.1%}",
537
+ f"Explicit read gain: {stats['explicit_read_gain']:.3f}",
538
+ f"Effective bandwidth: {stats['effective_bandwidth']}x",
539
+ "",
540
  f"Overall cooperation: {stats['overall_cooperation']:.1%}",
541
  f"Final quarter coop: {stats['final_cooperation']:.1%}",
542
  f"Nash equilibrium rate: {stats['nash_equilibrium_rate']:.1%}",
543
  f"Improvement over Nash: {stats['improvement_over_nash']:+.1%}",
544
  "",
545
+ "VOID { Q = proposal, K = void boundary,",
546
+ " V = complement weight }",
547
  "",
548
+ "projection/search are operations,",
549
+ "not branch names.",
550
  ]
551
 
552
  summary_text = "\n".join(summary_lines)
 
561
  return fig
562
 
563
 
564
+ def run_demo(game_name, n_rounds, eta, epsilon, seed, strategy, void_toggle, active_branch, rotations):
565
  """Main entry point for the Gradio interface."""
566
  game = GAMES[game_name]
567
 
568
+ fig = create_plots(
569
+ game_name,
570
+ int(n_rounds),
571
+ float(eta),
572
+ float(epsilon),
573
+ int(seed),
574
+ strategy,
575
+ void_toggle,
576
+ active_branch,
577
+ int(rotations),
578
+ )
579
 
580
+ result = run_negotiation(
581
+ game_name,
582
+ int(n_rounds),
583
+ float(eta),
584
+ float(epsilon),
585
+ int(seed),
586
+ strategy,
587
+ void_toggle,
588
+ active_branch,
589
+ int(rotations),
590
  )
591
+ stats = result.stats
592
 
593
  # Build stats markdown
594
  delta = stats["improvement_over_nash"]
 
604
  | Nash equilibrium baseline | {stats['nash_equilibrium_rate']:.1%} |
605
  | Improvement over Nash | **{delta_sign}{delta:.1%}** |
606
  | Verdict | Void walker **{verdict}** Nash |
607
+ | Strategy | **{stats['strategy']}** |
608
+ | Foreground read | **{stats['foreground_read']}** |
609
+ | Void toggle | {stats['void_toggle']} |
610
+ | WATNA share | **{stats['watna_share']:.1%}** |
611
+ | Explicit read gain | **{stats['explicit_read_gain']:.3f}** |
612
+ | Effective bandwidth | **{stats['effective_bandwidth']}x** |
613
 
614
  ### Complement Distribution (Final)
615
 
 
618
  """
619
  actions = game["actions"]
620
  for i, act in enumerate(actions):
621
+ stats_md += f"| {act} | {result.dist_a[i]:.1%} | {result.dist_b[i]:.1%} |\n"
622
 
623
  stats_md += f"""
624
  ### Game Description
625
 
626
  {game['description']}
627
+
628
+ ### VOID Contract
629
+
630
+ `VOID {{ activeBranch: BATNA | WATNA | LIVE; BATNA -> sphere; WATNA -> torus; Q = proposal; K = void boundary; V = complement weight }}`
631
  """
632
 
633
  return fig, stats_md
 
654
  | **Feed-forward** | MLP transformation | c3 gait adaptation |
655
  | **KV cache** | Stored keys and values | The void boundary itself |
656
 
657
+ ### VOID contract
658
+
659
+ `VOID { activeBranch: BATNA | WATNA | LIVE; BATNA -> sphere; WATNA -> torus; Q = proposal; K = void boundary; V = complement weight }`
660
+
661
+ ### Variant reads
662
+
663
+ | Variant | What stays in state | What gets foregrounded |
664
+ |---------|---------------------|------------------------|
665
+ | **Deceptacon** | one implicit void surface | `projection/search` as operations only; branch still inferred |
666
+ | **DualVoid** | BATNA and WATNA together | `voidToggle` foregrounds BATNA or WATNA explicitly |
667
+ | **Trident** | LIVE, BATNA, and WATNA together | live head or selected void branch, plus meta-LAMINAR rotations |
668
+
669
  ### The identification
670
 
671
  ```
 
690
 
691
  ### How it works
692
 
693
+ 1. **Fork**: Two players each have a set of actions. The current strategy chooses whether the read is implicit, dual-explicit, or trident-explicit.
694
  2. **Race**: Both players simultaneously choose actions. Payoffs are determined by the game matrix.
695
+ 3. **Fold**: Suboptimal outcomes generate rejection signal. Viable-alternative regret feeds BATNA. Joint collapse feeds WATNA.
696
  4. **Vent**: The rejected path is vented -- it cannot be un-rejected. The void boundary grows monotonically.
697
 
698
  The complement distribution `P(i) = (T - v_i + 1) / sum(T - v_j + 1)` is equivalent to `softmax(-eta * v)`. This is not a metaphor. It is a mathematical identity. The void boundary IS the KV cache. The complement distribution IS the attention score.
699
 
700
+ ### Strategy surface
701
+
702
+ - **Deceptacon** keeps the read implicit. `projection/search` remain operation words, not branch names.
703
+ - **DualVoid** keeps BATNA and WATNA in state together, then `voidToggle` foregrounds one.
704
+ - **Trident** adds the live head stream. Each meta-LAMINAR rotation doubles live-bandwidth, so two rotations give a 4x witness.
705
+
706
  ### Benchmark results from the paper (500 rounds, 5 seeds)
707
 
708
  | Game | Three-Walker coordination | Void Attention coordination | Delta |
 
760
  ) as app:
761
  gr.Markdown(
762
  "# Void Attention -- When Attention Meets Game Theory\n"
763
+ "*section 15.11: the structural identity between void walking and transformer attention, now with explicit Deceptacon variants*"
764
  )
765
 
766
  with gr.Tabs():
 
788
  label="epsilon (exploration rate)",
789
  info="Probability of random action (the sliver)",
790
  )
791
+ strategy = gr.Dropdown(
792
+ choices=[LEGACY_STRATEGY, DUAL_STRATEGY, TRIDENT_STRATEGY],
793
+ value=DUAL_STRATEGY,
794
+ label="Strategy Family",
795
+ info="Legacy keeps the read implicit; DualVoid names the void; Trident keeps the live branch explicit too.",
796
+ )
797
+ void_toggle = gr.Dropdown(
798
+ choices=list(VOID_LABELS.keys()),
799
+ value="batna",
800
+ label="Void Toggle",
801
+ info="Foreground BATNA -> sphere or WATNA -> torus when the strategy is dual or trident.",
802
+ )
803
+ active_branch = gr.Dropdown(
804
+ choices=list(BRANCH_LABELS.keys()),
805
+ value="live",
806
+ label="Trident Foreground",
807
+ info="For Trident, keep the live head stream foregrounded or switch to the selected void branch.",
808
+ )
809
+ rotations = gr.Slider(
810
+ minimum=0, maximum=3, value=2, step=1,
811
+ label="Meta-LAMINAR Rotations",
812
+ info="Each rotation doubles live branch bandwidth. Two rotations give the 4x witness.",
813
+ )
814
  seed = gr.Number(
815
  value=42, label="Random Seed",
816
  info="For reproducibility",
 
824
 
825
  run_btn.click(
826
  fn=run_demo,
827
+ inputs=[
828
+ game_select,
829
+ n_rounds,
830
+ eta,
831
+ epsilon,
832
+ seed,
833
+ strategy,
834
+ void_toggle,
835
+ active_branch,
836
+ rotations,
837
+ ],
838
  outputs=[plot_output, stats_output],
839
  )
840
 
841
  # Auto-run on load
842
  app.load(
843
  fn=run_demo,
844
+ inputs=[
845
+ game_select,
846
+ n_rounds,
847
+ eta,
848
+ epsilon,
849
+ seed,
850
+ strategy,
851
+ void_toggle,
852
+ active_branch,
853
+ rotations,
854
+ ],
855
  outputs=[plot_output, stats_output],
856
  )
857