# void-attention / app.py
# (GitHub listing residue preserved as a comment so the module stays
#  importable: author "Taylor", commit 1187eb9,
#  "chore: add void attention footer links".)
"""
Void Attention -- When Attention Meets Game Theory
Interactive demo of section 15.11: the structural identity between void walking
and transformer attention. Two-player game theory negotiation where the
complement distribution over rejection history IS softmax attention.
Query = current proposal. Key = void boundary. Value = complement weight.
Temperature = 1/eta. The void boundary IS the KV cache. We just named the parts.
All computation is live. No hardcoded outputs. Deterministic seeds for
reproducibility. Pure Python + numpy.
Reference: "Fork, Race, Fold: the Shape of Irreversible Process"
https://forkracefold.com/
"""
import gradio as gr
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from dataclasses import dataclass
from typing import Dict, List, Tuple
# ---------------------------------------------------------------------------
# Game definitions
# ---------------------------------------------------------------------------
# Game registry. Each entry maps to:
#   payoffs: ndarray of shape (n_actions, n_actions, 2), laid out as
#            payoffs[action_a][action_b] = (payoff to Player A, payoff to Player B).
#            This pair layout is confirmed by the quoted equilibria: e.g. for
#            Hawk-Dove the mixed Nash solves 1 + 2p = 5p -> p(Dove) = 1/3,
#            matching nash_coop_rate below.
#   actions: display labels; index 0 is always the "cooperative" action
#            (run_negotiation counts a round as cooperation when both play 0).
#   nash_coop_rate: cooperation rate at the (mixed) Nash equilibrium, used as
#            the red baseline in plots and the stats tables.
#   description: prose shown in the results panel.
GAMES = {
    "Hawk-Dove": {
        "payoffs": np.array([
            [[3, 3], [1, 5]],  # Player A: [cooperate, defect] x Player B: [cooperate, defect]
            [[5, 1], [0, 0]],
        ]),
        "actions": ["Dove (cooperate)", "Hawk (defect)"],
        "nash_coop_rate": 1 / 3,  # Mixed Nash: play Dove 1/3 of the time
        "description": "Asymmetric conflict. Nash equilibrium mixes at 33.3% cooperation. "
                       "Void walkers learn to cooperate at ~67-88% by accumulating rejection "
                       "signal from mutual Hawk crashes.",
    },
    "Prisoner's Dilemma": {
        "payoffs": np.array([
            [[3, 3], [0, 5]],
            [[5, 0], [1, 1]],
        ]),
        "actions": ["Cooperate", "Defect"],
        "nash_coop_rate": 0.0,  # Pure Nash: both defect
        "description": "The canonical social dilemma. Nash equilibrium is mutual defection (0% cooperation). "
                       "Void walkers discover that mutual defection accumulates rejection, "
                       "pushing the complement distribution toward cooperation.",
    },
    "Coordination": {
        "payoffs": np.array([
            [[4, 4], [0, 0]],
            [[0, 0], [2, 2]],
        ]),
        "actions": ["High payoff", "Low payoff"],
        "nash_coop_rate": 0.5,  # Two pure Nash equilibria; mixed gives 50%
        "description": "Two equilibria: both choose High (payoff 4) or both choose Low (payoff 2). "
                       "Mixed Nash is 50%. Void walkers learn to coordinate on the Pareto-dominant "
                       "equilibrium through rejection of miscoordination.",
    },
    "Stag Hunt": {
        "payoffs": np.array([
            [[5, 5], [0, 3]],
            [[3, 0], [3, 3]],
        ]),
        "actions": ["Stag (cooperate)", "Hare (safe)"],
        "nash_coop_rate": 0.5,  # Two pure Nash; mixed gives 50%
        "description": "Trust game. Stag hunting requires mutual cooperation (payoff 5). "
                       "Hare hunting is safe but suboptimal (payoff 3). Mixed Nash is 50%. "
                       "Void walkers build trust through accumulated rejection of betrayal outcomes.",
    },
}
# ---------------------------------------------------------------------------
# Variant engine
# ---------------------------------------------------------------------------
# Strategy family names. These strings are also the dropdown values in the UI.
LEGACY_STRATEGY = "Deceptacon"  # implicit single void surface
DUAL_STRATEGY = "DualVoid"      # BATNA and WATNA held in state; one foregrounded
TRIDENT_STRATEGY = "Trident"    # adds the LIVE head stream plus meta-LAMINAR rotations
# Display labels for the two *named* void branches (used by the Void Toggle
# dropdown and the stats panels).
VOID_LABELS = {
    "batna": "BATNA -> sphere",
    "watna": "WATNA -> torus",
}
# Display labels for every foregroundable branch; Trident adds "live" on top
# of the two named voids.
BRANCH_LABELS = {
    "live": "LIVE -> head stream",
    "batna": "BATNA -> sphere",
    "watna": "WATNA -> torus",
}
def branch_carrier(branch: str) -> str:
    """Return the physical carrier a branch rides on.

    Raises KeyError for anything other than "live", "batna", or "watna".
    """
    carriers = {
        "live": "head stream",
        "batna": "sphere",
        "watna": "torus",
    }
    return carriers[branch]
def clamp(value: float, low: float, high: float) -> float:
    """Clamp *value* into [low, high] (upper bound applied first)."""
    bounded_above = min(value, high)
    return max(low, bounded_above)
@dataclass
class VariantWalker:
    """Strategy-aware void walker with legacy, dual, and trident reads.

    One walker per player. It accumulates rejection signal into up to three
    boundaries (legacy, BATNA, WATNA) plus a LIVE reward signal, and turns
    whichever branch is foregrounded into an action distribution.
    """
    n_actions: int      # size of the action space
    eta: float          # temperature exponent of the complement distribution
    strategy: str       # LEGACY_STRATEGY, DUAL_STRATEGY, or TRIDENT_STRATEGY
    void_toggle: str    # "batna" or "watna": which named void to foreground
    active_branch: str  # Trident foreground: "live", "batna", or "watna"
    rotations: int      # meta-LAMINAR rotations; clamped to 0..3 in __post_init__
    # The arrays below are created in __post_init__; None is only a
    # placeholder default for the dataclass machinery.
    legacy_boundary: np.ndarray | None = None
    batna_boundary: np.ndarray | None = None
    watna_boundary: np.ndarray | None = None
    live_signal: np.ndarray | None = None

    def __post_init__(self):
        # Rejection boundaries start at zero; the live signal starts at one
        # so the initial live distribution is uniform.
        self.legacy_boundary = np.zeros(self.n_actions, dtype=np.float64)
        self.batna_boundary = np.zeros(self.n_actions, dtype=np.float64)
        self.watna_boundary = np.zeros(self.n_actions, dtype=np.float64)
        self.live_signal = np.ones(self.n_actions, dtype=np.float64)
        self.rotations = int(clamp(float(self.rotations), 0, 3))
        # Normalize active_branch so it is always consistent with the
        # chosen strategy family.
        if self.strategy == DUAL_STRATEGY:
            self.active_branch = self.void_toggle
        elif self.strategy == TRIDENT_STRATEGY:
            # Trident may foreground "live" or the currently toggled void;
            # anything else falls back to "live".
            if self.active_branch != "live" and self.active_branch != self.void_toggle:
                self.active_branch = "live"
        else:
            self.active_branch = "live"

    def bandwidth_multiplier(self) -> int:
        """Live-branch bandwidth: 2**rotations for Trident, else 1."""
        return 2 ** self.rotations if self.strategy == TRIDENT_STRATEGY else 1

    def foreground_branch(self) -> str:
        """Name of the branch currently driving action selection."""
        if self.strategy == LEGACY_STRATEGY:
            return "legacy"
        if self.strategy == DUAL_STRATEGY:
            return self.void_toggle
        return self.active_branch

    def named_void_boundary(self, branch: str) -> np.ndarray:
        """Return the BATNA boundary for "batna", otherwise the WATNA one."""
        return self.batna_boundary if branch == "batna" else self.watna_boundary

    def complement_distribution(self, boundary: np.ndarray) -> np.ndarray:
        """
        P(i) = (T - v_i + 1)^eta / sum_j (T - v_j + 1)^eta, with T = sum(v).

        (T - v_i + 1) >= 1 because v >= 0 elementwise, so the weights are
        always positive and the sliver of exploration survives.

        This remains the void-side read. Legacy Deceptacon still leaves the
        branch implicit; DualVoid and Trident make the read explicit.
        """
        total_rejections = boundary.sum()
        weights = (total_rejections - boundary + 1.0) ** self.eta
        total_weight = weights.sum()
        if total_weight <= 0:
            # Defensive fallback to uniform; unreachable for the weights above.
            return np.ones(self.n_actions) / self.n_actions
        return weights / total_weight

    def live_distribution(self) -> np.ndarray:
        """Softmax over the live reward signal, boosted by Trident bandwidth."""
        boost = 1.0 + 0.35 * (self.bandwidth_multiplier() - 1)
        logits = self.live_signal * boost
        # Subtract the max for numerical stability before exponentiating.
        logits = logits - logits.max()
        exp_logits = np.exp(logits)
        total = exp_logits.sum()
        if total <= 0:
            # Defensive fallback; exp() outputs are strictly positive.
            return np.ones(self.n_actions) / self.n_actions
        return exp_logits / total

    def current_distribution(self) -> np.ndarray:
        """Action distribution for whichever branch is foregrounded."""
        foreground = self.foreground_branch()
        if self.strategy == LEGACY_STRATEGY:
            return self.complement_distribution(self.legacy_boundary)
        if foreground == "live":
            return self.live_distribution()
        return self.complement_distribution(self.named_void_boundary(foreground))

    def select_action(self, rng: np.random.Generator, epsilon: float) -> int:
        """Epsilon-greedy draw: uniform with prob epsilon, else sample the
        foreground distribution."""
        if rng.random() < epsilon:
            return int(rng.integers(0, self.n_actions))
        dist = self.current_distribution()
        return int(rng.choice(self.n_actions, p=dist))

    def _spread_signal(self, boundary: np.ndarray, action: int, magnitude: float):
        """Add rejection mass at *action* and bleed a 10%-scaled, distance-
        decayed echo onto neighboring actions."""
        if magnitude <= 0:
            return
        boundary[action] += magnitude
        for neighbor in range(self.n_actions):
            if neighbor == action:
                continue
            distance = abs(neighbor - action)
            boundary[neighbor] += magnitude / (distance + 1) * 0.1

    def record_feedback(
        self,
        action: int,
        payoff: float,
        max_possible: float,
        joint_shortfall: float,
        best_response_gap: float,
    ):
        """Fold one round's outcome into the boundaries and the live signal.

        Args:
            action: the action this walker just played.
            payoff: realized payoff for this walker.
            max_possible: this walker's best payoff anywhere in the game.
            joint_shortfall: normalized gap to the best joint outcome.
            best_response_gap: normalized regret vs. the best response to the
                opponent's realized action (computed by run_negotiation).
        """
        # Unilateral regret feeds BATNA; joint collapse (minus part of the
        # unilateral piece) feeds WATNA.
        batna_signal = max(best_response_gap, 0.0)
        watna_signal = max(joint_shortfall - best_response_gap * 0.35, 0.0)
        if batna_signal <= 0 and watna_signal <= 0 and payoff < max_possible:
            # Even a "clean" but suboptimal round leaves a minimal trace.
            batna_signal = max((max_possible - payoff) / max(max_possible, 1.0) * 0.25, 0.05)
        combined_signal = batna_signal + watna_signal
        # Legacy keeps the undifferentiated sum; the named voids keep the parts.
        self._spread_signal(self.legacy_boundary, action, combined_signal)
        self._spread_signal(self.batna_boundary, action, batna_signal)
        self._spread_signal(self.watna_boundary, action, watna_signal)
        # Positive reinforcement for the live branch, scaled by bandwidth.
        live_gain = max(payoff, 0.0) / max(max_possible, 1.0)
        self.live_signal[action] += live_gain * self.bandwidth_multiplier()
@dataclass
class NegotiationResult:
    """Everything a single negotiation run produces, bundled for the UI."""
    coop_rates: List[float]        # rolling-window cooperation rate per round
    walker_a: VariantWalker        # Player A's walker, final state
    walker_b: VariantWalker        # Player B's walker, final state
    dist_a: np.ndarray             # A's final foreground action distribution
    dist_b: np.ndarray             # B's final foreground action distribution
    stats: Dict[str, float | str]  # summary statistics for plots/markdown
def run_negotiation(
    game_name: str,
    n_rounds: int,
    eta: float,
    epsilon: float,
    seed: int,
    strategy: str,
    void_toggle: str,
    active_branch: str,
    rotations: int,
) -> NegotiationResult:
    """Run the strategy-aware negotiation and collect summary statistics.

    Args:
        game_name: key into GAMES.
        n_rounds: number of simultaneous-move rounds.
        eta: temperature exponent of the complement distribution.
        epsilon: per-round probability of a uniformly random action.
        seed: RNG seed; identical arguments reproduce identical runs.
        strategy: LEGACY_STRATEGY, DUAL_STRATEGY, or TRIDENT_STRATEGY.
        void_toggle: "batna" or "watna" -- named void to foreground.
        active_branch: Trident foreground ("live", "batna", or "watna").
        rotations: meta-LAMINAR rotations (clamped to 0..3 by the walker).

    Returns:
        NegotiationResult with per-round cooperation rates, the final
        walkers, their final distributions, and a stats dict.
    """
    game = GAMES[game_name]
    payoffs = game["payoffs"]
    # Payoff tensor layout: payoffs[action_a, action_b] = (u_A, u_B).
    # BUGFIX: this function previously indexed the tensor player-major
    # (payoffs[player, a, b]), which scrambles the matrices -- e.g. it made
    # the Prisoner's Dilemma sucker outcome (C, D) pay A 3 instead of 0, and
    # put PD's "joint max" at 8 when no cell sums above 6. All indexing below
    # now follows the action-pair layout the GAMES data actually uses.
    n_actions = payoffs.shape[0]
    nash_coop = game["nash_coop_rate"]
    rng = np.random.default_rng(seed)
    walker_a = VariantWalker(
        n_actions=n_actions,
        eta=eta,
        strategy=strategy,
        void_toggle=void_toggle,
        active_branch=active_branch,
        rotations=rotations,
    )
    walker_b = VariantWalker(
        n_actions=n_actions,
        eta=eta,
        strategy=strategy,
        void_toggle=void_toggle,
        active_branch=active_branch,
        rotations=rotations,
    )
    cooperation_history = []
    action_history_a = []
    action_history_b = []
    # Loop invariants, hoisted: best joint payoff over any cell, and each
    # player's best payoff anywhere in the game.
    joint_max = float(payoffs.sum(axis=-1).max())
    max_possible_a = float(payoffs[:, :, 0].max())
    max_possible_b = float(payoffs[:, :, 1].max())
    for _ in range(n_rounds):
        action_a = walker_a.select_action(rng, epsilon)
        action_b = walker_b.select_action(rng, epsilon)
        action_history_a.append(action_a)
        action_history_b.append(action_b)
        # Payoffs for the realized action profile
        payoff_a = payoffs[action_a, action_b, 0]
        payoff_b = payoffs[action_a, action_b, 1]
        # Determine cooperation (action 0 is the cooperative action)
        both_cooperated = (action_a == 0) and (action_b == 0)
        cooperation_history.append(1.0 if both_cooperated else 0.0)
        # Best payoff each player could have gotten against the opponent's
        # realized action (the best-response benchmark for regret).
        best_response_a = float(payoffs[:, action_b, 0].max())
        best_response_b = float(payoffs[action_a, :, 1].max())
        joint_shortfall = (joint_max - (payoff_a + payoff_b)) / max(joint_max, 1.0)
        walker_a.record_feedback(
            action_a,
            float(payoff_a),
            max_possible_a,
            joint_shortfall,
            (best_response_a - payoff_a) / max(max_possible_a, 1.0),
        )
        walker_b.record_feedback(
            action_b,
            float(payoff_b),
            max_possible_b,
            joint_shortfall,
            (best_response_b - payoff_b) / max(max_possible_b, 1.0),
        )
    # Compute rolling cooperation rate
    window = max(10, n_rounds // 20)
    coop_rates = []
    for i in range(len(cooperation_history)):
        start = max(0, i - window + 1)
        coop_rates.append(np.mean(cooperation_history[start:i + 1]))
    dist_a = walker_a.current_distribution()
    dist_b = walker_b.current_distribution()
    overall_coop = np.mean(cooperation_history)
    last_quarter_coop = np.mean(cooperation_history[3 * n_rounds // 4:])
    nash_improvement = last_quarter_coop - nash_coop
    # Count how often a pure Nash equilibrium action profile was played.
    nash_action_count = 0
    for a_act, b_act in zip(action_history_a, action_history_b):
        if game_name == "Prisoner's Dilemma":
            # Unique Nash: mutual defection (1, 1).
            if a_act == 1 and b_act == 1:
                nash_action_count += 1
        elif game_name == "Hawk-Dove":
            # BUGFIX: Hawk-Dove's pure Nash profiles are the anti-coordinated
            # ones, (Hawk, Dove) and (Dove, Hawk). Mutual Hawk pays (0, 0)
            # and either player gains by switching to Dove, so it is NOT an
            # equilibrium; the old code counted (1, 1) here.
            if a_act != b_act:
                nash_action_count += 1
        else:
            # Coordination / Stag Hunt: both pure Nash profiles are on the
            # diagonal -- (0, 0) and (1, 1).
            if a_act == b_act:
                nash_action_count += 1
    nash_rate = nash_action_count / n_rounds
    # Branch-level diagnostics across both walkers.
    total_batna = float(walker_a.batna_boundary.sum() + walker_b.batna_boundary.sum())
    total_watna = float(walker_a.watna_boundary.sum() + walker_b.watna_boundary.sum())
    total_named_void = total_batna + total_watna
    watna_share = total_watna / total_named_void if total_named_void > 0 else 0.0
    branch_contrast = (
        abs(total_watna - total_batna) / total_named_void if total_named_void > 0 else 0.0
    )
    # Heuristic display metric: how much the explicit read separated the voids.
    explicit_read_gain = 0.0
    if strategy == DUAL_STRATEGY:
        explicit_read_gain = round(
            branch_contrast * 0.6 + (0.08 if void_toggle == "watna" else 0.04),
            3,
        )
    elif strategy == TRIDENT_STRATEGY:
        explicit_read_gain = round(
            branch_contrast * 0.65 + rotations * 0.07 + 0.12,
            3,
        )
    foreground = walker_a.foreground_branch()
    if foreground == "legacy":
        foreground_read = "projection/search (implicit)"
    else:
        foreground_read = BRANCH_LABELS[foreground]
    stats = {
        "overall_cooperation": overall_coop,
        "final_cooperation": last_quarter_coop,
        "nash_equilibrium_rate": nash_coop,
        "improvement_over_nash": nash_improvement,
        "nash_action_rate": nash_rate,
        "strategy": strategy,
        "foreground_read": foreground_read,
        "void_toggle": VOID_LABELS[void_toggle],
        "watna_share": watna_share,
        "effective_bandwidth": walker_a.bandwidth_multiplier(),
        "explicit_read_gain": explicit_read_gain,
    }
    return NegotiationResult(
        coop_rates=coop_rates,
        walker_a=walker_a,
        walker_b=walker_b,
        dist_a=dist_a,
        dist_b=dist_b,
        stats=stats,
    )
# ---------------------------------------------------------------------------
# Visualization
# ---------------------------------------------------------------------------
def create_plots(
    game_name: str,
    n_rounds: int,
    eta: float,
    epsilon: float,
    seed: int,
    strategy: str,
    void_toggle: str,
    active_branch: str,
    rotations: int,
):
    """Generate all visualizations and statistics.

    Runs the (seed-deterministic) negotiation and renders a 2x2 matplotlib
    figure: cooperation over time, the foreground boundary/signal, the final
    foreground distribution, and a monospace text summary. Returns the Figure.
    """
    game = GAMES[game_name]
    actions = game["actions"]
    result = run_negotiation(
        game_name, n_rounds, eta, epsilon, seed, strategy, void_toggle, active_branch, rotations
    )
    coop_rates = result.coop_rates
    dist_a = result.dist_a
    dist_b = result.dist_b
    stats = result.stats
    # Color palette
    bg_color = "#0f1117"
    text_color = "#e0e0e0"
    grid_color = "#2a2d35"
    accent_blue = "#3b82f6"
    accent_teal = "#14b8a6"
    accent_amber = "#f59e0b"
    accent_red = "#ef4444"
    accent_purple = "#a855f7"
    # NOTE(review): the figure is handed to Gradio and never plt.close()d;
    # confirm Gradio releases it, otherwise Agg figures accumulate per request.
    fig, axes = plt.subplots(2, 2, figsize=(14, 10), facecolor=bg_color)
    fig.suptitle(
        f"Void Attention: {game_name} ({strategy})",
        fontsize=16, fontweight="bold", color=text_color, y=0.98
    )
    # Shared dark-theme styling for all four panels.
    for ax in axes.flat:
        ax.set_facecolor(bg_color)
        ax.tick_params(colors=text_color, labelsize=9)
        ax.spines["bottom"].set_color(grid_color)
        ax.spines["left"].set_color(grid_color)
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
    # 1. Cooperation rate over time
    ax1 = axes[0, 0]
    rounds = np.arange(1, len(coop_rates) + 1)
    ax1.plot(rounds, coop_rates, color=accent_teal, linewidth=1.5, label="Void Walker")
    ax1.axhline(y=stats["nash_equilibrium_rate"], color=accent_red, linestyle="--",
                linewidth=1.5, label=f"Nash eq. ({stats['nash_equilibrium_rate']:.1%})")
    # Shade only where the walkers beat the Nash baseline.
    ax1.fill_between(rounds, coop_rates, stats["nash_equilibrium_rate"],
                     alpha=0.15, color=accent_teal,
                     where=[c > stats["nash_equilibrium_rate"] for c in coop_rates])
    ax1.set_xlabel("Round", color=text_color, fontsize=10)
    ax1.set_ylabel("Cooperation Rate", color=text_color, fontsize=10)
    ax1.set_title("Cooperation Rate Over Time", color=text_color, fontsize=12)
    ax1.legend(loc="lower right", fontsize=9, facecolor=bg_color, edgecolor=grid_color,
               labelcolor=text_color)
    ax1.set_ylim(-0.05, 1.05)
    # 2. Foreground boundary counts or live signal
    ax2 = axes[0, 1]
    x = np.arange(len(actions))
    width = 0.35
    # Which field is plotted depends on the strategy's foreground branch.
    foreground = result.walker_a.foreground_branch()
    if foreground == "legacy":
        field_a = result.walker_a.legacy_boundary
        field_b = result.walker_b.legacy_boundary
        title = "Implicit boundary (projection/search)"
        y_label = "Rejection Count"
        color_a = accent_blue
        color_b = accent_amber
    elif foreground == "live":
        field_a = result.walker_a.live_signal
        field_b = result.walker_b.live_signal
        title = f"Trident LIVE branch ({result.walker_a.bandwidth_multiplier()}x bandwidth)"
        y_label = "Live Signal"
        color_a = accent_teal
        color_b = accent_purple
    else:
        field_a = result.walker_a.named_void_boundary(foreground)
        field_b = result.walker_b.named_void_boundary(foreground)
        title = f"{foreground.upper()} boundary ({branch_carrier(foreground)})"
        y_label = "Named Void Count"
        if foreground == "batna":
            color_a = accent_blue
            color_b = "#7dd3fc"
        else:
            color_a = accent_amber
            color_b = "#fb7185"
    bars_a = ax2.bar(x - width / 2, field_a, width, label="Player A",
                     color=color_a, alpha=0.85, edgecolor="none")
    bars_b = ax2.bar(x + width / 2, field_b, width, label="Player B",
                     color=color_b, alpha=0.85, edgecolor="none")
    ax2.set_xlabel("Action", color=text_color, fontsize=10)
    ax2.set_ylabel(y_label, color=text_color, fontsize=10)
    ax2.set_title(title, color=text_color, fontsize=12)
    ax2.set_xticks(x)
    ax2.set_xticklabels(actions, fontsize=9)
    ax2.legend(fontsize=9, facecolor=bg_color, edgecolor=grid_color, labelcolor=text_color)
    # Numeric labels above each non-zero bar.
    for bar in list(bars_a) + list(bars_b):
        height = bar.get_height()
        if height > 0:
            ax2.text(bar.get_x() + bar.get_width() / 2., height + max(height * 0.03, 0.03),
                     f"{height:.1f}", ha="center", va="bottom",
                     color=text_color, fontsize=8)
    # 3. Foreground distribution (final)
    ax3 = axes[1, 0]
    bars_ca = ax3.bar(x - width / 2, dist_a, width, label="Player A",
                      color=accent_teal, alpha=0.85, edgecolor="none")
    bars_cb = ax3.bar(x + width / 2, dist_b, width, label="Player B",
                      color=accent_purple, alpha=0.85, edgecolor="none")
    ax3.set_xlabel("Action", color=text_color, fontsize=10)
    ax3.set_ylabel("Probability", color=text_color, fontsize=10)
    if foreground == "legacy":
        dist_title = "Implicit complement distribution"
    elif foreground == "live":
        dist_title = "LIVE branch distribution"
    else:
        dist_title = f"{foreground.upper()} complement distribution"
    ax3.set_title(dist_title, color=text_color, fontsize=12)
    ax3.set_xticks(x)
    ax3.set_xticklabels(actions, fontsize=9)
    ax3.legend(fontsize=9, facecolor=bg_color, edgecolor=grid_color, labelcolor=text_color)
    ax3.set_ylim(0, 1.0)
    # Percentage labels above every probability bar.
    for bar in list(bars_ca) + list(bars_cb):
        height = bar.get_height()
        ax3.text(bar.get_x() + bar.get_width() / 2., height + 0.01,
                 f"{height:.1%}", ha="center", va="bottom",
                 color=text_color, fontsize=9)
    # 4. Summary text panel
    ax4 = axes[1, 1]
    ax4.axis("off")
    summary_lines = [
        f"Game: {game_name}",
        f"Rounds: {n_rounds} | \u03b7 = {eta} | \u03b5 = {epsilon} | seed = {seed}",
        "",
        f"Strategy: {stats['strategy']}",
        f"Foreground read: {stats['foreground_read']}",
        f"Void toggle: {stats['void_toggle']}",
        f"WATNA share: {stats['watna_share']:.1%}",
        f"Explicit read gain: {stats['explicit_read_gain']:.3f}",
        f"Effective bandwidth: {stats['effective_bandwidth']}x",
        "",
        f"Overall cooperation: {stats['overall_cooperation']:.1%}",
        f"Final quarter coop: {stats['final_cooperation']:.1%}",
        f"Nash equilibrium rate: {stats['nash_equilibrium_rate']:.1%}",
        f"Improvement over Nash: {stats['improvement_over_nash']:+.1%}",
        "",
        "VOID { Q = proposal, K = void boundary,",
        "       V = complement weight }",
        "",
        "projection/search are operations,",
        "not branch names.",
    ]
    summary_text = "\n".join(summary_lines)
    ax4.text(0.05, 0.95, summary_text, transform=ax4.transAxes,
             fontsize=11, verticalalignment="top", fontfamily="monospace",
             color=text_color,
             bbox=dict(boxstyle="round,pad=0.8", facecolor="#1a1d25",
                       edgecolor=grid_color, alpha=0.9))
    plt.tight_layout(rect=[0, 0, 1, 0.95])
    return fig
def run_demo(game_name, n_rounds, eta, epsilon, seed, strategy, void_toggle, active_branch, rotations):
    """Main entry point for the Gradio interface.

    Returns (matplotlib Figure, markdown string) for the Plot and Markdown
    output components.
    """
    game = GAMES[game_name]
    fig = create_plots(
        game_name,
        int(n_rounds),
        float(eta),
        float(epsilon),
        int(seed),
        strategy,
        void_toggle,
        active_branch,
        int(rotations),
    )
    # NOTE(review): this re-runs the simulation create_plots already ran
    # internally. The seeded RNG makes the two runs identical, so the
    # displayed numbers agree, but the work is duplicated -- consider letting
    # create_plots accept a precomputed NegotiationResult.
    result = run_negotiation(
        game_name,
        int(n_rounds),
        float(eta),
        float(epsilon),
        int(seed),
        strategy,
        void_toggle,
        active_branch,
        int(rotations),
    )
    stats = result.stats
    # Build stats markdown
    delta = stats["improvement_over_nash"]
    # Manual "+" prefix: the .1% format only emits a sign for negatives.
    delta_sign = "+" if delta >= 0 else ""
    verdict = "outperforms" if delta > 0.01 else ("matches" if abs(delta) <= 0.01 else "underperforms")
    stats_md = f"""## Results
| Metric | Value |
|--------|-------|
| Overall cooperation | **{stats['overall_cooperation']:.1%}** |
| Final quarter cooperation | **{stats['final_cooperation']:.1%}** |
| Nash equilibrium baseline | {stats['nash_equilibrium_rate']:.1%} |
| Improvement over Nash | **{delta_sign}{delta:.1%}** |
| Verdict | Void walker **{verdict}** Nash |
| Strategy | **{stats['strategy']}** |
| Foreground read | **{stats['foreground_read']}** |
| Void toggle | {stats['void_toggle']} |
| WATNA share | **{stats['watna_share']:.1%}** |
| Explicit read gain | **{stats['explicit_read_gain']:.3f}** |
| Effective bandwidth | **{stats['effective_bandwidth']}x** |
### Complement Distribution (Final)
| Action | Player A | Player B |
|--------|----------|----------|
"""
    actions = game["actions"]
    # One table row per action with each player's final probability.
    for i, act in enumerate(actions):
        stats_md += f"| {act} | {result.dist_a[i]:.1%} | {result.dist_b[i]:.1%} |\n"
    stats_md += f"""
### Game Description
{game['description']}
### VOID Contract
`VOID {{ activeBranch: BATNA | WATNA | LIVE; BATNA -> sphere; WATNA -> torus; Q = proposal; K = void boundary; V = complement weight }}`
"""
    return fig, stats_md
# ---------------------------------------------------------------------------
# Attention mapping table (static)
# ---------------------------------------------------------------------------
# Static markdown for the "Attention Mapping" tab, rendered verbatim by
# gr.Markdown. The backtick-quoted VOID contract line also appears in
# run_demo's results markdown -- keep the two in sync if either changes.
ATTENTION_TABLE = """## The Structural Identity (section 15.11)
The complement distribution `complement(i) = softmax(-eta * v)_i` is structurally identical to transformer attention.
| Component | Transformer | Void Walking |
|-----------|------------|--------------|
| **Query** | Current token embedding | Current proposal |
| **Key** | Cached key vectors | Void boundary (rejection history) |
| **Value** | Cached value vectors | Complement weight per action |
| **Temperature** | 1/sqrt(d_k) | 1/eta |
| **Multi-head** | H parallel attention heads | H parallel walkers |
| **Cross-attention** | Encoder-decoder attention | Skyrms walker on joint void surface |
| **Residual connection** | x + Attention(x) | Void boundary persistence across rounds |
| **Layer norm** | Normalize activations | Void decay (forgetting old rejections) |
| **Feed-forward** | MLP transformation | c3 gait adaptation |
| **KV cache** | Stored keys and values | The void boundary itself |
### VOID contract
`VOID { activeBranch: BATNA | WATNA | LIVE; BATNA -> sphere; WATNA -> torus; Q = proposal; K = void boundary; V = complement weight }`
### Variant reads
| Variant | What stays in state | What gets foregrounded |
|---------|---------------------|------------------------|
| **Deceptacon** | one implicit void surface | `projection/search` as operations only; branch still inferred |
| **DualVoid** | BATNA and WATNA together | `voidToggle` foregrounds BATNA or WATNA explicitly |
| **Trident** | LIVE, BATNA, and WATNA together | live head or selected void branch, plus meta-LAMINAR rotations |
### The identification
```
cross(i, j) ~ complement_A[i] * complement_B[j] * complement_S[i*B + j]
```
where S is the Skyrms mediator walker's own void over the joint proposal space (the gate).
**The void boundary was always the KV cache. The complement distribution was always softmax attention. We just named the parts.**
### Key theorem (Lean 4, zero sorry)
- `buleyean_positivity` -- P(i) > 0 for all i (the sliver guarantees exploration)
- `void_boundary_sufficient_statistic` -- the void boundary contains all information needed for optimal action selection
- `void_walkers_converge` -- same rejection history produces same distribution
- `failure_strictly_more_informative` -- rejection carries N-1 bits vs 1 bit for selection
"""
# Static markdown for the "About" tab, rendered verbatim by gr.Markdown.
# NOTE(review): in the benchmark table below, the Prisoner's Dilemma and
# Stag Hunt rows quote identical figures, and 53.7% - 52.2% is 1.5 pp while
# the Delta column says +1.4 pp -- verify both against the paper.
ABOUT_TEXT = """## About
This demo implements section 15.11 of *Fork, Race, Fold: the Shape of Irreversible Process* -- the structural identity between void walking (game-theoretic negotiation via rejection history) and transformer attention.
### How it works
1. **Fork**: Two players each have a set of actions. The current strategy chooses whether the read is implicit, dual-explicit, or trident-explicit.
2. **Race**: Both players simultaneously choose actions. Payoffs are determined by the game matrix.
3. **Fold**: Suboptimal outcomes generate rejection signal. Viable-alternative regret feeds BATNA. Joint collapse feeds WATNA.
4. **Vent**: The rejected path is vented -- it cannot be un-rejected. The void boundary grows monotonically.
The complement distribution `P(i) = (T - v_i + 1) / sum(T - v_j + 1)` is equivalent to `softmax(-eta * v)`. This is not a metaphor. It is a mathematical identity. The void boundary IS the KV cache. The complement distribution IS the attention score.
### Strategy surface
- **Deceptacon** keeps the read implicit. `projection/search` remain operation words, not branch names.
- **DualVoid** keeps BATNA and WATNA in state together, then `voidToggle` foregrounds one.
- **Trident** adds the live head stream. Each meta-LAMINAR rotation doubles live-bandwidth, so two rotations give a 4x witness.
### Benchmark results from the paper (500 rounds, 5 seeds)
| Game | Three-Walker coordination | Void Attention coordination | Delta |
|------|--------------------------|----------------------------|-------|
| Hawk-Dove | 43.4% | 67.4% | +24.0 pp |
| Coordination (3x3) | 7.2% | 22.9% | +15.7 pp |
| Prisoner's Dilemma | 52.2% | 53.7% | +1.4 pp |
| Stag Hunt | 52.2% | 53.7% | +1.4 pp |
The improvement is largest on asymmetric games where the walkers' complement distributions diverge.
### Formal verification
- 13 Lean 4 theorems (zero sorry)
- 7 TLA+ models (VoidBoundaryMeasurable, VoidDominance, VoidTunnel, VoidAttention, SkyrmsNadir, SkyrmsThreeWalker, NegotiationConvergence)
- 263 companion tests, 695 assertions, 0 failures
---
**Whitepaper**: [forkracefold.com](https://forkracefold.com/)
**More demos**:
[Aether](https://huggingface.co/spaces/forkjoin-ai/aether) |
[Edge Mesh](https://huggingface.co/spaces/forkjoin-ai/aether-browser) |
[The Void](https://huggingface.co/spaces/forkjoin-ai/the-void) |
[Buleyean RL](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl) |
[Glossolalia](https://huggingface.co/spaces/forkjoin-ai/glossolalia) |
[Glossolalia Examples](https://huggingface.co/spaces/forkjoin-ai/glossolalia-examples) |
[Metacog](https://huggingface.co/spaces/forkjoin-ai/metacog) |
[Five Bules](https://huggingface.co/spaces/forkjoin-ai/five-bules) |
[Quark Personality](https://huggingface.co/spaces/forkjoin-ai/quark-personality)
**Training spaces**:
[Buleyean RL 70B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-70b-trainer) |
[Buleyean RL Mistral 7B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-mistral-7b-trainer) |
[Buleyean RL Qwen 7B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-qwen2.5-7b-trainer) |
[Buleyean RL DeepSeek R1 7B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-deepseek-r1-7b-trainer) |
[Buleyean RL Gemma 9B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-gemma2-9b-trainer) |
[Buleyean RL Qwen 14B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-qwen2.5-14b-trainer) |
[Buleyean RL Mistral Small 24B Trainer](https://huggingface.co/spaces/forkjoin-ai/buleyean-rl-mistral-small-24b-trainer)
**Source**: [github.com/affectively-ai/aeon](https://github.com/affectively-ai/aeon)
Built by [AFFECTIVELY](https://affectively.ai). The complement distribution was always softmax attention. The void boundary was always the KV cache. We just named the parts.
Copyright 2026 forkjoin.ai
"""
# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
def build_app():
    """Assemble and return the Gradio Blocks application (three tabs)."""
    with gr.Blocks(
        title="Void Attention - When Attention Meets Game Theory",
        theme=gr.themes.Base(
            primary_hue="blue",
            secondary_hue="teal",
            neutral_hue="slate",
            font=gr.themes.GoogleFont("Inter"),
        ),
        css="""
        .gradio-container { max-width: 1200px !important; }
        footer { display: none !important; }
        """,
    ) as app:
        gr.Markdown(
            "# Void Attention -- When Attention Meets Game Theory\n"
            "*section 15.11: the structural identity between void walking and transformer attention, now with explicit Deceptacon variants*"
        )
        with gr.Tabs():
            with gr.Tab("Simulator"):
                with gr.Row():
                    # Left column: every simulation control, in the same
                    # order run_demo expects its positional arguments.
                    with gr.Column(scale=1):
                        game_select = gr.Dropdown(
                            choices=list(GAMES.keys()),
                            value="Hawk-Dove",
                            label="Game",
                            info="Select a game theory scenario",
                        )
                        n_rounds = gr.Slider(
                            minimum=50, maximum=500, value=200, step=10,
                            label="Rounds",
                            info="Number of negotiation rounds",
                        )
                        eta = gr.Slider(
                            minimum=0.1, maximum=5.0, value=1.5, step=0.1,
                            label="eta (temperature)",
                            info="Higher eta = sharper complement distribution = more exploitation",
                        )
                        epsilon = gr.Slider(
                            minimum=0.0, maximum=0.5, value=0.1, step=0.01,
                            label="epsilon (exploration rate)",
                            info="Probability of random action (the sliver)",
                        )
                        strategy = gr.Dropdown(
                            choices=[LEGACY_STRATEGY, DUAL_STRATEGY, TRIDENT_STRATEGY],
                            value=DUAL_STRATEGY,
                            label="Strategy Family",
                            info="Legacy keeps the read implicit; DualVoid names the void; Trident keeps the live branch explicit too.",
                        )
                        void_toggle = gr.Dropdown(
                            choices=list(VOID_LABELS.keys()),
                            value="batna",
                            label="Void Toggle",
                            info="Foreground BATNA -> sphere or WATNA -> torus when the strategy is dual or trident.",
                        )
                        active_branch = gr.Dropdown(
                            choices=list(BRANCH_LABELS.keys()),
                            value="live",
                            label="Trident Foreground",
                            info="For Trident, keep the live head stream foregrounded or switch to the selected void branch.",
                        )
                        rotations = gr.Slider(
                            minimum=0, maximum=3, value=2, step=1,
                            label="Meta-LAMINAR Rotations",
                            info="Each rotation doubles live branch bandwidth. Two rotations give the 4x witness.",
                        )
                        seed = gr.Number(
                            value=42, label="Random Seed",
                            info="For reproducibility",
                            precision=0,
                        )
                        run_btn = gr.Button("Run Negotiation", variant="primary")
                    # Right column: the figure and the markdown stats panel.
                    with gr.Column(scale=3):
                        plot_output = gr.Plot(label="Void Attention Visualization")
                        stats_output = gr.Markdown()
                run_btn.click(
                    fn=run_demo,
                    inputs=[
                        game_select,
                        n_rounds,
                        eta,
                        epsilon,
                        seed,
                        strategy,
                        void_toggle,
                        active_branch,
                        rotations,
                    ],
                    outputs=[plot_output, stats_output],
                )
                # Auto-run on load
                app.load(
                    fn=run_demo,
                    inputs=[
                        game_select,
                        n_rounds,
                        eta,
                        epsilon,
                        seed,
                        strategy,
                        void_toggle,
                        active_branch,
                        rotations,
                    ],
                    outputs=[plot_output, stats_output],
                )
            with gr.Tab("Attention Mapping"):
                gr.Markdown(ATTENTION_TABLE)
            with gr.Tab("About"):
                gr.Markdown(ABOUT_TEXT)
    return app
if __name__ == "__main__":
    # Bind on all interfaces on port 7860 (the Hugging Face Spaces default).
    app = build_app()
    app.launch(server_name="0.0.0.0", server_port=7860)