"""
EXP REV-Q-VQ: Vector-quantised (VQ-VAE) bottleneck configurations on V-JEPA 2.

R2 has explicitly asked for VQ-VAE configs in the last 3 review rounds. This
script implements a multi-agent multi-position VQ-VAE sender and runs
3-5 configurations on collision restitution to (a) compute within-scenario
TopSim/PosDis/causal-spec, and (b) measure cross-scenario transfer at N=192
on collision -> ramp.

VQ design: each sender has N_positions codebooks, each with V_codes entries of
dimension D_code. The hidden representation is projected to D_code per position,
then quantised to the nearest codebook entry. Output to receiver is the
one-hot index per position (same dimensionality as Gumbel-Softmax sender), so
PosDis/TopSim/Causal-spec all apply unchanged. Training uses straight-through
estimator + commitment loss (beta=0.25) + codebook loss.

If VQ configs land in the same 41-56% cross-scenario band as the existing
24-config sweep, the sufficiency claim extends beyond Gumbel-Softmax/tanh.
"""
import json, time, sys, os
from pathlib import Path
from datetime import datetime, timezone
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

PROMPT_RECEIVED_TIME = datetime.now(timezone.utc).isoformat()
print(f"PROMPT_RECEIVED_TIME = {PROMPT_RECEIVED_TIME}", flush=True)
T0 = time.time()

sys.path.insert(0, os.path.dirname(__file__))
from _kinematics_train import (
    DEVICE, ClassifierReceiver, HIDDEN_DIM, N_AGENTS, BATCH_SIZE,
    SENDER_LR, RECEIVER_LR, EARLY_STOP_PATIENCE,
)
from _killer_experiment import TemporalEncoder
from _overnight_p1_transfer import make_splits, train_receiver_frozen_sender
from _overnight_p3_matrix import load_labels, load_feat_subsampled
from _rev_q_posdis_scatter import discrete_topsim, discrete_posdis  # single-prop: matches existing sweep rows 1-12

OUT = Path("results/reviewer_response/exp_q_vqvae")
OUT.mkdir(parents=True, exist_ok=True)
N_SEEDS = 3
N_LIST = [16, 192]
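# Commitment-loss weight; 0.25 follows the original VQ-VAE paper (van den Oord et al., 2017).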
COMMIT_BETA = 0.25


class VQSender(nn.Module):
    """VQ-VAE sender: encoder -> per-position projection -> VQ codebook -> one-hot."""
    def __init__(self, encoder, hd, vs, nh, code_dim=8):
        super().__init__()
        self.encoder = encoder
        self.vs = vs
        self.nh = nh
        self.code_dim = code_dim
        self.heads = nn.ModuleList([nn.Linear(hd, code_dim) for _ in range(nh)])
        # Codebooks: nh codebooks, each vs x code_dim
        self.codebooks = nn.ParameterList(
            [nn.Parameter(torch.randn(vs, code_dim) * 0.1) for _ in range(nh)]
        )
        self._last_commit_loss = torch.zeros(1)

    def init_codebooks_from_data(self, x):
        """Data-dependent codebook init: copy vs projected z's sampled from a batch
        (avoids early collapse onto a handful of codebook entries)."""
        with torch.no_grad():
            h = self.encoder(x)
            for head_idx, head in enumerate(self.heads):
                z = head(h)  # [B, code_dim]
                if z.size(0) >= self.vs:
                    # Random sample without replacement
                    perm = torch.randperm(z.size(0), device=z.device)[:self.vs]
                    self.codebooks[head_idx].data.copy_(z[perm])
                else:
                    # Sample with replacement
                    idx = torch.randint(z.size(0), (self.vs,), device=z.device)
                    self.codebooks[head_idx].data.copy_(z[idx])

    def reset_dead_codes(self, x, code_usage):
        """For each head, reset codes whose usage fraction is below 1% to random
        projected data points."""
        with torch.no_grad():
            h = self.encoder(x)
            for head_idx, head in enumerate(self.heads):
                z = head(h)  # [B, code_dim]
                usage = code_usage[head_idx]  # [vs]
                dead = (usage < 0.01).nonzero(as_tuple=True)[0]
                if len(dead) == 0:
                    continue
                # Reset as many dead codes as the batch can cover
                n = min(z.size(0), len(dead))
                perm = torch.randperm(z.size(0), device=z.device)[:n]
                self.codebooks[head_idx].data[dead[:n]] = z[perm]

    def forward(self, x, tau=1.0, hard=True, track_usage=False):
        # `hard` is unused: the forward output is always a hard one-hot
        # (straight-through); kept in the signature for interface parity.
        h = self.encoder(x)
        msgs, logits_all = [], []
        commit_loss = torch.zeros(1, device=h.device)
        usage_per_head = [] if track_usage else None
        for head, codebook in zip(self.heads, self.codebooks):
            z = head(h)  # [B, code_dim]
            # Distances from each batch element to each codebook entry
            # |z - c|^2 = |z|^2 + |c|^2 - 2 z.c
            dists = (z.pow(2).sum(-1, keepdim=True)
                     + codebook.pow(2).sum(-1).unsqueeze(0)
                     - 2 * z @ codebook.t())  # [B, vs]
            indices = dists.argmin(-1)  # [B]
            z_q = codebook[indices]  # [B, code_dim]
            # Commitment + codebook losses
            commit_loss = commit_loss + COMMIT_BETA * F.mse_loss(z, z_q.detach())
            commit_loss = commit_loss + F.mse_loss(z_q, z.detach())
            # Straight-through: forward = hard one-hot; backward = softmax(-dists/tau)
            # This is the standard STE for one-hot VQ-VAE -> discrete receiver.
            soft = F.softmax(-dists / max(tau, 1e-3), dim=-1)  # [B, vs]
            hard_oh = F.one_hot(indices, self.vs).float()
            msg = soft + (hard_oh - soft).detach()  # forward=hard, grad flows via soft
            msgs.append(msg)
            logits_all.append(-dists)
            if track_usage:
                # one-hot count per code, normalized to fraction
                usage_per_head.append(hard_oh.detach().mean(0))  # [vs]
        self._last_commit_loss = commit_loss
        if track_usage:
            self._last_usage = usage_per_head
        return torch.cat(msgs, -1), logits_all
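

def _vq_sender_smoke_test():
    """Illustrative sanity check, not part of the experiment pipeline.

    Uses nn.Identity() as a stand-in encoder (an assumption made only so the
    example is self-contained) and checks that the message is one-hot per
    position with the expected width.
    """
    B, hd, V, L_pos, D = 6, 32, 8, 3, 8
    s = VQSender(nn.Identity(), hd, V, L_pos, D)
    msg, logits = s(torch.randn(B, hd))
    assert msg.shape == (B, V * L_pos)  # V one-hot dims per position
    assert torch.allclose(msg.sum(-1), torch.full((B,), float(L_pos)))  # one 1 per position
    assert len(logits) == L_pos  # negative distances, one tensor per position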


class VQMultiSender(nn.Module):
    """One VQSender per agent view; concatenates messages, sums commit losses."""
    def __init__(self, senders):
        super().__init__()
        self.senders = nn.ModuleList(senders)

    def forward(self, views, tau=1.0, hard=True):
        msgs, all_logits = [], []
        commit = torch.zeros(1, device=views[0].device)
        for s, v in zip(self.senders, views):
            m, l = s(v, tau, hard)
            msgs.append(m)
            all_logits.extend(l)
            commit = commit + s._last_commit_loss
        self._last_commit_loss = commit
        return torch.cat(msgs, -1), all_logits


def log(msg):
    ts = datetime.now(timezone.utc).strftime("%H:%M:%SZ")
    print(f"[{ts}] EXP-VQ: {msg}", flush=True)


def train_vq(feat, labels, seed, n_heads, vocab_size, n_epochs=150, code_dim=8):
    """Train VQ-VAE bottleneck on within-scenario task."""
    N, nf, dim = feat.shape
    fpa = 1
    msg_dim = vocab_size * n_heads * N_AGENTS
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
    torch.manual_seed(seed); np.random.seed(seed)
    rng = np.random.RandomState(seed * 1000 + 42)

    train_ids, holdout_ids = [], []
    for c in np.unique(labels):
        ids_c = np.where(labels == c)[0]
        rng.shuffle(ids_c)
        split = max(1, len(ids_c) // 5)
        holdout_ids.extend(ids_c[:split]); train_ids.extend(ids_c[split:])
    train_ids = np.array(train_ids); holdout_ids = np.array(holdout_ids)
    n_classes = int(labels.max()) + 1
    chance = 1.0 / n_classes

    senders = [VQSender(TemporalEncoder(HIDDEN_DIM, dim, fpa), HIDDEN_DIM,
                         vocab_size, n_heads, code_dim).to(DEVICE)
               for _ in range(N_AGENTS)]
    sender = VQMultiSender(senders).to(DEVICE)
    # Data-dependent codebook init: sample from a forward pass on a training batch
    # (standard VQ-VAE init trick to avoid initial collapse to a single codebook entry).
    with torch.no_grad():
        init_batch_ids = rng.permutation(train_ids)[:min(BATCH_SIZE * 4, len(train_ids))]
        init_views = [v[init_batch_ids].to(DEVICE) for v in agent_views]
        for s, v in zip(sender.senders, init_views):
            s.init_codebooks_from_data(v)
    receivers = [ClassifierReceiver(msg_dim, HIDDEN_DIM, n_classes).to(DEVICE)
                 for _ in range(3)]
    so = torch.optim.Adam(sender.parameters(), lr=SENDER_LR)
    ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]
    labels_dev = torch.tensor(labels, dtype=torch.long).to(DEVICE)
    n_batches = max(1, len(train_ids) // BATCH_SIZE)
    best_acc = 0.0; best_ep = 0
    best_sender_state = None; best_receiver_states = None; best_recv_idx = 0
    RESET_EVERY = 40  # re-initialise the receiver ensemble every 40 epochs
    DEAD_CODE_RESET_EVERY = 20  # standard VQ-VAE dead-code mitigation

    for ep in range(n_epochs):
        # Temperature anneal: 3.0 -> 1.0 over first half of training (matches Gumbel sender schedule)
        tau = max(1.0, 3.0 - 2.0 * ep / max(1, n_epochs // 2))
        # Dead-code reset every DEAD_CODE_RESET_EVERY epochs (skip ep 0)
        if ep > 0 and ep % DEAD_CODE_RESET_EVERY == 0:
            with torch.no_grad():
                # Compute usage from a forward pass over a random training batch
                usage_batch = rng.permutation(train_ids)[:min(BATCH_SIZE * 4, len(train_ids))]
                u_views = [v[usage_batch].to(DEVICE) for v in agent_views]
                # Per-sender usage tracking
                for s, v in zip(sender.senders, u_views):
                    _, _ = s(v, tau=tau, track_usage=True)
                    s.reset_dead_codes(v, s._last_usage)
        if ep - best_ep > EARLY_STOP_PATIENCE * 2 and best_acc > chance + 0.05: break
        if ep > 0 and ep % RESET_EVERY == 0:
            for i in range(len(receivers)):
                receivers[i] = ClassifierReceiver(msg_dim, HIDDEN_DIM, n_classes).to(DEVICE)
                ros[i] = torch.optim.Adam(receivers[i].parameters(), lr=RECEIVER_LR)
        sender.train(); [r.train() for r in receivers]
        rng_ep = np.random.RandomState(seed * 10000 + ep)
        perm = rng_ep.permutation(train_ids)
        for b in range(n_batches):
            batch_ids = perm[b*BATCH_SIZE:(b+1)*BATCH_SIZE]
            if len(batch_ids) < 4: continue
            views = [v[batch_ids].to(DEVICE) for v in agent_views]
            tgts = labels_dev[batch_ids]
            msg, logits_list = sender(views, tau=tau)
            ce_loss = torch.tensor(0.0, device=DEVICE)
            for r in receivers:
                logits = r(msg)
                ce_loss = ce_loss + F.cross_entropy(logits, tgts)
            ce_loss = ce_loss / len(receivers)
            # Total objective: mean receiver CE + accumulated VQ terms, i.e.
            #   beta * ||z - sg(z_q)||^2 (commitment) + ||z_q - sg(z)||^2 (codebook)
            loss = ce_loss + sender._last_commit_loss.squeeze()
            if torch.isnan(loss):
                so.zero_grad(); [o.zero_grad() for o in ros]; continue
            so.zero_grad(); [o.zero_grad() for o in ros]
            loss.backward()
            torch.nn.utils.clip_grad_norm_(sender.parameters(), 1.0)
            so.step(); [o.step() for o in ros]
        if ep % 50 == 0 and DEVICE.type == "mps": torch.mps.empty_cache()
        if (ep + 1) % 10 == 0 or ep == 0:
            sender.eval(); [r.eval() for r in receivers]
            with torch.no_grad():
                v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
                msg_ho, _ = sender(v_ho)
                tgt_ho = labels_dev[holdout_ids]
                best_per_recv = 0.0; best_idx = 0
                for ri, r in enumerate(receivers):
                    acc = (r(msg_ho).argmax(-1) == tgt_ho).float().mean().item()
                    if acc > best_per_recv:
                        best_per_recv = acc; best_idx = ri
                if best_per_recv > best_acc:
                    best_acc = best_per_recv; best_ep = ep
                    best_sender_state = {k: v.cpu().clone() for k, v in sender.state_dict().items()}
                    best_receiver_states = [
                        {k: v.cpu().clone() for k, v in r.state_dict().items()}
                        for r in receivers]
                    best_recv_idx = best_idx
    return {
        "sender_state": best_sender_state,
        "receiver_states": best_receiver_states,
        "best_recv_idx": best_recv_idx,
        "train_ids": train_ids, "holdout_ids": holdout_ids,
        "task_acc": best_acc, "chance": chance,
        "n_classes_per_prop": [n_classes],
        "fpa": 1, "dim": dim,
        "n_heads": n_heads, "vocab_size": vocab_size,
        "code_dim": code_dim,
        "msg_dim": msg_dim,
    }


def vq_token_extract(base, feat):
    """Extract token indices from VQ sender for compositionality metrics."""
    senders = [VQSender(TemporalEncoder(HIDDEN_DIM, base["dim"], base["fpa"]),
                         HIDDEN_DIM, base["vocab_size"], base["n_heads"], base["code_dim"]).to(DEVICE)
               for _ in range(N_AGENTS)]
    sender = VQMultiSender(senders).to(DEVICE)
    sender.load_state_dict(base["sender_state"])
    sender.eval()
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
    with torch.no_grad():
        N = feat.shape[0]
        # Process the full feat tensor as one batch (N is small here)
        v_in = [v.to(DEVICE) for v in agent_views]
        msg, _ = sender(v_in)
        # msg is [N, vocab_size * n_heads * N_AGENTS], one-hot per position
        # Convert back to indices
        msg_reshaped = msg.view(N, N_AGENTS, base["n_heads"], base["vocab_size"])
        tokens = msg_reshaped.argmax(-1)  # [N, N_AGENTS, n_heads]
        # Flatten across agents and heads to get a token vector
        tokens = tokens.reshape(N, N_AGENTS * base["n_heads"]).cpu().numpy()
    return tokens
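
# Token layout note (illustrative; assumes N_AGENTS=2 and a 3-position config):
# vq_token_extract returns an int array of shape [N, 6] (agents x positions),
# which discrete_topsim/discrete_posdis consume exactly like the Gumbel-sender
# token arrays in the existing sweep.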


def vq_train_recv_frozen(base, feat_tgt, labels_tgt, train_ids, holdout_ids, seed, n_target, n_epochs=80):
    """Freeze VQ sender; train fresh receiver on n_target stratified target examples."""
    if n_target == 0:
        # Zero-shot: apply source receiver directly
        senders = [VQSender(TemporalEncoder(HIDDEN_DIM, base["dim"], base["fpa"]),
                             HIDDEN_DIM, base["vocab_size"], base["n_heads"], base["code_dim"]).to(DEVICE)
                   for _ in range(N_AGENTS)]
        sender = VQMultiSender(senders).to(DEVICE)
        sender.load_state_dict(base["sender_state"])
        sender.eval()
        n_classes = base["n_classes_per_prop"][0]
        receiver = ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, n_classes).to(DEVICE)
        receiver.load_state_dict(base["receiver_states"][base["best_recv_idx"]])
        receiver.eval()
        agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)]
        labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE)
        with torch.no_grad():
            v_in = [v[holdout_ids].to(DEVICE) for v in agent_views]
            msg, _ = sender(v_in)
            tgt = labels_dev[holdout_ids]
            return float((receiver(msg).argmax(-1) == tgt).float().mean())
    # Sample n_target examples from train_ids, stratified across target classes
    rng = np.random.RandomState(seed * 7 + 13)
    classes = np.unique(labels_tgt[train_ids])
    n_per_class = max(1, n_target // len(classes))
    sub_train = []
    for c in classes:
        cand = train_ids[labels_tgt[train_ids] == c]
        rng.shuffle(cand)
        sub_train.extend(cand[:n_per_class])
    sub_train = np.array(sub_train)
    # Build sender, freeze
    senders = [VQSender(TemporalEncoder(HIDDEN_DIM, base["dim"], base["fpa"]),
                         HIDDEN_DIM, base["vocab_size"], base["n_heads"], base["code_dim"]).to(DEVICE)
               for _ in range(N_AGENTS)]
    sender = VQMultiSender(senders).to(DEVICE)
    sender.load_state_dict(base["sender_state"])
    sender.eval()
    for p in sender.parameters(): p.requires_grad = False
    n_classes = base["n_classes_per_prop"][0]
    receiver = ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, n_classes).to(DEVICE)
    opt = torch.optim.Adam(receiver.parameters(), lr=RECEIVER_LR)
    agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)]
    labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE)
    best_acc = 0.0
    for ep in range(n_epochs):
        receiver.train()
        rng_ep = np.random.RandomState(seed * 100 + ep)
        perm = rng_ep.permutation(sub_train)
        n_batches = max(1, len(perm) // 16)
        for b in range(n_batches):
            batch_ids = perm[b*16:(b+1)*16]
            if len(batch_ids) < 4: continue
            with torch.no_grad():
                v_in = [v[batch_ids].to(DEVICE) for v in agent_views]
                msg, _ = sender(v_in)
            tgts = labels_dev[batch_ids]
            logits = receiver(msg)
            loss = F.cross_entropy(logits, tgts)
            opt.zero_grad(); loss.backward(); opt.step()
        if ep % 5 == 0 or ep == n_epochs - 1:
            receiver.eval()
            with torch.no_grad():
                v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
                msg_ho, _ = sender(v_ho)
                tgt_ho = labels_dev[holdout_ids]
                acc = (receiver(msg_ho).argmax(-1) == tgt_ho).float().mean().item()
                if acc > best_acc: best_acc = acc
    return float(best_acc)


def main():
    log("=" * 60)
    log("EXP Q VQ-VAE: VQ-VAE bottleneck configs")

    feat_c = load_feat_subsampled("collision", "vjepa2")
    feat_r = load_feat_subsampled("ramp", "vjepa2")
    z = np.load("results/kinematics_vs_mechanics/labels_collision.npz")
    rest_3 = z["restitution_bin"]
    lbl_r_3 = load_labels("ramp", "restitution")

    # Configurations: (name, L, V, code_dim)
    configs = [
        ("vq_L2_V8_d8",   2, 8,  8),
        ("vq_L3_V8_d8",   3, 8,  8),
        ("vq_L3_V16_d8",  3, 16, 8),
        ("vq_L4_V16_d8",  4, 16, 8),
    ]
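    # Message width per config is V * L * N_AGENTS one-hot dims (msg_dim in
    # train_vq); e.g. vq_L3_V16_d8 gives 48 * N_AGENTS dims.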

    rows = []
    for name, L, V, D in configs:
        log(f"\n  --- {name} (L={L}, V={V}, code_dim={D}) ---")
        within_accs = []; bases = []
        for seed in range(N_SEEDS):
            t0 = time.time()
            try:
                base = train_vq(feat_c, rest_3, seed, L, V, n_epochs=150, code_dim=D)
                bases.append(base); within_accs.append(float(base["task_acc"]))
                log(f"    {name} s{seed}: within={base['task_acc']:.3f}  [{time.time()-t0:.0f}s]")
            except Exception as e:
                log(f"    {name} s{seed} FAILED: {e}")
                bases.append(None); within_accs.append(float("nan"))

        valid = [(i, a) for i, a in enumerate(within_accs) if not np.isnan(a)]
        if not valid:
            rows.append({"name": name, "within": float("nan"),
                         "topsim": float("nan"), "posdis": float("nan"),
                         "causal": float("nan"),
                         "cross_n16": float("nan"), "cross_n192": float("nan")})
            continue
        best_idx = max(valid, key=lambda x: x[1])[0]
        best_base = bases[best_idx]
        ho_ids = best_base["holdout_ids"]

        # Compute compositionality metrics on holdout (single-property: matches sweep rows 1-12)
        try:
            tokens = vq_token_extract(best_base, feat_c)  # [N, N_AGENTS * L]
            tokens_ho = tokens[ho_ids]
            ts = discrete_topsim(tokens_ho, rest_3[ho_ids])
            pd_ = discrete_posdis(tokens_ho, rest_3[ho_ids])
            cs = float("nan")  # causal-spec deferred; VQ uses the same receiver, can be added later
        except Exception as e:
            import traceback
            log(f"    {name} metric FAILED: {e}\n{traceback.format_exc()}")
            ts = pd_ = cs = float("nan")

        # Cross-scenario coll->ramp at N=16 and N=192
        cross_n16 = []; cross_n192 = []
        for seed in range(N_SEEDS):
            tr_t, ho_t = make_splits(lbl_r_3, seed)
            for N_target, lst in [(16, cross_n16), (192, cross_n192)]:
                try:
                    acc = vq_train_recv_frozen(best_base, feat_r, lbl_r_3, tr_t, ho_t, seed, N_target)
                    lst.append(float(acc))
                except Exception as e:
                    log(f"    {name} cross s{seed} N={N_target} FAILED: {e}")

        m16 = float(np.mean(cross_n16)) if cross_n16 else float("nan")
        m192 = float(np.mean(cross_n192)) if cross_n192 else float("nan")
        log(f"    {name}: within={float(np.nanmean(within_accs)):.3f} TopSim={ts:+.2f} PosDis={pd_:.2f} cross16={m16*100:.1f}% cross192={m192*100:.1f}%")
        rows.append({"name": name, "L": L, "V": V, "code_dim": D,
                     "within": float(np.nanmean(within_accs)),
                     "topsim": float(ts), "posdis": float(pd_), "causal": float(cs),
                     "cross_n16": m16, "cross_n192": m192})

    SUMMARY = ["EXP Q VQ-VAE -- VQ-VAE bottleneck configurations on V-JEPA 2 collision",
               "",
               f"{'Config':<22s} | {'Within':>7s} | {'TopSim':>7s} | {'PosDis':>7s} | {'Cross16':>8s} | {'Cross192':>9s}",
               "-" * 76]
    for r in rows:
        SUMMARY.append(
            f"{r['name']:<22s} | {r['within']*100:6.1f}% | {r['topsim']:+7.2f} | "
            f"{r['posdis']:7.2f} | {r['cross_n16']*100:7.1f}% | {r['cross_n192']*100:8.1f}%"
        )
    print("\n".join(SUMMARY), flush=True)
    with open(OUT / "exp_q_vqvae_summary.txt", "w") as fh:
        fh.write("\n".join(SUMMARY) + "\n")
    with open(OUT / "exp_q_vqvae_summary.json", "w") as fh:
        json.dump(rows, fh, indent=2)
    end_ts = datetime.now(timezone.utc).isoformat()
    runtime_min = (time.time() - T0) / 60.0
    print(f"\nEND_TIME = {end_ts}\nTotal runtime: {runtime_min:.2f} min", flush=True)


if __name__ == "__main__":
    main()