| """ |
| EXP REV-Q-VQ: Vector-quantised (VQ-VAE) bottleneck configurations on V-JEPA 2. |
| |
R2 has explicitly asked for VQ-VAE configs in the last 3 review rounds. This
script implements a multi-agent, multi-position VQ-VAE sender and runs four
configurations on collision restitution to (a) compute within-scenario
TopSim/PosDis (the causal-spec column is emitted but left as NaN here), and
(b) measure cross-scenario transfer at N=16 and N=192 on collision -> ramp.
| |
VQ design: each sender has N_positions codebooks, each with V_codes entries of
dimension D_code. The hidden representation is projected to D_code per position,
then quantised to the nearest codebook entry. The output to the receiver is the
one-hot index per position (same dimensionality as the Gumbel-Softmax sender), so
PosDis/TopSim/causal-spec all apply unchanged. Training uses the straight-through
estimator + commitment loss (beta=0.25) + codebook loss; a standalone sketch of
the quantise/straight-through step follows the VQSender class.
| |
| If VQ configs land in the same 41-56% cross-scenario band as the existing |
| 24-config sweep, the sufficiency claim extends beyond Gumbel-Softmax/tanh. |
| """ |
import json, time, sys, os
| from pathlib import Path |
| from datetime import datetime, timezone |
| import numpy as np |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
|
|
| PROMPT_RECEIVED_TIME = datetime.now(timezone.utc).isoformat() |
| print(f"PROMPT_RECEIVED_TIME = {PROMPT_RECEIVED_TIME}", flush=True) |
| T0 = time.time() |
|
|
| sys.path.insert(0, os.path.dirname(__file__)) |
| from _kinematics_train import ( |
| DEVICE, ClassifierReceiver, HIDDEN_DIM, N_AGENTS, BATCH_SIZE, |
| SENDER_LR, RECEIVER_LR, EARLY_STOP_PATIENCE, |
| ) |
| from _killer_experiment import TemporalEncoder |
| from _overnight_p1_transfer import make_splits, train_receiver_frozen_sender |
| from _overnight_p3_matrix import load_labels, load_feat_subsampled |
from _rev_q_posdis_scatter import discrete_topsim, discrete_posdis
|
|
| OUT = Path("results/reviewer_response/exp_q_vqvae") |
| OUT.mkdir(parents=True, exist_ok=True) |
N_SEEDS = 3
N_LIST = [16, 192]      # target-scenario training-set sizes for transfer
COMMIT_BETA = 0.25      # commitment-loss weight (standard VQ-VAE value)
|
|
|
|
| class VQSender(nn.Module): |
| """VQ-VAE sender: encoder -> per-position projection -> VQ codebook -> one-hot.""" |
    def __init__(self, encoder, hd, vs, nh, code_dim=8):
        super().__init__()
        self.encoder = encoder
        self.vs = vs              # codebook size (V_codes)
        self.nh = nh              # number of positions (N_positions)
        self.code_dim = code_dim
        self.heads = nn.ModuleList([nn.Linear(hd, code_dim) for _ in range(nh)])
        # one codebook per position, shape (V_codes, D_code); small init keeps
        # initial code distances comparable across codes
        self.codebooks = nn.ParameterList(
            [nn.Parameter(torch.randn(vs, code_dim) * 0.1) for _ in range(nh)]
        )
        self._last_commit_loss = torch.zeros(1)
|
|
    def init_codebooks_from_data(self, x):
        """Data-dependent codebook init: copy V projected z's sampled from a batch."""
        with torch.no_grad():
            h = self.encoder(x)
            for head_idx, head in enumerate(self.heads):
                z = head(h)
                if z.size(0) >= self.vs:
                    # enough samples: take V distinct rows
                    perm = torch.randperm(z.size(0), device=z.device)[:self.vs]
                    self.codebooks[head_idx].data.copy_(z[perm])
                else:
                    # fewer samples than codes: sample rows with replacement
                    idx = torch.randint(z.size(0), (self.vs,), device=z.device)
                    self.codebooks[head_idx].data.copy_(z[idx])
|
|
    def reset_dead_codes(self, x, code_usage):
        """For each head, reset codes used on <1% of the probe batch to random data points."""
        with torch.no_grad():
            h = self.encoder(x)
            for head_idx, head in enumerate(self.heads):
                z = head(h)
                usage = code_usage[head_idx]
                dead = (usage < 0.01).nonzero(as_tuple=True)[0]
                if len(dead) == 0:
                    continue
                if z.size(0) >= len(dead):
                    perm = torch.randperm(z.size(0), device=z.device)[:len(dead)]
                    self.codebooks[head_idx].data[dead] = z[perm]
                # if the probe batch has fewer points than dead codes, skip
                # this round; the next reset sees a fresh batch
|
|
    def forward(self, x, tau=1.0, hard=True, track_usage=False):
        # `hard` is unused: the VQ message is always a hard one-hot with soft
        # gradients (kept so the call signature matches the Gumbel-Softmax sender)
        h = self.encoder(x)
        msgs, logits_all = [], []
        commit_loss = torch.zeros(1, device=h.device)
        usage_per_head = [] if track_usage else None
        for head, codebook in zip(self.heads, self.codebooks):
            z = head(h)
            # squared distances ||z - e_k||^2 via the expansion
            # ||z||^2 + ||e_k||^2 - 2 z.e_k, shape (B, V)
            dists = (z.pow(2).sum(-1, keepdim=True)
                     + codebook.pow(2).sum(-1).unsqueeze(0)
                     - 2 * z @ codebook.t())
            indices = dists.argmin(-1)
            z_q = codebook[indices]
            # commitment loss (pulls z toward its code) and codebook loss
            # (pulls the code toward z)
            commit_loss = commit_loss + COMMIT_BETA * F.mse_loss(z, z_q.detach())
            commit_loss = commit_loss + F.mse_loss(z_q, z.detach())
            # straight-through one-hot: forward pass emits the hard assignment,
            # backward pass uses the tau-tempered softmax over negative distances
            soft = F.softmax(-dists / max(tau, 1e-3), dim=-1)
            hard_oh = F.one_hot(indices, self.vs).float()
            msg = soft + (hard_oh - soft).detach()
            msgs.append(msg)
            logits_all.append(-dists)
            if track_usage:
                # fraction of the batch assigned to each code (for dead-code resets)
                usage_per_head.append(hard_oh.detach().mean(0))
        self._last_commit_loss = commit_loss
        if track_usage:
            self._last_usage = usage_per_head
        return torch.cat(msgs, -1), logits_all
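

# Illustrative sketch (not used by the experiment): the quantise +
# straight-through step from VQSender.forward on a toy batch, so the gradient
# path is easy to inspect in isolation. The `_demo_*` name, toy shapes, and
# the random linear readout are ours, not part of the pipeline.
def _demo_vq_straight_through(batch=4, n_codes=16, code_dim=8, tau=1.0):
    torch.manual_seed(0)
    z = torch.randn(batch, code_dim, requires_grad=True)       # projected hidden
    codebook = torch.randn(n_codes, code_dim, requires_grad=True)
    # squared distances via ||z||^2 + ||e||^2 - 2 z.e, shape (batch, n_codes)
    dists = (z.pow(2).sum(-1, keepdim=True)
             + codebook.pow(2).sum(-1).unsqueeze(0)
             - 2 * z @ codebook.t())
    soft = F.softmax(-dists / tau, dim=-1)                     # backward path
    hard_oh = F.one_hot(dists.argmin(-1), n_codes).float()     # forward path
    msg = soft + (hard_oh - soft).detach()
    # toy readout: gradients reach z and the codebook through `soft` only
    loss = (msg @ torch.randn(n_codes)).pow(2).mean()
    loss.backward()
    assert z.grad is not None and codebook.grad is not None
    return msg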
|
|
|
|
class VQMultiSender(nn.Module):
    """One VQSender per agent view; concatenates messages, sums commit losses."""
    def __init__(self, senders):
        super().__init__()
        self.senders = nn.ModuleList(senders)


    def forward(self, views, tau=1.0, hard=True):
        msgs, all_logits = [], []
        commit = torch.zeros(1, device=views[0].device)
        for s, v in zip(self.senders, views):
            msg, logits = s(v, tau, hard)
            msgs.append(msg)
            all_logits.extend(logits)
            commit = commit + s._last_commit_loss
        self._last_commit_loss = commit
        return torch.cat(msgs, -1), all_logits
|
|
|
|
| def log(msg): |
| ts = datetime.now(timezone.utc).strftime("%H:%M:%SZ") |
| print(f"[{ts}] EXP-VQ: {msg}", flush=True) |
|
|
|
|
def train_vq(feat, labels, seed, n_heads, vocab_size, n_epochs=150, code_dim=8):
    """Train VQ-VAE bottleneck on within-scenario task."""
    _, _, dim = feat.shape
    fpa = 1
    msg_dim = vocab_size * n_heads * N_AGENTS   # one one-hot per position per agent
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
    torch.manual_seed(seed); np.random.seed(seed)
    rng = np.random.RandomState(seed * 1000 + 42)
|
|
    # stratified split: hold out ~20% of each class for within-scenario eval
    train_ids, holdout_ids = [], []
    for c in np.unique(labels):
        ids_c = np.where(labels == c)[0]
        rng.shuffle(ids_c)
        split = max(1, len(ids_c) // 5)
        holdout_ids.extend(ids_c[:split]); train_ids.extend(ids_c[split:])
    train_ids = np.array(train_ids); holdout_ids = np.array(holdout_ids)
| n_classes = int(labels.max()) + 1 |
| chance = 1.0 / n_classes |
|
|
| senders = [VQSender(TemporalEncoder(HIDDEN_DIM, dim, fpa), HIDDEN_DIM, |
| vocab_size, n_heads, code_dim).to(DEVICE) |
| for _ in range(N_AGENTS)] |
| sender = VQMultiSender(senders).to(DEVICE) |
| |
| |
    # data-dependent codebook init from the first few training batches
    with torch.no_grad():
        init_batch_ids = train_ids[:min(BATCH_SIZE * 4, len(train_ids))]
        init_views = [v[init_batch_ids].to(DEVICE) for v in agent_views]
        for s, v in zip(sender.senders, init_views):
            s.init_codebooks_from_data(v)
    # ensemble of 3 receivers, periodically re-initialised (see RESET_EVERY)
    receivers = [ClassifierReceiver(msg_dim, HIDDEN_DIM, n_classes).to(DEVICE)
                 for _ in range(3)]
    so = torch.optim.Adam(sender.parameters(), lr=SENDER_LR)
    ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]
    labels_dev = torch.tensor(labels, dtype=torch.long).to(DEVICE)
    n_batches = max(1, len(train_ids) // BATCH_SIZE)
    best_acc = 0.0; best_ep = 0
    best_sender_state = None; best_receiver_states = None; best_recv_idx = 0
    RESET_EVERY = 40              # receiver re-init interval (epochs)
    DEAD_CODE_RESET_EVERY = 20    # dead-code reset interval (epochs)
|
|
| for ep in range(n_epochs): |
| |
        # anneal the softmax temperature from 3.0 to 1.0 over the first half
        tau = max(1.0, 3.0 - 2.0 * ep / max(1, n_epochs // 2))
| |
        if ep > 0 and ep % DEAD_CODE_RESET_EVERY == 0:
            with torch.no_grad():
                # measure per-code usage on a fixed probe batch, then re-seed
                # codes used on <1% of it with fresh data projections
                usage_batch = train_ids[:min(BATCH_SIZE * 4, len(train_ids))]
                u_views = [v[usage_batch].to(DEVICE) for v in agent_views]
                for s, v in zip(sender.senders, u_views):
                    _, _ = s(v, tau=tau, track_usage=True)
                    s.reset_dead_codes(v, s._last_usage)
        # early stop once clearly above chance and stalled for 2x patience
        if ep - best_ep > EARLY_STOP_PATIENCE * 2 and best_acc > chance + 0.05: break
        # periodic receiver re-initialisation keeps messages decodable by
        # fresh listeners rather than co-adapted to one receiver
        if ep > 0 and ep % RESET_EVERY == 0:
            for i in range(len(receivers)):
                receivers[i] = ClassifierReceiver(msg_dim, HIDDEN_DIM, n_classes).to(DEVICE)
                ros[i] = torch.optim.Adam(receivers[i].parameters(), lr=RECEIVER_LR)
| sender.train(); [r.train() for r in receivers] |
| rng_ep = np.random.RandomState(seed * 10000 + ep) |
| perm = rng_ep.permutation(train_ids) |
| for b in range(n_batches): |
| batch_ids = perm[b*BATCH_SIZE:(b+1)*BATCH_SIZE] |
| if len(batch_ids) < 4: continue |
| views = [v[batch_ids].to(DEVICE) for v in agent_views] |
| tgts = labels_dev[batch_ids] |
| msg, logits_list = sender(views, tau=tau) |
| ce_loss = torch.tensor(0.0, device=DEVICE) |
| for r in receivers: |
| logits = r(msg) |
| ce_loss = ce_loss + F.cross_entropy(logits, tgts) |
| ce_loss = ce_loss / len(receivers) |
| |
            # task CE (averaged over receivers) + commitment/codebook losses
            loss = ce_loss + sender._last_commit_loss.squeeze()
| if torch.isnan(loss): |
| so.zero_grad(); [o.zero_grad() for o in ros]; continue |
| so.zero_grad(); [o.zero_grad() for o in ros] |
| loss.backward() |
| torch.nn.utils.clip_grad_norm_(sender.parameters(), 1.0) |
| so.step(); [o.step() for o in ros] |
| if ep % 50 == 0 and DEVICE.type == "mps": torch.mps.empty_cache() |
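        # evaluate every 10 epochs; snapshot the best sender/receiver state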
| if (ep + 1) % 10 == 0 or ep == 0: |
| sender.eval(); [r.eval() for r in receivers] |
| with torch.no_grad(): |
| v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views] |
| msg_ho, _ = sender(v_ho) |
| tgt_ho = labels_dev[holdout_ids] |
| best_per_recv = 0.0; best_idx = 0 |
| for ri, r in enumerate(receivers): |
| acc = (r(msg_ho).argmax(-1) == tgt_ho).float().mean().item() |
| if acc > best_per_recv: |
| best_per_recv = acc; best_idx = ri |
| if best_per_recv > best_acc: |
| best_acc = best_per_recv; best_ep = ep |
| best_sender_state = {k: v.cpu().clone() for k, v in sender.state_dict().items()} |
| best_receiver_states = [ |
| {k: v.cpu().clone() for k, v in r.state_dict().items()} |
| for r in receivers] |
| best_recv_idx = best_idx |
| return { |
| "sender_state": best_sender_state, |
| "receiver_states": best_receiver_states, |
| "best_recv_idx": best_recv_idx, |
| "train_ids": train_ids, "holdout_ids": holdout_ids, |
| "task_acc": best_acc, "chance": chance, |
| "n_classes_per_prop": [n_classes], |
| "fpa": 1, "dim": dim, |
| "n_heads": n_heads, "vocab_size": vocab_size, |
| "code_dim": code_dim, |
| "msg_dim": msg_dim, |
| } |
|
|
|
|
def vq_token_extract(base, feat):
    """Extract per-position token indices from a trained VQ sender for metrics."""
    senders = [VQSender(TemporalEncoder(HIDDEN_DIM, base["dim"], base["fpa"]),
                        HIDDEN_DIM, base["vocab_size"], base["n_heads"], base["code_dim"]).to(DEVICE)
               for _ in range(N_AGENTS)]
    sender = VQMultiSender(senders).to(DEVICE)
    sender.load_state_dict(base["sender_state"])
    sender.eval()
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
    with torch.no_grad():
        N = feat.shape[0]
        v_in = [v.to(DEVICE) for v in agent_views]
        msg, _ = sender(v_in)
        # msg concatenates agents, then heads within each agent, so it views
        # cleanly as (N, N_AGENTS, n_heads, vocab_size); argmax over the last
        # axis recovers the discrete code index per position
        msg_reshaped = msg.view(N, N_AGENTS, base["n_heads"], base["vocab_size"])
        tokens = msg_reshaped.argmax(-1)
        # flatten to (N, N_AGENTS * n_heads) token sequences for TopSim/PosDis
        tokens = tokens.reshape(N, N_AGENTS * base["n_heads"]).cpu().numpy()
    return tokens
|
|
|
|
| def vq_train_recv_frozen(base, feat_tgt, labels_tgt, train_ids, holdout_ids, seed, n_target, n_epochs=80): |
| """Freeze VQ sender; train fresh receiver on n_target stratified target examples.""" |
    if n_target == 0:
        # zero-shot: reuse the best source-trained receiver unchanged
| senders = [VQSender(TemporalEncoder(HIDDEN_DIM, base["dim"], base["fpa"]), |
| HIDDEN_DIM, base["vocab_size"], base["n_heads"], base["code_dim"]).to(DEVICE) |
| for _ in range(N_AGENTS)] |
| sender = VQMultiSender(senders).to(DEVICE) |
| sender.load_state_dict(base["sender_state"]) |
| sender.eval() |
| n_classes = base["n_classes_per_prop"][0] |
| receiver = ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, n_classes).to(DEVICE) |
| receiver.load_state_dict(base["receiver_states"][base["best_recv_idx"]]) |
| receiver.eval() |
| agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)] |
| labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE) |
| with torch.no_grad(): |
| v_in = [v[holdout_ids].to(DEVICE) for v in agent_views] |
| msg, _ = sender(v_in) |
| tgt = labels_dev[holdout_ids] |
| return float((receiver(msg).argmax(-1) == tgt).float().mean()) |
| |
| rng = np.random.RandomState(seed * 7 + 13) |
    n_per_class = max(1, n_target // 3)   # assumes the 3-class restitution task
| sub_train = [] |
| for c in np.unique(labels_tgt[train_ids]): |
| cand = train_ids[labels_tgt[train_ids] == c] |
| rng.shuffle(cand) |
| sub_train.extend(cand[:n_per_class]) |
| sub_train = np.array(sub_train) |
| |
| senders = [VQSender(TemporalEncoder(HIDDEN_DIM, base["dim"], base["fpa"]), |
| HIDDEN_DIM, base["vocab_size"], base["n_heads"], base["code_dim"]).to(DEVICE) |
| for _ in range(N_AGENTS)] |
| sender = VQMultiSender(senders).to(DEVICE) |
| sender.load_state_dict(base["sender_state"]) |
| sender.eval() |
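    # freeze the source-trained sender; only the fresh receiver is trained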
| for p in sender.parameters(): p.requires_grad = False |
| n_classes = base["n_classes_per_prop"][0] |
| receiver = ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, n_classes).to(DEVICE) |
| opt = torch.optim.Adam(receiver.parameters(), lr=RECEIVER_LR) |
| agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)] |
| labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE) |
| best_acc = 0.0 |
| for ep in range(n_epochs): |
| receiver.train() |
| rng_ep = np.random.RandomState(seed * 100 + ep) |
| perm = rng_ep.permutation(sub_train) |
| n_batches = max(1, len(perm) // 16) |
| for b in range(n_batches): |
| batch_ids = perm[b*16:(b+1)*16] |
| if len(batch_ids) < 4: continue |
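            # sender is frozen, so messages are computed without gradients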
| with torch.no_grad(): |
| v_in = [v[batch_ids].to(DEVICE) for v in agent_views] |
| msg, _ = sender(v_in) |
| tgts = labels_dev[batch_ids] |
| logits = receiver(msg) |
| loss = F.cross_entropy(logits, tgts) |
| opt.zero_grad(); loss.backward(); opt.step() |
| if ep % 5 == 0 or ep == n_epochs - 1: |
| receiver.eval() |
| with torch.no_grad(): |
| v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views] |
| msg_ho, _ = sender(v_ho) |
| tgt_ho = labels_dev[holdout_ids] |
| acc = (receiver(msg_ho).argmax(-1) == tgt_ho).float().mean().item() |
| if acc > best_acc: best_acc = acc |
| return float(best_acc) |
|
|
|
|
| def main(): |
| log("=" * 60) |
| log("EXP Q VQ-VAE: VQ-VAE bottleneck configs") |
|
|
| feat_c = load_feat_subsampled("collision", "vjepa2") |
| feat_r = load_feat_subsampled("ramp", "vjepa2") |
    z = np.load("results/kinematics_vs_mechanics/labels_collision.npz")
    rest_3 = z["restitution_bin"]                  # 3-way restitution bins (source)
    lbl_r_3 = load_labels("ramp", "restitution")   # target-scenario labels
|
|
| |
    # (name, n_positions L, codebook size V, code dim D)
    configs = [
| ("vq_L2_V8_d8", 2, 8, 8), |
| ("vq_L3_V8_d8", 3, 8, 8), |
| ("vq_L3_V16_d8", 3, 16, 8), |
| ("vq_L4_V16_d8", 4, 16, 8), |
| ] |
|
|
| rows = [] |
| for name, L, V, D in configs: |
| log(f"\n --- {name} (L={L}, V={V}, code_dim={D}) ---") |
| within_accs = []; bases = [] |
| for seed in range(N_SEEDS): |
| t0 = time.time() |
| try: |
| base = train_vq(feat_c, rest_3, seed, L, V, n_epochs=150, code_dim=D) |
| bases.append(base); within_accs.append(float(base["task_acc"])) |
| log(f" {name} s{seed}: within={base['task_acc']:.3f} [{time.time()-t0:.0f}s]") |
| except Exception as e: |
| log(f" {name} s{seed} FAILED: {e}") |
| bases.append(None); within_accs.append(float("nan")) |
|
|
        # keep only seeds that trained; token metrics use the best seed's run
        valid = [(i, a) for i, a in enumerate(within_accs) if not np.isnan(a)]
| if not valid: |
| rows.append({"name": name, "within": float("nan"), |
| "topsim": float("nan"), "posdis": float("nan"), |
| "causal": float("nan"), |
| "cross_n16": float("nan"), "cross_n192": float("nan")}) |
| continue |
| best_idx = max(valid, key=lambda x: x[1])[0] |
| best_base = bases[best_idx] |
| ho_ids = best_base["holdout_ids"] |
|
|
| |
        # compositionality metrics on the held-out source split
        try:
            tokens = vq_token_extract(best_base, feat_c)
            tokens_ho = tokens[ho_ids]
            ts = discrete_topsim(tokens_ho, rest_3[ho_ids])
            pd_ = discrete_posdis(tokens_ho, rest_3[ho_ids])
            cs = float("nan")   # causal-spec column kept in the output but not computed here
| except Exception as e: |
| import traceback |
| log(f" {name} metric FAILED: {e}\n{traceback.format_exc()}") |
| ts = pd_ = cs = float("nan") |
|
|
| |
        # cross-scenario transfer: frozen sender, fresh receiver on ramp
        cross_n16 = []; cross_n192 = []
        for seed in range(N_SEEDS):
            tr_t, ho_t = make_splits(lbl_r_3, seed)
            for N_target, lst in zip(N_LIST, [cross_n16, cross_n192]):
| try: |
| acc = vq_train_recv_frozen(best_base, feat_r, lbl_r_3, tr_t, ho_t, seed, N_target) |
| lst.append(float(acc)) |
| except Exception as e: |
| log(f" {name} cross s{seed} N={N_target} FAILED: {e}") |
|
|
| m16 = float(np.mean(cross_n16)) if cross_n16 else float("nan") |
| m192 = float(np.mean(cross_n192)) if cross_n192 else float("nan") |
| log(f" {name}: within={float(np.nanmean(within_accs)):.3f} TopSim={ts:+.2f} PosDis={pd_:.2f} cross16={m16*100:.1f}% cross192={m192*100:.1f}%") |
| rows.append({"name": name, "L": L, "V": V, "code_dim": D, |
| "within": float(np.nanmean(within_accs)), |
| "topsim": float(ts), "posdis": float(pd_), "causal": float(cs), |
| "cross_n16": m16, "cross_n192": m192}) |
|
|
| SUMMARY = ["EXP Q VQ-VAE -- VQ-VAE bottleneck configurations on V-JEPA 2 collision", |
| "", |
| f"{'Config':<22s} | {'Within':>7s} | {'TopSim':>7s} | {'PosDis':>7s} | {'Cross16':>8s} | {'Cross192':>9s}", |
| "-" * 76] |
| for r in rows: |
| SUMMARY.append( |
| f"{r['name']:<22s} | {r['within']*100:6.1f}% | {r['topsim']:+7.2f} | " |
| f"{r['posdis']:7.2f} | {r['cross_n16']*100:7.1f}% | {r['cross_n192']*100:8.1f}%" |
| ) |
| print("\n".join(SUMMARY), flush=True) |
| with open(OUT / "exp_q_vqvae_summary.txt", "w") as fh: |
| fh.write("\n".join(SUMMARY) + "\n") |
| with open(OUT / "exp_q_vqvae_summary.json", "w") as fh: |
| json.dump(rows, fh, indent=2) |
| end_ts = datetime.now(timezone.utc).isoformat() |
| runtime_min = (time.time() - T0) / 60.0 |
| print(f"\nEND_TIME = {end_ts}\nTotal runtime: {runtime_min:.2f} min", flush=True) |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|