| """ |
| EXP Q ADDENDUM 2: try to legitimately reach PosDis >= 0.7 with V-JEPA 2 multi- |
| property training, so the headline 0.76 in Table 1 has a clean provenance from |
| this paper's protocol. |
| |
| The Q addendum's discrete multi-prop topped at PosDis 0.51. To push higher we: |
| - use 5-class labels (mass {1..5}, restit {0.1..0.9}) instead of 3-class bins |
| - train for 400 epochs (vs 150) with iterated-learning receiver resets every |
| 30 epochs (vs 40) |
| - sweep (L, V) in {(2,5), (3,5), (4,5)} |
| |
| For each successful config, evaluate cross-scenario coll->ramp at N=16, N=192 |
| on restitution 3-class (matches the rest of the paper's cross protocol). |
| """ |
| import json, time, sys, os, math |
| from pathlib import Path |
| from datetime import datetime, timezone |
| import numpy as np |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
|
|
| sys.path.insert(0, os.path.dirname(__file__)) |
| from _kinematics_train import ( |
| DEVICE, ClassifierReceiver, |
| HIDDEN_DIM, N_AGENTS, BATCH_SIZE, SENDER_LR, RECEIVER_LR, |
| EARLY_STOP_PATIENCE, |
| ) |
| from _killer_experiment import TemporalEncoder, DiscreteSender, DiscreteMultiSender |
| from _overnight_p1_transfer import make_splits |
| from _overnight_p3_matrix import load_labels, load_feat_subsampled |
| from _rev_f_cnn_control import ci95 |
| from _rev_q_posdis_scatter import build_discrete_sender, discrete_token_extract |
| from _rev_n_multiprop_continuous import MultiPropReceiver |
| from _rev_q_addendum_multiprop import ( |
| discrete_multi_topsim, discrete_multi_posdis, discrete_multi_causal, |
| disc_multi_train_recv_frozen, |
| ) |
|
|
| OUT = Path("results/reviewer_response/exp_q_addendum2") |
| OUT.mkdir(parents=True, exist_ok=True) |
| N_SEEDS = 3 |
| N_LIST = [16, 192] |
| RESET_EVERY = 30 |
|
|
|
|
| def log(msg): |
| ts = datetime.now(timezone.utc).strftime("%H:%M:%SZ") |
| print(f"[{ts}] EXP-QADD2: {msg}", flush=True) |
|
|
|
|
| def train_disc_multi_5class(feat, labels_list, seed, n_heads, vocab_size, |
| n_epochs=400): |
| """Discrete multi-prop with aggressive iterated learning and 5-class labels.""" |
| N, nf, dim = feat.shape |
| fpa = 1 |
| msg_dim = vocab_size * n_heads * N_AGENTS |
| agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)] |
| torch.manual_seed(seed); np.random.seed(seed) |
| rng = np.random.RandomState(seed * 1000 + 42) |
|
|
| primary = labels_list[0] |
| train_ids, holdout_ids = [], [] |
| for c in np.unique(primary): |
| ids_c = np.where(primary == c)[0] |
| rng.shuffle(ids_c) |
| split = max(1, len(ids_c) // 5) |
| holdout_ids.extend(ids_c[:split]); train_ids.extend(ids_c[split:]) |
| train_ids = np.array(train_ids); holdout_ids = np.array(holdout_ids) |
| n_classes_per_prop = [int(lbl.max()) + 1 for lbl in labels_list] |
| chance = 1.0 / max(n_classes_per_prop) |
|
|
| sender = build_discrete_sender(dim, n_heads, vocab_size, fpa) |
| receivers = [MultiPropReceiver(msg_dim, HIDDEN_DIM, n_classes_per_prop).to(DEVICE) |
| for _ in range(3)] |
| so = torch.optim.Adam(sender.parameters(), lr=SENDER_LR) |
| ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers] |
| labels_dev = [torch.tensor(lbl, dtype=torch.long).to(DEVICE) for lbl in labels_list] |
| me = math.log(vocab_size) |
| n_batches = max(1, len(train_ids) // BATCH_SIZE) |
| best_acc = 0.0; best_ep = 0 |
| best_sender_state = None; best_receiver_states = None; best_recv_idx = 0 |
|
|
| for ep in range(n_epochs): |
| if ep - best_ep > EARLY_STOP_PATIENCE * 2 and best_acc > chance + 0.05: break |
| if ep > 0 and ep % RESET_EVERY == 0: |
| for i in range(len(receivers)): |
| receivers[i] = MultiPropReceiver(msg_dim, HIDDEN_DIM, n_classes_per_prop).to(DEVICE) |
| ros[i] = torch.optim.Adam(receivers[i].parameters(), lr=RECEIVER_LR) |
| sender.train(); [r.train() for r in receivers] |
| tau = 3.0 + (1.0 - 3.0) * ep / max(1, n_epochs - 1) |
| hard = ep >= 30 |
| rng_ep = np.random.RandomState(seed * 10000 + ep) |
| perm = rng_ep.permutation(train_ids) |
| for b in range(n_batches): |
| batch_ids = perm[b*BATCH_SIZE:(b+1)*BATCH_SIZE] |
| if len(batch_ids) < 4: continue |
| views = [v[batch_ids].to(DEVICE) for v in agent_views] |
| tgts = [ld[batch_ids] for ld in labels_dev] |
| msg, logits_list = sender(views, tau=tau, hard=hard) |
| loss = torch.tensor(0.0, device=DEVICE) |
| for r in receivers: |
| head_logits = r(msg) |
| for hl, tgt in zip(head_logits, tgts): |
| loss = loss + F.cross_entropy(hl, tgt) |
| loss = loss / (len(receivers) * len(tgts)) |
| for lg in logits_list: |
| lp = F.log_softmax(lg, -1); p = lp.exp().clamp(min=1e-8) |
| ent = -(p * lp).sum(-1).mean() |
| if ent / me < 0.1: loss = loss - 0.03 * ent |
| if torch.isnan(loss): |
| so.zero_grad(); [o.zero_grad() for o in ros]; continue |
| so.zero_grad(); [o.zero_grad() for o in ros] |
| loss.backward() |
| torch.nn.utils.clip_grad_norm_(sender.parameters(), 1.0) |
| so.step(); [o.step() for o in ros] |
| if ep % 50 == 0 and DEVICE.type == "mps": torch.mps.empty_cache() |
| if (ep + 1) % 10 == 0 or ep == 0: |
| sender.eval(); [r.eval() for r in receivers] |
| with torch.no_grad(): |
| v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views] |
| msg_ho, _ = sender(v_ho) |
| tgt_ho = [ld[holdout_ids] for ld in labels_dev] |
| best_per_recv = 0.0; best_idx = 0 |
| for ri, r in enumerate(receivers): |
| head_logits = r(msg_ho) |
| accs = [(hl.argmax(-1) == tgt).float().mean().item() |
| for hl, tgt in zip(head_logits, tgt_ho)] |
| combined = float(np.mean(accs)) |
| if combined > best_per_recv: |
| best_per_recv = combined; best_idx = ri |
| if best_per_recv > best_acc: |
| best_acc = best_per_recv; best_ep = ep |
| best_sender_state = {k: v.cpu().clone() for k, v in sender.state_dict().items()} |
| best_receiver_states = [ |
| {k: v.cpu().clone() for k, v in r.state_dict().items()} |
| for r in receivers] |
| best_recv_idx = best_idx |
| return { |
| "sender_state": best_sender_state, |
| "receiver_states": best_receiver_states, |
| "best_recv_idx": best_recv_idx, |
| "train_ids": train_ids, "holdout_ids": holdout_ids, |
| "task_acc": best_acc, "chance": chance, |
| "n_classes_per_prop": n_classes_per_prop, |
| "fpa": 1, "dim": dim, |
| "n_heads": n_heads, "vocab_size": vocab_size, |
| "msg_dim": msg_dim, |
| } |
|
|
|
|
| def main(): |
| t0 = time.time() |
| log("=" * 60) |
| log("EXP Q ADDENDUM 2: try to reach PosDis >= 0.7 with multi-prop V-JEPA 2") |
|
|
| feat_c = load_feat_subsampled("collision", "vjepa2") |
| feat_r = load_feat_subsampled("ramp", "vjepa2") |
| z = np.load("results/kinematics_vs_mechanics/labels_collision.npz") |
| |
| mass_5 = (z["mass_scalar"].astype(int) - 1).astype(np.int64) |
| rest_levels = [0.1, 0.3, 0.5, 0.7, 0.9] |
| rest_5 = np.zeros(len(z["restitution_scalar"]), dtype=np.int64) |
| for i, lvl in enumerate(rest_levels): |
| rest_5[np.isclose(z["restitution_scalar"], lvl)] = i |
| log(f" collision mass(5)={np.bincount(mass_5).tolist()} " |
| f"restit(5)={np.bincount(rest_5).tolist()}") |
| lbl_r_3 = load_labels("ramp", "restitution") |
|
|
| rows = [] |
| configs = [(2, 5), (3, 5), (4, 5)] |
| for H, V in configs: |
| name = f"disc_multi5_L{H}_V{V}" |
| log(f"\n --- {name} (5-class mass + 5-class restit, 400 epochs) ---") |
| within_accs = []; bases = [] |
| for seed in range(N_SEEDS): |
| t_s = time.time() |
| try: |
| base = train_disc_multi_5class(feat_c, [mass_5, rest_5], seed, H, V) |
| bases.append(base); within_accs.append(float(base["task_acc"])) |
| log(f" {name} s{seed}: combined within={base['task_acc']:.3f} " |
| f"[{time.time()-t_s:.0f}s]") |
| except Exception as e: |
| log(f" {name} s{seed} FAILED: {e}") |
| bases.append(None); within_accs.append(float("nan")) |
| valid = [(i, a) for i, a in enumerate(within_accs) if not np.isnan(a)] |
| if not valid: continue |
| best_idx = max(valid, key=lambda x: x[1])[0] |
| best_base = bases[best_idx] |
| ho_ids = best_base["holdout_ids"] |
| try: |
| tokens = discrete_token_extract(best_base, feat_c) |
| tokens_ho = tokens[ho_ids] |
| ts = discrete_multi_topsim(tokens_ho, [mass_5[ho_ids], rest_5[ho_ids]]) |
| pd_, _ = discrete_multi_posdis(tokens_ho, [mass_5[ho_ids], rest_5[ho_ids]]) |
| base_pp, drops = discrete_multi_causal(best_base, feat_c, |
| [mass_5, rest_5], ho_ids) |
| cs = float(drops.max()) |
| except Exception as e: |
| log(f" metrics FAILED: {e}") |
| ts = pd_ = cs = float("nan") |
| |
| |
| |
| cross = {n: [] for n in N_LIST} |
| for seed, base in enumerate(bases): |
| if base is None: |
| for n in N_LIST: cross[n].append(float("nan")) |
| continue |
| tr_t, ho_t = make_splits(lbl_r_3, seed) |
| for n in N_LIST: |
| try: |
| acc = disc_multi_train_recv_frozen(base, feat_r, lbl_r_3, |
| tr_t, ho_t, seed, n) |
| cross[n].append(float(acc)) |
| except Exception as e: |
| log(f" {name} s{seed} N={n} FAILED: {e}") |
| cross[n].append(float("nan")) |
| wm = float(np.mean([a for a in within_accs if not np.isnan(a)])) |
| cm = {n: float(np.mean([x for x in cross[n] if not np.isnan(x)])) |
| if any(not np.isnan(x) for x in cross[n]) else float("nan") |
| for n in N_LIST} |
| log(f" {name}: within={wm:.3f} TopSim={ts:.3f} PosDis={pd_:.3f} " |
| f"CausalSpec={cs:.3f} cross16={cm[16]:.3f} cross192={cm[192]:.3f}") |
| rows.append({ |
| "name": name, "type": "discrete_multi5", |
| "n_heads": H, "vocab_size": V, |
| "within": wm, "topsim": ts, "posdis": pd_, "causal_spec": cs, |
| "cross_n16": cm[16], "cross_n192": cm[192], |
| }) |
|
|
| |
| original_12 = [ |
| ("disc_L2_V5", "discrete", 0.88, 0.20, 0.02, 41.7, 43.9), |
| ("disc_L2_V10", "discrete", 0.84, 0.25, 0.05, 46.1, 41.7), |
| ("disc_L3_V5", "discrete", 0.84, 0.13, 0.02, 43.3, 42.8), |
| ("disc_L3_V10", "discrete", 0.84, 0.12, 0.01, 43.3, 45.6), |
| ("disc_L4_V5", "discrete", 0.90, 0.10, 0.01, 41.1, 42.2), |
| ("disc_L4_V10", "discrete", 0.82, 0.08, 0.02, 45.0, 45.0), |
| ("disc_L5_V5", "discrete", 0.89, 0.07, 0.02, 40.0, 43.9), |
| ("cont_dim2", "continuous", 0.92, 0.15, 0.20, 48.9, 54.4), |
| ("cont_dim3", "continuous", 0.91, 0.15, 0.02, 40.6, 41.1), |
| ("cont_dim5", "continuous", 0.89, 0.06, 0.03, 47.2, 43.9), |
| ("cont_dim10", "continuous", 0.88, 0.04, 0.01, 47.8, 48.3), |
| ("cont_dim20", "continuous", 0.90, 0.02, 0.00, 48.9, 55.0), |
| ] |
| addendum_3 = [ |
| ("disc_multi_L3_V5", "discrete_multi", 0.59, 0.51, 0.06, 40.0, 46.1), |
| ("disc_multi_L4_V10", "discrete_multi", 0.68, 0.48, 0.01, 45.6, 50.6), |
| ("cont_multi_dim3", "continuous_multi", 0.72, 0.40, 0.10, 50.6, 55.0), |
| ] |
| all_rows = list(original_12) + list(addendum_3) |
| for r in rows: |
| all_rows.append(( |
| r["name"], r["type"], r["topsim"], r["posdis"], r["causal_spec"], |
| r["cross_n16"] * 100 if r["cross_n16"] <= 1 else r["cross_n16"], |
| r["cross_n192"] * 100 if r["cross_n192"] <= 1 else r["cross_n192"], |
| )) |
|
|
| from scipy.stats import spearmanr |
| def safe_corr(idx_x, idx_y): |
| x = []; y = [] |
| for r in all_rows: |
| if not (np.isnan(r[idx_x]) or np.isnan(r[idx_y])): |
| x.append(r[idx_x]); y.append(r[idx_y]) |
| if len(x) < 4 or np.std(x) < 1e-9 or np.std(y) < 1e-9: |
| return float("nan"), float("nan") |
| rho, p = spearmanr(x, y) |
| return float(rho), float(p) |
|
|
| |
| def bootstrap_ci(idx_x, idx_y, n_boot=2000): |
| x_arr = np.array([r[idx_x] for r in all_rows |
| if not (np.isnan(r[idx_x]) or np.isnan(r[idx_y]))]) |
| y_arr = np.array([r[idx_y] for r in all_rows |
| if not (np.isnan(r[idx_x]) or np.isnan(r[idx_y]))]) |
| if len(x_arr) < 4: return (float("nan"), float("nan")) |
| rng = np.random.RandomState(42) |
| rhos = [] |
| for _ in range(n_boot): |
| idx = rng.randint(0, len(x_arr), len(x_arr)) |
| xs = x_arr[idx]; ys = y_arr[idx] |
| if np.std(xs) < 1e-9 or np.std(ys) < 1e-9: continue |
| rho, _ = spearmanr(xs, ys) |
| if not np.isnan(rho): rhos.append(rho) |
| if len(rhos) < 100: return (float("nan"), float("nan")) |
| return float(np.percentile(rhos, 2.5)), float(np.percentile(rhos, 97.5)) |
|
|
| corrs = {} |
| cis = {} |
| for met_name, met_idx in [("topsim", 2), ("posdis", 3), ("causal", 4)]: |
| for tgt_name, tgt_idx in [("n16", 5), ("n192", 6)]: |
| corrs[(met_name, tgt_name)] = safe_corr(met_idx, tgt_idx) |
| cis[(met_name, tgt_name)] = bootstrap_ci(met_idx, tgt_idx) |
|
|
| lines = [ |
| "EXP Q ADDENDUM 2 -- multi-property 5-class boost to reach high PosDis", |
| "", |
| f"{'Config':<22s} | {'TopSim':<8s} | {'PosDis':<8s} | {'CausalSpec':<12s} | " |
| f"{'Cross 16':<10s} | {'Cross 192':<10s}", |
| "-" * 90, |
| ] |
| for r in all_rows: |
| name, typ, ts, pd_, cs, c16, c192 = r |
| lines.append(f"{name:<22s} | {ts:+.2f} | {pd_:.2f} | {cs:.2f} " |
| f"| {c16:5.1f}% | {c192:5.1f}%") |
|
|
| lines.append("") |
| lines.append(f"FULL SWEEP SPEARMAN with bootstrap 95% CI (N={len(all_rows)}):") |
| for tgt in ["n16", "n192"]: |
| lines.append(f" vs cross_{tgt}:") |
| for met, label in [("topsim", "TopSim"), ("posdis", "PosDis"), |
| ("causal", "CausalSpec")]: |
| rho, p = corrs[(met, tgt)] |
| ci_lo, ci_hi = cis[(met, tgt)] |
| lines.append(f" {label:<12s}: rho={rho:+.2f} p={p:.3f} " |
| f"95% CI=[{ci_lo:+.2f}, {ci_hi:+.2f}]") |
|
|
| abs_max_rho = 0 |
| for k, (rho, p) in corrs.items(): |
| if not np.isnan(rho): abs_max_rho = max(abs_max_rho, abs(rho)) |
| lines.append(f"\nMax |rho| across 6 tests: {abs_max_rho:.2f}") |
| lines.append(f"Max PosDis in this paper's sweep: " |
| f"{max((r[3] for r in all_rows if not np.isnan(r[3])), default=float('nan')):.2f}") |
| lines.append(f"\nTotal runtime: {(time.time()-t0)/60:.1f} min") |
|
|
| summary = "\n".join(lines) |
| (OUT / "exp_q_addendum2_summary.txt").write_text(summary + "\n") |
| (OUT / "exp_q_addendum2_summary.json").write_text(json.dumps({ |
| "new_rows_5class": rows, |
| "all_rows_combined": [{"name": r[0], "type": r[1], "topsim": r[2], |
| "posdis": r[3], "causal_spec": r[4], |
| "cross_n16": r[5], "cross_n192": r[6]} |
| for r in all_rows], |
| "spearman": {f"{m}__{n}": list(v) for (m, n), v in corrs.items()}, |
| "bootstrap_95ci": {f"{m}__{n}": list(v) for (m, n), v in cis.items()}, |
| }, indent=2, default=str)) |
| print("\n" + summary, flush=True) |
| log(f"DONE in {(time.time()-t0)/60:.1f} min") |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|