| """ |
| EXP M (reviewer_response): CONTINUOUS COMPOSITIONAL BASELINE. |
| |
| The #1 reviewer objection: "you only tested DISCRETE bottleneck codes. |
| Continuous factorized representations might transfer fine." |
| |
| This experiment trains a CONTINUOUS bottleneck (same encoder + multi-agent |
| structure, but tanh-bounded real-valued codes instead of Gumbel one-hot) |
| on V-JEPA 2 collision restitution. We measure within-scenario TopSim, |
| PosDis, and causal specificity, then run the same N-shot cross-scenario |
| curve as Exp I. |
| |
| Two variants tried: |
| - code_dim=10 per agent (matches discrete dimensionally: 4 agents x 10 |
| = 40-dim message, same as discrete K=5 vocab x 2 heads x 4 agents) |
| - code_dim=3 per agent (small bottleneck, matches Option B from prompt) |
| |
| If continuous bottleneck plateaus at 45-50% (like discrete), the |
| "compositionality without invariance" claim survives discretization. |
| If it recovers like a linear probe (60-84%), the claim must narrow to |
| discrete codes specifically. |
| """ |
import json, time, sys, os, math
from pathlib import Path
from datetime import datetime, timezone
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

sys.path.insert(0, os.path.dirname(__file__))
from _kinematics_train import (
    DEVICE, ClassifierReceiver,
    HIDDEN_DIM, N_AGENTS, BATCH_SIZE, SENDER_LR, RECEIVER_LR,
    EARLY_STOP_PATIENCE,
)
from _killer_experiment import TemporalEncoder, ContinuousSender, ContinuousMultiSender
from _overnight_p1_transfer import (
    train_base as train_discrete_base,
    train_receiver_frozen_sender as train_disc_recv,
    eval_zero_shot as eval_disc_zero_shot,
    make_splits, N_FRAMES_SUBSAMPLE,
)
from _overnight_p3_matrix import load_labels, load_feat_subsampled
from _rev_f_cnn_control import ci95

OUT = Path("results/reviewer_response/exp_m")
OUT.mkdir(parents=True, exist_ok=True)

N_EPOCHS = 150
N_SEEDS = 5
N_LIST = [0, 1, 4, 16, 64, 128, 192]
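

# Minimal sketch (hypothetical helper, not used below) of the two bottleneck
# types the docstring contrasts. The real modules come from _killer_experiment;
# this only illustrates the shape/range difference between the code types.
def _sketch_codes(h, code_dim=10, vocab=5):
    """h: (B, hidden) encoder output -> (continuous, one_hot) toy codes."""
    lin_c = nn.Linear(h.shape[1], code_dim).to(h.device)
    lin_d = nn.Linear(h.shape[1], vocab).to(h.device)
    continuous = torch.tanh(lin_c(h))                           # (B, code_dim), values in (-1, 1)
    one_hot = F.gumbel_softmax(lin_d(h), tau=1.0, hard=True)    # (B, vocab), one-hot sample
    return continuous, one_hot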


def log(msg):
    ts = datetime.now(timezone.utc).strftime("%H:%M:%SZ")
    print(f"[{ts}] EXP-M: {msg}", flush=True)


def build_continuous_sender(feat_dim, code_dim_per_agent=10, fpa=1):
    senders = [
        ContinuousSender(
            TemporalEncoder(HIDDEN_DIM, feat_dim, fpa),
            HIDDEN_DIM, code_dim_per_agent)
        for _ in range(N_AGENTS)
    ]
    return ContinuousMultiSender(senders).to(DEVICE)
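

# Assumed interface (mirroring the discrete multi-sender): calling the module
# returned above with a list of N_AGENTS views, each (B, fpa, feat_dim), is
# expected to yield (msg, aux) with msg of shape (B, N_AGENTS * code_dim_per_agent).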


def train_continuous_base(feat, labels, seed, code_dim_per_agent=10,
                          n_epochs=N_EPOCHS):
    """Train continuous sender + 3 receivers (iterated learning) on (feat, labels)."""
    N, nf, dim = feat.shape
    fpa = 1
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]  # agent i sees frame i
    torch.manual_seed(seed); np.random.seed(seed)
    rng = np.random.RandomState(seed * 1000 + 42)
    train_ids, holdout_ids = [], []
    for c in np.unique(labels):  # stratified ~20% holdout per class
        ids_c = np.where(labels == c)[0]
        rng.shuffle(ids_c)
        split = max(1, len(ids_c) // 5)
        holdout_ids.extend(ids_c[:split]); train_ids.extend(ids_c[split:])
    train_ids = np.array(train_ids); holdout_ids = np.array(holdout_ids)
    n_classes = int(labels.max()) + 1
    chance = 1.0 / n_classes

    msg_dim = code_dim_per_agent * N_AGENTS
    sender = build_continuous_sender(dim, code_dim_per_agent, fpa)
    receivers = [ClassifierReceiver(msg_dim, HIDDEN_DIM, n_classes).to(DEVICE)
                 for _ in range(3)]
    so = torch.optim.Adam(sender.parameters(), lr=SENDER_LR)
    ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]
    labels_dev = torch.tensor(labels, dtype=torch.long).to(DEVICE)
    n_batches = max(1, len(train_ids) // BATCH_SIZE)
    best_acc = 0.0; best_ep = 0
    best_sender_state = None; best_receiver_states = None
    best_recv_idx = 0

    for ep in range(n_epochs):
        # early stop only once accuracy has cleared chance by 5 points
        if ep - best_ep > EARLY_STOP_PATIENCE and best_acc > chance + 0.05: break
        # iterated learning: reset the receiver population every 40 epochs
        if ep > 0 and ep % 40 == 0:
            for i in range(len(receivers)):
                receivers[i] = ClassifierReceiver(msg_dim, HIDDEN_DIM, n_classes).to(DEVICE)
                ros[i] = torch.optim.Adam(receivers[i].parameters(), lr=RECEIVER_LR)
        sender.train(); [r.train() for r in receivers]
        rng_ep = np.random.RandomState(seed * 10000 + ep)
        perm = rng_ep.permutation(train_ids)
        for b in range(n_batches):
            batch_ids = perm[b*BATCH_SIZE:(b+1)*BATCH_SIZE]
            if len(batch_ids) < 4: continue
            views = [v[batch_ids].to(DEVICE) for v in agent_views]
            tgt = labels_dev[batch_ids]
            msg, _ = sender(views)
            loss = torch.tensor(0.0, device=DEVICE)
            for r in receivers: loss = loss + F.cross_entropy(r(msg), tgt)
            loss = loss / len(receivers)
            if torch.isnan(loss):
                so.zero_grad(); [o.zero_grad() for o in ros]; continue
            so.zero_grad(); [o.zero_grad() for o in ros]
            loss.backward()
            torch.nn.utils.clip_grad_norm_(sender.parameters(), 1.0)
            so.step(); [o.step() for o in ros]
        if ep % 50 == 0 and DEVICE.type == "mps": torch.mps.empty_cache()
        if (ep + 1) % 10 == 0 or ep == 0:
            sender.eval(); [r.eval() for r in receivers]
            with torch.no_grad():
                v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
                msg_ho, _ = sender(v_ho)
                tgt_ho = labels_dev[holdout_ids]
                best_per_recv = 0.0; best_idx = 0
                for ri, r in enumerate(receivers):
                    preds = r(msg_ho).argmax(-1)
                    acc = (preds == tgt_ho).float().mean().item()
                    if acc > best_per_recv:
                        best_per_recv = acc; best_idx = ri
                if best_per_recv > best_acc:
                    best_acc = best_per_recv; best_ep = ep
                    best_sender_state = {k: v.cpu().clone() for k, v in sender.state_dict().items()}
                    best_receiver_states = [
                        {k: v.cpu().clone() for k, v in r.state_dict().items()}
                        for r in receivers]
                    best_recv_idx = best_idx
    return {
        "sender_state": best_sender_state,
        "receiver_states": best_receiver_states,
        "best_recv_idx": best_recv_idx,
        "train_ids": train_ids, "holdout_ids": holdout_ids,
        "task_acc": best_acc, "chance": chance,
        "n_classes": n_classes, "fpa": 1, "dim": dim,
        "code_dim_per_agent": code_dim_per_agent,
        "msg_dim": msg_dim,
    }
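

# Example call under assumed shapes: feat from load_feat_subsampled is
# (N, N_FRAMES_SUBSAMPLE, feat_dim); with fpa=1 each agent reads one frame.
#   base = train_continuous_base(feat_c, lbl_c, seed=0, code_dim_per_agent=10)
#   base["task_acc"]   # best holdout accuracy across the receiver population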


def get_continuous_messages(base, feat):
    """Apply the trained continuous sender to features. Returns msg (N, msg_dim)."""
    N, nf, dim = feat.shape
    code_dim = base["code_dim_per_agent"]
    sender = build_continuous_sender(dim, code_dim, base["fpa"])
    sender.load_state_dict(base["sender_state"])
    sender.eval().to(DEVICE)
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
    with torch.no_grad():
        views = [v.to(DEVICE) for v in agent_views]
        msg, _ = sender(views)
    return msg.cpu().float()


def eval_zero_shot_cont(base, feat_tgt, labels_tgt, ho_ids):
    """Zero-shot apply trained sender + best receiver to target."""
    sender = build_continuous_sender(feat_tgt.shape[2], base["code_dim_per_agent"], base["fpa"])
    sender.load_state_dict(base["sender_state"]); sender.eval().to(DEVICE)
    receivers = [ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, base["n_classes"]).to(DEVICE)
                 for _ in range(len(base["receiver_states"]))]
    for r, s in zip(receivers, base["receiver_states"]): r.load_state_dict(s)
    [r.eval() for r in receivers]
    agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)]
    labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE)
    with torch.no_grad():
        v_ho = [v[ho_ids].to(DEVICE) for v in agent_views]
        msg_ho, _ = sender(v_ho)
        tgt_ho = labels_dev[ho_ids]
        best = 0.0
        for r in receivers:
            preds = r(msg_ho).argmax(-1)
            acc = (preds == tgt_ho).float().mean().item()
            best = max(best, acc)
    return best


def train_recv_frozen_cont(base, feat_tgt, labels_tgt, train_ids, holdout_ids,
                           seed, n_target, n_epochs=80):
    """Train new receivers on n_target target examples using the frozen continuous sender."""
    if n_target == 0:
        return eval_zero_shot_cont(base, feat_tgt, labels_tgt, holdout_ids)
    rng = np.random.RandomState(seed * 311 + 7 + n_target)
    n_t_classes = int(np.max(labels_tgt)) + 1
    per_class = max(1, n_target // n_t_classes)
    picks = []
    for c in range(n_t_classes):  # class-balanced sampling of the N-shot pool
        ids_c = np.array([i for i in train_ids if labels_tgt[i] == c])
        if len(ids_c) == 0: continue
        rng.shuffle(ids_c)
        picks.extend(ids_c[:per_class])
    picks = np.array(picks)
    if len(picks) > n_target: picks = picks[:n_target]
    elif len(picks) < n_target and len(train_ids) > len(picks):
        picks_set = set(picks)
        extras = np.array([i for i in train_ids if i not in picks_set])
        rng.shuffle(extras)
        picks = np.concatenate([picks, extras[:n_target - len(picks)]])
    if len(picks) < 2: return float("nan")

    sender = build_continuous_sender(feat_tgt.shape[2], base["code_dim_per_agent"], base["fpa"])
    sender.load_state_dict(base["sender_state"]); sender.to(DEVICE).eval()
    for p in sender.parameters(): p.requires_grad = False
    # fresh receivers sized to the TARGET label set (identical to the source's
    # 3 classes here, but n_t_classes is the semantically correct choice)
    receivers = [ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, n_t_classes).to(DEVICE)
                 for _ in range(3)]
    ros = [torch.optim.Adam(r.parameters(), lr=RECEIVER_LR) for r in receivers]
    agent_views = [feat_tgt[:, i:i+1, :] for i in range(N_AGENTS)]
    labels_dev = torch.tensor(labels_tgt, dtype=torch.long).to(DEVICE)
    bs = min(BATCH_SIZE, len(picks))
    best = 0.0
    for ep in range(n_epochs):
        [r.train() for r in receivers]
        rng_ep = np.random.RandomState(seed * 10000 + ep)
        perm = rng_ep.permutation(picks)
        for b in range(max(1, len(picks) // bs)):
            batch = perm[b*bs:(b+1)*bs]
            if len(batch) < 2: continue
            views = [v[batch].to(DEVICE) for v in agent_views]
            with torch.no_grad():  # sender stays frozen; only receivers learn
                msg, _ = sender(views)
            for r, o in zip(receivers, ros):
                logits = r(msg)
                loss = F.cross_entropy(logits, labels_dev[batch])
                if torch.isnan(loss): continue
                o.zero_grad(); loss.backward(); o.step()
        if (ep + 1) % 5 == 0 or ep == 0:
            [r.eval() for r in receivers]
            with torch.no_grad():
                v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
                msg_ho, _ = sender(v_ho)
                tgt_ho = labels_dev[holdout_ids]
                for r in receivers:
                    preds = r(msg_ho).argmax(-1)
                    acc = (preds == tgt_ho).float().mean().item()
                    if acc > best: best = acc
    return best
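

# N-shot protocol in one line (mirrors the discrete Exp I curve described in
# the module docstring): freeze the source-trained sender, fit fresh receivers
# on ~n_target/class target examples, report the best holdout accuracy seen.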


def topsim_continuous(messages, labels, n_pairs=5000):
    """Spearman corr between L2 message-distances and L1 label-distances."""
    from scipy.stats import spearmanr
    rng = np.random.RandomState(42)
    N = len(labels)
    msg_np = messages.numpy() if isinstance(messages, torch.Tensor) else messages
    n_pairs = min(n_pairs, N * (N - 1) // 2)
    msg_d = []; lbl_d = []
    seen = set()
    for _ in range(n_pairs):  # rejection sampling, so we may collect < n_pairs pairs
        i, j = rng.randint(0, N), rng.randint(0, N)
        if i == j or (i, j) in seen or (j, i) in seen: continue
        seen.add((i, j))
        msg_d.append(np.linalg.norm(msg_np[i] - msg_np[j]))
        lbl_d.append(abs(int(labels[i]) - int(labels[j])))
    if len(msg_d) < 10: return float("nan")
    if np.std(msg_d) < 1e-9 or np.std(lbl_d) < 1e-9:
        return float("nan")
    rho, _ = spearmanr(msg_d, lbl_d)
    return float(rho) if not np.isnan(rho) else 0.0
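

# Quick self-check sketch for topsim_continuous (hypothetical numbers): codes
# laid out on a line in label order should score near +1, e.g.
#   msgs = np.repeat(np.arange(3.0), 50)[:, None] + 0.01 * np.random.randn(150, 1)
#   labels = np.repeat(np.arange(3), 50)
#   topsim_continuous(msgs, labels)   # expected rho close to +1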


def posdis_continuous_per_dim(messages, labels, n_bins=10):
    """For each code dim, bin its values into n_bins and compute MI with labels.
    Returns array (D,) of MI values (nats)."""
    msg_np = messages.numpy() if isinstance(messages, torch.Tensor) else messages
    D = msg_np.shape[1]
    mi_per_dim = np.zeros(D)
    n = len(labels)
    for d in range(D):
        col = msg_np[:, d]
        if col.std() < 1e-9:
            mi_per_dim[d] = 0.0; continue
        # interior quantiles as bin edges -> roughly equal-mass bins 0..n_bins-1
        edges = np.quantile(col, np.linspace(0, 1, n_bins + 1)[1:-1])
        binned = np.digitize(col, edges)
        # joint histogram over (binned code value, label)
        joint = {}
        for x, y in zip(binned, labels):
            joint[(int(x), int(y))] = joint.get((int(x), int(y)), 0) + 1
        H = lambda probs: -np.sum([p * np.log(p) for p in probs if p > 0])
        p_x = np.bincount(binned, minlength=n_bins) / n
        p_y = np.bincount(labels, minlength=int(np.max(labels)) + 1) / n
        H_x = H(p_x); H_y = H(p_y)
        H_xy = 0.0
        for (x, y), c in joint.items():
            p = c / n
            H_xy += -p * np.log(p)
        mi = H_x + H_y - H_xy  # plug-in MI(X;Y) = H(X) + H(Y) - H(X,Y)
        mi_per_dim[d] = max(mi, 0.0)
    return mi_per_dim
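

# Sanity bounds for the plug-in estimator above, under assumed inputs: a code
# dim that is a deterministic function of the label (and binned apart) has
# MI = H(label); an independent dim has MI ~ 0, up to finite-sample bias,
# which the max(mi, 0.0) clips on the low side.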


def posdis_continuous(messages, labels, n_bins=10):
    """Continuous analogue of positional disentanglement. Standard PosDis
    contrasts MI with the top vs. second attribute per position; with a single
    attribute here, we instead report how concentrated the label's MI is in a
    single code dim: max_d MI(dim d; label) / sum_d MI(dim d; label).
    1.0 = one dim carries everything; 1/D = MI spread evenly."""
    mi = posdis_continuous_per_dim(messages, labels, n_bins=n_bins)
    if mi.sum() < 1e-9: return float("nan")
    top = mi.max()
    return float(top / (mi.sum() + 1e-9))


def causal_specificity(base, feat, labels, holdout_ids):
    """Mask each code dim in turn (replace with its holdout mean) and measure
    the best receiver's accuracy drop. Returns (baseline_acc, drops array (D,))."""
    sender = build_continuous_sender(feat.shape[2], base["code_dim_per_agent"], base["fpa"])
    sender.load_state_dict(base["sender_state"]); sender.eval().to(DEVICE)
    receivers = [ClassifierReceiver(base["msg_dim"], HIDDEN_DIM, base["n_classes"]).to(DEVICE)
                 for _ in range(len(base["receiver_states"]))]
    for r, s in zip(receivers, base["receiver_states"]): r.load_state_dict(s)
    [r.eval() for r in receivers]
    agent_views = [feat[:, i:i+1, :] for i in range(N_AGENTS)]
    labels_dev = torch.tensor(labels, dtype=torch.long).to(DEVICE)
    with torch.no_grad():
        v_ho = [v[holdout_ids].to(DEVICE) for v in agent_views]
        msg_ho, _ = sender(v_ho)
        tgt_ho = labels_dev[holdout_ids]
        best_recv = receivers[base.get("best_recv_idx", 0)]
        baseline = (best_recv(msg_ho).argmax(-1) == tgt_ho).float().mean().item()
        D = msg_ho.shape[1]
        drops = np.zeros(D)
        mean_vals = msg_ho.mean(dim=0)
        for d in range(D):
            masked = msg_ho.clone()
            masked[:, d] = mean_vals[d]
            acc_masked = (best_recv(masked).argmax(-1) == tgt_ho).float().mean().item()
            drops[d] = baseline - acc_masked
    return baseline, drops
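

# Design note on the masking choice above: each dim is replaced by its holdout
# mean rather than zero, since for tanh-bounded codes zero can itself be an
# informative value; mean-ablation is presumably the gentler intervention.
# drops.max() is what the summary reports as "CausalSpec (max-drop)".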


def main():
    t0 = time.time()
    log("=" * 60)
    log("EXP M: Continuous compositional baseline")

    feat_c = load_feat_subsampled("collision", "vjepa2")
    feat_r = load_feat_subsampled("ramp", "vjepa2")
    feat_f = load_feat_subsampled("flat_drop", "vjepa2")
    lbl_c = load_labels("collision", "restitution")
    lbl_r = load_labels("ramp", "restitution")
    lbl_f = load_labels("flat_drop", "restitution")
    log(f"  collision: {tuple(feat_c.shape)} dist={np.bincount(lbl_c).tolist()}")
    log(f"  ramp:      {tuple(feat_r.shape)} dist={np.bincount(lbl_r).tolist()}")
    log(f"  flat_drop: {tuple(feat_f.shape)} dist={np.bincount(lbl_f).tolist()}")

    variants = {
        "continuous_dim10": 10,
        "continuous_dim3": 3,
    }

    all_results = {}

    # --- within-scenario training + compositionality metrics ---
    for variant_name, code_dim in variants.items():
        log(f"\n --- Training {variant_name} (code_dim_per_agent={code_dim}) ---")
        bases = []
        within_accs = []
        for seed in range(N_SEEDS):
            t_s = time.time()
            try:
                base = train_continuous_base(feat_c, lbl_c, seed,
                                             code_dim_per_agent=code_dim,
                                             n_epochs=N_EPOCHS)
                bases.append(base); within_accs.append(float(base["task_acc"]))
                log(f"   seed {seed}: within={base['task_acc']:.3f} [{time.time()-t_s:.0f}s]")
            except Exception as e:
                log(f"   seed {seed} FAILED: {e}")
                bases.append(None); within_accs.append(float("nan"))
        all_results[variant_name] = {
            "code_dim": code_dim,
            "bases": bases, "within": within_accs,
        }

        # metrics are computed on the best seed's holdout messages
        valid = [(i, a) for i, a in enumerate(within_accs) if not np.isnan(a)]
        if not valid:
            log(f"   {variant_name}: no successful base"); continue
        best_idx = max(valid, key=lambda x: x[1])[0]
        best_base = bases[best_idx]
        with torch.no_grad():
            msgs_full = get_continuous_messages(best_base, feat_c)
        ho_ids = best_base["holdout_ids"]
        msgs_ho = msgs_full[ho_ids]
        lbl_ho = lbl_c[ho_ids]
        try:
            ts = topsim_continuous(msgs_ho, lbl_ho)
        except Exception as e:
            log(f"   TopSim error: {e}"); ts = float("nan")
        try:
            pd_ = posdis_continuous(msgs_ho, lbl_ho)
        except Exception as e:
            log(f"   PosDis error: {e}"); pd_ = float("nan")
        try:
            base_acc, drops = causal_specificity(best_base, feat_c, lbl_c, ho_ids)
            cs = float(drops.max())
        except Exception as e:
            log(f"   causal-spec error: {e}"); cs = float("nan"); base_acc = float("nan")
        log(f"   {variant_name} within metrics (best seed): "
            f"acc={base_acc:.3f} TopSim={ts:.3f} PosDis={pd_:.3f} "
            f"CausalSpec(max-drop)={cs:.3f}")
        all_results[variant_name].update({
            "topsim": ts, "posdis": pd_, "causal_spec_max": cs,
            "within_for_metrics": base_acc,
        })

    # --- N-shot cross-scenario transfer curves ---
    log(f"\n --- N-shot cross-scenario (N_list={N_LIST}, {N_SEEDS} seeds each) ---")
    for variant_name in variants:
        bases = all_results[variant_name]["bases"]
        all_results[variant_name]["cross"] = {}
        for src, tgt, feat_tgt, lbl_tgt in [
            ("collision", "ramp", feat_r, lbl_r),
            ("collision", "flat_drop", feat_f, lbl_f),
        ]:
            log(f"   {variant_name}: {src} -> {tgt}")
            curve = {n: [] for n in N_LIST}
            for seed, base in enumerate(bases):
                if base is None:
                    for n in N_LIST: curve[n].append(float("nan"))
                    continue
                tr_t, ho_t = make_splits(lbl_tgt, seed)
                for n in N_LIST:
                    try:
                        acc = train_recv_frozen_cont(
                            base, feat_tgt, lbl_tgt, tr_t, ho_t, seed, n)
                    except Exception as e:
                        log(f"    {variant_name} {src}->{tgt} s{seed} N={n} failed: {e}")
                        acc = float("nan")
                    curve[n].append(acc)
            all_results[variant_name]["cross"][f"{src}->{tgt}"] = curve
            for n in N_LIST:
                accs = curve[n]
                v = [x for x in accs if not (isinstance(x, float) and np.isnan(x))]
                if v:
                    log(f"    {src}->{tgt} N={n}: {np.mean(v)*100:.1f}% +/- "
                        f"{(np.std(v, ddof=1) if len(v) > 1 else 0.0)*100:.1f}")

    # --- plain-text summary ---
    def m(vals):
        v = [x for x in vals if not (isinstance(x, float) and np.isnan(x))]
        if not v: return (float("nan"), float("nan"), (float("nan"), float("nan")))
        mean = float(np.mean(v))
        std = float(np.std(v, ddof=1)) if len(v) > 1 else 0.0
        return (mean, std, ci95(v))

    lines = [
        "EXPERIMENT M -- CONTINUOUS COMPOSITIONAL BASELINE (V-JEPA 2, 5 seeds)",
        "",
        "Architecture: same TemporalEncoder + multi-agent (4) structure as the",
        "discrete bottleneck. Each agent's sender outputs a tanh-bounded real",
        "vector of code_dim_per_agent dims (instead of one-hot Gumbel-Softmax).",
        "Receiver: same ClassifierReceiver MLP as the discrete protocol.",
        "Iterated learning: 3-receiver population reset every 40 epochs.",
        "",
        "WITHIN-SCENARIO METRICS (collision, restitution 3-class):",
        f"{'Architecture':<26s} | {'Acc':<8s} | {'TopSim':<8s} | {'PosDis':<10s} | "
        f"{'CausalSpec':<12s}",
        "-" * 80,
    ]
    discrete_line = (f"{'Discrete (battery)':<26s} | {'94.2%':<8s} | "
                     f"{'+0.84':<8s} | {'0.76':<10s} | {'0.99':<12s}")
    lines.append(discrete_line)
    for variant_name in variants:
        r = all_results[variant_name]
        wm, ws, _ = m(r["within"])
        ts = r.get("topsim", float("nan"))
        pd_ = r.get("posdis", float("nan"))
        cs = r.get("causal_spec_max", float("nan"))
        within_str = f"{wm*100:.1f}%+/-{ws*100:.1f}" if not np.isnan(wm) else "N/A"
        ts_str = f"{ts:+.2f}" if not np.isnan(ts) else "N/A"
        pd_str = f"{pd_:.2f}" if not np.isnan(pd_) else "N/A"
        cs_str = f"{cs:.2f}" if not np.isnan(cs) else "N/A"
        lines.append(f"{variant_name:<26s} | {within_str:<8s} | "
                     f"{ts_str:<8s} | {pd_str:<10s} | {cs_str:<12s}")
    lines.append(f"{'Linear probe (Exp B)':<26s} | {'97.5%':<8s} | "
                 f"{'N/A':<8s} | {'N/A':<10s} | {'N/A':<12s}")

    lines.append("")
    lines.append("N-SHOT CROSS-SCENARIO CURVE (collision -> ramp + collision -> flat_drop):")
    lines.append("  reference: linear probe coll->ramp at N=192: 83.7%")
    lines.append("  reference: linear probe coll->flat at N=192: 62.0%")
    lines.append("  reference: discrete bottleneck coll->ramp 16-shot: 43.7%")
    lines.append("")
    for direction in ["collision->ramp", "collision->flat_drop"]:
        lines.append(f"--- {direction} ---")
        header = (f"{'N':<6s} | "
                  f"{'continuous_dim10':<22s} | "
                  f"{'continuous_dim3':<22s}")
        lines.append(header); lines.append("-" * len(header))
        for n in N_LIST:
            row_cells = []
            for variant_name in variants:
                accs = all_results[variant_name]["cross"][direction][n]
                mn, sd, _ = m(accs)
                if np.isnan(mn): row_cells.append("N/A")
                else: row_cells.append(f"{mn*100:5.1f}% +/- {sd*100:.1f}")
            lines.append(f"{n:<6d} | {row_cells[0]:<22s} | {row_cells[1]:<22s}")
        lines.append("")

    lines.append("VERDICT:")
    cont10_192 = []; cont3_192 = []
    for d in ["collision->ramp", "collision->flat_drop"]:
        v10 = all_results["continuous_dim10"]["cross"][d][192]
        v3 = all_results["continuous_dim3"]["cross"][d][192]
        v10v = [x for x in v10 if not np.isnan(x)]
        v3v = [x for x in v3 if not np.isnan(x)]
        if v10v: cont10_192.append(float(np.mean(v10v)))
        if v3v: cont3_192.append(float(np.mean(v3v)))
    cont10_avg = float(np.mean(cont10_192)) if cont10_192 else float("nan")
    cont3_avg = float(np.mean(cont3_192)) if cont3_192 else float("nan")
    lines.append(f"  Continuous-dim10 mean cross at N=192: {cont10_avg*100:.1f}%")
    lines.append(f"  Continuous-dim3  mean cross at N=192: {cont3_avg*100:.1f}%")
    lines.append("  Linear probe     mean cross at N=192: ~73% (avg of 84% ramp, 62% flat)")
    lines.append("  Discrete bottleneck plateau:          ~46%")

    # take the best non-NaN variant; plain max(a, b) would return NaN whenever
    # its first argument is NaN, silently skipping the verdict below
    cont_avgs = [a for a in (cont10_avg, cont3_avg) if not np.isnan(a)]
    best_cont = max(cont_avgs) if cont_avgs else float("nan")
    if not np.isnan(best_cont):
        if best_cont < 0.55:
            v = (f"Continuous bottleneck plateaus at {best_cont*100:.1f}%, similar to "
                 "discrete (~46%). The compositionality-without-invariance dissociation "
                 "is NOT specific to discretization - it holds for continuous factorized "
                 "codes too. STRONG result for the paper.")
        elif best_cont < 0.70:
            v = (f"Continuous bottleneck reaches {best_cont*100:.1f}% at N=192 - "
                 "intermediate between discrete (46%) and linear probe (73%). Continuous "
                 "codes recover SOME cross-scenario signal beyond discrete, but stay "
                 "below an unconstrained probe. Nuanced finding.")
        else:
            v = (f"Continuous bottleneck recovers to {best_cont*100:.1f}% at N=192, "
                 "comparable to linear probes. The 'compositionality without invariance' "
                 "claim must be NARROWED to discrete codes specifically - continuous "
                 "factorized representations may transfer cleanly with target labels.")
        lines.append(f"  {v}")

    lines.append("")
    lines.append(f"Total runtime: {(time.time()-t0)/60:.1f} min")

    json_out = {}
    for variant_name, r in all_results.items():
        json_out[variant_name] = {
            "code_dim": r["code_dim"],
            "within": r["within"],
            "topsim": r.get("topsim", None),
            "posdis": r.get("posdis", None),
            "causal_spec_max": r.get("causal_spec_max", None),
            "cross": r.get("cross", {}),
        }

    summary = "\n".join(lines)
    (OUT / "exp_m_summary.txt").write_text(summary + "\n")
    (OUT / "exp_m_summary.json").write_text(json.dumps({
        "config": {"n_seeds": N_SEEDS, "N_list": N_LIST,
                   "variants": list(variants.keys())},
        "results": json_out,
        "runtime_s": time.time() - t0,
    }, indent=2, default=str))
    print("\n" + summary, flush=True)
    log(f"DONE in {(time.time()-t0)/60:.1f} min")


if __name__ == "__main__":
    main()