| """ |
| EXP REV-P101-GLOBAL: Diagnose the Phys101 LP/bottleneck dissociation collapse. |
| |
| The paper currently bins Phys101 mass per-scenario tertile (source and target use |
| DIFFERENT class boundaries). This is the standard Phys101 protocol but creates a |
| class-boundary shift on top of the feature-distribution shift, which may explain |
| why the Kubric LP/bottleneck dissociation does not replicate. |
| |
| This script re-runs Phys101 cross-scenario with GLOBAL mass tertile binning |
| (unified bin edges across all 3 subsets) and compares to per-scenario binning. |
| If LP recovers above the bottleneck under global binning, per-scenario binning |
| was the confound. If LP and bottleneck still both sit at ~45%, something else |
| (real-video feature variance, smaller pool size) is the cause. |
| """ |
import json
import time
from pathlib import Path
from datetime import datetime, timezone

import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
|
PROMPT_RECEIVED_TIME = datetime.now(timezone.utc).isoformat()
print(f"PROMPT_RECEIVED_TIME = {PROMPT_RECEIVED_TIME}", flush=True)
T0 = time.time()
|
OUT = Path("results/reviewer_response/exp_phys101_global_bins")
OUT.mkdir(parents=True, exist_ok=True)
N_SEEDS = 5
N_LIST = [16, 64, 192]
DOMAINS = ("spring", "fall", "ramp")
PHYS_FILES = {s: f"results/phase87_phys101_{s}_features.pt" for s in DOMAINS}
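# Each feature file is expected to hold a dict with at least "features"
# (per-clip embeddings, presumably [n_clips, T, D] given the temporal
# mean-pool in load_data) and "mass_values" (one scalar mass per clip).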
|
|
|
def log(msg):
    ts = datetime.now(timezone.utc).strftime("%H:%M:%SZ")
    print(f"[{ts}] EXP-P101G: {msg}", flush=True)
|
|
|
def load_data():
    """Load features and mass values for all three scenarios."""
    out = {}
    for s in DOMAINS:
        d = torch.load(PHYS_FILES[s], weights_only=False, map_location="cpu")
        feat = d["features"].float()
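        # Mean-pool over the temporal dimension (dim=1): one feature vector
        # per clip for the linear probe.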
        pooled = feat.mean(dim=1).numpy()
        mass = np.asarray(d["mass_values"], dtype=np.float64)
        out[s] = (pooled, mass)
        log(f" {s}: pooled feat={pooled.shape}, mass=[{mass.min():.1f}, {mass.max():.1f}], n={len(mass)}")
    return out
|
|
|
def bin_global(mass_dict):
    """Global tertile binning across all three subsets pooled."""
    all_mass = np.concatenate([m for _, m in mass_dict.values()])
    edges = np.quantile(all_mass, [1/3, 2/3])
    log(f" GLOBAL tertile edges (computed on union of {len(all_mass)} clips): {edges.tolist()}")
    return {s: (np.searchsorted(edges, m).astype(np.int64), edges)
            for s, (_, m) in mass_dict.items()}
|
|
|
def bin_per_scenario(mass_dict):
    """Per-scenario tertile binning (the standard Phys101 protocol)."""
    out = {}
    for s, (_, m) in mass_dict.items():
        edges = np.quantile(m, [1/3, 2/3])
        out[s] = (np.searchsorted(edges, m).astype(np.int64), edges)
        log(f" PER-SCEN {s} edges: {edges.tolist()}, bins={np.bincount(out[s][0], minlength=3).tolist()}")
    return out
|
|
|
def stratified_subset(rng, y, n_per_class):
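    """Draw up to n_per_class indices per class, without replacement;
    classes smaller than n_per_class contribute all of their samples."""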
    idxs = []
    for c in np.unique(y):
        cand = np.where(y == c)[0]
        if len(cand) == 0:
            continue
        chosen = rng.choice(cand, size=min(n_per_class, len(cand)), replace=False)
        idxs.extend(chosen.tolist())
    return np.array(sorted(idxs))
|
|
|
def train_lp(X_tr, y_tr, X_te, y_te):
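    """Fit a standardized logistic-regression probe and return test accuracy.
    The scaler is fit on the training split only, so no test statistics leak
    into preprocessing."""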
    sc = StandardScaler().fit(X_tr)
    Xs_tr = sc.transform(X_tr)
    Xs_te = sc.transform(X_te)
    model = LogisticRegression(max_iter=2000, C=1.0, solver="lbfgs")
    model.fit(Xs_tr, y_tr)
    return float((model.predict(Xs_te) == y_te).mean())
|
|
|
def stats(vals):
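    """Mean and sample std (ddof=1) over non-NaN entries; returns (nan, nan)
    when every entry is NaN, and std 0.0 for a single entry."""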
    v = np.array([x for x in vals if not np.isnan(x)])
    if len(v) == 0:
        return float("nan"), float("nan")
    return float(v.mean()), float(v.std(ddof=1) if len(v) > 1 else 0.0)
|
|
|
def evaluate_lp(features, labels_dict, src, tgt, n_classes=3):
    """Cross-scenario LP: train a single probe on the source set plus N
    labeled target samples (stratified), evaluate on the held-out target
    samples. N=0 is the source-only baseline."""
    X_src = features[src]
    y_src = labels_dict[src][0]
    X_tgt = features[tgt]
    y_tgt = labels_dict[tgt][0]

    smallest = min(int(np.sum(y_tgt == c)) for c in np.unique(y_tgt))
    results = {N: [] for N in N_LIST}
    n0_results = []

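    # N=0 baseline: probe trained on source only. train_lp is deterministic
    # here (no subsampling), so the per-seed repeats coincide; they are kept
    # so the bookkeeping matches the N>0 sweep.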
    for s in range(N_SEEDS):
        try:
            acc = train_lp(X_src, y_src, X_tgt, y_tgt)
            n0_results.append(acc)
        except Exception as e:
            log(f" {src}->{tgt} N=0 s{s}: FAILED {e}")
            n0_results.append(float("nan"))

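    # N>0 sweep: add a stratified sample of ~N labeled target clips to the
    # source training set and evaluate on the remaining target clips.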
    for N in N_LIST:
        per_class = max(1, N // n_classes)
        # Cap at 70% of the smallest target class so roughly 30% of every
        # class remains for evaluation.
        per_class = min(per_class, int(0.7 * smallest))
        for s in range(N_SEEDS):
            rng = np.random.default_rng(1234 + s)
            tgt_idx_train = stratified_subset(rng, y_tgt, per_class)
            mask = np.ones(len(y_tgt), dtype=bool)
            mask[tgt_idx_train] = False
            X_eval = X_tgt[mask]
            y_eval = y_tgt[mask]
            if len(y_eval) == 0:
                continue
            X_tr = np.concatenate([X_src, X_tgt[tgt_idx_train]], axis=0)
            y_tr = np.concatenate([y_src, y_tgt[tgt_idx_train]], axis=0)
            try:
                acc = train_lp(X_tr, y_tr, X_eval, y_eval)
                results[N].append(acc)
            except Exception as e:
                log(f" {src}->{tgt} N={N} s{s}: FAILED {e}")
                results[N].append(float("nan"))
    return n0_results, results
|
|
|
def main():
    log("=" * 60)
    log("Phys101 cross-scenario diagnostic: global vs per-scenario binning")

    raw = load_data()
    features = {s: raw[s][0] for s in DOMAINS}

    log("\n=== PER-SCENARIO TERTILE BINNING (paper default) ===")
    perscen_labels = bin_per_scenario(raw)

    log("\n=== GLOBAL TERTILE BINNING (diagnostic) ===")
    global_labels = bin_global(raw)

    # All 6 ordered (source, target) pairs over the 3 scenarios.
    pairs = [(src, tgt) for src in DOMAINS for tgt in DOMAINS if src != tgt]

    out = {"per_scenario": {}, "global": {}}
    for binning_name, label_dict in [("per_scenario", perscen_labels),
                                     ("global", global_labels)]:
        log(f"\n--- {binning_name} binning ---")
        for src, tgt in pairs:
            log(f" LP {src}->{tgt}:")
            n0, results = evaluate_lp(features, label_dict, src, tgt)
            n0_m, n0_s = stats(n0)
            log(f" N=0 (src-only): {n0_m*100:5.1f}% +/- {n0_s*100:.1f}%")
            for N in N_LIST:
                m, sd = stats(results[N])
                log(f" N={N:>3d}: {m*100:5.1f}% +/- {sd*100:.1f}%")
            out[binning_name][f"{src}->{tgt}"] = {
                "N0": [float(x) for x in n0],
                "curve": {N: [float(x) for x in results[N]] for N in N_LIST},
            }
|
    def mean_across_pairs(binning):
        rows = out[binning]
        n0_all = [x for r in rows.values() for x in r["N0"] if not np.isnan(x)]
        per_N = {N: [x for r in rows.values() for x in r["curve"][N] if not np.isnan(x)]
                 for N in N_LIST}
        return {"N0": stats(n0_all), **{f"N{N}": stats(per_N[N]) for N in N_LIST}}
|
| SUMMARY = ["Phys101 cross-scenario LP, per-scenario vs global tertile mass binning", |
| "(5 seeds, mean across 6 directional pairs, +/- = std across all seeds*pairs)", |
| ""] |
| for binning_name in ["per_scenario", "global"]: |
| agg = mean_across_pairs(binning_name) |
| SUMMARY.append(f"--- {binning_name} ---") |
| for k in ["N0", "N16", "N64", "N192"]: |
| m, sd = agg[k] |
| SUMMARY.append(f" {k:>5s}: {m*100:5.1f}% +/- {sd*100:.1f}%") |
| SUMMARY.append("") |
|
|
| print("\n" + "\n".join(SUMMARY), flush=True) |
| with open(OUT / "exp_phys101_global_bins_summary.txt", "w") as fh: |
| fh.write("\n".join(SUMMARY) + "\n") |
| with open(OUT / "exp_phys101_global_bins_summary.json", "w") as fh: |
| json.dump(out, fh, indent=2) |
| end_ts = datetime.now(timezone.utc).isoformat() |
| runtime_min = (time.time() - T0) / 60.0 |
| print(f"\nEND_TIME = {end_ts}\nTotal runtime: {runtime_min:.2f} min", flush=True) |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|