"""
EXP REV-P101-GLOBAL: Diagnose the Phys101 LP/bottleneck dissociation collapse.
The paper currently bins Phys101 mass per-scenario tertile (source and target use
DIFFERENT class boundaries). This is the standard Phys101 protocol but creates a
class-boundary shift on top of the feature-distribution shift, which may explain
why the Kubric LP/bottleneck dissociation does not replicate.
This script re-runs Phys101 cross-scenario with GLOBAL mass tertile binning
(unified bin edges across all 3 subsets) and compares to per-scenario binning.
If LP recovers above the bottleneck under global binning, per-scenario binning
was the confound. If LP and bottleneck still both sit at ~45%, something else
(real-video feature variance, smaller pool size) is the cause.
"""
import json, time, sys, os
from pathlib import Path
from datetime import datetime, timezone
import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# Wall-clock bookkeeping: record run start time (UTC, ISO-8601) and a monotonic-ish origin.
PROMPT_RECEIVED_TIME = datetime.now(timezone.utc).isoformat()
print(f"PROMPT_RECEIVED_TIME = {PROMPT_RECEIVED_TIME}", flush=True)
T0 = time.time()
# Output directory for the summary .txt / .json artifacts (created eagerly).
OUT = Path("results/reviewer_response/exp_phys101_global_bins")
OUT.mkdir(parents=True, exist_ok=True)
# Experiment grid: seeds per condition, target-sample budgets N, and the three Phys101 scenarios.
N_SEEDS = 5
N_LIST = [16, 64, 192]
DOMAINS = ("spring", "fall", "ramp")
# Pre-extracted per-scenario feature files — presumably produced by a phase-87
# extraction run; verify the paths exist before launching (TODO confirm).
PHYS_FILES = {s: f"results/phase87_phys101_{s}_features.pt" for s in DOMAINS}
def log(msg):
    """Emit *msg* on stdout, prefixed with a UTC HH:MM:SSZ timestamp and the experiment tag."""
    now = datetime.now(timezone.utc)
    print("[" + now.strftime("%H:%M:%SZ") + "] EXP-P101G: " + str(msg), flush=True)
def load_data():
    """Load pooled clip features and raw mass values for all three scenarios.

    Returns a dict mapping scenario name -> (features, mass), where features
    is an (n_clips, d) float array obtained by averaging over the time axis
    and mass is an (n_clips,) float64 array of physical mass values.
    """
    out = {}
    for s in DOMAINS:
        d = torch.load(PHYS_FILES[s], weights_only=False, map_location="cpu")
        feat = d["features"].float()
        # Mean-pool over the time axis (frames -> one clip-level feature vector).
        # NOTE(review): the previous comment said "L2-pool", but .mean(dim=1) is
        # average pooling — comment corrected, code unchanged.
        pooled = feat.mean(dim=1).numpy()
        mass = np.asarray(d["mass_values"], dtype=np.float64)
        out[s] = (pooled, mass)
        log(f" {s}: pooled feat={pooled.shape}, mass=[{mass.min():.1f}, {mass.max():.1f}], n={len(mass)}")
    return out
def bin_global(mass_dict):
    """Tertile-bin mass with ONE set of edges computed on the union of all subsets.

    Returns {scenario: (labels, edges)} where labels are int64 class ids in
    {0, 1, 2} and edges are the shared 1/3- and 2/3-quantile cut points.
    """
    pooled_mass = np.concatenate([mass for _, mass in mass_dict.values()])
    edges = np.quantile(pooled_mass, [1 / 3, 2 / 3])
    log(f" GLOBAL tertile edges (computed on union of {len(pooled_mass)} clips): {edges.tolist()}")
    binned = {}
    for scen, (_, mass) in mass_dict.items():
        binned[scen] = (np.searchsorted(edges, mass).astype(np.int64), edges)
    return binned
def bin_per_scenario(mass_dict):
    """Tertile-bin mass independently within each scenario (the standard Phys101 protocol).

    Returns {scenario: (labels, edges)} with per-scenario quantile edges, so the
    class boundaries generally differ between source and target subsets.
    """
    binned = {}
    for s, (_, mass) in mass_dict.items():
        edges = np.quantile(mass, [1 / 3, 2 / 3])
        labels = np.searchsorted(edges, mass).astype(np.int64)
        binned[s] = (labels, edges)
        log(f" PER-SCEN {s} edges: {edges.tolist()}, bins={np.bincount(labels, minlength=3).tolist()}")
    return binned
def stratified_subset(rng, y, n_per_class):
    """Sample up to *n_per_class* indices per distinct label of *y*, without replacement.

    Returns the chosen indices as a single sorted 1-D int array.
    """
    picked = []
    for label in np.unique(y):
        members = np.where(y == label)[0]
        if len(members) == 0:  # unreachable for labels from np.unique; kept defensively
            continue
        take = min(n_per_class, len(members))
        picked.extend(rng.choice(members, size=take, replace=False).tolist())
    return np.array(sorted(picked))
def train_lp(X_tr, y_tr, X_te, y_te):
    """Fit a standardized logistic-regression linear probe and return test accuracy."""
    scaler = StandardScaler().fit(X_tr)
    clf = LogisticRegression(max_iter=2000, C=1.0, solver="lbfgs")
    clf.fit(scaler.transform(X_tr), y_tr)
    accuracy = (clf.predict(scaler.transform(X_te)) == y_te).mean()
    return float(accuracy)
def stats(vals):
    """Return (mean, sample std) over the non-NaN entries of *vals*.

    Yields (nan, nan) when nothing survives the NaN filter, and std 0.0
    when only a single value does.
    """
    clean = np.array([v for v in vals if not np.isnan(v)])
    if clean.size == 0:
        return float("nan"), float("nan")
    spread = 0.0 if clean.size == 1 else clean.std(ddof=1)
    return float(clean.mean()), float(spread)
def evaluate_lp(features, labels_dict, src, tgt, n_classes=3):
    """LP cross-scenario: train on source, train fresh classifier with N target samples,
    eval on remaining target samples.

    Returns (n0_results, results): the N=0 source-only accuracies (one per seed)
    and a dict N -> list of per-seed accuracies; failed fits are recorded as NaN.
    """
    X_src, y_src = features[src], labels_dict[src][0]
    X_tgt, y_tgt = features[tgt], labels_dict[tgt][0]
    # The rarest target class caps how many clips may be moved into training.
    smallest = min(int(np.sum(y_tgt == c)) for c in np.unique(y_tgt))

    # N=0 baseline: source-only training, entire target used for evaluation.
    n0_results = []
    for s in range(N_SEEDS):
        try:
            n0_results.append(train_lp(X_src, y_src, X_tgt, y_tgt))
        except Exception as e:
            log(f" {src}->{tgt} N=0 s{s}: FAILED {e}")
            n0_results.append(float("nan"))

    # N>0: add a stratified target subset to training; hold out the rest for eval.
    results = {N: [] for N in N_LIST}
    for N in N_LIST:
        per_class = min(max(1, N // n_classes), int(0.7 * smallest))
        # NOTE(review): if 0.7*smallest < 1 then per_class is 0 and this N>0 run
        # silently degenerates to the N=0 setting — confirm that is intended.
        for s in range(N_SEEDS):
            rng = np.random.default_rng(1234 + s)
            train_idx = stratified_subset(rng, y_tgt, per_class)
            mask = np.ones(len(y_tgt), bool)
            mask[train_idx] = False
            X_eval, y_eval = X_tgt[mask], y_tgt[mask]
            if len(y_eval) == 0:
                continue
            X_tr = np.concatenate([X_src, X_tgt[train_idx]], axis=0)
            y_tr = np.concatenate([y_src, y_tgt[train_idx]], axis=0)
            try:
                results[N].append(train_lp(X_tr, y_tr, X_eval, y_eval))
            except Exception as e:
                log(f" {src}->{tgt} N={N} s{s}: FAILED {e}")
                results[N].append(float("nan"))
    return n0_results, results
def main():
    """Run the LP cross-scenario sweep under both binning protocols and report.

    Writes a human-readable summary (.txt) and the full per-seed results (.json)
    into OUT, and prints progress/summary to stdout throughout.
    """
    log("=" * 60)
    log("Phys101 cross-scenario diagnostic: global vs per-scenario binning")
    raw = load_data()
    features = {s: raw[s][0] for s in DOMAINS}

    # Per-scenario binning (standard, current paper protocol)
    log("\n=== PER-SCENARIO TERTILE BINNING (paper default) ===")
    perscen_labels = bin_per_scenario(raw)
    # Global binning (diagnostic)
    log("\n=== GLOBAL TERTILE BINNING (diagnostic) ===")
    global_labels = bin_global(raw)

    # Evaluate all 6 cross-scenario directions under both binnings
    pairs = [(src, tgt) for src in DOMAINS for tgt in DOMAINS if src != tgt]
    out = {"per_scenario": {}, "global": {}}
    for binning_name, label_dict in [("per_scenario", perscen_labels),
                                     ("global", global_labels)]:
        log(f"\n--- {binning_name} binning ---")
        for src, tgt in pairs:
            log(f" LP {src}->{tgt}:")
            n0, results = evaluate_lp(features, label_dict, src, tgt)
            n0_m, n0_s = stats(n0)
            log(f" N=0 (src-only): {n0_m*100:5.1f}% +/- {n0_s*100:.1f}%")
            for N in N_LIST:
                m, sd = stats(results[N])
                log(f" N={N:>3d}: {m*100:5.1f}% +/- {sd*100:.1f}%")
            out[binning_name][f"{src}->{tgt}"] = {
                "N0": [float(x) for x in n0],
                "curve": {N: [float(x) for x in results[N]] for N in N_LIST},
            }

    def mean_across_pairs(binning):
        # Pool every seed of every directional pair, dropping NaNs (failed fits).
        rows = out[binning]
        n0_all = [x for r in rows.values() for x in r["N0"] if not np.isnan(x)]
        per_N = {N: [x for r in rows.values() for x in r["curve"][N] if not np.isnan(x)]
                 for N in N_LIST}
        return {"N0": stats(n0_all), **{f"N{N}": stats(per_N[N]) for N in N_LIST}}

    SUMMARY = ["Phys101 cross-scenario LP, per-scenario vs global tertile mass binning",
               "(5 seeds, mean across 6 directional pairs, +/- = std across all seeds*pairs)",
               ""]
    for binning_name in ["per_scenario", "global"]:
        agg = mean_across_pairs(binning_name)
        SUMMARY.append(f"--- {binning_name} ---")
        # BUGFIX: derive the key list from N_LIST instead of hard-coding
        # ["N0", "N16", "N64", "N192"], which duplicated N_LIST and would
        # raise KeyError the moment N_LIST changed.
        for k in ["N0"] + [f"N{N}" for N in N_LIST]:
            m, sd = agg[k]
            SUMMARY.append(f" {k:>5s}: {m*100:5.1f}% +/- {sd*100:.1f}%")
        SUMMARY.append("")
    print("\n" + "\n".join(SUMMARY), flush=True)

    with open(OUT / "exp_phys101_global_bins_summary.txt", "w") as fh:
        fh.write("\n".join(SUMMARY) + "\n")
    with open(OUT / "exp_phys101_global_bins_summary.json", "w") as fh:
        json.dump(out, fh, indent=2)

    end_ts = datetime.now(timezone.utc).isoformat()
    runtime_min = (time.time() - T0) / 60.0
    print(f"\nEND_TIME = {end_ts}\nTotal runtime: {runtime_min:.2f} min", flush=True)


if __name__ == "__main__":
    main()