File size: 6,884 Bytes
189f45b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
"""
EXP 2ND-DIR-3SEED: 3-seed re-run of the 12 single-property configurations on
collision -> flat-drop at N=192.

R2 + R3 convergent ask: the 1-seed run in `_rev_q_2nddirection_flatdrop.py`
collapsed all 12 single-property configs to exactly 40.0% (degenerate-receiver
floor on flat-drop). This replaces those rows with proper 3-seed best-of
numbers.

Single-prop configs (matching the existing 24-config sweep rows 1-12):
  7 disc:  L=2..5 x V=5,10 subset (matching the sweep)
  5 cont:  D=2,3,5,10,20

Multi-prop rows (the original 12 multi-prop configs in the 2nd-direction sweep)
already at 45-58% with 1 seed; they are not the source of the 40% floor and
re-running them would not move the headline.
"""
import json, time, sys, os
from pathlib import Path
from datetime import datetime, timezone
import numpy as np
import torch

# Wall-clock bookkeeping: record (and immediately print) when the run started,
# and keep a monotonic-ish reference for the total-runtime figure at the end.
PROMPT_RECEIVED_TIME = datetime.now(timezone.utc).isoformat()
print(f"PROMPT_RECEIVED_TIME = {PROMPT_RECEIVED_TIME}", flush=True)
T0 = time.time()

# Sibling experiment scripts live next to this file; make them importable
# regardless of the current working directory.
sys.path.insert(0, os.path.dirname(__file__))
from _overnight_p1_transfer import make_splits
from _overnight_p3_matrix import load_labels, load_feat_subsampled
from _rev_q_posdis_scatter import (
    train_discrete_custom, disc_train_recv_custom,
    train_continuous_base, train_recv_frozen_cont,
)
disc_train_recv_frozen = disc_train_recv_custom  # alias

# Output directory for summary.txt / summary.json; created eagerly so a crash
# mid-run still leaves the directory in place.
OUT = Path("results/reviewer_response/exp_2nddir_singleprop_3seed")
OUT.mkdir(parents=True, exist_ok=True)
N_SEEDS = 3    # seeds per configuration (best-of-3 reported)
N_TARGET = 192 # number of target-domain training examples for the frozen receiver


def log(msg):
    """Emit *msg* to stdout, prefixed with a UTC HH:MM:SSZ timestamp and the
    experiment tag, flushing immediately so progress survives buffering."""
    stamp = datetime.now(timezone.utc).strftime("%H:%M:%SZ")
    line = "[" + stamp + "] EXP-3SEED: " + str(msg)
    print(line, flush=True)


def main():
    """Run the 3-seed re-run of the 12 single-property configurations
    (7 discrete L/V combinations + 5 continuous code dimensions) on the
    collision -> flat-drop transfer at N_TARGET target examples, then print
    and persist per-config summaries (mean/std/max over seeds) under OUT.
    """
    log("=" * 60)
    log(f"3-seed re-run: 12 single-property configs on coll -> flat-drop @ N={N_TARGET}")

    # Source-domain (collision) and target-domain (flat_drop) features from the
    # same backbone.  NOTE(review): array shapes/dtypes are determined by the
    # project helper `load_feat_subsampled` and are not visible here.
    feat_c = load_feat_subsampled("collision", "vjepa2")
    feat_t = load_feat_subsampled("flat_drop", "vjepa2")
    # Collision labels are read straight from the saved npz, target labels via
    # the shared loader — presumably both are the same restitution binning;
    # verify against the loaders if this script is reused.
    rest_3 = np.load("results/kinematics_vs_mechanics/labels_collision.npz")["restitution_bin"]
    lbl_t_3 = load_labels("flat_drop", "restitution")

    # 7 single-prop disc + 5 single-prop cont configs (matching sweep rows 1-12)
    # Discrete: (row name, number of heads L, vocabulary size V).
    disc_configs = [
        ("disc_L2_V5",  2, 5),
        ("disc_L2_V10", 2, 10),
        ("disc_L3_V5",  3, 5),
        ("disc_L3_V10", 3, 10),
        ("disc_L4_V5",  4, 5),
        ("disc_L4_V10", 4, 10),
        ("disc_L5_V5",  5, 5),
    ]
    # Continuous: (row name, per-agent code dimension D).
    cont_configs = [
        ("cont_dim2",  2),
        ("cont_dim3",  3),
        ("cont_dim5",  5),
        ("cont_dim10", 10),
        ("cont_dim20", 20),
    ]

    # One summary dict per config (only configs with >=1 successful seed).
    rows = []

    # Discrete configs: train the base model on collision, then train a frozen
    # receiver head on flat-drop with N_TARGET examples; repeat per seed.
    for name, L, V in disc_configs:
        log(f"\n  --- {name} (L={L}, V={V}) ---")
        within_seeds = []; cross_seeds = []
        for seed in range(N_SEEDS):
            t0 = time.time()
            try:
                base = train_discrete_custom(feat_c, rest_3, seed=seed, n_heads=L, vocab_size=V, n_epochs=150)
                # Target-domain train/holdout split is re-drawn per seed so the
                # receiver sees a different N_TARGET subset each time.
                tr_t, ho_t = make_splits(lbl_t_3, seed)
                acc = disc_train_recv_frozen(base, feat_t, lbl_t_3, tr_t, ho_t, seed=seed, n_target=N_TARGET)
                within_seeds.append(float(base["task_acc"]))
                cross_seeds.append(float(acc))
                log(f"    s{seed}: within={base['task_acc']*100:.1f}%, cross={acc*100:.1f}% [{time.time()-t0:.0f}s]")
            except Exception as e:
                # A failed seed is logged (truncated traceback) and skipped;
                # the config's stats are computed over the surviving seeds.
                import traceback
                log(f"    s{seed} FAILED: {e}\n{traceback.format_exc()[:300]}")
        if within_seeds:
            rows.append({"name": name, "kind": "disc", "L": L, "V": V,
                         "within_mean": float(np.mean(within_seeds)), "within_std": float(np.std(within_seeds)),
                         "within_max": float(np.max(within_seeds)),
                         "cross_n192_mean": float(np.mean(cross_seeds)), "cross_n192_std": float(np.std(cross_seeds)),
                         "cross_n192_max": float(np.max(cross_seeds))})

    # Continuous configs: same protocol with the continuous-channel trainer.
    for name, D in cont_configs:
        log(f"\n  --- {name} (D={D}) ---")
        within_seeds = []; cross_seeds = []
        for seed in range(N_SEEDS):
            t0 = time.time()
            try:
                base = train_continuous_base(feat_c, rest_3, seed=seed, code_dim_per_agent=D, n_epochs=150)
                tr_t, ho_t = make_splits(lbl_t_3, seed)
                acc = train_recv_frozen_cont(base, feat_t, lbl_t_3, tr_t, ho_t, seed=seed, n_target=N_TARGET)
                within_seeds.append(float(base["task_acc"]))
                cross_seeds.append(float(acc))
                log(f"    s{seed}: within={base['task_acc']*100:.1f}%, cross={acc*100:.1f}% [{time.time()-t0:.0f}s]")
            except Exception as e:
                import traceback
                log(f"    s{seed} FAILED: {e}\n{traceback.format_exc()[:300]}")
        if within_seeds:
            rows.append({"name": name, "kind": "cont", "D": D,
                         "within_mean": float(np.mean(within_seeds)), "within_std": float(np.std(within_seeds)),
                         "within_max": float(np.max(within_seeds)),
                         "cross_n192_mean": float(np.mean(cross_seeds)), "cross_n192_std": float(np.std(cross_seeds)),
                         "cross_n192_max": float(np.max(cross_seeds))})

    # Build the human-readable summary table plus aggregate lines, print it,
    # and persist both the text table and the raw rows as JSON.
    if rows:
        SUMMARY = ["EXP 3-SEED single-prop coll->flat-drop @ N=192",
                   "",
                   f"{'Config':<14s} | {'Within (mean+-std)':>20s} | {'Cross (mean+-std)':>20s} | {'Cross max':>10s}",
                   "-" * 75]
        for r in rows:
            SUMMARY.append(
                f"{r['name']:<14s} | {r['within_mean']*100:>6.1f}+-{r['within_std']*100:>4.1f}% | "
                f"{r['cross_n192_mean']*100:>6.1f}+-{r['cross_n192_std']*100:>4.1f}% | "
                f"{r['cross_n192_max']*100:>9.1f}%"
            )
        # Aggregates across configs: mean-of-means and mean of best-of-3.
        cross_means = [r["cross_n192_mean"] for r in rows]
        cross_maxes = [r["cross_n192_max"] for r in rows]
        SUMMARY.append("")
        SUMMARY.append(f"All-config 3-seed mean cross flat-drop: {np.mean(cross_means)*100:.1f}+-{np.std(cross_means)*100:.1f}% (range {np.min(cross_means)*100:.1f}-{np.max(cross_means)*100:.1f}%)")
        SUMMARY.append(f"All-config best-of-3 cross flat-drop:   {np.mean(cross_maxes)*100:.1f}+-{np.std(cross_maxes)*100:.1f}% (range {np.min(cross_maxes)*100:.1f}-{np.max(cross_maxes)*100:.1f}%)")
        SUMMARY.append("")
        SUMMARY.append("Prior 1-seed reported all 12 configs at exactly 40.0% (degenerate-receiver floor).")
        print("\n".join(SUMMARY), flush=True)
        with open(OUT / "summary.txt", "w") as fh:
            fh.write("\n".join(SUMMARY) + "\n")
        with open(OUT / "summary.json", "w") as fh:
            json.dump(rows, fh, indent=2)
    # Final timing footer (printed even when no rows succeeded).
    end_ts = datetime.now(timezone.utc).isoformat()
    runtime_min = (time.time() - T0) / 60.0
    print(f"\nEND_TIME = {end_ts}\nTotal runtime: {runtime_min:.2f} min", flush=True)


# Script entry point: run the full sweep only when executed directly.
if __name__ == "__main__":
    main()