AbstractPhil committed on
Commit eff598d · verified · 1 Parent(s): cf3bcc2

Create cell3_trainer.py

Files changed (1): cell3_trainer.py (+246 −0)
cell3_trainer.py ADDED
@@ -0,0 +1,246 @@
"""
Cell 3: Trainer — Patch Cross-Attention Shape Classifier (8×16×16)
===================================================================
Run after Cell 1 (generator) and Cell 2 (model).
Everything from prior cells is already in kernel scope.
"""

import os, time, math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader


# === Augmentation (vectorized for 8×16×16) ====================================

def deform_grid(grid, p_dropout=0.05, p_add=0.05, p_shift=0.08):
    """Vectorized voxel augmentation for 8×16×16 grids."""
    B = grid.shape[0]
    device = grid.device
    r = torch.rand(B, 3, device=device)
    out = grid.clone()

    # Voxel dropout: for selected samples, zero out ~10% of voxels
    drop_sel = (r[:, 0] < p_dropout).view(B, 1, 1, 1)
    keep = torch.rand_like(out) > 0.10
    out = torch.where(drop_sel, out * keep.float(), out)

    # Boundary addition: sprinkle extra voxels onto the 1-voxel dilated shell
    add_sel = (r[:, 1] < p_add).view(B, 1, 1, 1).float()
    dilated = F.max_pool3d(out.unsqueeze(1), kernel_size=3, stride=1, padding=1).squeeze(1)
    boundary = ((dilated > 0.5) & (out < 0.5)).float()
    add_noise = (torch.rand_like(out) < 0.2).float()
    out = (out + boundary * add_noise * add_sel).clamp(max=1.0)

    # Small translation (1 voxel — grid is small)
    shift_sel = (r[:, 2] < p_shift)
    axes = torch.randint(3, (B,), device=device)
    dirs = torch.randint(0, 2, (B,), device=device) * 2 - 1

    # Build all six single-voxel shifts (wrap-around cleared to zero) plus the
    # identity, then gather each sample's assigned version.
    versions = []
    for ax in range(3):
        for d in [-1, 1]:
            s = torch.roll(out, shifts=d, dims=ax + 1)
            if d == 1:
                if ax == 0:   s[:, 0, :, :] = 0
                elif ax == 1: s[:, :, 0, :] = 0
                else:         s[:, :, :, 0] = 0
            else:
                if ax == 0:   s[:, -1, :, :] = 0
                elif ax == 1: s[:, :, -1, :] = 0
                else:         s[:, :, :, -1] = 0
            versions.append(s)
    versions.append(out)  # identity
    stacked = torch.stack(versions, dim=0)  # (7, B, 8, 16, 16)

    assign = torch.where(shift_sel, axes * 2 + (dirs == 1).long(), torch.full_like(axes, 6))
    out = stacked[assign, torch.arange(B, device=device)]

    return out

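# Optional sanity check (illustrative sketch, not part of the original cell):
# confirms deform_grid preserves the (B, 8, 16, 16) shape and keeps values in
# [0, 1]. Uncomment to run once the function above is defined.
# _probe = (torch.rand(4, 8, 16, 16) > 0.7).float()
# _aug = deform_grid(_probe)
# assert _aug.shape == _probe.shape
# assert 0.0 <= _aug.min().item() and _aug.max().item() <= 1.0
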
# === Training =================================================================

def train_vae_ca_classifier(model, train_loader, val_loader,
                            n_epochs=60, lr=3e-3, weight_decay=1e-4,
                            grad_clip=1.0, device='cuda',
                            checkpoint_dir='/content/checkpoints_vae_ca'):
    device = torch.device(device if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    amp_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
    # Create the GradScaler once (fp16 path only); re-creating it every step
    # would reset the dynamic loss scale and defeat its purpose.
    scaler = torch.amp.GradScaler('cuda') if amp_dtype == torch.float16 else None

    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_epochs, eta_min=lr * 0.01)

    ce_loss_fn = nn.CrossEntropyLoss()
    bce_loss_fn = nn.BCEWithLogitsLoss()

    # From Cell 1 globals
    dim_labels = torch.tensor([CLASS_META[n]["dim"] for n in CLASS_NAMES], dtype=torch.long, device=device)
    curved_labels = torch.tensor([1.0 if CLASS_META[n]["curved"] else 0.0 for n in CLASS_NAMES], device=device)
    curv_type_labels = torch.tensor([CURV_TO_IDX[CLASS_META[n]["curvature"]] for n in CLASS_NAMES], dtype=torch.long, device=device)

    os.makedirs(checkpoint_dir, exist_ok=True)

    best_acc = 0.0
    print("=" * 70)

    for epoch in range(1, n_epochs + 1):
        model.train()
        t0 = time.time()
        total_loss = 0
        correct = 0
        total = 0

        for grid, label in train_loader:
            grid = grid.to(device, non_blocking=True)
            label = label.to(device, non_blocking=True)

            # Augmentation
            grid = deform_grid(grid)

            with torch.amp.autocast('cuda', dtype=amp_dtype):
                out = model(grid, labels=label)

                loss_cls = ce_loss_fn(out["class_logits"], label)
                batch_dims = dim_labels[label]
                loss_dim = ce_loss_fn(out["dim_logits"], batch_dims)
                batch_curved = curved_labels[label].unsqueeze(-1)
                loss_curved = bce_loss_fn(out["is_curved_pred"], batch_curved)
                batch_curv_type = curv_type_labels[label]
                loss_curv = ce_loss_fn(out["curv_type_logits"], batch_curv_type)

                loss = loss_cls + 0.3 * loss_dim + 0.3 * loss_curved + 0.2 * loss_curv

            optimizer.zero_grad(set_to_none=True)
            if scaler is not None:
                scaler.scale(loss).backward()
                scaler.unscale_(optimizer)
                nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
                optimizer.step()

            total_loss += loss.item()
            pred = out["class_logits"].argmax(dim=-1)
            correct += (pred == label).sum().item()
            total += label.shape[0]

        scheduler.step()

        # === Validation ===
        model.eval()
        val_correct = 0
        val_total = 0
        curved_correct = 0
        curved_total = 0
        per_class_correct = torch.zeros(NUM_CLASSES, device=device)
        per_class_total = torch.zeros(NUM_CLASSES, device=device)

        with torch.no_grad():
            for grid, label in val_loader:
                grid = grid.to(device, non_blocking=True)
                label = label.to(device, non_blocking=True)

                with torch.amp.autocast('cuda', dtype=amp_dtype):
                    out = model(grid)

                pred = out["class_logits"].argmax(dim=-1)
                val_correct += (pred == label).sum().item()
                val_total += label.shape[0]

                # is_curved_pred is a logit (BCEWithLogitsLoss), so threshold at 0
                curved_pred = (out["is_curved_pred"].squeeze(-1) > 0.0).float()
                curved_true = curved_labels[label]
                curved_correct += (curved_pred == curved_true).sum().item()
                curved_total += label.shape[0]

                for c in range(NUM_CLASSES):
                    mask = label == c
                    per_class_total[c] += mask.sum()
                    per_class_correct[c] += (pred[mask] == c).sum()

        val_acc = val_correct / val_total
        curved_acc = curved_correct / curved_total
        train_acc = correct / total
        train_loss = total_loss / len(train_loader)
        elapsed = time.time() - t0
        sps = total / elapsed

        per_class_acc = per_class_correct / per_class_total.clamp(min=1)
        worst = per_class_acc.argsort()[:10]

        print(f"\nEpoch {epoch}/{n_epochs} | {elapsed:.1f}s | {sps:.0f} samp/s")
        print(f"  Train: loss={train_loss:.4f}  acc={train_acc:.4f}")
        print(f"  Val:   acc={val_acc:.4f}  curved={curved_acc:.4f}")
        print(f"  LR: {scheduler.get_last_lr()[0]:.6f}")
        print(f"  Worst classes:")
        for idx in worst:
            idx = idx.item()
            acc = per_class_acc[idx].item() * 100
            print(f"    {CLASS_NAMES[idx]:20s} {acc:5.1f}%")

        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), os.path.join(checkpoint_dir, 'best.pt'))
            # Also save to flat path for Cell 5 compat
            torch.save(model.state_dict(), '/content/best_vae_ca_classifier.pt')
            print(f"  ★ New best: {best_acc:.2%}")

        # Always save latest
        torch.save(model.state_dict(), os.path.join(checkpoint_dir, 'latest.pt'))

    print(f"\n{'=' * 70}")
    print(f"Training complete. Best val acc: {best_acc:.2%}")
    return best_acc


# === Run ======================================================================

print("=" * 70)
print(f"Patch Cross-Attention Shape Classifier — {GZ}×{GY}×{GX}")
print(f"  Patches: {MACRO_Z}×{MACRO_Y}×{MACRO_X} = {MACRO_N} of {PATCH_Z}×{PATCH_Y}×{PATCH_X}")
print("=" * 70)

print("\nGenerating training data...")
gen = ShapeGenerator(seed=42)
train_data = gen.generate_dataset(2000, seed=42)
val_data = gen.generate_dataset(400, seed=999)

print(f"  Generated {len(train_data['grids'])} train + {len(val_data['grids'])} val")
print(f"  Classes: {NUM_CLASSES}")
occ = train_data['grids'].reshape(len(train_data['grids']), -1).sum(axis=1)
print(f"  Avg occupied voxels: {occ.mean():.1f} / {GZ*GY*GX} ({occ.mean()/(GZ*GY*GX)*100:.1f}%)")

batch_size = 1024
train_ds = TensorDataset(torch.from_numpy(train_data['grids']).float(),
                         torch.from_numpy(train_data['labels']).long())
val_ds = TensorDataset(torch.from_numpy(val_data['grids']).float(),
                       torch.from_numpy(val_data['labels']).long())

train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True,
                          num_workers=2, pin_memory=True, drop_last=True)
val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False,
                        num_workers=2, pin_memory=True)

print(f"  Batch size: {batch_size}")
print(f"  Train batches: {len(train_loader)} | Val batches: {len(val_loader)}")

model = PatchCrossAttentionClassifier(n_classes=NUM_CLASSES)
n_params = sum(p.numel() for p in model.parameters())
print(f"  Model: {n_params:,} params")

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"  Device: {device}")

best_acc = train_vae_ca_classifier(
    model, train_loader, val_loader,
    n_epochs=60, lr=3e-3, weight_decay=1e-4, device=device)

print(f"\nDone. Best accuracy: {best_acc:.2%}")
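
# How a later cell might reload the best checkpoint (a sketch, not part of the
# original cell; assumes Cell 2's PatchCrossAttentionClassifier and Cell 1's
# NUM_CLASSES are still in scope, and that training above has already written
# /content/best_vae_ca_classifier.pt):
# reloaded = PatchCrossAttentionClassifier(n_classes=NUM_CLASSES)
# state = torch.load('/content/best_vae_ca_classifier.pt', map_location='cpu')
# reloaded.load_state_dict(state)
# reloaded.eval()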