# genai_test / train.py
# (Hugging Face upload header: Kyo-Kai — "Upload 12 files", commit d824e43 verified)
# train.py
import os
import torch
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
import math
from models.wgan_gp import Generator, Discriminator, gradient_penalty, aux_losses
from train_utils.preprocess_s1 import preprocess_s1
# ----------------------------
# Utility: vectorized adjacency
# ----------------------------
def build_adj_from_eleblock_vectorized(ele_block, nmax_batch):
    """
    Vectorized adjacency build from a padded ele_nod block.

    Args:
        ele_block: float tensor [B, max_elems*2] of node-index pairs,
            padded with -1 for invalid entries.
        nmax_batch: int, number of nodes to consider for this batch
            (<= global nmax).

    Returns:
        A: float tensor [B, nmax_batch, nmax_batch] with 0/1 entries,
        symmetric, with a zero diagonal.
    """
    B = ele_block.size(0)
    device = ele_block.device
    # Interpret the flat block as element endpoint pairs: [B, M, 2]
    pairs = ele_block.view(B, -1, 2).long()
    rows = pairs[..., 0]  # [B, M]
    cols = pairs[..., 1]  # [B, M]
    # Keep only pairs whose endpoints both fall in [0, nmax_batch);
    # this drops the -1 padding and any out-of-range references.
    valid = (rows >= 0) & (cols >= 0) & (rows < nmax_batch) & (cols < nmax_batch)
    A = torch.zeros(B, nmax_batch, nmax_batch, device=device)
    # Scatter the (possibly empty) set of valid edges; indexing with an
    # empty selection is a no-op, so no special-casing is needed.
    bidx = torch.arange(B, device=device).unsqueeze(1).expand_as(rows)
    b_sel = bidx[valid]
    r_sel = rows[valid]
    c_sel = cols[valid]
    A[b_sel, r_sel, c_sel] = 1.0
    A[b_sel, c_sel, r_sel] = 1.0  # undirected: mirror each edge
    # Zero the diagonal — self-loops carry no structural meaning here.
    idx = torch.arange(nmax_batch, device=device)
    A[:, idx, idx] = 0.0
    return A
# ----------------------------
# Dataset with size info (from legacy padded matrix)
# ----------------------------
class TrussFlatDataset(Dataset):
    """
    Wraps the padded flat vectors produced by preprocess_s1(), while also
    exposing each sample's true (n_nodes, n_elems) for bucketing/collation.
    """

    def __init__(self, data_array: np.ndarray, metadata: dict):
        super().__init__()
        self.data = torch.tensor(data_array, dtype=torch.float32)
        self.meta = metadata
        self.max_nodes = int(self.meta['max_nodes'])
        self.max_elems = int(self.meta['max_elements'])
        # [N, 2] rows of (n_nodes, n_elems) per sample
        self.size_info = torch.tensor(self.meta['size_info'], dtype=torch.long)

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, idx):
        n_nodes, n_elems = self.size_info[idx].tolist()
        return self.data[idx], n_nodes, n_elems
# ----------------------------
# Length-bucketed sampler
# ----------------------------
def make_length_buckets(size_info: np.ndarray, bucket_width_nodes=16, bucket_width_elems=32):
    """
    Bin sample indices by quantized (n_nodes, n_elems) so that batches drawn
    from one bin need little padding.

    Returns: list of index lists, ordered by (node_bucket, elem_bucket).
    """
    bins = {}
    for sample_idx, (n_nodes, n_elems) in enumerate(size_info):
        # Floor each size to its bucket boundary.
        node_bin = int(n_nodes) - int(n_nodes) % bucket_width_nodes
        elem_bin = int(n_elems) - int(n_elems) % bucket_width_elems
        bins.setdefault((node_bin, elem_bin), []).append(sample_idx)
    # Deterministic bucket ordering by (node_bin, elem_bin).
    return [bins[key] for key in sorted(bins)]
def bucketed_batch_sampler(size_info: np.ndarray, batch_size=16,
                           bucket_width_nodes=16, bucket_width_elems=32):
    """
    Generate index batches where each batch is drawn entirely from a single
    size bucket. Samples are shuffled within their bucket before chunking.
    """
    for bucket in make_length_buckets(size_info, bucket_width_nodes, bucket_width_elems):
        # In-bucket shuffle (one permutation draw per bucket).
        shuffled = np.array(bucket)[np.random.permutation(len(bucket))]
        # Chunk the shuffled bucket into batches of at most batch_size.
        for start in range(0, len(shuffled), batch_size):
            yield shuffled[start:start + batch_size].tolist()
# ----------------------------
# Collate: dynamic per-batch padding & masks
# ----------------------------
def collate_truss(batch, max_nodes_global, max_elems_global):
    """
    Collate a list of (flat, n_nodes, n_elems) samples into per-batch tensors.

    Crops node/element blocks to the largest sizes present in THIS batch
    (rather than the global maxima), builds a masked dense adjacency, and
    assembles a 4-dim conditioning vector per sample.

    Returns: (nodes, node_mask, A_real, cond, nmax_b).
    """
    flats, n_nodes_list, n_elems_list = zip(*batch)
    batch_sz = len(flats)
    # Largest node/element counts appearing in this batch.
    nmax_b = max(n_nodes_list)
    emax_b = max(n_elems_list)
    stacked = torch.stack(flats, 0)  # [B, total_dim]
    # Global flat layout: [node coords | element pairs | ...]
    node_end = max_nodes_global * 2
    ele_end = node_end + max_elems_global * 2
    coords = stacked[:, :node_end].view(batch_sz, max_nodes_global, 2)
    elems = stacked[:, node_end:ele_end]  # [B, max_elems_global*2]
    # Crop away padding down to the batch maxima — big speed/memory win.
    nodes = coords[:, :nmax_b, :]               # [B, nmax_b, 2]
    ele_block = elems[:, :emax_b * 2]           # [B, emax_b*2]
    # A node "exists" when its coordinate row is not all-zero.
    node_mask = (nodes.abs().sum(-1) > 0).float()  # [B, nmax_b]
    # Dense 0/1 adjacency for the real graphs, then drop any edge touching
    # a non-existent node.
    A_real = build_adj_from_eleblock_vectorized(ele_block, nmax_b)
    pair_mask = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
    A_real = A_real * pair_mask
    diag = torch.arange(nmax_b, device=A_real.device)
    A_real[:, diag, diag] = 0.0  # keep the diagonal clean after masking
    dev = stacked.device
    # Conditioning: sizes normalized by the GLOBAL maxima...
    n_nodes_norm = torch.tensor([n / max_nodes_global for n in n_nodes_list],
                                dtype=torch.float32, device=dev).unsqueeze(1)
    n_elems_norm = torch.tensor([e / max_elems_global for e in n_elems_list],
                                dtype=torch.float32, device=dev).unsqueeze(1)
    # ...plus placeholder height/spacing drawn uniformly from [0.2, 1.0)
    # (NOTE(review): random stand-ins; real values should come from the data).
    height = torch.rand(batch_sz, 1, device=dev) * 0.8 + 0.2
    spacing = torch.rand(batch_sz, 1, device=dev) * 0.8 + 0.2
    cond = torch.cat([n_nodes_norm, n_elems_norm, height, spacing], dim=1)  # [B, 4]
    return nodes, node_mask, A_real, cond, nmax_b
# ----------------------------
# Connectivity penalty (vectorized)
# ----------------------------
def connectivity_penalty(fake_nodes_mask, fake_A_soft):
    """
    Scalar penalty for isolated active nodes, encouraging the generator to
    attach edges to every node it marks as active.

    Args:
        fake_nodes_mask: [B, N] node-existence mask.
        fake_A_soft: [B, N, N] soft adjacency.

    Returns:
        Scalar tensor: mean count of nodes that are active (mask > 0.5) yet
        have (soft) degree below 1e-3.
    """
    degree = fake_A_soft.sum(dim=-1)  # [B, N] soft degree per node
    is_active = (fake_nodes_mask > 0.5).float()
    isolated = is_active * (degree < 1e-3).float()
    return isolated.mean()
# ----------------------------
# Training
# ----------------------------
def train_wgan_gp(device='cuda',
                  n_epochs=100, batch_size=16, latent_dim=128,
                  n_critic=5, lr_g=2e-4, lr_d=1e-4,
                  lambda_gp=10, lambda_connect=5.0,
                  save_path='models/checkpoints',
                  bucket_width_nodes=16, bucket_width_elems=32):
    """
    Train WGAN-GP with:
      - vectorized adjacency build
      - length-bucketed batching
      - dynamic per-batch padding & masking

    Args:
        device: 'cuda' or 'cpu'; silently downgraded to 'cpu' if CUDA is absent.
        n_epochs: number of passes over the bucketed batch list.
        batch_size: max samples per bucketed batch.
        latent_dim: generator noise dimension.
        n_critic: discriminator updates per generator update (WGAN-GP schedule).
        lr_g, lr_d: Adam learning rates for generator / discriminator.
        lambda_gp: gradient-penalty weight (forwarded to gradient_penalty).
        lambda_connect: weight of the isolated-node penalty; <= 0 disables it.
        save_path: directory for checkpoints, metadata, and loss history.
        bucket_width_nodes, bucket_width_elems: bucket quantization widths.

    Side effects: writes .pth checkpoints and .npy logs under save_path;
    saves an interrupted-state snapshot and re-raises on KeyboardInterrupt.
    """
    if not torch.cuda.is_available():
        print("⚠️ CUDA not available, training on CPU will be slow.")
        device = 'cpu'
    device = torch.device(device)
    # Load preprocessed (legacy padded) data but with size_info for bucketing
    data_array, metadata = preprocess_s1()
    dataset = TrussFlatDataset(data_array, metadata)
    # Bucketed batch sampler (reduces padding). The batch index lists are
    # materialized ONCE and reused every epoch — only their order is
    # reshuffled below, not the in-bucket composition.
    size_info = metadata['size_info']
    sampler = list(bucketed_batch_sampler(size_info,
                                          batch_size=batch_size,
                                          bucket_width_nodes=bucket_width_nodes,
                                          bucket_width_elems=bucket_width_elems))
    # We’ll build our own DataLoader-like loop b/c we use a custom sampler + collate
    max_nodes_global = metadata['max_nodes']
    max_elems_global = metadata['max_elements']
    # Init models at GLOBAL maxima; we’ll slice per-batch for the critic pass
    cond_dim = 4  # [normed n_nodes, n_elems, height, spacing]
    generator = Generator(latent_dim=latent_dim, nmax=max_nodes_global, cond_dim=cond_dim).to(device)
    discriminator = Discriminator(nmax=max_nodes_global, cond_dim=cond_dim).to(device)
    # beta1=0.0 is the customary Adam setting for WGAN-GP training
    opt_g = optim.Adam(generator.parameters(), lr=lr_g, betas=(0.0, 0.99))
    opt_d = optim.Adam(discriminator.parameters(), lr=lr_d, betas=(0.0, 0.99))
    best_score = float('-inf')
    g_loss_ema = 0.0
    epoch_losses = []
    os.makedirs(save_path, exist_ok=True)
    print(f"Starting training on {len(dataset)} samples with {len(sampler)} bucketed batches per epoch...")
    try:
        for epoch in range(n_epochs):
            epoch_d_loss, epoch_g_loss = 0.0, 0.0
            # Shuffle order of batches each epoch for better mixing
            perm_batches = np.random.permutation(len(sampler)).tolist()
            for b_id in perm_batches:
                idxs = sampler[b_id]
                batch = [dataset[i] for i in idxs]
                # --- Collate (dynamic crop + vectorized adjacency)
                nodes_real, node_mask_real, A_real, cond, nmax_b = collate_truss(
                    batch, max_nodes_global, max_elems_global
                )
                nodes_real = nodes_real.to(device)
                node_mask_real = node_mask_real.to(device)
                A_real = A_real.to(device)
                cond = cond.to(device)
                bs = nodes_real.size(0)
                # ---- Train Discriminator ----
                # n_critic critic steps per generator step; a fresh z is
                # drawn for every critic step.
                for _ in range(n_critic):
                    z = torch.randn(bs, latent_dim, device=device)
                    nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
                    # Slice generator outputs to per-batch effective size for the critic:
                    nodes_f_b = nodes_f[:, :nmax_b, :]
                    nmask_f_b = nmask_f[:, :nmax_b]
                    A_f_b = A_f[:, :nmax_b, :nmax_b]
                    elog_f_b = elog_f[:, :nmax_b, :nmax_b]  # just for aux losses later
                    d_real = discriminator(nodes_real, A_real, node_mask_real, cond)
                    # detach() keeps critic updates from backpropagating into G
                    d_fake = discriminator(nodes_f_b.detach(), A_f_b.detach(), nmask_f_b.detach(), cond)
                    gp = gradient_penalty(
                        discriminator,
                        (nodes_real, node_mask_real, A_real, cond),
                        (nodes_f_b.detach(), nmask_f_b.detach(), A_f_b.detach(), cond),
                        lambda_gp
                    )
                    # Wasserstein critic loss: maximize d_real - d_fake
                    # (minimized as its negation) plus the gradient penalty.
                    loss_d = -(d_real.mean() - d_fake.mean()) + gp
                    opt_d.zero_grad(set_to_none=True)
                    loss_d.backward()
                    opt_d.step()
                # ---- Train Generator ----
                z = torch.randn(bs, latent_dim, device=device)
                nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
                nodes_f_b = nodes_f[:, :nmax_b, :]
                nmask_f_b = nmask_f[:, :nmax_b]
                A_f_b = A_f[:, :nmax_b, :nmax_b]
                elog_f_b = elog_f[:, :nmax_b, :nmax_b]
                d_fake = discriminator(nodes_f_b, A_f_b, nmask_f_b, cond)
                adv_loss = -d_fake.mean()
                L_nodes, L_mask, L_edges = aux_losses(
                    nodes_f_b, nmask_f_b, elog_f_b,
                    nodes_real, node_mask_real, A_real
                )
                # Fixed aux-loss weights: nodes=10, mask=1, edges=5
                loss_g = adv_loss + 10 * L_nodes + 1 * L_mask + 5 * L_edges
                if lambda_connect > 0:
                    loss_g += lambda_connect * connectivity_penalty(nmask_f_b, A_f_b)
                opt_g.zero_grad(set_to_none=True)
                loss_g.backward()
                opt_g.step()
                # NOTE(review): only the LAST critic step's loss_d is added
                # per batch, so epoch_d_loss is a per-batch sample, not a
                # full average over all critic steps.
                epoch_d_loss += loss_d.item()
                epoch_g_loss += loss_g.item()
            # ---- Logging & checkpointing ----
            num_batches = len(sampler)
            epoch_d_loss /= num_batches
            epoch_g_loss /= num_batches
            epoch_losses.append({'epoch': epoch + 1,
                                 'd_loss': epoch_d_loss,
                                 'g_loss': epoch_g_loss})
            # quick graph stats (last batch’s fakes)
            with torch.no_grad():
                deg_mean = float(A_f_b.mean().item() * nmax_b)  # rough proxy
                conn_proxy = float((A_f_b.sum(dim=-1) > 0.5).float().mean().item())
            print(f"[{epoch+1}/{n_epochs}] D: {epoch_d_loss:.3f} | G: {epoch_g_loss:.3f} "
                  f"| N_b:{nmax_b:3d} | DegMean~{deg_mean:.2f} | Conn~{conn_proxy:.2f}")
            # Use exponential moving average to judge improvement
            # (NOTE(review): g_loss_ema is maintained but not used by the
            # score below — kept for potential future use.)
            ema_beta = 0.9
            if epoch == 0:
                g_loss_ema = epoch_g_loss
            else:
                g_loss_ema = ema_beta * g_loss_ema + (1 - ema_beta) * epoch_g_loss
            # Evaluate composite "stability score"
            # Here we prioritize low |G_loss| (close to zero) and high connectivity
            score = -abs(epoch_g_loss) + 0.5 * conn_proxy  # weights can be tuned
            if score > best_score:
                best_score = score
                torch.save(generator.state_dict(), os.path.join(save_path, f'generator_best.pth'))
                torch.save(discriminator.state_dict(), os.path.join(save_path, f'discriminator_best.pth'))
                np.save(os.path.join(save_path, 'metadata.npy'), metadata)
                print(f"✅ New best model at epoch {epoch+1}: score={score:.3f}, G_loss={epoch_g_loss:.3f}, Conn={conn_proxy:.2f}")
            # Also save periodic checkpoints (for recovery)
            if (epoch + 1) % 10 == 0:
                torch.save(generator.state_dict(), os.path.join(save_path, f'generator_epoch{epoch+1}.pth'))
                torch.save(discriminator.state_dict(), os.path.join(save_path, f'discriminator_epoch{epoch+1}.pth'))
                np.save(os.path.join(save_path, 'metadata.npy'), metadata)
                print(f"💾 Periodic checkpoint saved at epoch {epoch+1}")
        # Final save
        np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
        print(f"Training complete. Models saved to {save_path}.")
    except KeyboardInterrupt:
        # Graceful Ctrl-C: persist everything needed to inspect/resume,
        # then re-raise so the caller still sees the interrupt.
        print("\n⚠️ Training interrupted by user. Saving current state...")
        os.makedirs(save_path, exist_ok=True)
        np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
        torch.save(generator.state_dict(), os.path.join(save_path, 'generator_interrupted.pth'))
        torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator_interrupted.pth'))
        np.save(os.path.join(save_path, 'metadata.npy'), metadata)
        print(f"🟡 Interrupted state saved to {save_path}.")
        raise
if __name__ == '__main__':
    # Prefer GPU when available; train_wgan_gp also falls back internally.
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_wgan_gp(device=chosen_device)