|
|
|
|
|
import os
|
|
|
import torch
|
|
|
import torch.optim as optim
|
|
|
import numpy as np
|
|
|
from torch.utils.data import DataLoader, Dataset
|
|
|
import torch.nn.functional as F
|
|
|
import math
|
|
|
|
|
|
from models.wgan_gp import Generator, Discriminator, gradient_penalty, aux_losses
|
|
|
from train_utils.preprocess_s1 import preprocess_s1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def build_adj_from_eleblock_vectorized(ele_block, nmax_batch):
    """
    Vectorized adjacency build from a padded ele_nod block.

    ele_block: float tensor [B, max_elems*2] (padded with -1 for invalid)
    nmax_batch: int, number of nodes to consider for this batch (<= global nmax)

    Returns:
        A_real: float tensor [B, nmax_batch, nmax_batch] with 0/1 entries,
        symmetric and with a zero diagonal.
    """
    B = ele_block.size(0)
    device = ele_block.device

    # Reinterpret the flat block as (row, col) node-index pairs per element.
    pairs = ele_block.view(B, -1, 2).long()
    rows = pairs[..., 0]
    cols = pairs[..., 1]

    # Padding entries are -1; also drop any index outside this batch's node count.
    valid = (rows >= 0) & (cols >= 0) & (rows < nmax_batch) & (cols < nmax_batch)

    # Single construction path (the original duplicated the zero-matrix build
    # in both branches of an if/else); behavior is unchanged.
    A = torch.zeros(B, nmax_batch, nmax_batch, device=device)
    if valid.any():
        bidx = torch.arange(B, device=device).unsqueeze(1).expand_as(rows)
        b_sel = bidx[valid]
        r_sel = rows[valid]
        c_sel = cols[valid]
        # Scatter both directions so the adjacency is symmetric.
        A[b_sel, r_sel, c_sel] = 1.0
        A[b_sel, c_sel, r_sel] = 1.0
        # Remove self-loops (a degenerate element like (i, i) would otherwise
        # set a diagonal entry).
        idx = torch.arange(nmax_batch, device=device)
        A[:, idx, idx] = 0.0
    return A
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TrussFlatDataset(Dataset):
    """
    Dataset over preprocess_s1() output: padded flat sample vectors, plus
    the true (n_nodes, n_elems) size carried alongside every sample.
    """

    def __init__(self, data_array: np.ndarray, metadata: dict):
        super().__init__()
        self.data = torch.tensor(data_array, dtype=torch.float32)
        self.meta = metadata
        # Global padding dimensions shared by all samples.
        self.max_nodes = int(self.meta['max_nodes'])
        self.max_elems = int(self.meta['max_elements'])
        # Per-sample (n_nodes, n_elems) pairs, one row per sample.
        self.size_info = torch.tensor(self.meta['size_info'], dtype=torch.long)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        n_nodes, n_elems = self.size_info[idx].tolist()
        return self.data[idx], n_nodes, n_elems
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def make_length_buckets(size_info: np.ndarray, bucket_width_nodes=16, bucket_width_elems=32):
    """
    Bin samples by quantized (node count, element count) so that batches
    drawn from one bin need far less padding.

    Returns: list of lists of indices (buckets), ordered by bucket key.
    """
    buckets = {}
    for i, (n, e) in enumerate(size_info):
        # Floor each size to its bucket boundary.
        key = ((int(n) // bucket_width_nodes) * bucket_width_nodes,
               (int(e) // bucket_width_elems) * bucket_width_elems)
        buckets.setdefault(key, []).append(i)

    # Lexicographic sort on the (node_bucket, elem_bucket) key gives a
    # stable, deterministic bucket order.
    return [buckets[key] for key in sorted(buckets)]
|
|
|
|
|
|
|
|
|
def bucketed_batch_sampler(size_info: np.ndarray, batch_size=16,
                           bucket_width_nodes=16, bucket_width_elems=32):
    """
    Generate lists of sample indices; every batch is drawn from a single
    size bucket so per-batch padding stays small.
    """
    for bucket in make_length_buckets(size_info, bucket_width_nodes, bucket_width_elems):
        bucket = np.array(bucket)
        # Shuffle within the bucket so batch composition varies per call.
        perm = np.random.permutation(len(bucket))
        bucket = bucket[perm]
        # Emit fixed-size slices; the final batch may be smaller.
        for start in range(0, len(bucket), batch_size):
            yield bucket[start:start + batch_size].tolist()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def collate_truss(batch, max_nodes_global, max_elems_global):
    """
    Collate a list of (flat, n_nodes, n_elems) samples.

    Crops the node and element blocks to this batch's maximum sizes
    (dramatically smaller than the global padding) and returns the node
    coordinates, a node mask, the real adjacency, the conditioning vector
    and the batch node count.
    """
    flats, n_nodes_list, n_elems_list = zip(*batch)
    B = len(flats)

    # Per-batch maxima determine the crop size.
    nmax_b = max(n_nodes_list)
    emax_b = max(n_elems_list)

    flats = torch.stack(flats, 0)

    # Flat layout: [node coords | element node-index pairs | ...].
    node_end = max_nodes_global * 2
    ele_end = node_end + max_elems_global * 2

    nodes_full = flats[:, :node_end].view(B, max_nodes_global, 2)
    ele_block_full = flats[:, node_end:ele_end]

    nodes = nodes_full[:, :nmax_b, :]
    ele_block = ele_block_full[:, :emax_b * 2]

    # A node counts as "active" when any of its coordinates is non-zero.
    # NOTE(review): a real node sitting exactly at the origin would be masked
    # out by this test — confirm preprocess_s1 never emits (0, 0) nodes.
    node_mask = (nodes.abs().sum(-1) > 0).float()

    A_real = build_adj_from_eleblock_vectorized(ele_block, nmax_b)

    # Keep only edges whose two endpoints are both active.
    pair_mask = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
    A_real = A_real * pair_mask

    # Clear the diagonal (defensive; the builder already removes self-loops).
    diag = torch.arange(nmax_b, device=A_real.device)
    A_real[:, diag, diag] = 0.0

    # Conditioning vector: normalized true sizes plus two random scalars in
    # [0.2, 1.0) standing in for height / spacing conditions.
    n_nodes_norm = torch.tensor([n / max_nodes_global for n in n_nodes_list], dtype=torch.float32, device=flats.device).unsqueeze(1)
    n_elems_norm = torch.tensor([e / max_elems_global for e in n_elems_list], dtype=torch.float32, device=flats.device).unsqueeze(1)
    height = torch.rand(B, 1, device=flats.device) * 0.8 + 0.2
    spacing = torch.rand(B, 1, device=flats.device) * 0.8 + 0.2
    cond = torch.cat([n_nodes_norm, n_elems_norm, height, spacing], dim=1)

    return nodes, node_mask, A_real, cond, nmax_b
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def connectivity_penalty(fake_nodes_mask, fake_A_soft):
    """
    Penalty for isolated active nodes: the fraction of node slots that are
    active (mask > 0.5) yet have (near-)zero soft degree. Encourages edges
    incident to active nodes.

    fake_nodes_mask: [B, N]
    fake_A_soft: [B, N, N]
    Returns scalar tensor.
    """
    # Soft degree of every node under the generated adjacency.
    degree = fake_A_soft.sum(dim=-1)
    is_active = (fake_nodes_mask > 0.5).float()
    # Isolated := active but with essentially no incident edge mass.
    isolated = is_active * (degree < 1e-3).float()
    return isolated.mean()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def train_wgan_gp(device='cuda',
                  n_epochs=100, batch_size=16, latent_dim=128,
                  n_critic=5, lr_g=2e-4, lr_d=1e-4,
                  lambda_gp=10, lambda_connect=5.0,
                  save_path='models/checkpoints',
                  bucket_width_nodes=16, bucket_width_elems=32):
    """
    Train WGAN-GP with:
      - vectorized adjacency build
      - length-bucketed batching
      - dynamic per-batch padding & masking

    Parameters
    ----------
    device : 'cuda' or 'cpu'; silently falls back to CPU if CUDA is absent.
    n_epochs, batch_size, latent_dim : training-size hyper-parameters.
    n_critic : discriminator updates per generator update (WGAN-GP convention).
    lr_g, lr_d : Adam learning rates for generator / discriminator.
    lambda_gp : gradient-penalty weight passed to gradient_penalty().
    lambda_connect : weight of the isolated-node penalty (<= 0 disables it).
    save_path : checkpoint directory (created if missing).
    bucket_width_nodes, bucket_width_elems : bucket granularity for batching.
    """
    # Fall back to CPU when CUDA is unavailable, regardless of the request.
    if not torch.cuda.is_available():
        print("⚠️ CUDA not available, training on CPU will be slow.")
        device = 'cpu'
    device = torch.device(device)

    # Load padded flat samples + metadata produced by the preprocessing step.
    data_array, metadata = preprocess_s1()
    dataset = TrussFlatDataset(data_array, metadata)

    # Materialize all bucketed batches once; their order is re-shuffled per
    # epoch below (batch composition within a bucket is fixed for the run).
    size_info = metadata['size_info']
    sampler = list(bucketed_batch_sampler(size_info,
                                          batch_size=batch_size,
                                          bucket_width_nodes=bucket_width_nodes,
                                          bucket_width_elems=bucket_width_elems))

    max_nodes_global = metadata['max_nodes']
    max_elems_global = metadata['max_elements']

    # cond = [n_nodes_norm, n_elems_norm, height, spacing] (see collate_truss).
    cond_dim = 4
    generator = Generator(latent_dim=latent_dim, nmax=max_nodes_global, cond_dim=cond_dim).to(device)
    discriminator = Discriminator(nmax=max_nodes_global, cond_dim=cond_dim).to(device)

    # betas=(0.0, 0.99) is the usual WGAN-GP Adam setting (no first-moment momentum).
    opt_g = optim.Adam(generator.parameters(), lr=lr_g, betas=(0.0, 0.99))
    opt_d = optim.Adam(discriminator.parameters(), lr=lr_d, betas=(0.0, 0.99))

    best_score = float('-inf')
    # EMA of the generator loss; tracked below but not used for selection.
    g_loss_ema = 0.0
    epoch_losses = []
    os.makedirs(save_path, exist_ok=True)

    print(f"Starting training on {len(dataset)} samples with {len(sampler)} bucketed batches per epoch...")

    try:
        for epoch in range(n_epochs):
            epoch_d_loss, epoch_g_loss = 0.0, 0.0

            # Visit the pre-built batches in a fresh random order each epoch.
            perm_batches = np.random.permutation(len(sampler)).tolist()

            for b_id in perm_batches:
                idxs = sampler[b_id]
                batch = [dataset[i] for i in idxs]

                # Collate crops tensors to this batch's max node/element counts.
                nodes_real, node_mask_real, A_real, cond, nmax_b = collate_truss(
                    batch, max_nodes_global, max_elems_global
                )
                nodes_real = nodes_real.to(device)
                node_mask_real = node_mask_real.to(device)
                A_real = A_real.to(device)
                cond = cond.to(device)
                bs = nodes_real.size(0)

                # ---- Critic (discriminator) updates: n_critic per G step ----
                for _ in range(n_critic):
                    z = torch.randn(bs, latent_dim, device=device)
                    nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)

                    # Crop generator output (padded to the global nmax) down to
                    # this batch's node count before feeding the critic.
                    nodes_f_b = nodes_f[:, :nmax_b, :]
                    nmask_f_b = nmask_f[:, :nmax_b]
                    A_f_b = A_f[:, :nmax_b, :nmax_b]
                    elog_f_b = elog_f[:, :nmax_b, :nmax_b]

                    d_real = discriminator(nodes_real, A_real, node_mask_real, cond)
                    # detach(): no generator gradients during the critic step.
                    d_fake = discriminator(nodes_f_b.detach(), A_f_b.detach(), nmask_f_b.detach(), cond)

                    gp = gradient_penalty(
                        discriminator,
                        (nodes_real, node_mask_real, A_real, cond),
                        (nodes_f_b.detach(), nmask_f_b.detach(), A_f_b.detach(), cond),
                        lambda_gp
                    )
                    # Wasserstein critic loss: maximize d_real - d_fake, plus GP.
                    loss_d = -(d_real.mean() - d_fake.mean()) + gp
                    opt_d.zero_grad(set_to_none=True)
                    loss_d.backward()
                    opt_d.step()

                # ---- Generator update ----
                z = torch.randn(bs, latent_dim, device=device)
                nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
                nodes_f_b = nodes_f[:, :nmax_b, :]
                nmask_f_b = nmask_f[:, :nmax_b]
                A_f_b = A_f[:, :nmax_b, :nmax_b]
                elog_f_b = elog_f[:, :nmax_b, :nmax_b]

                d_fake = discriminator(nodes_f_b, A_f_b, nmask_f_b, cond)
                adv_loss = -d_fake.mean()

                # Auxiliary supervised losses against the real batch.
                L_nodes, L_mask, L_edges = aux_losses(
                    nodes_f_b, nmask_f_b, elog_f_b,
                    nodes_real, node_mask_real, A_real
                )

                # Fixed weights; tune if aux terms dominate the adversarial signal.
                loss_g = adv_loss + 10 * L_nodes + 1 * L_mask + 5 * L_edges

                if lambda_connect > 0:
                    loss_g += lambda_connect * connectivity_penalty(nmask_f_b, A_f_b)

                opt_g.zero_grad(set_to_none=True)
                loss_g.backward()
                opt_g.step()

                epoch_d_loss += loss_d.item()
                epoch_g_loss += loss_g.item()

            # ---- End-of-epoch bookkeeping ----
            num_batches = len(sampler)
            epoch_d_loss /= num_batches
            epoch_g_loss /= num_batches
            epoch_losses.append({'epoch': epoch + 1,
                                 'd_loss': epoch_d_loss,
                                 'g_loss': epoch_g_loss})

            # Diagnostics computed from the LAST batch's generator output.
            # NOTE(review): A_f_b / nmax_b are undefined if `sampler` is empty;
            # a degenerate dataset would raise NameError here — confirm upstream.
            with torch.no_grad():
                deg_mean = float(A_f_b.mean().item() * nmax_b)
                conn_proxy = float((A_f_b.sum(dim=-1) > 0.5).float().mean().item())

            print(f"[{epoch+1}/{n_epochs}] D: {epoch_d_loss:.3f} | G: {epoch_g_loss:.3f} "
                  f"| N_b:{nmax_b:3d} | DegMean~{deg_mean:.2f} | Conn~{conn_proxy:.2f}")

            # Exponential moving average of the generator loss.
            ema_beta = 0.9
            if epoch == 0:
                g_loss_ema = epoch_g_loss
            else:
                g_loss_ema = ema_beta * g_loss_ema + (1 - ema_beta) * epoch_g_loss

            # Model-selection score: prefer small |G loss| and a high
            # connectivity proxy (fraction of nodes with some edge mass).
            score = -abs(epoch_g_loss) + 0.5 * conn_proxy

            if score > best_score:
                best_score = score
                torch.save(generator.state_dict(), os.path.join(save_path, f'generator_best.pth'))
                torch.save(discriminator.state_dict(), os.path.join(save_path, f'discriminator_best.pth'))
                np.save(os.path.join(save_path, 'metadata.npy'), metadata)
                print(f"✅ New best model at epoch {epoch+1}: score={score:.3f}, G_loss={epoch_g_loss:.3f}, Conn={conn_proxy:.2f}")

            # Periodic checkpoint every 10 epochs (best checkpoints kept separately).
            if (epoch + 1) % 10 == 0:
                torch.save(generator.state_dict(), os.path.join(save_path, f'generator_epoch{epoch+1}.pth'))
                torch.save(discriminator.state_dict(), os.path.join(save_path, f'discriminator_epoch{epoch+1}.pth'))
                np.save(os.path.join(save_path, 'metadata.npy'), metadata)
                print(f"💾 Periodic checkpoint saved at epoch {epoch+1}")

        np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
        print(f"Training complete. Models saved to {save_path}.")

    except KeyboardInterrupt:
        # Best-effort save of the current state on Ctrl-C, then re-raise so
        # the caller still sees the interrupt.
        print("\n⚠️ Training interrupted by user. Saving current state...")
        os.makedirs(save_path, exist_ok=True)
        np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
        torch.save(generator.state_dict(), os.path.join(save_path, 'generator_interrupted.pth'))
        torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator_interrupted.pth'))
        np.save(os.path.join(save_path, 'metadata.npy'), metadata)
        print(f"🟡 Interrupted state saved to {save_path}.")
        raise
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    # Prefer GPU when available; train_wgan_gp also falls back to CPU itself.
    train_wgan_gp(device='cuda' if torch.cuda.is_available() else 'cpu')
|
|
|
|