import torch
import torch.nn as nn
import torch.nn.functional as F


class Generator(nn.Module):
    """
    Generator for WGAN-GP with structured output heads.
    Outputs continuous node coordinates, node existence logits, and edge logits.
    NOTE: keeps a fixed `nmax` head; train.py dynamically slices the outputs
    per batch.
    """
    def __init__(self, latent_dim=128, nmax=121, cond_dim=4):
        super().__init__()
        d = 512
        self.fc = nn.Sequential(
            nn.Linear(latent_dim + cond_dim, d),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(d, d),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.out_nodes = nn.Linear(d, nmax * 2)      # (x, y) per node
        self.out_nmask = nn.Linear(d, nmax)          # node-existence logits
        self.out_edges = nn.Linear(d, nmax * nmax)   # dense edge logits
        self.nmax = nmax

    def forward(self, z, cond):
        h = self.fc(torch.cat([z, cond], dim=1))
        nodes = torch.tanh(self.out_nodes(h)).view(-1, self.nmax, 2)
        nlog = self.out_nmask(h).view(-1, self.nmax)
        elog = self.out_edges(h).view(-1, self.nmax, self.nmax)

        # Symmetrize edge logits and suppress self-loops. masked_fill (out of
        # place) is preferred over indexed in-place assignment, which can trip
        # autograd if the symmetrized tensor is reused elsewhere in the graph.
        elog = 0.5 * (elog + elog.transpose(-1, -2))
        eye = torch.eye(self.nmax, dtype=torch.bool, device=elog.device)
        elog = elog.masked_fill(eye, float('-inf'))

        # Soft adjacency: edge probability gated by both endpoints' existence.
        node_mask = torch.sigmoid(nlog)
        m = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
        A_soft = torch.sigmoid(elog) * m
        return nodes, node_mask, A_soft, elog, nlog
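
# Minimal usage sketch (illustrative only, not taken from train.py): shows the
# per-batch dynamic slicing the class docstring refers to, with an assumed
# batch size of 8 and an assumed active node count N_b = 30.
#
#   G = Generator()
#   z, cond = torch.randn(8, 128), torch.rand(8, 4)
#   nodes, node_mask, A_soft, elog, nlog = G(z, cond)
#   N_b = 30
#   nodes, node_mask = nodes[:, :N_b], node_mask[:, :N_b]
#   A_soft, elog = A_soft[:, :N_b, :N_b], elog[:, :N_b, :N_b]
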
class Discriminator(nn.Module):
    """
    Discriminator (critic) for WGAN-GP with a light graph-aware architecture.
    Uses one-hop message passing with masked mean pooling.
    Accepts dynamically sliced [B, N_b, ...] tensors (the train loop slices).
    """
    def __init__(self, nmax=121, cond_dim=4, hid=256):
        super().__init__()
        self.node_mlp = nn.Sequential(
            nn.Linear(2, hid), nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hid, hid), nn.LeakyReLU(0.2, inplace=True),
        )
        # NOTE: edge_mlp is defined but never called in forward(); its
        # parameters are still registered with the module, so remove it if the
        # dead weights are unwanted.
        self.edge_mlp = nn.Sequential(
            nn.Linear(2 * hid + 1, hid), nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hid, hid), nn.LeakyReLU(0.2, inplace=True),
        )
        self.head = nn.Sequential(
            nn.Linear(2 * hid + 1 + cond_dim, hid),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hid, 1),
        )
        self.nmax = nmax

    def forward(self, nodes, A_soft, node_mask, cond):
        """
        nodes:     [B, N, 2]
        A_soft:    [B, N, N]
        node_mask: [B, N] (float in [0, 1])
        cond:      [B, C]
        """
        m = node_mask.unsqueeze(-1)                    # [B, N, 1]
        h = self.node_mlp(nodes) * m                   # zero features of absent nodes
        h_msg = torch.matmul(A_soft, h)                # one-hop message passing
        edge_stat = A_soft.mean(dim=-1, keepdim=True)  # per-node mean soft degree
        pair = torch.cat([h, h_msg, edge_stat], dim=-1) * m

        # Masked mean pooling; the clamp guards against an all-empty node mask.
        denom = node_mask.sum(dim=1, keepdim=True).clamp_min(1e-6)
        node_feat = pair.sum(dim=1) / denom
        x = torch.cat([node_feat, cond], dim=-1)
        return self.head(x)
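
# Minimal usage sketch (illustrative; `nodes`, `A_soft`, `node_mask`, `cond`
# are assumed to come from the Generator example above):
#
#   D = Discriminator()
#   score = D(nodes, A_soft, node_mask, cond)   # [B, 1] critic score
#
# Sliced [B, N_b, ...] inputs work unchanged, since no layer here depends on
# the node dimension.
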
def gradient_penalty(discriminator, real_data, fake_data, lambda_gp=10.0):
    """
    Gradient penalty for structured inputs.
    real_data: (nodes, node_mask, A_soft, cond)
    fake_data: (nodes, node_mask, A_soft, cond)
    Each component is interpolated with its own per-sample alpha, and the
    penalty is applied to the norm of the concatenated gradients over all
    four inputs.
    """
    device = real_data[0].device
    B = real_data[0].size(0)

    alpha_nodes = torch.rand(B, 1, 1, device=device)
    alpha_mask = torch.rand(B, 1, device=device)
    alpha_adj = torch.rand(B, 1, 1, device=device)
    alpha_cond = torch.rand(B, 1, device=device)

    nodes_i = (alpha_nodes * real_data[0] + (1 - alpha_nodes) * fake_data[0]).requires_grad_(True)
    mask_i = (alpha_mask * real_data[1] + (1 - alpha_mask) * fake_data[1]).requires_grad_(True)
    adj_i = (alpha_adj * real_data[2] + (1 - alpha_adj) * fake_data[2]).requires_grad_(True)
    cond_i = (alpha_cond * real_data[3] + (1 - alpha_cond) * fake_data[3]).requires_grad_(True)

    d_interpolates = discriminator(nodes_i, adj_i, mask_i, cond_i)
    grads = torch.autograd.grad(
        outputs=d_interpolates,
        inputs=[nodes_i, mask_i, adj_i, cond_i],
        grad_outputs=torch.ones_like(d_interpolates),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )
    # Flatten each input's gradient, concatenate, and penalize the joint norm.
    grads = torch.cat([g.reshape(B, -1) for g in grads], dim=1)
    gp = ((grads.norm(2, dim=1) - 1) ** 2).mean()
    return lambda_gp * gp
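
# Typical critic-step usage (a sketch; the `*_r`/`*_f` names are assumptions,
# not identifiers from train.py):
#
#   d_real = discriminator(nodes_r, A_r, mask_r, cond).mean()
#   d_fake = discriminator(nodes_f, A_f, mask_f, cond).mean()
#   gp = gradient_penalty(discriminator,
#                         (nodes_r, mask_r, A_r, cond),
#                         (nodes_f, mask_f, A_f, cond))
#   loss_D = d_fake - d_real + gp   # WGAN-GP critic objective
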
def aux_losses(nodes, node_mask, elog, nodes_real, node_mask_real, A_real):
    """
    Masked auxiliary supervised reconstruction losses.
    Returns (L_nodes, L_mask, L_edges):
      L_nodes: masked MSE on node coordinates (real nodes only)
      L_mask:  BCE on node existence
      L_edges: BCE on edge logits over the masked strict upper triangle
    """
    m_node = node_mask_real
    m_edges = (m_node.unsqueeze(-1) * m_node.unsqueeze(-2)).bool()

    # Coordinate loss, averaged over real nodes only.
    L_nodes = ((nodes - nodes_real) ** 2).sum(dim=-1)
    L_nodes = (L_nodes * m_node).sum() / (m_node.sum() + 1e-6)

    # Existence loss. node_mask is already sigmoid(nlog), so the clamped
    # logit() recovers the logits; passing nlog directly would avoid the
    # round-trip.
    L_mask = F.binary_cross_entropy_with_logits(
        torch.logit(node_mask.clamp(1e-6, 1 - 1e-6)), node_mask_real, reduction='mean')

    # Edge loss over pairs where both endpoints exist, upper triangle only
    # (the matrix is symmetric and the diagonal holds -inf self-loop logits).
    triu = torch.triu(torch.ones_like(A_real), diagonal=1).bool()
    mask_tri = m_edges & triu
    L_edges = F.binary_cross_entropy_with_logits(
        elog[mask_tri], A_real.float()[mask_tri], reduction='mean')
    return L_nodes, L_mask, L_edges
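
if __name__ == "__main__":
    # Smoke test: an end-to-end shape check under the default hyperparameters
    # (latent_dim=128, cond_dim=4, nmax=121). Illustrative only; the real
    # training loop lives in train.py, and the "real" batch below is random.
    B, N = 4, 121
    G, D = Generator(), Discriminator()
    z, cond = torch.randn(B, 128), torch.rand(B, 4)
    nodes, node_mask, A_soft, elog, nlog = G(z, cond)
    print('critic score:', D(nodes, A_soft, node_mask, cond).shape)  # [B, 1]

    # Random stand-ins for a real batch, with hard masks and a symmetric,
    # zero-diagonal adjacency.
    nodes_r = torch.rand(B, N, 2) * 2 - 1
    mask_r = (torch.rand(B, N) > 0.5).float()
    A_r = torch.triu((torch.rand(B, N, N) > 0.9).float(), diagonal=1)
    A_r = A_r + A_r.transpose(-1, -2)

    gp = gradient_penalty(
        D,
        (nodes_r, mask_r, A_r, cond),
        (nodes.detach(), node_mask.detach(), A_soft.detach(), cond),
    )
    L_n, L_m, L_e = aux_losses(nodes, node_mask, elog, nodes_r, mask_r, A_r)
    print('gp:', gp.item(), '| aux:', L_n.item(), L_m.item(), L_e.item())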