# test.py — evaluation/visualization script for the WGAN-GP truss generator.
# (Provenance: Hugging Face repo "genai_test", uploaded by Kyo-Kai,
# commit 16f2d68 — file-page header residue converted to this comment so
# the module parses as valid Python.)
# test.py
import os
import torch
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from models.wgan_gp import Generator
from train_utils.preprocess_s1 import preprocess_s1
# ---------------------------------------------------------------
# Helper: reconstruct and plot real samples
# ---------------------------------------------------------------
def _count_components(adj):
    """Count connected components of an undirected graph.

    ``adj`` is a symmetric 0/1 adjacency matrix (NumPy array). Uses an
    iterative DFS so no graph-library dependency is needed.
    """
    n = adj.shape[0]
    seen = np.zeros(n, dtype=bool)
    components = 0
    for start in range(n):
        if seen[start]:
            continue
        components += 1
        seen[start] = True
        stack = [start]
        while stack:
            u = stack.pop()
            for v in np.where(adj[u] > 0)[0]:
                if not seen[v]:
                    seen[v] = True
                    stack.append(v)
    return components


def compute_graph_metrics(nodes, A_soft, node_mask):
    """
    Evaluate connectivity and degree stats from generated graphs.

    Parameters
    ----------
    nodes : (B, N, F) tensor/array of node features (only its shape is used).
    A_soft : (B, N, N) soft adjacency; binarized at 0.5. As in the original
        implementation, only the strict upper triangle defines edges.
    node_mask : (B, N) mask; entries > 0.5 mark valid (non-padding) nodes.

    Returns
    -------
    dict with mean metrics across the batch:
        - 'connectivity': average number of connected components
          (lower is better; 1.0 means fully connected)
        - 'degree_mean': average node degree over valid nodes
        - 'degree_var' : variance of node degrees over valid nodes
    Samples with no valid nodes contribute 0 to every metric.
    """
    metrics = {'connectivity': [], 'degree_mean': [], 'degree_var': []}
    # Move everything to CPU/NumPy.
    if isinstance(nodes, torch.Tensor):
        nodes = nodes.detach().cpu().numpy()
    if isinstance(A_soft, torch.Tensor):
        A_soft = A_soft.detach().cpu().numpy()
    if isinstance(node_mask, torch.Tensor):
        node_mask = node_mask.detach().cpu().numpy()
    A = (np.asarray(A_soft) > 0.5).astype(int)
    B, N, _ = nodes.shape
    for b in range(B):
        idxs = np.where(node_mask[b] > 0.5)[0]
        if idxs.size == 0:
            # No valid nodes: count every metric as 0 for this sample.
            metrics['connectivity'].append(0)
            metrics['degree_mean'].append(0)
            metrics['degree_var'].append(0)
            continue
        # Restrict adjacency to valid nodes; keep only the strict upper
        # triangle (u < v) — exactly the edges the original pair loop added —
        # then mirror it to get the undirected adjacency.
        upper = np.triu(A[b][np.ix_(idxs, idxs)], k=1)
        adj = upper + upper.T
        degs = adj.sum(axis=1)
        metrics['connectivity'].append(_count_components(adj))
        metrics['degree_mean'].append(float(np.mean(degs)))
        metrics['degree_var'].append(float(np.var(degs)))
    # Return batch means.
    return {k: float(np.mean(v)) if len(v) > 0 else 0.0 for k, v in metrics.items()}
def reconstruct_sample(sample_np, metadata, index=None):
    """
    Reconstruct nodal coordinates, ele_nod, pel from a flattened dataset sample.

    Parameters
    ----------
    sample_np : 1-D array laid out as
        [max_nodes*2 coords | max_elements*2 connectivity | max_elements*4 props].
    metadata : dict providing 'max_nodes' and 'max_elements'.
    index : unused; kept for backward compatibility with existing callers.

    Returns
    -------
    (nodal_coord, ele_nod, pel) — element rows of ``ele_nod`` and ``pel``
    are aligned 1:1.
    """
    max_nodes = metadata['max_nodes']
    max_elements = metadata['max_elements']
    nodal_end = max_nodes * 2
    ele_end = nodal_end + max_elements * 2
    pel_end = ele_end + max_elements * 4
    nodal_flat = sample_np[:nodal_end]
    ele_flat = sample_np[nodal_end:ele_end]
    pel_flat = sample_np[ele_end:pel_end]
    # Estimate actual counts: a node is "real" if its coordinate pair is not
    # all-zero padding.
    nodal_coord = nodal_flat.reshape(-1, 2)
    valid_nodes = np.where(np.abs(nodal_coord).sum(-1) > 0)[0]
    nodal_coord = nodal_coord[valid_nodes]
    # An element is "real" if both endpoint indices are non-negative.
    ele_nod = ele_flat.reshape(-1, 2).astype(int)
    valid_edges = (ele_nod[:, 0] >= 0) & (ele_nod[:, 1] >= 0)
    ele_nod = ele_nod[valid_edges]
    pel = pel_flat.reshape(-1, 4)[valid_edges]
    # Filter to existing node indices.
    # BUG FIX: the same mask must also be applied to `pel`; previously only
    # `ele_nod` was filtered, so property rows drifted out of alignment with
    # the surviving elements.
    in_range = (ele_nod[:, 0] < len(nodal_coord)) & (ele_nod[:, 1] < len(nodal_coord))
    ele_nod = ele_nod[in_range]
    pel = pel[in_range]
    return nodal_coord, ele_nod, pel
def plot_truss(nodal_coord, ele_nod, title="Truss", ax=None, color_nodes="blue"):
    """Draw a truss: scatter the nodes, then draw each member as a line.

    Returns the matplotlib axes drawn on (a new figure/axes pair is created
    when ``ax`` is None). An empty node list yields an untouched axes.
    """
    if ax is None:
        _, ax = plt.subplots(figsize=(5, 4))
    if len(nodal_coord) == 0:
        # Nothing to draw for an empty structure.
        return ax
    # Nodes.
    ax.scatter(nodal_coord[:, 0], nodal_coord[:, 1], c=color_nodes, s=40, label="nodes")
    # Members: skip any whose endpoints fall outside the node table.
    n_nodes = len(nodal_coord)
    for member in ele_nod:
        a, b = member[0], member[1]
        if not (0 <= a < n_nodes and 0 <= b < n_nodes):
            continue
        xa, ya = nodal_coord[a]
        xb, yb = nodal_coord[b]
        ax.plot([xa, xb], [ya, yb], "k-", lw=1.2, alpha=0.7)
    ax.set_aspect("equal")
    ax.set_title(title)
    ax.grid(alpha=0.3)
    return ax
# ---------------------------------------------------------------
# Main evaluation + visualization
# ---------------------------------------------------------------
def evaluate_and_visualize(checkpoints_path, n_samples=6, device="cpu"):
    """
    Load the trained generator, generate samples, plot real vs generated trusses,
    show loss curves, and basic topology metrics.

    Parameters
    ----------
    checkpoints_path : str
        Directory containing "metadata.npy", "generator.pth" and
        "epoch_losses.npy" as written by the training run.
    n_samples : int
        Number of real/generated structures to visualize (one column each).
    device : str
        torch device string, e.g. "cpu" or "cuda".
    """
    device = torch.device(device)
    # ---- Load model + metadata ----
    # metadata.npy holds a pickled dict (0-d object array), hence
    # allow_pickle=True and .item().
    metadata = np.load(os.path.join(checkpoints_path, "metadata.npy"), allow_pickle=True).item()
    generator_state = torch.load(os.path.join(checkpoints_path, "generator.pth"), map_location=device)
    epoch_losses = np.load(os.path.join(checkpoints_path, "epoch_losses.npy"), allow_pickle=True)
    # NOTE(review): latent_dim=128 and cond_dim=4 are hard-coded here and must
    # match the training configuration — confirm against the training script.
    generator = Generator(latent_dim=128, nmax=metadata["max_nodes"], cond_dim=4).to(device)
    generator.load_state_dict(generator_state)
    generator.eval()
    print(f"✅ Generator loaded. Using max_nodes={metadata['max_nodes']}, max_elements={metadata['max_elements']}")
    # ---- Load some real samples ----
    real_data, _ = preprocess_s1(normalize_type=None)
    real_samples = real_data[:n_samples]
    # ---- Generate fake trusses ----
    with torch.no_grad():
        z = torch.randn(n_samples, 128, device=device)
        # Conditioning: average-case normalized values
        # (presumably the 4 entries mirror cond_dim=4 — verify semantics
        # against the training-time conditioning).
        cond_vals = [1.0, 1.0, 0.5, 0.5]
        cond = torch.tensor([cond_vals] * n_samples, dtype=torch.float32, device=device)
        # Generator returns node coords, a per-node validity mask, a soft
        # adjacency, plus two outputs unused here — TODO confirm their
        # meaning against models/wgan_gp.py.
        nodes, node_mask, A_soft, _, _ = generator(z, cond)
        nodes = nodes.cpu().numpy()
        node_mask = node_mask.cpu().numpy()
        A_soft = A_soft.cpu().numpy()
    # ---- Build graph structures from generator outputs ----
    fake_nodal_coords, fake_ele_nods = [], []
    for b in range(n_samples):
        valid_nodes = np.where(node_mask[b] > 0.5)[0]
        coords = nodes[b, valid_nodes]
        edges = []
        # Edge endpoints are re-based onto the compacted valid-node list so
        # they index directly into `coords` when plotted.
        for i in range(len(valid_nodes)):
            for j in range(i + 1, len(valid_nodes)):
                if A_soft[b, valid_nodes[i], valid_nodes[j]] > 0.5:
                    edges.append([i, j])
        fake_nodal_coords.append(coords)
        fake_ele_nods.append(np.array(edges, dtype=int))
    # ---- Plot real vs generated ----
    # Top row: real samples; bottom row: generated samples (offset by
    # n_samples in the flattened axes array).
    fig, axes = plt.subplots(2, n_samples, figsize=(3 * n_samples, 6))
    axes = axes.flatten()
    for i in range(n_samples):
        nodal_real, ele_nod_real, _ = reconstruct_sample(real_samples[i], metadata)
        plot_truss(nodal_real, ele_nod_real, f"Real {i+1}", ax=axes[i], color_nodes="tab:blue")
        plot_truss(fake_nodal_coords[i], fake_ele_nods[i], f"Gen {i+1}", ax=axes[i + n_samples], color_nodes="tab:orange")
    plt.tight_layout()
    plt.show()
    # ---- Plot training losses ----
    if isinstance(epoch_losses, np.ndarray) and epoch_losses.size > 0:
        # Convert to list of dicts if needed
        # NOTE(review): the condition looks inverted — converting to a plain
        # list only when elements are NOT dicts does not help the dict access
        # below; confirm the intended format of epoch_losses.npy.
        if not isinstance(epoch_losses[0], dict):
            epoch_losses = list(epoch_losses)
    else:
        print("⚠️ No training losses found.")
        epoch_losses = []
    if len(epoch_losses) > 0:
        epochs = [e["epoch"] for e in epoch_losses]
        d_losses = [e["d_loss"] for e in epoch_losses]
        g_losses = [e["g_loss"] for e in epoch_losses]
        plt.figure(figsize=(9, 4))
        plt.plot(epochs, d_losses, "r-", label="Discriminator Loss")
        plt.plot(epochs, g_losses, "b-", label="Generator Loss")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.title("Training Loss Curves (WGAN-GP)")
        plt.grid(alpha=0.3)
        plt.legend()
        plt.show()
        final = epoch_losses[-1]
        print(f"📉 Final Epoch {final['epoch']}: D={final['d_loss']:.3f}, G={final['g_loss']:.3f}")
    else:
        print("⚠️ No losses to plot.")
    # ---- Compute metrics for generated batch ----
    # Only a small subset is scored; tensors are rebuilt because
    # compute_graph_metrics accepts either tensors or arrays.
    batch_size = min(5, n_samples)
    nodes_batch = torch.tensor(nodes[:batch_size], dtype=torch.float32)
    node_mask_batch = torch.tensor(node_mask[:batch_size], dtype=torch.float32)
    A_soft_batch = torch.tensor(A_soft[:batch_size], dtype=torch.float32)
    metrics = compute_graph_metrics(nodes_batch, A_soft_batch, node_mask_batch)
    print(f"📊 Generated graph metrics:\n{metrics}")
# ---------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------
if __name__ == "__main__":
    # Run evaluation against the default checkpoint directory.
    evaluate_and_visualize(checkpoints_path="models/checkpoints", n_samples=6)