Kyo-Kai committed on
Commit
d824e43
·
verified ·
1 Parent(s): 63f5740

Upload 12 files

Browse files
models/wgan_gp.py CHANGED
@@ -7,8 +7,9 @@ class Generator(nn.Module):
7
  """
8
  Generator for WGAN-GP with structured output heads.
9
  Outputs continuous node coordinates, node existence logits, and edge logits.
 
10
  """
11
- def __init__(self, latent_dim=128, nmax=121, cond_dim=4): # n_nodes, n_edges, height, spacing
12
  super(Generator, self).__init__()
13
  d = 512
14
  self.fc = nn.Sequential(
@@ -31,7 +32,8 @@ class Generator(nn.Module):
31
 
32
  # enforce symmetry and zero diagonal
33
  elog = 0.5 * (elog + elog.transpose(-1, -2))
34
- elog[:, torch.arange(self.nmax), torch.arange(self.nmax)] = 0
 
35
  node_mask = torch.sigmoid(nlog)
36
  m = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
37
  A_soft = torch.sigmoid(elog) * m
@@ -41,26 +43,42 @@ class Generator(nn.Module):
41
  class Discriminator(nn.Module):
42
  """
43
  Discriminator for WGAN-GP with a light graph-aware critic.
 
 
44
  """
45
  def __init__(self, nmax=121, cond_dim=4, hid=256):
46
  super(Discriminator, self).__init__()
47
- self.node_mlp = nn.Sequential(nn.Linear(2, hid), nn.LeakyReLU(0.2, True),
48
- nn.Linear(hid, hid), nn.LeakyReLU(0.2, True))
49
- self.edge_mlp = nn.Sequential(nn.Linear(2*hid+1, hid), nn.LeakyReLU(0.2, True),
50
- nn.Linear(hid, hid), nn.LeakyReLU(0.2, True))
51
- self.head = nn.Sequential(nn.Linear(2*hid + 1 + cond_dim, hid), # note +1 fix
52
- nn.LeakyReLU(0.2, True),
53
- nn.Linear(hid, 1))
 
 
 
 
 
 
54
  self.nmax = nmax
55
 
56
  def forward(self, nodes, A_soft, node_mask, cond):
57
- # nodes: [B,N,2], A_soft: [B,N,N], node_mask: [B,N]
 
 
 
 
 
58
  B, N, _ = nodes.shape
59
- m = node_mask.unsqueeze(-1) # [B,N,1]
60
- h = self.node_mlp(nodes) * m
61
- h_msg = torch.matmul(A_soft, h) # [B,N,hid]
62
- pair = torch.cat([h, h_msg, A_soft.mean(dim=-1, keepdim=True)], dim=-1) * m
63
- node_feat = (pair.sum(dim=1) / (node_mask.sum(dim=1, keepdim=True)+1e-6))
 
 
 
64
  x = torch.cat([node_feat, cond], dim=-1)
65
  return self.head(x)
66
 
@@ -68,19 +86,21 @@ class Discriminator(nn.Module):
68
  def gradient_penalty(discriminator, real_data, fake_data, lambda_gp=10):
69
  """
70
  Computes gradient penalty for structured inputs.
 
 
71
  """
72
  device = real_data[0].device
73
  B = real_data[0].size(0)
74
  # Interpolation coefficients
75
  alpha_nodes = torch.rand(B, 1, 1, device=device)
76
- alpha_mask = torch.rand(B, 1, device=device)
77
- alpha_adj = torch.rand(B, 1, 1, device=device)
78
- alpha_cond = torch.rand(B, 1, device=device)
79
 
80
  nodes_i = (alpha_nodes * real_data[0] + (1 - alpha_nodes) * fake_data[0]).requires_grad_(True)
81
- mask_i = (alpha_mask * real_data[1] + (1 - alpha_mask) * fake_data[1]).requires_grad_(True)
82
- adj_i = (alpha_adj * real_data[2] + (1 - alpha_adj) * fake_data[2]).requires_grad_(True)
83
- cond_i = (alpha_cond * real_data[3] + (1 - alpha_cond) * fake_data[3]).requires_grad_(True)
84
 
85
  d_interpolates = discriminator(nodes_i, adj_i, mask_i, cond_i)
86
  fake = torch.ones_like(d_interpolates, device=device)
@@ -105,10 +125,14 @@ def aux_losses(nodes, node_mask, elog, nodes_real, node_mask_real, A_real):
105
  m_edges = (m_node.unsqueeze(-1) * m_node.unsqueeze(-2)).bool()
106
  L_nodes = ((nodes - nodes_real)**2).sum(dim=-1)
107
  L_nodes = (L_nodes * m_node).sum() / (m_node.sum() + 1e-6)
 
 
108
  L_mask = F.binary_cross_entropy_with_logits(
109
- torch.logit(node_mask.clamp(1e-6,1-1e-6)), node_mask_real, reduction='mean')
 
110
  triu = torch.triu(torch.ones_like(A_real), diagonal=1).bool()
111
  mask_tri = (m_edges & triu)
 
112
  L_edges = F.binary_cross_entropy_with_logits(
113
  elog[mask_tri], (A_real*1.0)[mask_tri], reduction='mean')
114
  return L_nodes, L_mask, L_edges
 
7
  """
8
  Generator for WGAN-GP with structured output heads.
9
  Outputs continuous node coordinates, node existence logits, and edge logits.
10
+ NOTE: Keeps a fixed 'nmax' head, but we will DYNAMICALLY SLICE per batch in train.py.
11
  """
12
+ def __init__(self, latent_dim=128, nmax=121, cond_dim=4):
13
  super(Generator, self).__init__()
14
  d = 512
15
  self.fc = nn.Sequential(
 
32
 
33
  # enforce symmetry and zero diagonal
34
  elog = 0.5 * (elog + elog.transpose(-1, -2))
35
+ diag_idx = torch.arange(self.nmax, device=elog.device)
36
+ elog[:, diag_idx, diag_idx] = float('-inf') # prevent self-edges BEFORE sigmoid/BCE
37
  node_mask = torch.sigmoid(nlog)
38
  m = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
39
  A_soft = torch.sigmoid(elog) * m
 
43
class Discriminator(nn.Module):
    """
    Discriminator for WGAN-GP with a light graph-aware critic.
    Uses one-hop message passing with masked mean pooling.
    Accepts dynamically sliced [B, N_b, ...] tensors (train loop will slice).
    """

    def __init__(self, nmax=121, cond_dim=4, hid=256):
        super(Discriminator, self).__init__()
        # Per-node embedding of the 2-D coordinates.
        self.node_mlp = nn.Sequential(
            nn.Linear(2, hid), nn.LeakyReLU(0.2, True),
            nn.Linear(hid, hid), nn.LeakyReLU(0.2, True),
        )
        # NOTE(review): edge_mlp is never called by forward() below; it is kept
        # so the module's parameter layout (state_dict keys and RNG init order)
        # stays unchanged — confirm before removing.
        self.edge_mlp = nn.Sequential(
            nn.Linear(2 * hid + 1, hid), nn.LeakyReLU(0.2, True),
            nn.Linear(hid, hid), nn.LeakyReLU(0.2, True),
        )
        # Critic head over pooled node features concatenated with conditioning.
        self.head = nn.Sequential(
            nn.Linear(2 * hid + 1 + cond_dim, hid),
            nn.LeakyReLU(0.2, True),
            nn.Linear(hid, 1),
        )
        self.nmax = nmax

    def forward(self, nodes, A_soft, node_mask, cond):
        """
        nodes: [B, N, 2]
        A_soft: [B, N, N]
        node_mask: [B, N] (float in [0, 1])
        cond: [B, C]
        Returns the critic score, shape [B, 1].
        """
        batch, n_nodes, _ = nodes.shape
        mask_col = node_mask.unsqueeze(-1)                # [B,N,1]
        embed = self.node_mlp(nodes) * mask_col           # [B,N,hid]
        neighbor_sum = torch.matmul(A_soft, embed)        # one-hop message, [B,N,hid]
        degree_stat = A_soft.mean(dim=-1, keepdim=True)   # [B,N,1]
        per_node = torch.cat([embed, neighbor_sum, degree_stat], dim=-1) * mask_col
        # Masked mean pooling over the node axis.
        n_valid = node_mask.sum(dim=1, keepdim=True).clamp_min(1e-6)  # [B,1]
        pooled = per_node.sum(dim=1) / n_valid            # [B, 2*hid+1]
        return self.head(torch.cat([pooled, cond], dim=-1))
84
 
 
86
  def gradient_penalty(discriminator, real_data, fake_data, lambda_gp=10):
87
  """
88
  Computes gradient penalty for structured inputs.
89
+ real_data: (nodes, node_mask, A_soft, cond)
90
+ fake_data: (nodes, node_mask, A_soft, cond)
91
  """
92
  device = real_data[0].device
93
  B = real_data[0].size(0)
94
  # Interpolation coefficients
95
  alpha_nodes = torch.rand(B, 1, 1, device=device)
96
+ alpha_mask = torch.rand(B, 1, device=device)
97
+ alpha_adj = torch.rand(B, 1, 1, device=device)
98
+ alpha_cond = torch.rand(B, 1, device=device)
99
 
100
  nodes_i = (alpha_nodes * real_data[0] + (1 - alpha_nodes) * fake_data[0]).requires_grad_(True)
101
+ mask_i = (alpha_mask * real_data[1] + (1 - alpha_mask) * fake_data[1]).requires_grad_(True)
102
+ adj_i = (alpha_adj * real_data[2] + (1 - alpha_adj) * fake_data[2]).requires_grad_(True)
103
+ cond_i = (alpha_cond * real_data[3] + (1 - alpha_cond) * fake_data[3]).requires_grad_(True)
104
 
105
  d_interpolates = discriminator(nodes_i, adj_i, mask_i, cond_i)
106
  fake = torch.ones_like(d_interpolates, device=device)
 
125
  m_edges = (m_node.unsqueeze(-1) * m_node.unsqueeze(-2)).bool()
126
  L_nodes = ((nodes - nodes_real)**2).sum(dim=-1)
127
  L_nodes = (L_nodes * m_node).sum() / (m_node.sum() + 1e-6)
128
+
129
+ # encourage node mask to match
130
  L_mask = F.binary_cross_entropy_with_logits(
131
+ torch.logit(node_mask.clamp(1e-6, 1-1e-6)), node_mask_real, reduction='mean')
132
+
133
  triu = torch.triu(torch.ones_like(A_real), diagonal=1).bool()
134
  mask_tri = (m_edges & triu)
135
+ # use elog (pre-sigmoid) v.s. binary target
136
  L_edges = F.binary_cross_entropy_with_logits(
137
  elog[mask_tri], (A_real*1.0)[mask_tri], reduction='mean')
138
  return L_nodes, L_mask, L_edges
test.py CHANGED
@@ -1,151 +1,173 @@
 
1
  import os
2
  import torch
3
  import numpy as np
4
  import matplotlib.pyplot as plt
5
  from models.wgan_gp import Generator
6
  from train_utils.preprocess_s1 import preprocess_s1
 
7
 
8
- def reconstruct_sample(sample_np, metadata, full_output_dim, index=None):
 
 
 
 
9
  """
10
- Reconstruct nodal coordinates, ele_nod, pel from flattened sample.
11
  """
12
- if index is not None:
13
- # Get per-sample metadata if available
14
- per_sample_metadata = metadata.get('per_sample_metadata', [])
15
- actual_nodes = per_sample_metadata[index]['n_nod_tot'] if index < len(per_sample_metadata) else metadata['max_nodes']
16
- actual_elements = per_sample_metadata[index]['n_ele_tot'] if index < len(per_sample_metadata) else metadata['max_elements']
17
- else:
18
- actual_nodes = metadata['max_nodes']
19
- actual_elements = metadata['max_elements']
20
-
21
  max_nodes = metadata['max_nodes']
22
  max_elements = metadata['max_elements']
23
 
24
- # Indices
25
- nodal_start = 0
26
  nodal_end = max_nodes * 2
27
- ele_nod_start = nodal_end
28
- ele_nod_end = ele_nod_start + max_elements * 2
29
- pel_start = ele_nod_end
30
- pel_end = pel_start + max_elements * 4
31
-
32
- # Extract and reshape
33
- nodal_flat = sample_np[nodal_start:nodal_end][:actual_nodes * 2] # (actual_nodes * 2,)
34
- nodal_coord = nodal_flat.reshape(-1, 2) # (actual_nodes, 2)
35
- ele_nod_flat = sample_np[ele_nod_start:ele_nod_end][:actual_elements * 2] # (actual_elements * 2,)
36
- ele_nod = ele_nod_flat.reshape(-1, 2).astype(int) # (actual_elements, 2)
37
- pel_flat = sample_np[pel_start:pel_end][:actual_elements * 4] # (actual_elements * 4,)
38
- pel = pel_flat.reshape(-1, 4) # (actual_elements, 4)
39
-
40
- # Filter valid edges: only if node indices are valid and exist
41
- valid_mask = (ele_nod[:, 0] < actual_nodes) & (ele_nod[:, 1] < actual_nodes) & (ele_nod[:, 0] >= 0) & (ele_nod[:, 1] >= 0)
42
- ele_nod = ele_nod[valid_mask]
43
- pel = pel[valid_mask]
 
 
 
 
44
 
45
  return nodal_coord, ele_nod, pel
46
 
47
- def plot_truss(nodal_coord, ele_nod, title="Generated Truss", ax=None):
48
- """
49
- Plot truss structure.
50
- """
51
  if ax is None:
52
- fig, ax = plt.subplots(figsize=(8, 6))
 
 
53
 
54
  # Plot nodes
55
- ax.scatter(nodal_coord[:, 0], nodal_coord[:, 1], c='blue', s=50, label='Nodes')
56
 
57
  # Plot edges
58
  for e in ele_nod:
59
- if len(e) == 2 and e[0] < len(nodal_coord) and e[1] < len(nodal_coord):
60
  x1, y1 = nodal_coord[e[0]]
61
  x2, y2 = nodal_coord[e[1]]
62
- ax.plot([x1, x2], [y1, y2], 'k-', lw=1.5, alpha=0.7)
63
 
64
- ax.set_aspect('equal')
65
  ax.set_title(title)
66
  ax.grid(alpha=0.3)
67
- ax.legend()
68
  return ax
69
 
70
- def evaluate_and_visualize(checkpoints_path, n_samples=9, device='cpu'):
 
 
 
 
71
  """
72
- Load best generator, generate samples, visualize, and show metrics.
 
73
  """
74
  device = torch.device(device)
75
 
76
- # Load metadata and models
77
- metadata = np.load(os.path.join(checkpoints_path, 'metadata.npy'), allow_pickle=True).item()
78
- generator_state = torch.load(os.path.join(checkpoints_path, 'generator.pth'), map_location=device)
79
- epoch_losses = np.load(os.path.join(checkpoints_path, 'epoch_losses.npy'), allow_pickle=True)
80
 
81
- generator = Generator(latent_dim=128, output_dim=metadata['total_dim']).to(device)
82
  generator.load_state_dict(generator_state)
83
  generator.eval()
 
84
 
85
- print("Generator loaded. Metadata:", {k: v for k, v in metadata.items() if k != 'per_sample_metadata' and k != 'npz_files'})
86
-
87
- # Load real data for comparison
88
  real_data, _ = preprocess_s1(normalize_type=None)
89
  real_samples = real_data[:n_samples]
90
 
91
- # Generate fake samples
92
  with torch.no_grad():
93
  z = torch.randn(n_samples, 128, device=device)
94
- fake_samples = generator(z).cpu().numpy()
95
-
96
- # Plot real vs generated
97
- fig, axes = plt.subplots(2, n_samples, figsize=(4*n_samples, 8))
98
- axes = axes.flatten() if n_samples > 1 else [axes]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
 
100
  for i in range(n_samples):
101
- # Real
102
- nodal_real, ele_nod_real, _ = reconstruct_sample(real_samples[i], metadata, metadata['total_dim'], i)
103
- plot_truss(nodal_real, ele_nod_real, f"Real {i+1}", ax=axes[i])
104
-
105
- # Generated
106
- nodal_fake, ele_nod_fake, _ = reconstruct_sample(fake_samples[i], metadata, metadata['total_dim'], i)
107
- plot_truss(nodal_fake, ele_nod_fake, f"Gen {i+1}", ax=axes[i + n_samples])
108
 
109
  plt.tight_layout()
110
  plt.show()
111
 
112
- # Training metrics plot
113
- losses = epoch_losses
114
- epochs = [loss['epoch'] for loss in losses]
115
- d_losses = [loss['d_loss'] for loss in losses]
116
- g_losses = [loss['g_loss'] for loss in losses]
117
-
118
- fig2, ax1 = plt.subplots(figsize=(10, 5))
119
- ax1.plot(epochs, d_losses, label='Discriminator Loss', c='red')
120
- ax1.plot(epochs, g_losses, label='Generator Loss', c='blue')
121
- ax1.set_xlabel('Epoch')
122
- ax1.set_ylabel('Loss')
123
- ax1.set_title('Training Losses')
124
- ax1.legend()
125
- ax1.grid(alpha=0.3)
126
- plt.show()
127
-
128
- print("Training complete! Final epoch losses:")
129
- final = losses[-1] if losses else {'d_loss': 0, 'g_loss': 0}
130
- print(f"Final D Loss: {final['d_loss']:.4f}, G Loss: {final['g_loss']:.4f}")
131
-
132
- # Generated metrics (using batch of fakes for quick eval)
133
- from train_utils.preprocess_s1 import compute_sequence_lengths
134
- fake_metrics = []
135
- for sample_np in fake_samples[:min(5, n_samples)]: # Small batch
136
- fake_tensor = torch.tensor([sample_np], dtype=torch.float32)
137
- metrics = compute_graph_metrics(fake_tensor, metadata)
138
- fake_metrics.append(metrics)
139
-
140
- avg_metrics = {k: np.mean([m[k] for m in fake_metrics]) for k in fake_metrics[0]}
141
- print(f"Generated Metrics: {avg_metrics}")
142
-
143
- def compute_graph_metrics(fake_samples, metadata):
144
- """
145
- Quick graph metrics for generated samples.
146
- """
147
- from train import compute_graph_metrics as train_compute # Reuse
148
- return train_compute(fake_samples, metadata)
149
-
150
- if __name__ == '__main__':
151
- evaluate_and_visualize('models/checkpoints', n_samples=6)
 
 
 
 
1
+ # test.py
2
  import os
3
  import torch
4
  import numpy as np
5
  import matplotlib.pyplot as plt
6
  from models.wgan_gp import Generator
7
  from train_utils.preprocess_s1 import preprocess_s1
8
+ from train import compute_graph_metrics
9
 
10
+
11
+ # ---------------------------------------------------------------
12
+ # Helper: reconstruct and plot real samples
13
+ # ---------------------------------------------------------------
14
def reconstruct_sample(sample_np, metadata, index=None):
    """
    Reconstruct nodal coordinates, ele_nod, pel from a flattened dataset sample.

    The flat sample is laid out as
    [nodal (max_nodes*2) | ele_nod (max_elements*2) | pel (max_elements*4)].

    Parameters
    ----------
    sample_np : np.ndarray
        One flattened, padded sample.
    metadata : dict
        Must contain 'max_nodes' and 'max_elements'.
    index : int, optional
        Unused; kept for backward compatibility with older callers.

    Returns
    -------
    (nodal_coord, ele_nod, pel)
        nodal_coord: (K, 2) float array of surviving nodes.
        ele_nod:     (E, 2) int array of node-index pairs.
        pel:         (E, 4) float array, row-aligned with ele_nod.
    """
    max_nodes = metadata['max_nodes']
    max_elements = metadata['max_elements']

    nodal_end = max_nodes * 2
    ele_end = nodal_end + max_elements * 2
    pel_end = ele_end + max_elements * 4

    nodal_flat = sample_np[:nodal_end]
    ele_flat = sample_np[nodal_end:ele_end]
    pel_flat = sample_np[ele_end:pel_end]

    # Drop padded (all-zero) node rows.
    # NOTE(review): a real node located exactly at the origin would also be
    # dropped by this heuristic — confirm the padding convention with the
    # preprocessing code.
    nodal_coord = nodal_flat.reshape(-1, 2)
    valid_nodes = np.where(np.abs(nodal_coord).sum(-1) > 0)[0]
    nodal_coord = nodal_coord[valid_nodes]

    ele_nod = ele_flat.reshape(-1, 2).astype(int)
    valid_edges = (ele_nod[:, 0] >= 0) & (ele_nod[:, 1] >= 0)
    ele_nod = ele_nod[valid_edges]
    pel = pel_flat.reshape(-1, 4)[valid_edges]

    # Keep only edges whose endpoints refer to surviving nodes, and apply the
    # SAME mask to pel so the two arrays stay row-aligned.  (Bug fix: the
    # previous version filtered ele_nod here but left pel unfiltered, so the
    # returned ele_nod and pel could have mismatched lengths.)
    in_range = (ele_nod[:, 0] < len(nodal_coord)) & (ele_nod[:, 1] < len(nodal_coord))
    ele_nod = ele_nod[in_range]
    pel = pel[in_range]

    return nodal_coord, ele_nod, pel
45
 
46
+
47
def plot_truss(nodal_coord, ele_nod, title="Truss", ax=None, color_nodes="blue"):
    """Plot truss structure with nodes and connecting edges."""
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 4))

    # Nothing to draw for an empty structure.
    n_nodes = len(nodal_coord)
    if n_nodes == 0:
        return ax

    # Nodes as a scatter layer.
    ax.scatter(nodal_coord[:, 0], nodal_coord[:, 1], c=color_nodes, s=40, label="nodes")

    # One line segment per member whose endpoints are in range.
    for node_a, node_b in ele_nod:
        if 0 <= node_a < n_nodes and 0 <= node_b < n_nodes:
            xa, ya = nodal_coord[node_a]
            xb, yb = nodal_coord[node_b]
            ax.plot([xa, xb], [ya, yb], "k-", lw=1.2, alpha=0.7)

    ax.set_aspect("equal")
    ax.set_title(title)
    ax.grid(alpha=0.3)
    return ax
68
 
69
+
70
+ # ---------------------------------------------------------------
71
+ # Main evaluation + visualization
72
+ # ---------------------------------------------------------------
73
def evaluate_and_visualize(checkpoints_path, n_samples=6, device="cpu"):
    """
    Load the trained generator, generate samples, plot real vs generated trusses,
    show loss curves, and basic topology metrics.
    """
    device = torch.device(device)

    # ---- Load model + metadata ----
    metadata = np.load(os.path.join(checkpoints_path, "metadata.npy"), allow_pickle=True).item()
    generator_state = torch.load(os.path.join(checkpoints_path, "generator.pth"), map_location=device)
    epoch_losses = np.load(os.path.join(checkpoints_path, "epoch_losses.npy"), allow_pickle=True)

    generator = Generator(latent_dim=128, nmax=metadata["max_nodes"], cond_dim=4).to(device)
    generator.load_state_dict(generator_state)
    generator.eval()
    print(f"✅ Generator loaded. Using max_nodes={metadata['max_nodes']}, max_elements={metadata['max_elements']}")

    # ---- Load some real samples ----
    real_data, _ = preprocess_s1(normalize_type=None)
    real_samples = real_data[:n_samples]

    # ---- Generate fake trusses ----
    with torch.no_grad():
        z = torch.randn(n_samples, 128, device=device)
        # Conditioning: average-case normalized values
        cond_vals = [1.0, 1.0, 0.5, 0.5]
        cond = torch.tensor([cond_vals] * n_samples, dtype=torch.float32, device=device)
        nodes, node_mask, A_soft, _, _ = generator(z, cond)

    nodes = nodes.cpu().numpy()
    node_mask = node_mask.cpu().numpy()
    A_soft = A_soft.cpu().numpy()

    # ---- Build graph structures from generator outputs ----
    # Threshold the soft mask / adjacency at 0.5 and compact node indices.
    fake_nodal_coords = []
    fake_ele_nods = []
    for sample_idx in range(n_samples):
        kept = np.where(node_mask[sample_idx] > 0.5)[0]
        coords = nodes[sample_idx, kept]
        edge_list = [
            [a, b]
            for a in range(len(kept))
            for b in range(a + 1, len(kept))
            if A_soft[sample_idx, kept[a], kept[b]] > 0.5
        ]
        fake_nodal_coords.append(coords)
        fake_ele_nods.append(np.array(edge_list, dtype=int))

    # ---- Plot real vs generated ----
    fig, axes = plt.subplots(2, n_samples, figsize=(3 * n_samples, 6))
    axes = axes.flatten()

    for i in range(n_samples):
        nodal_real, ele_nod_real, _ = reconstruct_sample(real_samples[i], metadata)
        plot_truss(nodal_real, ele_nod_real, f"Real {i+1}", ax=axes[i], color_nodes="tab:blue")
        plot_truss(fake_nodal_coords[i], fake_ele_nods[i], f"Gen {i+1}", ax=axes[i + n_samples], color_nodes="tab:orange")

    plt.tight_layout()
    plt.show()

    # ---- Plot training losses ----
    if isinstance(epoch_losses, np.ndarray) and epoch_losses.size > 0:
        # Convert to list of dicts if needed
        if not isinstance(epoch_losses[0], dict):
            epoch_losses = list(epoch_losses)
    else:
        print("⚠️ No training losses found.")
        epoch_losses = []

    if len(epoch_losses) > 0:
        epochs = [entry["epoch"] for entry in epoch_losses]
        d_losses = [entry["d_loss"] for entry in epoch_losses]
        g_losses = [entry["g_loss"] for entry in epoch_losses]

        plt.figure(figsize=(9, 4))
        plt.plot(epochs, d_losses, "r-", label="Discriminator Loss")
        plt.plot(epochs, g_losses, "b-", label="Generator Loss")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.title("Training Loss Curves (WGAN-GP)")
        plt.grid(alpha=0.3)
        plt.legend()
        plt.show()

        final = epoch_losses[-1]
        print(f"📉 Final Epoch {final['epoch']}: D={final['d_loss']:.3f}, G={final['g_loss']:.3f}")
    else:
        print("⚠️ No losses to plot.")

    # ---- Compute metrics for generated batch ----
    batch_size = min(5, n_samples)
    nodes_batch = torch.tensor(nodes[:batch_size], dtype=torch.float32)
    node_mask_batch = torch.tensor(node_mask[:batch_size], dtype=torch.float32)
    A_soft_batch = torch.tensor(A_soft[:batch_size], dtype=torch.float32)
    metrics = compute_graph_metrics(nodes_batch, A_soft_batch, node_mask_batch)
    print(f"📊 Generated graph metrics:\n{metrics}")
167
+
168
+
169
# ---------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------
if __name__ == "__main__":
    # Evaluate the checkpointed generator and visualize six samples.
    evaluate_and_visualize("models/checkpoints", n_samples=6)
train.py CHANGED
@@ -1,187 +1,340 @@
 
1
  import os
2
  import torch
3
  import torch.optim as optim
4
  import numpy as np
5
- from torch.utils.data import DataLoader, TensorDataset
 
 
 
6
  from models.wgan_gp import Generator, Discriminator, gradient_penalty, aux_losses
7
  from train_utils.preprocess_s1 import preprocess_s1
8
- import networkx as nx
9
- import torch.nn.functional as F
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
 
12
- def connectivity_penalty(fake_samples, metadata):
 
 
 
13
  """
14
- Penalize generated trusses if disconnected (few valid edges per node).
15
  """
16
- batch_size = fake_samples.size(0)
17
- max_nodes = metadata['max_nodes']
18
- penalty = 0.0
19
- for i in range(batch_size):
20
- sample = fake_samples[i].detach().cpu().numpy()
21
- node_block = sample[:max_nodes * 2]
22
- ele_block = sample[max_nodes * 2 : max_nodes * 2 + metadata['max_elements'] * 2]
23
- node_ids = np.unique(ele_block.astype(int))
24
- valid_nodes = [n for n in node_ids if 0 <= n < max_nodes]
25
- penalty += (max_nodes - len(valid_nodes))
26
- return torch.tensor(penalty, dtype=torch.float32, device=fake_samples.device)
27
-
28
-
29
- def compute_graph_metrics(nodes, A_soft, node_mask):
 
 
 
 
 
 
 
30
  """
31
- Evaluate connectivity and degree stats from generated graphs.
 
32
  """
33
- metrics = {'connectivity': [], 'degree_mean': [], 'degree_var': []}
34
- nodes = nodes.detach().cpu().numpy()
35
- A = (A_soft.detach().cpu().numpy() > 0.5).astype(int)
36
- node_mask = node_mask.detach().cpu().numpy()
37
- B, N, _ = nodes.shape
38
- for b in range(B):
39
- valid = node_mask[b] > 0.5
40
- G = nx.Graph()
41
- idxs = np.where(valid)[0]
42
- G.add_nodes_from(idxs)
43
- for i in idxs:
44
- for j in idxs:
45
- if i < j and A[b, i, j] > 0:
46
- G.add_edge(i, j)
47
- if len(G.nodes) > 0:
48
- degs = [d for _, d in G.degree()]
49
- metrics['connectivity'].append(nx.number_connected_components(G))
50
- metrics['degree_mean'].append(np.mean(degs))
51
- metrics['degree_var'].append(np.var(degs))
52
- else:
53
- metrics['connectivity'].append(0)
54
- metrics['degree_mean'].append(0)
55
- metrics['degree_var'].append(0)
56
- return {k: np.mean(v) if v else 0 for k, v in metrics.items()}
57
-
58
-
59
- def train_wgan_gp(device='cuda' if torch.cuda.is_available() else 'cpu',
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  n_epochs=100, batch_size=16, latent_dim=128,
61
- n_critic=3, lr_g=2e-4, lr_d=1e-4,
62
- lambda_gp=10, lambda_connect=0.0,
63
- save_path='models/checkpoints'):
 
64
  """
65
- Train WGAN-GP with structured Generator and Discriminator.
66
- Includes keyboard interrupt save and best-model checkpointing.
 
 
67
  """
 
 
 
68
  device = torch.device(device)
69
 
70
- # Load data
71
  data_array, metadata = preprocess_s1()
72
- dataset = TensorDataset(torch.tensor(data_array, dtype=torch.float32))
73
- dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
74
- nmax = metadata['max_nodes']
75
- max_elems = metadata['max_elements']
 
 
 
 
76
 
77
- # Init models
 
 
 
 
78
  cond_dim = 4 # [normed n_nodes, n_elems, height, spacing]
79
- generator = Generator(latent_dim=latent_dim, nmax=nmax, cond_dim=cond_dim).to(device)
80
- discriminator = Discriminator(nmax=nmax, cond_dim=cond_dim).to(device)
81
 
82
  opt_g = optim.Adam(generator.parameters(), lr=lr_g, betas=(0.0, 0.99))
83
  opt_d = optim.Adam(discriminator.parameters(), lr=lr_d, betas=(0.0, 0.99))
84
 
85
- best_g_loss = float('inf')
 
86
  epoch_losses = []
87
  os.makedirs(save_path, exist_ok=True)
88
 
89
- def make_cond(meta_batch, bs):
90
- """Create conditioning vector matching current batch size."""
91
- n_nodes = meta_batch['max_nodes'] / metadata['max_nodes']
92
- n_elems = meta_batch['max_elements'] / metadata['max_elements']
93
- height = np.random.uniform(0.2, 1.0)
94
- spacing = np.random.uniform(0.2, 1.0)
95
- cond = np.array([n_nodes, n_elems, height, spacing], dtype=np.float32)
96
- cond = np.tile(cond, (bs, 1))
97
- return torch.tensor(cond, device=device)
98
 
99
  try:
100
- print(f"Starting training on {len(dataset)} samples...")
101
  for epoch in range(n_epochs):
102
- epoch_d_loss, epoch_g_loss = 0, 0
103
-
104
- for real_flat, in dataloader:
105
- real_flat = real_flat.to(device)
106
- bs = real_flat.size(0)
107
- cond = make_cond(metadata, bs)
108
-
109
- # Extract structured tensors
110
- nodes_real = real_flat[:, : nmax * 2].view(bs, nmax, 2)
111
- node_mask_real = (nodes_real.abs().sum(-1) > 0).float()
112
-
113
- # Build adjacency from ele_nod
114
- ele_block = real_flat[:, nmax * 2 : nmax * 2 + max_elems * 2]
115
- A_real = torch.zeros(bs, nmax, nmax, device=device)
116
- for b in range(bs):
117
- for i in range(0, max_elems * 2, 2):
118
- n1 = int(ele_block[b, i].item())
119
- n2 = int(ele_block[b, i + 1].item())
120
- if 0 <= n1 < nmax and 0 <= n2 < nmax:
121
- A_real[b, n1, n2] = 1
122
- A_real[b, n2, n1] = 1
123
- A_real = A_real * (node_mask_real.unsqueeze(-1) * node_mask_real.unsqueeze(-2))
124
- A_real[:, torch.arange(nmax), torch.arange(nmax)] = 0 # zero diagonal per batch
125
-
126
- cond = make_cond(metadata, bs)
127
 
128
  # ---- Train Discriminator ----
129
  for _ in range(n_critic):
130
  z = torch.randn(bs, latent_dim, device=device)
131
  nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
 
 
 
 
 
 
 
132
  d_real = discriminator(nodes_real, A_real, node_mask_real, cond)
133
- d_fake = discriminator(nodes_f.detach(), A_f.detach(), nmask_f.detach(), cond)
134
- gp = gradient_penalty(discriminator,
135
- (nodes_real, node_mask_real, A_real, cond),
136
- (nodes_f.detach(), nmask_f.detach(), A_f.detach(), cond),
137
- lambda_gp)
 
 
 
138
  loss_d = -(d_real.mean() - d_fake.mean()) + gp
139
- opt_d.zero_grad()
140
  loss_d.backward()
141
  opt_d.step()
142
 
143
  # ---- Train Generator ----
144
  z = torch.randn(bs, latent_dim, device=device)
145
  nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
146
- d_fake = discriminator(nodes_f, A_f, nmask_f, cond)
 
 
 
 
 
147
  adv_loss = -d_fake.mean()
148
 
149
- L_nodes, L_mask, L_edges = aux_losses(nodes_f, nmask_f, elog_f,
150
- nodes_real, node_mask_real, A_real)
 
 
 
151
  loss_g = adv_loss + 10 * L_nodes + 1 * L_mask + 5 * L_edges
152
 
153
  if lambda_connect > 0:
154
- flat_fake = torch.cat([nodes_f.view(bs, -1),
155
- A_f.view(bs, -1)], dim=1)
156
- loss_g += lambda_connect * connectivity_penalty(flat_fake, metadata)
157
 
158
- opt_g.zero_grad()
159
  loss_g.backward()
160
  opt_g.step()
161
 
162
  epoch_d_loss += loss_d.item()
163
  epoch_g_loss += loss_g.item()
164
 
165
- # ---- Logging and checkpointing ----
166
- epoch_d_loss /= len(dataloader)
167
- epoch_g_loss /= len(dataloader)
 
168
  epoch_losses.append({'epoch': epoch + 1,
169
  'd_loss': epoch_d_loss,
170
  'g_loss': epoch_g_loss})
171
 
172
- if (epoch + 1) % 5 == 0:
173
- metrics = compute_graph_metrics(nodes_f, A_f, nmask_f)
174
- print(f"[{epoch+1}/{n_epochs}] D Loss: {epoch_d_loss:.2f}, "
175
- f"G Loss: {epoch_g_loss:.2f}, "
176
- f"DegMean: {metrics['degree_mean']:.2f}, "
177
- f"Conn: {metrics['connectivity']:.2f}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
- if epoch_g_loss < best_g_loss:
180
- best_g_loss = epoch_g_loss
181
- torch.save(generator.state_dict(), os.path.join(save_path, 'generator.pth'))
182
- torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator.pth'))
183
- np.save(os.path.join(save_path, 'metadata.npy'), metadata)
184
- print(f" New best G loss {best_g_loss:.2f} — models saved.")
185
 
186
  # Final save
187
  np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
@@ -195,7 +348,7 @@ def train_wgan_gp(device='cuda' if torch.cuda.is_available() else 'cpu',
195
  torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator_interrupted.pth'))
196
  np.save(os.path.join(save_path, 'metadata.npy'), metadata)
197
  print(f"🟡 Interrupted state saved to {save_path}.")
198
- raise # Re-raise to exit
199
 
200
 
201
  if __name__ == '__main__':
 
1
+ # train.py
2
  import os
3
  import torch
4
  import torch.optim as optim
5
  import numpy as np
6
+ from torch.utils.data import DataLoader, Dataset
7
+ import torch.nn.functional as F
8
+ import math
9
+
10
  from models.wgan_gp import Generator, Discriminator, gradient_penalty, aux_losses
11
  from train_utils.preprocess_s1 import preprocess_s1
12
+
13
+ # ----------------------------
14
+ # Utility: vectorized adjacency
15
+ # ----------------------------
16
def build_adj_from_eleblock_vectorized(ele_block, nmax_batch):
    """
    Build a dense, symmetric 0/1 adjacency matrix from a padded ele_nod block.

    Args:
        ele_block: float tensor [B, max_elems*2], padded with -1 for invalid slots.
        nmax_batch: int, number of nodes considered for this batch (<= global nmax).

    Returns:
        Float tensor [B, nmax_batch, nmax_batch] with 0/1 entries and zero diagonal.
    """
    batch = ele_block.size(0)
    dev = ele_block.device
    adj = torch.zeros(batch, nmax_batch, nmax_batch, device=dev)

    endpoints = ele_block.view(batch, -1, 2).long()  # [B, M, 2] node-index pairs
    src = endpoints[..., 0]
    dst = endpoints[..., 1]
    # A slot is a real edge only when both endpoints index a node in range;
    # the -1 padding fails the >= 0 check.
    keep = (src >= 0) & (dst >= 0) & (src < nmax_batch) & (dst < nmax_batch)
    if keep.any():
        batch_ids = torch.arange(batch, device=dev).unsqueeze(1).expand_as(src)
        b_sel = batch_ids[keep]
        r_sel = src[keep]
        c_sel = dst[keep]
        adj[b_sel, r_sel, c_sel] = 1.0
        adj[b_sel, c_sel, r_sel] = 1.0  # undirected graph: mirror every edge
        diag = torch.arange(nmax_batch, device=dev)
        adj[:, diag, diag] = 0.0  # self-loops are never valid
    return adj
46
 
47
 
48
+ # ----------------------------
49
+ # Dataset with size info (from legacy padded matrix)
50
+ # ----------------------------
51
class TrussFlatDataset(Dataset):
    """
    Dataset over the legacy padded flat vectors produced by preprocess_s1(),
    augmented with each sample's true (n_nodes, n_elems) so batches can be
    length-bucketed downstream.
    """
    def __init__(self, data_array: np.ndarray, metadata: dict):
        super().__init__()
        self.data = torch.tensor(data_array, dtype=torch.float32)
        self.meta = metadata
        self.max_nodes = int(self.meta['max_nodes'])
        self.max_elems = int(self.meta['max_elements'])
        # One (n_nodes, n_elems) row per sample, shape [N, 2].
        self.size_info = torch.tensor(self.meta['size_info'], dtype=torch.long)

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, idx):
        n_nodes, n_elems = self.size_info[idx].tolist()
        return self.data[idx], n_nodes, n_elems
70
+
71
+
72
+ # ----------------------------
73
+ # Length-bucketed sampler
74
+ # ----------------------------
75
def make_length_buckets(size_info: np.ndarray, bucket_width_nodes=16, bucket_width_elems=32):
    """
    Group sample indices into (node_bucket, elem_bucket) bins so that batches
    drawn from a single bin need little padding.

    Returns:
        List of index lists, one per non-empty bucket, ordered by
        (node bin, elem bin).
    """
    bins = {}
    for idx, (n_nodes, n_elems) in enumerate(size_info):
        key = ((int(n_nodes) // bucket_width_nodes) * bucket_width_nodes,
               (int(n_elems) // bucket_width_elems) * bucket_width_elems)
        bins.setdefault(key, []).append(idx)
    # Sorting the dict keys (tuples) reproduces the (node, elem) ordering.
    return [bins[key] for key in sorted(bins)]
88
+
89
+
90
def bucketed_batch_sampler(size_info: np.ndarray, batch_size=16,
                           bucket_width_nodes=16, bucket_width_elems=32):
    """
    Yield index batches, each drawn entirely from one length bucket.

    Indices are shuffled within their bucket (via np.random), so the batch
    contents vary per call, but every sample index is emitted exactly once.
    """
    for bucket in make_length_buckets(size_info, bucket_width_nodes, bucket_width_elems):
        shuffled = np.array(bucket)[np.random.permutation(len(bucket))]
        for lo in range(0, len(shuffled), batch_size):
            yield shuffled[lo:lo + batch_size].tolist()
104
+
105
+
106
+ # ----------------------------
107
+ # Collate: dynamic per-batch padding & masks
108
+ # ----------------------------
109
def collate_truss(batch, max_nodes_global, max_elems_global):
    """
    Collate a list of (flat, n_nodes, n_elems) samples into batch tensors.

    Crops the globally padded flat layout down to this batch's maximum node
    and element counts (a large speed/memory win over the global maxima),
    derives a node-existence mask, builds the real adjacency, and assembles
    the conditioning vector.

    Returns:
        nodes:     [B, nmax_b, 2] cropped node coordinates
        node_mask: [B, nmax_b] 1.0 where a node exists
        A_real:    [B, nmax_b, nmax_b] masked symmetric adjacency
        cond:      [B, 4] (n_nodes/global_max, n_elems/global_max, height, spacing)
        nmax_b:    int, per-batch node count
    """
    flats, counts_n, counts_e = zip(*batch)
    B = len(flats)
    nmax_b = max(counts_n)
    emax_b = max(counts_e)

    stacked = torch.stack(flats, 0)  # [B, total_dim]
    # Global flat layout: [node coords | ele_nod pairs | pel props]
    node_end = max_nodes_global * 2
    ele_end = node_end + max_elems_global * 2

    nodes = stacked[:, :node_end].view(B, max_nodes_global, 2)[:, :nmax_b, :]
    ele_block = stacked[:, node_end:ele_end][:, :emax_b * 2]

    # A node "exists" if its coordinates are not all-zero.
    # NOTE(review): a real node sitting exactly at the origin would be masked
    # out here — presumably fine for this dataset, but worth confirming.
    node_mask = (nodes.abs().sum(-1) > 0).float()

    A_real = build_adj_from_eleblock_vectorized(ele_block, nmax_b)
    pair_mask = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
    A_real = A_real * pair_mask
    diag = torch.arange(nmax_b, device=A_real.device)
    A_real[:, diag, diag] = 0.0  # keep the diagonal clear after masking

    dev = stacked.device
    # Conditioning counts are normalized by the GLOBAL maxima so the
    # generator sees a consistent scale across buckets.
    n_nodes_norm = torch.tensor([n / max_nodes_global for n in counts_n],
                                dtype=torch.float32, device=dev).unsqueeze(1)
    n_elems_norm = torch.tensor([e / max_elems_global for e in counts_e],
                                dtype=torch.float32, device=dev).unsqueeze(1)
    # Height/spacing are random placeholders in [0.2, 1.0); plug real
    # per-sample values here when they become available.
    height = torch.rand(B, 1, device=dev) * 0.8 + 0.2
    spacing = torch.rand(B, 1, device=dev) * 0.8 + 0.2
    cond = torch.cat([n_nodes_norm, n_elems_norm, height, spacing], dim=1)

    return nodes, node_mask, A_real, cond, nmax_b
154
+
155
+
156
+ # ----------------------------
157
+ # Connectivity penalty (vectorized)
158
+ # ----------------------------
159
def connectivity_penalty(fake_nodes_mask, fake_A_soft):
    """
    Mean fraction of active-but-isolated nodes.

    Pushes the generator to attach at least one edge to every node it
    declares active.

    Args:
        fake_nodes_mask: [B, N] soft node existence values in [0, 1].
        fake_A_soft:     [B, N, N] soft adjacency.

    Returns:
        Scalar tensor penalty.
    """
    degree = fake_A_soft.sum(dim=-1)                  # [B, N] soft degree
    is_active = (fake_nodes_mask > 0.5).float()
    isolated = (degree < 1e-3).float() * is_active    # active nodes with ~zero degree
    return isolated.mean()
172
+
173
+
174
+ # ----------------------------
175
+ # Training
176
+ # ----------------------------
177
+ def train_wgan_gp(device='cuda',
178
  n_epochs=100, batch_size=16, latent_dim=128,
179
+ n_critic=5, lr_g=2e-4, lr_d=1e-4,
180
+ lambda_gp=10, lambda_connect=5.0,
181
+ save_path='models/checkpoints',
182
+ bucket_width_nodes=16, bucket_width_elems=32):
183
  """
184
+ Train WGAN-GP with:
185
+ - vectorized adjacency build
186
+ - length-bucketed batching
187
+ - dynamic per-batch padding & masking
188
  """
189
+ if not torch.cuda.is_available():
190
+ print("⚠️ CUDA not available, training on CPU will be slow.")
191
+ device = 'cpu'
192
  device = torch.device(device)
193
 
194
+ # Load preprocessed (legacy padded) data but with size_info for bucketing
195
  data_array, metadata = preprocess_s1()
196
+ dataset = TrussFlatDataset(data_array, metadata)
197
+
198
+ # Bucketed batch sampler (reduces padding)
199
+ size_info = metadata['size_info']
200
+ sampler = list(bucketed_batch_sampler(size_info,
201
+ batch_size=batch_size,
202
+ bucket_width_nodes=bucket_width_nodes,
203
+ bucket_width_elems=bucket_width_elems))
204
 
205
+ # We’ll build our own DataLoader-like loop b/c we use a custom sampler + collate
206
+ max_nodes_global = metadata['max_nodes']
207
+ max_elems_global = metadata['max_elements']
208
+
209
+ # Init models at GLOBAL maxima; we’ll slice per-batch for the critic pass
210
  cond_dim = 4 # [normed n_nodes, n_elems, height, spacing]
211
+ generator = Generator(latent_dim=latent_dim, nmax=max_nodes_global, cond_dim=cond_dim).to(device)
212
+ discriminator = Discriminator(nmax=max_nodes_global, cond_dim=cond_dim).to(device)
213
 
214
  opt_g = optim.Adam(generator.parameters(), lr=lr_g, betas=(0.0, 0.99))
215
  opt_d = optim.Adam(discriminator.parameters(), lr=lr_d, betas=(0.0, 0.99))
216
 
217
+ best_score = float('-inf')
218
+ g_loss_ema = 0.0
219
  epoch_losses = []
220
  os.makedirs(save_path, exist_ok=True)
221
 
222
+ print(f"Starting training on {len(dataset)} samples with {len(sampler)} bucketed batches per epoch...")
 
 
 
 
 
 
 
 
223
 
224
  try:
 
225
  for epoch in range(n_epochs):
226
+ epoch_d_loss, epoch_g_loss = 0.0, 0.0
227
+
228
+ # Shuffle order of batches each epoch for better mixing
229
+ perm_batches = np.random.permutation(len(sampler)).tolist()
230
+
231
+ for b_id in perm_batches:
232
+ idxs = sampler[b_id]
233
+ batch = [dataset[i] for i in idxs]
234
+
235
+ # --- Collate (dynamic crop + vectorized adjacency)
236
+ nodes_real, node_mask_real, A_real, cond, nmax_b = collate_truss(
237
+ batch, max_nodes_global, max_elems_global
238
+ )
239
+ nodes_real = nodes_real.to(device)
240
+ node_mask_real = node_mask_real.to(device)
241
+ A_real = A_real.to(device)
242
+ cond = cond.to(device)
243
+ bs = nodes_real.size(0)
 
 
 
 
 
 
 
244
 
245
  # ---- Train Discriminator ----
246
  for _ in range(n_critic):
247
  z = torch.randn(bs, latent_dim, device=device)
248
  nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
249
+
250
+ # Slice generator outputs to per-batch effective size for the critic:
251
+ nodes_f_b = nodes_f[:, :nmax_b, :]
252
+ nmask_f_b = nmask_f[:, :nmax_b]
253
+ A_f_b = A_f[:, :nmax_b, :nmax_b]
254
+ elog_f_b = elog_f[:, :nmax_b, :nmax_b] # just for aux losses later
255
+
256
  d_real = discriminator(nodes_real, A_real, node_mask_real, cond)
257
+ d_fake = discriminator(nodes_f_b.detach(), A_f_b.detach(), nmask_f_b.detach(), cond)
258
+
259
+ gp = gradient_penalty(
260
+ discriminator,
261
+ (nodes_real, node_mask_real, A_real, cond),
262
+ (nodes_f_b.detach(), nmask_f_b.detach(), A_f_b.detach(), cond),
263
+ lambda_gp
264
+ )
265
  loss_d = -(d_real.mean() - d_fake.mean()) + gp
266
+ opt_d.zero_grad(set_to_none=True)
267
  loss_d.backward()
268
  opt_d.step()
269
 
270
  # ---- Train Generator ----
271
  z = torch.randn(bs, latent_dim, device=device)
272
  nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
273
+ nodes_f_b = nodes_f[:, :nmax_b, :]
274
+ nmask_f_b = nmask_f[:, :nmax_b]
275
+ A_f_b = A_f[:, :nmax_b, :nmax_b]
276
+ elog_f_b = elog_f[:, :nmax_b, :nmax_b]
277
+
278
+ d_fake = discriminator(nodes_f_b, A_f_b, nmask_f_b, cond)
279
  adv_loss = -d_fake.mean()
280
 
281
+ L_nodes, L_mask, L_edges = aux_losses(
282
+ nodes_f_b, nmask_f_b, elog_f_b,
283
+ nodes_real, node_mask_real, A_real
284
+ )
285
+
286
  loss_g = adv_loss + 10 * L_nodes + 1 * L_mask + 5 * L_edges
287
 
288
  if lambda_connect > 0:
289
+ loss_g += lambda_connect * connectivity_penalty(nmask_f_b, A_f_b)
 
 
290
 
291
+ opt_g.zero_grad(set_to_none=True)
292
  loss_g.backward()
293
  opt_g.step()
294
 
295
  epoch_d_loss += loss_d.item()
296
  epoch_g_loss += loss_g.item()
297
 
298
+ # ---- Logging & checkpointing ----
299
+ num_batches = len(sampler)
300
+ epoch_d_loss /= num_batches
301
+ epoch_g_loss /= num_batches
302
  epoch_losses.append({'epoch': epoch + 1,
303
  'd_loss': epoch_d_loss,
304
  'g_loss': epoch_g_loss})
305
 
306
+ # quick graph stats (last batch’s fakes)
307
+ with torch.no_grad():
308
+ deg_mean = float(A_f_b.mean().item() * nmax_b) # rough proxy
309
+ conn_proxy = float((A_f_b.sum(dim=-1) > 0.5).float().mean().item())
310
+
311
+ print(f"[{epoch+1}/{n_epochs}] D: {epoch_d_loss:.3f} | G: {epoch_g_loss:.3f} "
312
+ f"| N_b:{nmax_b:3d} | DegMean~{deg_mean:.2f} | Conn~{conn_proxy:.2f}")
313
+
314
+ # Use exponential moving average to judge improvement
315
+ ema_beta = 0.9
316
+ if epoch == 0:
317
+ g_loss_ema = epoch_g_loss
318
+ else:
319
+ g_loss_ema = ema_beta * g_loss_ema + (1 - ema_beta) * epoch_g_loss
320
+
321
+ # Evaluate composite "stability score"
322
+ # Here we prioritize low |G_loss| (close to zero) and high connectivity
323
+ score = -abs(epoch_g_loss) + 0.5 * conn_proxy # weights can be tuned
324
+
325
+ if score > best_score:
326
+ best_score = score
327
+ torch.save(generator.state_dict(), os.path.join(save_path, f'generator_best.pth'))
328
+ torch.save(discriminator.state_dict(), os.path.join(save_path, f'discriminator_best.pth'))
329
+ np.save(os.path.join(save_path, 'metadata.npy'), metadata)
330
+ print(f"✅ New best model at epoch {epoch+1}: score={score:.3f}, G_loss={epoch_g_loss:.3f}, Conn={conn_proxy:.2f}")
331
 
332
+ # Also save periodic checkpoints (for recovery)
333
+ if (epoch + 1) % 10 == 0:
334
+ torch.save(generator.state_dict(), os.path.join(save_path, f'generator_epoch{epoch+1}.pth'))
335
+ torch.save(discriminator.state_dict(), os.path.join(save_path, f'discriminator_epoch{epoch+1}.pth'))
336
+ np.save(os.path.join(save_path, 'metadata.npy'), metadata)
337
+ print(f"💾 Periodic checkpoint saved at epoch {epoch+1}")
338
 
339
  # Final save
340
  np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
 
348
  torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator_interrupted.pth'))
349
  np.save(os.path.join(save_path, 'metadata.npy'), metadata)
350
  print(f"🟡 Interrupted state saved to {save_path}.")
351
+ raise
352
 
353
 
354
  if __name__ == '__main__':
train_utils/preprocess_s1.py CHANGED
@@ -3,15 +3,13 @@ import numpy as np
3
  import os
4
  from collections import defaultdict
5
  from scipy.stats import zscore
 
6
 
7
  def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
8
  """
9
  Compute and print min/max sequence lengths (nodes, elements) across all trusses in the dataset.
10
  Uses only min/max n_div files per mode for efficiency.
11
 
12
- Args:
13
- dataset_folder (str): Path to the dataset folder containing .npz files.
14
-
15
  Returns:
16
  Dict with keys 'min_nodes', 'max_nodes', 'min_elements', 'max_elements'.
17
  """
@@ -50,14 +48,14 @@ def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
50
  for mode in sorted(min_max_div):
51
  # For min
52
  if min_div_files[mode]:
53
- min_file = min_div_files[mode][0] # Pick first
54
  data_min = np.load(os.path.join(dataset_folder, min_file))
55
  min_n_nod = min(min_n_nod, int(data_min['n_nod_tot']))
56
  min_n_ele = min(min_n_ele, int(data_min['n_ele_tot']))
57
  data_min.close()
58
  # For max
59
  if max_div_files[mode]:
60
- max_file = max_div_files[mode][0] # Pick first
61
  data_max = np.load(os.path.join(dataset_folder, max_file))
62
  max_n_nod = max(max_n_nod, int(data_max['n_nod_tot']))
63
  max_n_ele = max(max_n_ele, int(data_max['n_ele_tot']))
@@ -66,8 +64,7 @@ def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
66
  print(f"Overall min sequence lengths: nodes={min_n_nod}, elements={min_n_ele}")
67
  print(f"Overall max sequence lengths: nodes={max_n_nod}, elements={max_n_ele}")
68
 
69
- # Additionally print out for one structure type how the keys inside look like
70
- if show_details == True:
71
  example_mode = next(iter(max_div_files))
72
  example_file = max_div_files[example_mode][0]
73
  example_data = np.load(os.path.join(dataset_folder, example_file))
@@ -77,72 +74,112 @@ def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
77
  example_data.close()
78
 
79
  return {
80
- 'min_nodes': min_n_nod,
81
- 'max_nodes': max_n_nod,
82
- 'min_elements': min_n_ele,
83
- 'max_elements': max_n_ele
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  }
 
 
85
 
86
- def pad_to_length(array, max_len):
87
  """
88
- Pad 1D array to max_len with zeros.
 
 
 
89
  """
90
- if len(array) < max_len:
91
- padded = np.zeros(max_len)
92
- padded[:len(array)] = array
93
- return padded
94
- else:
95
- return array[:max_len] # Truncate if larger (shouldn't happen)
 
96
 
97
- def preprocess_s1(dataset_folder="dataset", normalize_type=None):
98
- """
99
- Preprocesses truss dataset into flattened, padded vectors for Stage 1 GAN.
 
 
 
100
 
101
- Args:
102
- dataset_folder (str): Folder with .npz files.
103
- normalize_type (str or None): 'min_max', 'z_score', or None (no normalization).
104
 
105
- Returns:
106
- np.ndarray: Shape (n_samples, total_dim), normalized if specified.
107
- dict: Preprocessing metadata.
 
 
 
108
  """
109
- # Get max lengths dynamically
110
  lengths = compute_sequence_lengths(dataset_folder)
111
  max_nodes = lengths['max_nodes']
112
  max_elements = lengths['max_elements']
113
- total_dim = max_nodes * 2 + max_elements * 2 + max_elements * 4 # nodal (x,y) + ele_nod (2 nodes) + pel (4 props)
114
-
115
- print(f"Max nodes: {max_nodes}, max elements: {max_elements}, total dim: {total_dim}")
116
 
117
  npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
118
  samples = []
 
119
  for f in npz_files:
120
  data = np.load(os.path.join(dataset_folder, f))
121
- nodal = data['nodal_coord'].flatten() # (n_nodes * 2,)
122
- ele_nod = data['ele_nod'].flatten() # (n_elements * 2,)
123
- pel = data['pel'].flatten() # (n_elements * 4,)
 
 
 
 
124
 
125
- # Pad each
126
- nodal_padded = pad_to_length(nodal, max_nodes * 2)
127
- ele_nod_padded = pad_to_length(ele_nod, max_elements * 2)
128
- pel_padded = pad_to_length(pel, max_elements * 4)
129
 
130
- # Concatenate
131
- sample = np.concatenate([nodal_padded, ele_nod_padded, pel_padded])
132
  samples.append(sample)
133
  data.close()
134
 
135
- data_array = np.array(samples) # (n_samples, total_dim)
136
 
137
- # Normalize if requested
138
  if normalize_type == 'min_max':
139
  data_min = data_array.min()
140
  data_max = data_array.max()
141
- data_array = (data_array - data_min) / (data_max - data_min)
142
  elif normalize_type == 'z_score':
143
- data_array = zscore(data_array, axis=0) # Per feature
144
  elif normalize_type is None:
145
- pass # Already normalized in generator
146
  else:
147
  raise ValueError(f"Unknown normalize_type: {normalize_type}")
148
 
@@ -151,6 +188,7 @@ def preprocess_s1(dataset_folder="dataset", normalize_type=None):
151
  'max_elements': max_elements,
152
  'total_dim': total_dim,
153
  'n_samples': len(samples),
154
- 'normalize_type': normalize_type
 
155
  }
156
  return data_array, metadata
 
3
  import os
4
  from collections import defaultdict
5
  from scipy.stats import zscore
6
+ from typing import List, Dict, Any, Tuple
7
 
8
  def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
9
  """
10
  Compute and print min/max sequence lengths (nodes, elements) across all trusses in the dataset.
11
  Uses only min/max n_div files per mode for efficiency.
12
 
 
 
 
13
  Returns:
14
  Dict with keys 'min_nodes', 'max_nodes', 'min_elements', 'max_elements'.
15
  """
 
48
  for mode in sorted(min_max_div):
49
  # For min
50
  if min_div_files[mode]:
51
+ min_file = min_div_files[mode][0]
52
  data_min = np.load(os.path.join(dataset_folder, min_file))
53
  min_n_nod = min(min_n_nod, int(data_min['n_nod_tot']))
54
  min_n_ele = min(min_n_ele, int(data_min['n_ele_tot']))
55
  data_min.close()
56
  # For max
57
  if max_div_files[mode]:
58
+ max_file = max_div_files[mode][0]
59
  data_max = np.load(os.path.join(dataset_folder, max_file))
60
  max_n_nod = max(max_n_nod, int(data_max['n_nod_tot']))
61
  max_n_ele = max(max_n_ele, int(data_max['n_ele_tot']))
 
64
  print(f"Overall min sequence lengths: nodes={min_n_nod}, elements={min_n_ele}")
65
  print(f"Overall max sequence lengths: nodes={max_n_nod}, elements={max_n_ele}")
66
 
67
+ if show_details:
 
68
  example_mode = next(iter(max_div_files))
69
  example_file = max_div_files[example_mode][0]
70
  example_data = np.load(os.path.join(dataset_folder, example_file))
 
74
  example_data.close()
75
 
76
  return {
77
+ 'min_nodes': int(min_n_nod),
78
+ 'max_nodes': int(max_n_nod),
79
+ 'min_elements': int(min_n_ele),
80
+ 'max_elements': int(max_n_ele),
81
+ }
82
+
83
+
84
+ def _pad_1d(arr: np.ndarray, max_len: int, pad_val=0):
85
+ """Pad 1D array to max_len with pad_val."""
86
+ out = np.full((max_len,), pad_val, dtype=arr.dtype)
87
+ n = min(len(arr), max_len)
88
+ out[:n] = arr[:n]
89
+ return out
90
+
91
+
92
+ def _pack_sample(data: np.lib.npyio.NpzFile) -> Dict[str, Any]:
93
+ """Extract a single sample (no padding) with lengths."""
94
+ nodal = data['nodal_coord'] # (n_nodes, 2)
95
+ ele_nod = data['ele_nod'] # (n_elems, 2) int
96
+ pel = data['pel'] # (n_elems, 4) int (may be redundant for topology)
97
+
98
+ sample = {
99
+ 'nodal': nodal.astype(np.float32),
100
+ 'ele_nod': ele_nod.astype(np.int64),
101
+ 'pel': pel.astype(np.int64),
102
+ 'n_nodes': int(nodal.shape[0]),
103
+ 'n_elems': int(ele_nod.shape[0]),
104
+ # Optional globals (kept here for conditioning/analysis)
105
+ 'height': float(np.array(data['height']).item()) if 'height' in data else None,
106
+ 'spacing': float(np.array(data['spacing']).item()) if 'spacing' in data else None,
107
+ 'meta': {
108
+ 'n_rods': int(np.array(data['n_rods']).item()) if 'n_rods' in data else None,
109
+ 'n_beams': int(np.array(data['n_beams']).item()) if 'n_beams' in data else None,
110
+ 'mode': str(np.array(data['truss_mode']).item()) if 'truss_mode' in data else None,
111
+ }
112
  }
113
+ return sample
114
+
115
 
116
def load_truss_samples(dataset_folder="dataset") -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """
    Load every .npz sample WITHOUT padding.

    Returns:
        samples: list of per-sample dicts (variable-length arrays + lengths).
        metadata: dict with global max lengths and the sample count.
    """
    lengths = compute_sequence_lengths(dataset_folder)
    samples = []
    for fname in os.listdir(dataset_folder):
        if not fname.endswith('.npz'):
            continue
        data = np.load(os.path.join(dataset_folder, fname))
        samples.append(_pack_sample(data))
        data.close()

    metadata = {
        'max_nodes': lengths['max_nodes'],
        'max_elements': lengths['max_elements'],
        'n_samples': len(samples),
    }
    return samples, metadata
137
 
 
 
 
138
 
139
+ def preprocess_s1(dataset_folder="dataset", normalize_type=None):
140
+ """
141
+ Backwards-compatible preprocessor: still returns a big padded matrix
142
+ for legacy code, but now:
143
+ - pads integer index arrays with -1 (safe),
144
+ - also returns per-sample (n_nodes, n_elems) for bucketing.
145
  """
 
146
  lengths = compute_sequence_lengths(dataset_folder)
147
  max_nodes = lengths['max_nodes']
148
  max_elements = lengths['max_elements']
149
+ total_dim = max_nodes * 2 + max_elements * 2 + max_elements * 4
 
 
150
 
151
  npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
152
  samples = []
153
+ size_info = []
154
  for f in npz_files:
155
  data = np.load(os.path.join(dataset_folder, f))
156
+ nodal = data['nodal_coord'].flatten().astype(np.float32) # (n_nodes * 2,)
157
+ ele_nod = data['ele_nod'].flatten().astype(np.int64) # (n_elems * 2,)
158
+ pel = data['pel'].flatten().astype(np.int64) # (n_elems * 4,)
159
+
160
+ n_nodes = int(data['nodal_coord'].shape[0])
161
+ n_elems = int(data['ele_nod'].shape[0])
162
+ size_info.append([n_nodes, n_elems])
163
 
164
+ # Pad with safe values: floats→0.0, ints→-1
165
+ nodal_padded = _pad_1d(nodal, max_nodes * 2, pad_val=0.0)
166
+ ele_nod_padded = _pad_1d(ele_nod, max_elements * 2, pad_val=-1)
167
+ pel_padded = _pad_1d(pel, max_elements * 4, pad_val=-1)
168
 
169
+ sample = np.concatenate([nodal_padded, ele_nod_padded, pel_padded]).astype(np.float32)
 
170
  samples.append(sample)
171
  data.close()
172
 
173
+ data_array = np.stack(samples, axis=0) # (n_samples, total_dim)
174
 
 
175
  if normalize_type == 'min_max':
176
  data_min = data_array.min()
177
  data_max = data_array.max()
178
+ data_array = (data_array - data_min) / (data_max - data_min + 1e-12)
179
  elif normalize_type == 'z_score':
180
+ data_array = zscore(data_array, axis=0)
181
  elif normalize_type is None:
182
+ pass
183
  else:
184
  raise ValueError(f"Unknown normalize_type: {normalize_type}")
185
 
 
188
  'max_elements': max_elements,
189
  'total_dim': total_dim,
190
  'n_samples': len(samples),
191
+ 'normalize_type': normalize_type,
192
+ 'size_info': np.array(size_info, dtype=np.int32) # (n_samples, 2)
193
  }
194
  return data_array, metadata
utils/dataset_helper.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import logging
4
+ from utils.truss_geometric import calculate_bridge
5
+ from utils.truss_constraints import (
6
+ calculate_essential_elements,
7
+ truss_design,
8
+ boundary_conditions,
9
+ calculate_element_properties,
10
+ )
11
+ from utils.truss_helpers import calculate_element_node
12
+
13
# ------------------------------------------------------------
# Logging configuration
# ------------------------------------------------------------
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")

# ------------------------------------------------------------
# Default parameters
# ------------------------------------------------------------
n_dim = 2       # spatial dimensions (2D trusses)
n_par_nod = 2   # presumably DoF parameters per node — confirm against truss_helpers

# Material & geometry properties (per element type: beam / column / rod)
width_properties = {"beam": 0.3, "column": 0.4, "rod": 0.1}  # m
height_properties = {"beam": 0.35, "column": 0.4, "rod": 0.1}  # m
unit_weight_properties = {"beam": 78.5, "column": 78.5, "rod": 78.5}  # kN/m³
elastic_mod_properties = {"beam": 2e8, "column": 2e8, "rod": 2e8}  # kN/m²
shear_mod = 8e7  # kN/m²

# ------------------------------------------------------------
# Dataset generation parameters
# ------------------------------------------------------------
span = 1.0  # bridge span passed to calculate_bridge — units per that helper
truss_modes = ["pratt", "howe", "warren"]
n_div_range = range(2, 61)    # number of divisions swept per mode
angle_range = range(30, 61)   # diagonal angle in degrees, swept per division
dataset_folder = "dataset"
# Import-time side effect: ensures the output folder exists.
os.makedirs(dataset_folder, exist_ok=True)
40
+
41
+ # ------------------------------------------------------------
42
+ # Main generation loop
43
+ # ------------------------------------------------------------
44
def generate_truss_dataset():
    """
    Sweep (truss_mode x n_div x angle) and save one .npz per configuration
    into ``dataset_folder``.

    For each combination: computes bridge geometry, derives structural
    element counts (twice — see the skip_rod note below), builds node and
    element arrays, applies boundary conditions, fills per-element
    properties, and saves everything with np.savez.
    """
    # num_generated = len(truss_modes) * len(n_div_range) * len(angle_range) # Currently it can be pre-calculated
    num_generated = 0

    for truss_mode in truss_modes:
        for n_div in n_div_range:
            for angle in angle_range:
                skip_rod = []

                # Geometry
                height, spacing, diag = calculate_bridge(
                    span, angle=angle, n_div=n_div, truss_mode=truss_mode
                )

                # Structural elements — first pass with an empty skip_rod
                n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_essential_elements(
                    span, spacing, truss_mode, skip_rod
                )
                # Re-calculate for skip_rod: truss_design needs the first-pass
                # counts to decide which rods to skip, then counts are redone.
                skip_rod = truss_design(n_bot_beams, n_rods, truss_mode)
                n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_essential_elements(
                    span, spacing, truss_mode, skip_rod
                )

                # Nodes and elements
                nodal_coord, par, pel, ele_nod, n_par_tot = calculate_element_node(
                    span,
                    spacing,
                    height,
                    n_dim,
                    n_par_nod,
                    truss_mode,
                    skip_rod,
                )

                X = nodal_coord[:, 0]
                Y = nodal_coord[:, 1]

                # Boundary conditions
                W = boundary_conditions(
                    n_bot_beams, n_par_nod, n_nod_tot, supports=["pin", "roller"]
                )

                # Initialize element property arrays (filled in place below)
                h = np.zeros(n_ele_tot, dtype=np.float32)
                J = np.zeros(n_ele_tot, dtype=np.float32)
                A = np.zeros(n_ele_tot, dtype=np.float32)
                beta = np.zeros(n_ele_tot, dtype=np.float32)
                ro = np.zeros(n_ele_tot, dtype=np.float32)
                E = np.zeros(n_ele_tot, dtype=np.float32)

                J, A, h, beta, ro, E, G = calculate_element_properties(
                    n_ele_tot,
                    n_columns,
                    n_beams,
                    diag,
                    spacing,
                    height,
                    J,
                    A,
                    h,
                    beta,
                    ro,
                    E,
                    X,
                    Y,
                    ele_nod,
                    shear_mod,
                    width_properties,
                    height_properties,
                    unit_weight_properties,
                    elastic_mod_properties,
                    truss_mode,
                )

                # Save dataset
                filename = f"{dataset_folder}/truss_{truss_mode}_{n_div}_{angle}.npz"
                np.savez(
                    filename,
                    nodal_coord=nodal_coord,
                    pel=pel,  # Element-DoF relation
                    ele_nod=ele_nod,  # Element-node connectivity
                    W=W,
                    J=J,
                    A=A,
                    h=h,
                    beta=beta,
                    ro=ro,
                    E=E,
                    G=G,
                    par=par,  # Nodal-DoF relation
                    X=X,
                    Y=Y,
                    height=height,
                    spacing=spacing,
                    diag=diag,
                    truss_mode=truss_mode,  # String saved as array of chars
                    n_div=n_div,
                    angle=angle,
                    n_columns=n_columns,
                    n_nod_tot=n_nod_tot,
                    n_rods=n_rods,
                    n_beams=n_beams,
                    n_ele_tot=n_ele_tot,
                    n_bot_beams=n_bot_beams,
                    skip_rod=np.array(skip_rod),
                )

                num_generated += 1

                # NOTE(review): "(unknown)" looks like a placeholder — this was
                # presumably meant to log `filename`; verify against history.
                logging.info(
                    f"Saved truss {truss_mode}, n_div={n_div}, angle={angle} → (unknown)"
                )

    logging.info(
        f"✅ Generated and saved {num_generated} truss configurations to '{dataset_folder}'."
    )
161
+
162
+
163
# ------------------------------------------------------------
# Script entry point: regenerates the full truss dataset on disk.
# ------------------------------------------------------------
if __name__ == "__main__":
    generate_truss_dataset()
utils/dataset_verifier.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import math
3
+ import random
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from collections import defaultdict
7
+
8
+
9
def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
    """
    Compute and print min/max sequence lengths (nodes, elements) across all trusses in the dataset.

    Only files with the smallest and largest ``n_div`` per truss mode are
    loaded, since those bound the sequence lengths for that mode.

    Args:
        dataset_folder (str): Path to the dataset folder containing .npz files.
        show_details (bool): If True, also print the keys/shapes of one example file.

    Returns:
        Dict with keys 'min_nodes', 'max_nodes', 'min_elements', 'max_elements'.

    Raises:
        ValueError: If the folder contains no .npz files.
    """
    npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
    if not npz_files:
        raise ValueError(f"No .npz files found in '{dataset_folder}'.")

    def parse_name(fname):
        # Filenames look like 'truss_<mode>_<n_div>_<angle>.npz'.
        # Returns (mode, n_div) or None for files that do not match the pattern
        # (the old code crashed on a non-integer n_div field).
        parts = fname[:-4].rsplit('_', 2)
        if len(parts) == 3 and parts[0].startswith('truss_'):
            try:
                return parts[0][6:], int(parts[1])
            except ValueError:
                return None
        return None

    # First pass: compute min/max n_div per mode
    min_max_div = defaultdict(lambda: (float('inf'), float('-inf')))
    for f in npz_files:
        parsed = parse_name(f)
        if parsed is None:
            continue
        mode, n_div = parsed
        min_d, max_d = min_max_div[mode]
        min_max_div[mode] = (min(min_d, n_div), max(max_d, n_div))

    # Second pass: collect one file per min/max per mode
    min_div_files = defaultdict(list)
    max_div_files = defaultdict(list)
    for f in npz_files:
        parsed = parse_name(f)
        if parsed is None:
            continue
        mode, n_div = parsed
        if n_div == min_max_div[mode][0]:
            min_div_files[mode].append(f)
        if n_div == min_max_div[mode][1]:
            max_div_files[mode].append(f)

    # Compute overall min/max sequence lengths by loading one min/max file per mode
    min_n_nod = float('inf')
    max_n_nod = 0
    min_n_ele = float('inf')
    max_n_ele = 0
    for mode in sorted(min_max_div):
        if min_div_files[mode]:
            # One representative file at the smallest n_div for this mode.
            # 'with' guarantees the archive handle is closed.
            with np.load(os.path.join(dataset_folder, min_div_files[mode][0])) as data_min:
                min_n_nod = min(min_n_nod, int(data_min['n_nod_tot']))
                min_n_ele = min(min_n_ele, int(data_min['n_ele_tot']))
        if max_div_files[mode]:
            # One representative file at the largest n_div for this mode.
            with np.load(os.path.join(dataset_folder, max_div_files[mode][0])) as data_max:
                max_n_nod = max(max_n_nod, int(data_max['n_nod_tot']))
                max_n_ele = max(max_n_ele, int(data_max['n_ele_tot']))

    print(f"Overall min sequence lengths: nodes={min_n_nod}, elements={min_n_ele}")
    print(f"Overall max sequence lengths: nodes={max_n_nod}, elements={max_n_ele}")

    # Additionally print how the keys inside one example file look
    if show_details:
        example_mode = next(iter(max_div_files))
        example_file = max_div_files[example_mode][0]
        with np.load(os.path.join(dataset_folder, example_file)) as example_data:
            print(f"\nExample data keys from '{example_file}': {example_data.files}")
            for key in example_data.files:
                print(f" - {key}: shape {example_data[key].shape}, dtype {example_data[key].dtype}")

    return {
        'min_nodes': min_n_nod,
        'max_nodes': max_n_nod,
        'min_elements': min_n_ele,
        'max_elements': max_n_ele
    }
95
+
96
def load_and_visualize_random_truss(dataset_folder="dataset", num_samples=1, save_fig=False):
    """
    Load random truss(es) from the dataset folder and visualize them.

    Plots are arranged in a grid of 3 columns (or 2 if the sample count is
    divisible by 2 but not 3, 1 otherwise).

    Args:
        dataset_folder (str): Path to the dataset folder containing .npz files.
        num_samples (int): Number of random trusses to load and plot (default: 1).
        save_fig (bool): If True, saves the multi-plot figure to dataset_folder.

    Returns:
        List of dicts with loaded data for each sample (for further use).

    Raises:
        ValueError: If the folder contains no .npz files.
    """
    npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
    if not npz_files:
        raise ValueError(f"No .npz files found in '{dataset_folder}'.")

    samples = []
    random.shuffle(npz_files)
    npz_files = npz_files[:num_samples]
    # The folder may hold fewer files than requested; remember the real count.
    n_plotted = len(npz_files)

    # Determine layout per figure
    if num_samples % 3 == 0 or num_samples > 3:
        n_cols = 3
    elif num_samples % 2 == 0:
        n_cols = 2
    else:
        n_cols = 1
    n_rows = math.ceil(num_samples / n_cols)

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(5 * n_cols, 3.5 * n_rows))
    axes = np.atleast_1d(axes).flatten()

    for i, filename in enumerate(npz_files):
        filepath = os.path.join(dataset_folder, filename)
        # 'with' closes the npz archive handle (the old code leaked them).
        with np.load(filepath) as data:
            nodal_coord = data['nodal_coord']
            ele_nod = data['ele_nod']
            truss_mode = str(data['truss_mode'])
            n_div = int(data['n_div'])
            angle = float(data['angle'])

            n_beams = int(data['n_beams'])
            n_columns = int(data['n_columns'])
            n_ele_tot = int(data['n_ele_tot'])

            ax = axes[i]
            ax.set_xlim(-0.05, 1.05)
            ax.set_ylim(-0.05, nodal_coord[:, 1].max() * 1.1)
            ax.set_aspect('equal')
            ax.set_title(f"{truss_mode} (n_div={n_div}, angle={angle:.0f}°)")
            ax.grid(True, alpha=0.3)

            # Nodes: the bottom chord sits at y == 0 (within float tolerance).
            bottom_mask = np.abs(nodal_coord[:, 1]) < 1e-6
            ax.scatter(nodal_coord[bottom_mask, 0], nodal_coord[bottom_mask, 1],
                       c='blue', s=45, label='Bottom Nodes')
            ax.scatter(nodal_coord[~bottom_mask, 0], nodal_coord[~bottom_mask, 1],
                       c='red', s=45, label='Top Nodes')

            # Elements are stored beams first, then columns, then rods.
            for j in range(n_ele_tot):
                node1, node2 = ele_nod[j]
                x1, y1 = nodal_coord[node1]
                x2, y2 = nodal_coord[node2]
                if j < n_beams:
                    ax.plot([x1, x2], [y1, y2], 'g-', lw=2, label='Beams' if j == 0 else "")
                elif j < n_beams + n_columns:
                    ax.plot([x1, x2], [y1, y2], 'k-', lw=3, label='Columns' if j == n_beams else "")
                else:
                    ax.plot([x1, x2], [y1, y2], 'purple', ls='--', lw=1.5,
                            label='Rods' if j == n_beams + n_columns else "")

            ax.legend(loc='upper right', fontsize=8)
            samples.append({k: data[k] for k in data.files})

    # Hide any unused axes. Fix: use the number actually plotted, which may
    # be smaller than num_samples when the folder has fewer files — the old
    # code left those extra axes visible and empty.
    for j in range(n_plotted, len(axes)):
        axes[j].axis('off')

    plt.tight_layout()

    if save_fig:
        out_path = os.path.join(dataset_folder, f"random_truss_grid_{num_samples}.png")
        plt.savefig(out_path, dpi=150, bbox_inches='tight')
        print(f"Saved figure to {out_path}")

    plt.show()
    return samples
utils/truss_constraints.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# General parameters
span = 50  # in m
angle = 0  # in degrees
n_dim = 2  # Number of dimensions
n_nod_ele = 2  # Number of nodes per element
n_par_nod = 3  # Number of parameters per node
n_par_ele = n_par_nod * n_nod_ele  # Number of parameters per element

# Plot settings
n_discritizations = 10  # Number of points for plotting
n_plots = 4  # Number of plots for bridge's truss

# Cross sections: columns 40x40, beams 30x35 and rods 10x10 (all in cm)
width_beam = 0.3
height_beam = 0.35

width_column = 0.4
height_column = 0.4

width_rod = 0.1
height_rod = 0.1

# Horizontal load on columns (kN) and its angle (degrees)
po = 100
theta = 0

# Unit weight and elastic modulus in kN/m^3 and kN/m^2
unit_weight_steel = 78.5
elastic_mod = 21 * 10**7
elastic_mod_rod = 21 * 10**7

# Shear modulus in kN/m^2
shear_mod = 8 * 10**6
k_shear = 0.9


################## HASHMAPS ##################

# Per-member-type property lookups, keyed by 'beam' / 'column' / 'rod'.
width_properties = {
    'beam': width_beam,
    'column': width_column,
    'rod': width_rod,
}

height_properties = {
    'beam': height_beam,
    'column': height_column,
    'rod': height_rod,
}

unit_weight_properties = {
    'beam': unit_weight_steel,
    'column': unit_weight_steel,
    'rod': unit_weight_steel,
}

elastic_mod_properties = {
    'beam': elastic_mod,
    'column': elastic_mod,
    'rod': elastic_mod_rod,
}
utils/truss_element_assembly.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from typing import List, Tuple
3
+
4
+
5
def nodal_coords(n_nod_tot: int, n_dim: int, n_columns: int, spacing: float,
                 height: float, n_bot_beams: int, truss_mode: str) -> np.ndarray:
    """
    Build the coordinate array for every node in the assembly.

    Args:
        n_nod_tot (int): Total number of nodes.
        n_dim (int): Number of dimensions.
        n_columns (int): Number of columns.
        spacing (float): Spacing between columns (length of beams).
        height (float): Height of the columns.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss ("warren", "simple"/"simple_cant",
            or a column-based mode such as pratt/howe).

    Returns:
        np.ndarray: Array of shape (n_nod_tot, n_dim) with the node positions.
    """
    coords = np.zeros((n_nod_tot, n_dim), dtype=float)

    if truss_mode in ("simple", "simple_cant"):
        # Every node lies on the bottom chord, evenly spaced.
        coords[:, 0] = spacing * np.arange(n_nod_tot)
    elif truss_mode != "warren":
        # Pratt/Howe: bottom nodes 0..n_columns+1; a top node sits directly
        # above each interior bottom node, offset by n_columns + 1 in index.
        n_bottom = n_columns + 2
        coords[:n_bottom, 0] = spacing * np.arange(n_bottom)
        interior = np.arange(1, n_columns + 1)
        coords[interior + n_columns + 1, 0] = spacing * interior
        coords[interior + n_columns + 1, 1] = height
    else:
        # Warren: top nodes sit halfway between consecutive bottom nodes.
        n_bottom = n_nod_tot - n_bot_beams
        coords[:n_bottom, 0] = spacing * np.arange(n_bottom)
        upper = np.arange(1, n_bottom)
        coords[upper + n_bot_beams, 0] = spacing * upper - spacing / 2
        coords[upper + n_bot_beams, 1] = height

    return coords
41
+
42
+
43
def pel_ele(par: np.ndarray, n_columns: int, n_beams: int, n_rods: int,
            n_par_nod: int, n_nod_tot: int, n_ele_tot: int, n_bot_beams: int,
            truss_mode: str, skip_rod: List[int] = None) -> np.ndarray:
    """
    Calculate the parameter-element numbering relation.
    Built from Beam -> Column -> Rods (considering skipped rods).

    Args:
        par (np.ndarray): Nodal-parameter numbering, one row per node.
        n_columns (int): Number of columns.
        n_beams (int): Number of beams.
        n_rods (int): Number of rods.
        n_par_nod (int): Number of parameters per node.
        n_nod_tot (int): Total number of nodes.
        n_ele_tot (int): Total number of elements.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss ("warren", "pratt", etc.).
        skip_rod (List[int]): Rods to skip, 0-indexed. Defaults to no skips.
            Fix: the default was a mutable list literal; use None instead.

    Returns:
        np.ndarray: The parameter-element numbering relation, one row per element.
    """
    if skip_rod is None:
        skip_rod = []

    if "simple" in truss_mode:
        # Simple bridges consist of beams only.
        return beam_pars(par, n_beams, n_par_nod, n_nod_tot, n_bot_beams, truss_mode)

    pel = np.zeros((n_ele_tot - len(skip_rod), 2 * n_par_nod), dtype=int)

    # Element ordering: beams first, then columns (non-warren only), then rods.
    pel[:n_beams] = beam_pars(par, n_beams, n_par_nod, n_nod_tot, n_bot_beams, truss_mode)
    if truss_mode != "warren":
        pel[n_beams:n_beams + n_columns] = column_pars(par, n_columns, n_par_nod)
    pel[n_beams + n_columns:] = rod_pars(par, n_rods, n_par_nod, n_nod_tot,
                                         n_bot_beams, truss_mode, skip_rod)

    return pel
77
+
78
+
79
def beam_pars(par: np.ndarray, n_beams: int, n_par_nod: int,
              n_nod_tot: int, n_bot_beams: int,
              truss_mode: str) -> np.ndarray:
    """
    Assemble the DoF rows for every beam element.

    Args:
        par (np.ndarray): Nodal-parameter numbering, one row per node.
        n_beams (int): Number of beams.
        n_par_nod (int): Number of parameters per node.
        n_nod_tot (int): Total number of nodes.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Truss layout ("simple*" chains all nodes in order).

    Returns:
        np.ndarray: One row per beam with the parameters of both endpoints.
    """
    beams = np.zeros((n_beams, 2 * n_par_nod))

    if "simple" in truss_mode:
        # Beam i connects consecutive nodes i and i + 1.
        beams[:, :n_par_nod] = par[:n_beams]
        beams[:, n_par_nod:] = par[1:n_beams + 1]
    else:
        # Bottom chord: consecutive bottom nodes.
        beams[:n_bot_beams, :n_par_nod] = par[:n_bot_beams]
        beams[:n_bot_beams, n_par_nod:] = par[1:n_bot_beams + 1]
        # Top chord: consecutive nodes of the upper node range.
        beams[n_bot_beams:, :n_par_nod] = par[n_bot_beams + 1:n_nod_tot - 1]
        beams[n_bot_beams:, n_par_nod:] = par[n_bot_beams + 2:]

    return beams
109
+
110
+
111
def column_pars(par: np.ndarray, n_columns: int, n_par_nod: int) -> np.ndarray:
    """
    Assemble the DoF rows for every column element.

    Args:
        par (np.ndarray): Nodal-parameter numbering, one row per node.
        n_columns (int): Number of columns.
        n_par_nod (int): Number of parameters per node.

    Returns:
        np.ndarray: One row per column with the parameters of both endpoints.
    """
    columns = np.zeros((n_columns, 2 * n_par_nod))
    # Column k joins interior bottom node k + 1 to the node offset by
    # n_columns + 1 in the numbering (its counterpart above it).
    columns[:, :n_par_nod] = par[1:n_columns + 1]
    columns[:, n_par_nod:] = par[n_columns + 2:2 * n_columns + 2]
    return columns
129
+
130
+
131
def rod_pars(par: np.ndarray, n_rods: int, n_par_nod: int, n_nod_tot: int,
             n_bot_beams: int, truss_mode: str, skip_rod: List[int] = []) -> np.ndarray:
    """
    Calculate the relevant rod DoFs.

    Args:
        par (np.ndarray): Nodal-parameter numbering, one row per node.
        n_rods (int): Number of rods (before skipping).
        n_par_nod (int): Number of parameters per node.
        n_nod_tot (int): Total number of nodes.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss ("warren" or other).
        skip_rod (List[int]): The rods to skip. From left to right.
            NOTE(review): mutable default argument; harmless here because the
            list is only read, but a None default would be safer.

    Returns:
        np.ndarray: The rod DoFs, shape (n_rods - len(skip_rod), 2 * n_par_nod).
    """
    rods = np.zeros((n_rods - len(skip_rod), 2 * n_par_nod))

    count = 0   # rod index counted over ALL rods, including skipped ones
    skips = 0   # how many rods have been skipped so far

    # Each upper node (row i + n_bot_beams + 1 of par) anchors up to two
    # diagonals (j = 0 and j = 1), which share that node as second endpoint.
    for i in range(n_nod_tot - n_bot_beams - 1):
        for j in range(2):
            if count not in skip_rod:
                rods[count - skips][n_par_nod:] = par[i + n_bot_beams + 1]
                if j > 0 and truss_mode != "warren":
                    # Pratt/Howe: the second diagonal of the pair reaches the
                    # node two rows further down the numbering (2*j + i = i + 2).
                    rods[count - skips][:n_par_nod] = par[2 * j + i]
                else:
                    rods[count - skips][:n_par_nod] = par[j + i]
            else:
                skips += 1
            count += 1

    return rods
166
+
167
+
168
def fill_ele_nod(n_ele_tot: int, n_par_nod: int, pel: np.ndarray,
                 skip_rod: List[int] = []) -> np.ndarray:
    """
    Derive the element-node connectivity from the parameter-element relation.

    Since each node owns n_par_nod consecutive parameters (numbered from 1),
    integer-dividing the first parameter of each endpoint by n_par_nod
    recovers that endpoint's node index.

    Args:
        n_ele_tot (int): Total number of elements.
        n_par_nod (int): Number of parameters per node.
        pel (np.ndarray): Parameter-element numbering relation.
        skip_rod (List[int]): Rods to skip (shrinks the row count).

    Returns:
        np.ndarray: One row per element with its two node indices.
    """
    n_kept = n_ele_tot - len(skip_rod)
    ele_nod = np.zeros((n_kept, 2), dtype=int)

    ele_nod[:, 0] = pel[:, 0] // n_par_nod
    ele_nod[:, 1] = pel[:, n_par_nod] // n_par_nod

    return ele_nod
188
+
189
+
190
# If main is called, run a small smoke test of the assembly helpers
if __name__ == "__main__":
    n_par_nod = 3
    par = np.arange(1, 7 * n_par_nod + 1).reshape(7, n_par_nod)
    n_beams = 5
    truss_mode = "warren"
    n_bot_beams = 3
    n_nod_tot = 7
    n_rods = 6
    n_dim = 3
    n_columns = 2
    # Fix: skip_rod was referenced below but never defined (NameError at run).
    skip_rod = []

    if truss_mode == "warren":
        n_columns = 0
    n_ele_tot = n_beams + n_columns + n_rods

    print(par)
    pel = pel_ele(par, n_columns, n_beams, n_rods, n_par_nod, n_nod_tot,
                  n_ele_tot, n_bot_beams, truss_mode, skip_rod)
    print(pel)
utils/truss_geometric.py ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ from typing import Dict, Tuple, List, Optional
5
+ from utils.truss_element_assembly import nodal_coords, pel_ele, fill_ele_nod
6
+
7
# Allowed range for the diagonal-rod angle, in radians (30°–60°).
MIN_ANGLE = np.pi / 6
MAX_ANGLE = np.pi / 3

# Module-level logger
logger = logging.getLogger(__name__)
12
+
13
+
14
+ ################## Geometric Functions ##################
15
def calculate_max_height(span: float, angle: float, spacing: float = 0,
                         tol_round: Optional[List] = None) -> Tuple[float, float]:
    """
    Calculate the maximum height and spacing for the given span and angle.
    If spacing is given, no span/spacing ratio is respected.

    Args:
        span: The total length of the bridge.
        angle: The angle of the diagonal rods in radians.
        spacing: Pre-defined spacing between columns. If 0, it will be calculated.
        tol_round: Decimal places to round to; first value is for the height,
            second for the spacing. More angles will be found with higher
            values. (Fix: docstring previously documented a nonexistent
            'allowed_round' parameter.)

    Returns:
        A tuple containing the height and spacing if valid, otherwise (-1, 0).
    """
    if tol_round is None:
        tol_round = [3, 1]

    if not MIN_ANGLE <= angle <= MAX_ANGLE:
        # Use the module logger for consistency with the rest of this package.
        logger.warning("The angle should be between %d and %d. "
                       "Defaulting to 45 degrees.",
                       np.degrees(MIN_ANGLE), np.degrees(MAX_ANGLE))
        angle = np.radians(45)

    if spacing:
        height = round(spacing * np.tan(angle), tol_round[0])
        return height, spacing

    # Search span/height ratios 15..20 for a spacing that divides the span.
    for i in range(15, 21):
        height = round(span / i, tol_round[0])
        spacing = round(height / np.tan(angle), tol_round[1])
        # Guard: aggressive rounding can collapse spacing to 0, which would
        # raise ZeroDivisionError in the modulo below.
        if spacing > 0 and span % spacing == 0:
            return height, spacing

    return -1, 0
50
+
51
+
52
def try_angles(span: float, base_angle: int, lower_limit: float = 30, upper_limit: float = 60,
               tol_round: Optional[List] = None) -> Tuple[float, float, float]:
    """
    Try to find a valid height and spacing by nudging the angle away from
    base_angle, alternating above and below, until a solution is found.

    Args:
        span: The total length of the bridge.
        base_angle: The base angle of the diagonal rods in degrees.
        lower_limit: Smallest angle (degrees) worth exploring.
        upper_limit: Largest angle (degrees) worth exploring.
        tol_round: Rounding digits forwarded to calculate_max_height.

    Returns:
        A tuple containing the height, spacing, and adjusted angle in degrees
        if found, otherwise (-1, -1, -1).
    """
    if tol_round is None:
        tol_round = [3, 1]

    # Largest shift needed to sweep from base_angle out to either limit.
    angle_diff = int(max(abs(base_angle - lower_limit), abs(base_angle - upper_limit)))

    # Fix: the old range(angle_diff) never tried the extreme shift (off-by-one)
    # and tested shift 0 twice (+0 and -0 are the same angle).
    for shift in range(angle_diff + 1):
        for sign in ([1] if shift == 0 else [1, -1]):
            candidate = base_angle + sign * shift
            height, spacing = calculate_max_height(span, np.radians(candidate),
                                                   tol_round=tol_round)
            # A non-zero spacing signals a suitable solution.
            if spacing:
                return height, spacing, candidate
    return -1, -1, -1
79
+
80
+
81
def calculate_bridge(span: float, angle: int = 45, n_div: int = None, spacing: float = 0,
                     truss_mode: str = "pratt", lower_limit: int = 30,
                     upper_limit: int = 60, tol_round: Optional[List] = None) -> Tuple[float, float, float]:
    """
    Calculate the height of the bridge, spacing of columns, and diagonal length of rods.
    If n_div is provided, spacing is set as span / n_div and height is adjusted
    accordingly (priority: n_div > spacing > angle search).
    Span can be normalized to 1 for dataset generation.

    Args:
        span (float): The total length of the bridge (e.g., 1 for normalized).
        angle (float): The angle of the diagonal rods in degrees. Default is 45 degrees.
        n_div (int, optional): Number of divisions (panels/segments). If provided, sets spacing = span / n_div.
        spacing (float, optional): Pre-defined spacing. Used if n_div is None.
        truss_mode (str): The mode of the truss bridge (warren, pratt, howe). Defaults to pratt.
        lower_limit (int): Lower angle bound (degrees) for the angle search.
        upper_limit (int): Upper angle bound (degrees) for the angle search.
        tol_round (List, optional): Rounding digits forwarded to the helpers.

    Returns:
        (float, float, float): The height of the bridge, the distance between columns,
        and the length of the diagonal elements (rods).

    Raises:
        ValueError: If n_div is given but not positive.
        RuntimeError: If the angle search cannot find a suitable height.
    """
    space_divisor = 1
    if truss_mode == "warren":
        space_divisor = 2  # Since nodes are in the middle of the beams

    if n_div is not None:
        if n_div <= 0:
            raise ValueError("n_div must be a positive integer.")
        spacing = span / n_div
        angle_rad = np.radians(angle)
        height = (spacing / space_divisor) * np.tan(angle_rad)  # Consistent angle across modes
    elif spacing:
        angle_rad = np.radians(angle)
        height, spacing = calculate_max_height(span, angle_rad, spacing, tol_round=tol_round)
    else:
        logging.info("Trying to find a suitable height and spacing based on the angle.")
        height, spacing, used_angle = try_angles(span, angle, lower_limit,
                                                upper_limit, tol_round=tol_round)
        used_angle = round(used_angle, 2)

        # NOTE(review): these checks only apply to the angle-search branch,
        # where used_angle is defined. try_angles signals failure with -1.
        if height == -1:
            raise RuntimeError("A suitable height for the bridge could not be found. Please adjust the span")

        if angle != used_angle:
            logging.warning("Adjusted angle to %.2f degrees to find a solution.", used_angle)

    # Rod length from spacing (half-panel for warren) and height.
    diag = np.sqrt((spacing / space_divisor)**2 + height**2)
    return height, spacing, diag
127
+
128
+
129
def calculate_essential_elements(span: float, spacing: float, truss_mode: str = "pratt",
                                 skip_rod: Optional[List] = None) -> Tuple[int, int, int, int, int, int]:
    """
    Count the columns, nodes, rods, beams and total elements of a truss.

    Args:
        span: Total bridge length.
        spacing: Distance between bottom-chord nodes.
        truss_mode: "warren" or a column-based mode (pratt/howe/...).
        skip_rod: Rods that will be omitted; shrinks the rod count.

    Returns:
        (n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams)
    """
    skip_rod = [] if skip_rod is None else skip_rod

    # Round before truncating to counter float imprecision in span / spacing.
    ratio = int(round(span / spacing))

    if truss_mode == "warren":
        n_columns = 0
        n_beams = 2 * ratio - 1
        n_bot_beams = int(np.ceil(n_beams / 2))
        n_rods = n_beams + 1
    else:
        n_columns = ratio - 1
        n_beams = 2 * ratio - 2
        n_bot_beams = n_beams // 2 + 1
        n_rods = n_beams

    n_rods -= len(skip_rod)
    n_nod_tot = n_beams + 2
    n_ele_tot = n_columns + n_rods + n_beams

    return n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams
155
+
156
+
157
def calculate_simple_elements(span: float, spacing: float, truss_mode: str, col_placements: Optional[List] = None,
                              skip_col: Optional[List] = None, beam_partition: int = 1) -> Tuple[int, int, int, int, int, int]:
    """
    Calculate the number of columns and beams for a simple bridge.

    Args:
        span (float): The total length of the bridge.
        spacing (float): The distance between nodes (columns) in the bridge.
        truss_mode (str): "simple" or "simple_cant" (cantilever adds one beam).
        col_placements (List, optional): Explicit column positions; overrides
            the spacing-derived column count.
        skip_col (List, optional): Columns to omit.
        beam_partition (int): Number of sub-beams per segment; must divide the
            spacing evenly.

    Returns:
        (int, int, int, int, int, int): n_columns, n_nod_tot, n_rods (always 0),
        n_beams, n_ele_tot, n_bot_beams (always 0) — same shape as
        calculate_essential_elements for interchangeability.

    Raises:
        ValueError: If beams are partitioned together with custom column
            placements, or the partition does not divide the spacing.
    """
    extra_beams = 0  # Extra beam for cantilevered bridges
    if col_placements:
        n_columns = len(col_placements)
    else:
        n_columns = int(span // spacing) + 1

    if skip_col:
        n_columns -= len(skip_col)

    if truss_mode != "simple":
        extra_beams = 1

    if beam_partition > 1:
        if col_placements:
            raise ValueError("Cannot partition beams with custom column placements.")
        if spacing // beam_partition < spacing / beam_partition:
            raise ValueError("Beam partition should be a divisor of the spacing.")
        n_beams = (n_columns - 1 + extra_beams) * beam_partition
    else:
        # Fix: the old code printed "Defaulting partition to 1." even on the
        # default path (beam_partition == 1); only report unusual values, and
        # use logging instead of print for consistency with this module.
        if beam_partition != 1:
            logging.info("Defaulting partition to 1.")
        n_beams = n_columns - 1 + extra_beams

    n_nod_tot = n_beams + 2
    n_ele_tot = n_columns + n_beams

    return n_columns, n_nod_tot, 0, n_beams, n_ele_tot, 0
198
+
199
+
200
def calculate_element_node(span: float, spacing: float, height: float, n_dim: int,
                           n_par_nod: int, truss_mode: str = "pratt",
                           skip_rod: Optional[List] = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]:
    """
    Calculate the nodal coordinates, nodal-param relation and
    element-node relationships for a truss bridge.

    Args:
        span (float): The total length of the bridge.
        spacing (float): The distance between nodes (columns) in the bridge.
        height (float): The height of the bridge.
        n_dim (int): The number of dimensions in the bridge
            (usually 2 for 2D bridges).
        n_par_nod (int): The number of parameters (DoFs) per node.
        truss_mode (str): Truss layout ("pratt", "howe", "warren", ...).
        skip_rod (List, optional): Rods omitted from the element relations.
            NOTE(review): skip_rod is deliberately NOT forwarded to
            calculate_essential_elements below — pel_ele/fill_ele_nod subtract
            the skipped rods themselves, so passing it twice would
            double-count the skips.

    Returns:
        nodal_coord (numpy.ndarray): One row per node with its coordinates.
        par (numpy.ndarray): One row per node listing its parameter numbers
            (numbered 1..n_par_tot row by row).
        pel (numpy.ndarray): One row per element listing the parameters of its
            two endpoints.
        ele_nod (numpy.ndarray): One row per element listing its two node indices.
        n_par_tot (int): The total number of parameters in the bridge.
    """
    # Count columns, rods, beams and total nodes; derive total parameters
    if skip_rod is None:
        skip_rod = []

    n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_essential_elements(span, spacing, truss_mode)
    n_par_tot = n_nod_tot * n_par_nod

    # Positions of the nodes
    nodal_coord = nodal_coords(n_nod_tot, n_dim, n_columns, spacing, height, n_bot_beams, truss_mode)

    # Nodal-param relation: parameters numbered 1..n_par_tot, row by row
    par = np.arange(1, n_nod_tot * n_par_nod + 1).reshape(n_nod_tot, n_par_nod)

    # Element-param relationships
    pel = pel_ele(par, n_columns, n_beams, n_rods, n_par_nod, n_nod_tot,
                  n_ele_tot, n_bot_beams, truss_mode, skip_rod)

    # Element-node relationships
    ele_nod = fill_ele_nod(n_ele_tot, n_par_nod, pel, skip_rod)

    return nodal_coord, par, pel, ele_nod, n_par_tot
249
+
250
+
251
def calculate_element_properties(n_ele_tot: int, n_columns: int, n_beams: int, diag: float, spacing: float,
                                 height: float, J: np.ndarray, A: np.ndarray, h: np.ndarray, beta: np.ndarray,
                                 ro: np.ndarray, E: np.ndarray, X: np.ndarray, Y: np.ndarray, ele_nod: List,
                                 shear_mod: int, width_properties: Dict, height_properties: Dict,
                                 unit_weight_properties: Dict, elastic_mod_properties: Dict,
                                 truss_mode: str, beam_partition: int = 1) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Calculate the properties of the elements in the truss bridge.

    Elements are ordered beams first, then columns, then rods; the slice
    assignments below rely on that ordering.

    Note:
        Y is currently unused — the element angle beta is computed from X and
        the element length only. NOTE(review): arccos discards the sign of the
        vertical component; confirm that is intended for downward diagonals.

    Returns:
        J, A, h, beta, ro, E, G: Numpy arrays of the moments of inertia, areas and heights, angles,
        unit weights and elastic moduli of the elements. With J in m^4, A in m^2 and h in m, beta
        in radians, ro in kN/m^3 and E in kN/m^2.
    """
    # Cross-section areas per member type (width × height)
    area_beam = width_properties['beam'] * height_properties['beam']
    area_column = width_properties['column'] * height_properties['column']
    area_rod = width_properties['rod'] * height_properties['rod']

    # Moments of inertia. NOTE(review): uses width**3 * height / 12 — verify
    # the intended bending axis (b·h³/12 is the usual strong-axis form).
    inertia_beam = width_properties['beam']**3 * height_properties['beam'] / 12
    inertia_column = width_properties['column']**3 * height_properties['column'] / 12
    inertia_rod = width_properties['rod']**3 * height_properties['rod'] / 12

    # Beams occupy the first n_beams slots
    J[:n_beams] = inertia_beam
    A[:n_beams] = area_beam
    if "simple" in truss_mode:
        # Partitioned simple beams are shorter than the column spacing
        h[:n_beams] = spacing / beam_partition
    else:
        h[:n_beams] = spacing
    ro[:n_beams] = unit_weight_properties['beam']
    E[:n_beams] = elastic_mod_properties['beam']

    # Columns and rods only exist when there are elements beyond the beams
    if n_beams != n_ele_tot:
        J[n_beams:n_beams + n_columns] = inertia_column
        A[n_beams:n_beams + n_columns] = area_column
        h[n_beams:n_beams + n_columns] = height
        ro[n_beams:n_beams + n_columns] = unit_weight_properties['column']
        E[n_beams:n_beams + n_columns] = elastic_mod_properties['column']

        J[n_beams + n_columns:] = inertia_rod
        A[n_beams + n_columns:] = area_rod
        h[n_beams + n_columns:] = diag
        ro[n_beams + n_columns:] = unit_weight_properties['rod']
        E[n_beams + n_columns:] = elastic_mod_properties['rod']

    # Sanity check: every element must have received all of its properties
    if np.any(J == 0) or np.any(A == 0) or np.any(h == 0) or np.any(ro == 0) or np.any(E == 0):
        raise ValueError("There are materials with no property")

    # Element inclination from the horizontal projection and element length
    for i, _ in enumerate(beta):
        dx = X[ele_nod[i, 1]] - X[ele_nod[i, 0]]
        if abs(h[i]) < 1e-10:  # Avoid division by zero
            beta[i] = 0
        else:
            # Ensure value is within [-1, 1] for arccos
            cos_val = np.clip(dx / abs(h[i]), -1.0, 1.0)
            beta[i] = np.arccos(cos_val)

    # Shear modulus is uniform across all elements
    G = np.full(len(E), shear_mod, dtype=np.float32)

    # Downcast everything to float32 for compact dataset storage
    h = h.astype(np.float32)
    A = A.astype(np.float32)
    E = E.astype(np.float32)
    J = J.astype(np.float32)
    beta = beta.astype(np.float32)
    ro = ro.astype(np.float32)

    return J, A, h, beta, ro, E, G
321
+
322
+
323
def boundary_conditions(n_bot_beams: int, n_par_nod: int, n_nod_tot: int,
                        supports: Optional[List[str]] = None) -> np.ndarray:
    """
    Build the boundary-condition mask for the truss bridge.

    Returns:
        Int array of length n_nod_tot * n_par_nod where entry i is 1 when
        parameter i is constrained. Defaults to a pin at the first node and a
        roller at node n_bot_beams; any other support name is treated as fixed.
    """
    if supports is None:
        supports = ["pin", "roller"]

    def support_dof(kind, n_dof):
        # Per-node constraint pattern: a roller restrains y only, a pin
        # restrains x and y, anything else clamps every DoF.
        mask = np.zeros(n_dof, dtype=int)
        if kind == "roller":
            mask[1] = 1
        elif kind == "pin":
            mask[:2] = 1
        else:
            mask[:] = 1
        return mask

    # Assemble the full mask with the two supports stamped in place
    bounds = np.zeros(n_nod_tot * n_par_nod, dtype=np.int32)
    bounds[:n_par_nod] = support_dof(supports[0], n_par_nod)
    start = n_par_nod * n_bot_beams
    bounds[start:start + n_par_nod] = support_dof(supports[1], n_par_nod)

    return bounds
352
+
353
+
354
def truss_design(n_bot_beams: int, n_rods: int,
                 truss_mode: str = "pratt") -> np.ndarray:
    """
    Determine which diagonal rods to skip for a given truss design.

    Args:
        n_bot_beams: Number of bottom-chord beams.
        n_rods: Total number of rods.
        truss_mode: "pratt", "howe", or any other value for no skipped rods.

    Returns:
        Integer numpy array with the indices of the rods to skip.  (Fix: the
        pratt/howe branches previously returned a Python list while the
        fallback returned an ndarray; all branches now return an int ndarray
        as the docstring promises.)
    """
    truss_mode = truss_mode.lower()

    def pratt_howe(n_bot, n_rod, start=3, mid=0):
        left_side = np.arange(start, n_bot, 2)
        right_side = np.arange(n_bot + mid, n_rod, 2)

        # For a Howe truss, drop the last right-side rod when it would hit the
        # final rod index, which is invalid for the mirrored layout.
        if truss_mode == "howe" and right_side.size > 0 and right_side[-1] == n_rod - 1:
            right_side = right_side[:-1]
        return np.concatenate((left_side, right_side)).astype(int)

    if truss_mode == "pratt":
        return pratt_howe(n_bot_beams - 1, n_rods - 1, 2)
    elif truss_mode == "howe":
        return pratt_howe(n_bot_beams - 1, n_rods, 1, 1)
    else:
        return np.array([], dtype=int)
380
+
381
+
382
def col_pos(W: np.ndarray, n_par_nod: int, X: np.ndarray,
            Y: Optional[np.ndarray] = None) -> "Tuple[np.ndarray, np.ndarray]":
    """
    Find the coordinates of the supported (column) nodes.

    Args:
        W: Boundary-condition array (1 for a restrained DOF, 0 otherwise).
        n_par_nod: Number of DOFs per node.
        X: X coordinates of the nodes.
        Y: Optional Y coordinates; supports sit at y = 0 when omitted.

    Returns:
        Tuple ``(x_col, y_col)`` of the supported nodes' coordinates.
        (Fix: the return annotation previously claimed a single ndarray
        although a tuple of two arrays is returned.)
    """
    x_col = []
    y_col = []

    for i, pos in enumerate(X):
        # A node is a support if any of its DOFs is restrained.
        if 1 in W[i * n_par_nod:(i + 1) * n_par_nod]:
            x_col.append(pos)
            y_col.append(0 if Y is None else Y[i])

    return np.array(x_col), np.array(y_col)
410
+
411
+
412
+ ################## Plotting Functions ##################
413
def plot_elements(ax, truss_mode, ele_nod, X, Y, h, beta):
    """
    Draw the structural elements on a matplotlib axes.

    A ``truss_mode`` containing "simple" selects a 1D view where every
    element is a horizontal segment on the x-axis; otherwise each element is
    drawn from its start node at the angle stored in ``beta`` (2D view).
    Element indices are written at segment midpoints and nodes are marked
    with red circles.

    Args:
        ax: Matplotlib axes to draw on.
        truss_mode: Mode string; containing "simple" selects the 1D view.
        ele_nod: Element-to-node connectivity array.
        X: X coordinates of nodes.
        Y: Y coordinates of nodes (used only in the 2D view).
        h: Element lengths.
        beta: Element angles in radians.
    """
    if "simple" in truss_mode:
        # 1D view: every element lies on the x-axis.
        for i in range(len(ele_nod)):
            x0 = X[ele_nod[i, 0]]
            x1 = x0 + h[i]
            ax.plot([x0, x1], [0, 0], 'b')
            ax.text((x0 + x1) / 2, 0, str(i), verticalalignment='bottom')

        ax.plot(X, np.zeros_like(X), 'ro')
        ax.set_title('1D Element Plot')
    else:
        # 2D view: elements start at their first node and follow beta.
        for i in range(len(ele_nod)):
            x0, y0 = X[ele_nod[i, 0]], Y[ele_nod[i, 0]]
            x1 = x0 + h[i] * np.cos(beta[i])
            y1 = y0 + h[i] * np.sin(beta[i])
            ax.plot([x0, x1], [y0, y1], 'b')
            ax.text((x0 + x1) / 2, (y0 + y1) / 2, str(i))

        ax.plot(X, Y, 'ro')
        ax.set_title('2D Element Plot')
451
+
452
+
453
+ def plot_supports(ax, X_col, Y_col, W, n_par_nod, X):
454
+ """
455
+ Plot supports (roller, pin, fixed) on the structure.
456
+
457
+ Args:
458
+ ax: Matplotlib axes object.
459
+ X_col: Numpy array of X coordinates where supports are located.
460
+ Y_col: Numpy array of Y coordinates where supports are located.
461
+ W: Numpy array representing boundary conditions (1 for restrained, 0 for free).
462
+ n_par_nod: Number of parameters per node.
463
+ support_types: List of support types to cycle through.
464
+ """
465
+ for i, _ in enumerate(X_col):
466
+ x = X_col[i]
467
+ y = Y_col[i]
468
+
469
+ # Find the node index
470
+ idx = np.where(X == x)[0][0]
471
+ support_type = sum(W[idx * n_par_nod:(idx + 1) * n_par_nod])
472
+
473
+ if support_type == 1:
474
+ # Larger circle for roller
475
+ circle_radius = 0.03 # Increased size
476
+ circle = plt.Circle((x, y - circle_radius), circle_radius, color='k', fill=True)
477
+ ax.add_patch(circle)
478
+ elif support_type == 2:
479
+ # Larger triangle for pin
480
+ triangle_base = 0.1
481
+ triangle_height = 0.05
482
+
483
+ tri_x = [x - triangle_base/2, x + triangle_base/2, x]
484
+ tri_y = [y - triangle_height, y - triangle_height, y]
485
+ ax.fill(tri_x, tri_y, 'k')
486
+ elif support_type == 3:
487
+ # Fixed support with lines
488
+ line_height = 0.015
489
+ line_width = 0.05
490
+
491
+ ax.plot([x - line_width/2, x + line_width/2],
492
+ [y - line_height, y - line_height], 'k', linewidth=2)
493
+ for j in range(5):
494
+ gap = line_width / 4
495
+ ax.plot([x - line_width/2 + j * gap, x - line_width/2 + j * gap],
496
+ [y - line_height, y - line_height - 0.02], 'k')
utils/truss_helpers.py ADDED
@@ -0,0 +1,612 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import multiprocessing as mp
from typing import Callable, List, Optional, Tuple

import numpy as np
import scipy.linalg
import sympy as sp
from sympy import Matrix, lambdify
8
+
9
+ # Get the logger
10
+ logger = logging.getLogger(__name__)
11
+
12
+ ################## Basic Functions ##################
13
def initialize_symbols(n_par_ele: int) -> Tuple:
    """
    Create the sympy symbols shared by the truss calculations.

    Args:
        n_par_ele: Number of DOFs per element (sizes the qe / Qglo tuples).

    Returns:
        Tuple of symbolic variables in the order expected by callers:
        coordinates and element data, interpolation coefficient arrays,
        material symbols, and finally the global-displacement symbols.
    """
    # Coordinates, element geometry and material symbols
    x, xi, h_e, beta_e, beta_curr = sp.symbols('x xi h_e beta_e beta_curr')
    A_e, E_e, J_e, ro_e, T, fo_E = sp.symbols('A_e E_e J_e ro_e T fo_E')
    qe = sp.symbols(f'qe:{n_par_ele}')

    # Interpolation coefficients for the local displacement fields
    a_arr = sp.symbols('a:2')   # axial
    b_arr = sp.symbols('b:2')   # rods
    d_arr = sp.symbols('d:2')   # rods
    c_arr = sp.symbols('c:4')   # transversal
    e_arr = sp.symbols('e:3')   # Timoshenko rotation

    # Symbols used to map local results to global displacements
    Qglo_pel_curr = sp.symbols(f'Qglo_pel_curr:{n_par_ele}')
    w_arr = sp.symbols('w:2')
    r_arr = sp.symbols('r:2')
    f_arr = sp.symbols('f:2')
    g_arr = sp.symbols('g:4')
    X_old, Y_old = sp.symbols('X_old Y_old')

    return (x, xi, h_e, beta_e, beta_curr, qe, a_arr, b_arr,
            c_arr, d_arr, e_arr, A_e, E_e, J_e, ro_e, T, fo_E, X_old, Y_old,
            Qglo_pel_curr, w_arr, r_arr, f_arr, g_arr)
46
+
47
+
48
def define_newton_equation(x: "sp.Symbol", coeffs: "List[sp.Symbol]") -> "sp.Expr":
    """
    Build a polynomial in ``x`` from a coefficient list.

    The i-th coefficient multiplies ``x**i``, i.e. the result is
    ``coeffs[0] + coeffs[1]*x + coeffs[2]*x**2 + ...``; an empty coefficient
    list yields 0.

    Args:
        x: The polynomial variable (symbolic, or numeric for evaluation).
        coeffs: Coefficients, lowest order first.

    Returns:
        The polynomial expression.
    """
    # Fix: the annotation previously referenced sp.symbols (the factory
    # function) as a type; string annotations also avoid evaluating sympy
    # names at definition time.
    return sum(c * x**i for i, c in enumerate(coeffs))
61
+
62
+
63
def define_langrange_equation(xi: sp.Symbol, he: sp.Symbol,
                              type_beam: Optional[int] = 1) -> sp.Matrix:
    """
    Shape functions in the local coordinate ``xi`` (0..1).

    For ``type_beam == 1`` the four Hermite cubic beam shape functions are
    returned (a 4x1 matrix, with ``he`` scaling the rotational DOFs);
    otherwise the two linear rod shape functions are returned (a 2x1
    matrix).

    Args:
        xi: The local (normalized) coordinate.
        he: The element length.
        type_beam: 1 for beam (Hermite) functions, anything else for rod.

    Returns:
        sp.Matrix: The requested shape-function column matrix.  (Fix: the
        return annotation previously claimed ``Tuple[sp.Matrix, sp.Matrix]``
        although a single matrix is returned per call.)
    """
    if type_beam == 1:
        N_beam_shape = sp.zeros(4, 1)
        N_beam_shape[0] = 1 - 3*xi**2 + 2*xi**3
        N_beam_shape[1] = he * (xi - 2*xi**2 + xi**3)
        N_beam_shape[2] = 3*xi**2 - 2*xi**3
        N_beam_shape[3] = he * (-xi**2 + xi**3)
        return N_beam_shape

    N_rod_shape = sp.zeros(2, 1)
    N_rod_shape[0] = 1 - xi
    N_rod_shape[1] = xi

    return N_rod_shape
89
+
90
+
91
def compute_v_u(qe: List[sp.symbols], beta_e: sp.Symbol
                ) -> Tuple[sp.Expr, sp.Expr, sp.Expr, sp.Expr]:
    """
    Rotate the nodal DOFs into the element's local frame.

    Args:
        qe (List[sp.Symbol]): Element displacement vector (node 1 DOFs in
            qe[0:3], node 2 DOFs in qe[3:6]).
        beta_e (sp.Symbol): Element inclination angle.

    Returns:
        Tuple[sp.Expr, sp.Expr, sp.Expr, sp.Expr]: (v1, u1, v2, u2), the
        transversal and axial displacements at nodes 1 and 2.
    """
    s_b = sp.sin(beta_e)
    c_b = sp.cos(beta_e)

    v1 = -qe[0] * s_b + qe[1] * c_b
    u1 = qe[0] * c_b + qe[1] * s_b
    v2 = -qe[3] * s_b + qe[4] * c_b
    u2 = qe[3] * c_b + qe[4] * s_b

    return v1, u1, v2, u2
108
+
109
+
110
def define_equilibrium_langrange(beam_type: str, u_beam: sp.Expr, v_beam: sp.Expr, alpha_beam: sp.Expr,
                                 xi: sp.Symbol, h_e: sp.Symbol, v1: sp.Expr, u1: sp.Expr, v2: sp.Expr,
                                 u2: sp.Expr, qe: List[sp.symbols]) -> Tuple[sp.Expr, sp.Expr, sp.Expr, sp.Expr, sp.Expr]:
    """
    Interpolate the element displacement fields with Lagrange/Hermite shape
    functions and the local nodal displacements (v1, u1, v2, u2).

    NOTE(review): this function appears broken as written — ``z`` is used in
    both branches but is never defined or passed in (NameError at runtime;
    presumably the through-thickness coordinate symbol), and the
    ``u_beam`` / ``v_beam`` / ``alpha_beam`` parameters are overwritten or
    unused.  Confirm the intended inputs before relying on it.

    Returns:
        (v_beam, u_beam, theta_beam, v_rod, u_rod) interpolated fields.
    """
    # Hermite (beam) and linear (rod) shape functions in xi.
    N_beam_shape = define_langrange_equation(xi, h_e, type_beam=1)
    N_rod_shape = define_langrange_equation(xi, h_e, type_beam=0)
    # NOTE(review): sp.Expr(0) is an unusual way to build a zero expression;
    # sympy convention would be sp.S(0) / sp.Integer(0) — confirm intent.
    theta_beam = sp.Expr(0)

    # Compute local displacements
    v_rod = N_rod_shape[0] * v1 + N_rod_shape[1] * v2
    u_rod = N_rod_shape[0] * u1 + N_rod_shape[1] * u2

    if beam_type == "bernoulli":
        v_beam = N_beam_shape[0]*v1 + N_beam_shape[1]*qe[2] + N_beam_shape[2]*v2 + N_beam_shape[3]*qe[5]
        # NOTE(review): `z` is undefined here — confirm it should be a symbol
        # for the section coordinate.
        u_beam = N_rod_shape[0]*u1 + N_rod_shape[1]*u2 - z * sp.diff(v_beam, xi)
    else: # Timoshenko
        v_beam = N_beam_shape[0]*v1 + N_beam_shape[1]*qe[2] + N_beam_shape[2]*v2 + N_beam_shape[3]*qe[5]
        theta_beam = N_beam_shape[0]*qe[2] + N_beam_shape[2]*qe[5]
        # NOTE(review): N_rod_shape[0] is used for BOTH u1 and u2 here —
        # likely should be N_rod_shape[1]*u2 as in the branch above; confirm.
        u_beam = N_rod_shape[0]*u1 + N_rod_shape[0]*u2 - z * theta_beam

    return v_beam, u_beam, theta_beam, v_rod, u_rod,
136
+
137
+
138
def define_equilibrium_equations(beam_type: str, expressions: List[sp.Expr],
                                 x: sp.Symbol, h_e: sp.Symbol, v1: sp.Expr, u1: sp.Expr, v2: sp.Expr,
                                 u2: sp.Expr, qe: List[sp.symbols]) -> List[sp.Expr]:
    """
    Residual equations matching the interpolated displacement fields to the
    nodal DOFs at x = 0 and x = h_e.

    ``expressions`` holds ``[u_beam, v_beam, alpha_beam]``.  The Bernoulli
    branch constrains the deflection and its slope; the Timoshenko branch
    constrains the independent rotation field instead.

    Returns:
        List[sp.Expr]: Six residual expressions, each zero at equilibrium.
    """
    u_beam, v_beam = expressions[0], expressions[1]

    if beam_type == "bernoulli":
        slope = sp.diff(v_beam, x)
        return [
            v_beam.subs(x, 0) - v1,
            slope.subs(x, 0) - qe[2],
            v_beam.subs(x, h_e) - v2,
            slope.subs(x, h_e) - qe[5],
            u_beam.subs(x, 0) - u1,
            u_beam.subs(x, h_e) - u2,
        ]

    alpha_beam = expressions[2]
    return [
        u_beam.subs(x, 0) - u1,
        u_beam.subs(x, h_e) - u2,
        v_beam.subs(x, 0) - v1,
        v_beam.subs(x, h_e) - v2,
        alpha_beam.subs(x, 0) - qe[2],
        alpha_beam.subs(x, h_e) - qe[5],
    ]
167
+
168
+
169
def define_rod_equations(u_rod: sp.Expr, v_rod: sp.Expr, x: sp.Symbol,
                         h_e: sp.Symbol, v1: sp.Expr, u1: sp.Expr, v2: sp.Expr,
                         u2: sp.Expr) -> List[sp.Expr]:
    """
    Residual equations matching the rod displacement fields to the nodal
    DOFs at the element ends x = 0 and x = h_e.

    Returns:
        List[sp.Expr]: Four residual expressions, each zero at equilibrium.
    """
    residuals = []
    # One (v, u) residual pair per element end.
    for position, v_nodal, u_nodal in ((0, v1, u1), (h_e, v2, u2)):
        residuals.append(v_rod.subs(x, position) - v_nodal)
        residuals.append(u_rod.subs(x, position) - u_nodal)
    return residuals
184
+
185
+
186
def apply_boundary_conditions(K: np.array, M: np.array, W: np.array,
                              tol: float = 1e-5) -> Tuple[np.array, np.array]:
    """
    Impose restrained DOFs on the stiffness and mass matrices (in place).

    Restrained rows/columns are zeroed and their diagonal entries replaced
    by a large-stiffness / small-mass penalty pair, which pushes the
    associated eigenvalues far outside the physical spectrum.

    Args:
        K: Global stiffness matrix.
        M: Global mass matrix.
        W: Restraint flags (1 marks a restrained DOF).
        tol: Penalty scaling; smaller values separate spurious modes further.

    Returns:
        The modified (K, M) pair.
    """
    restrained = np.where(W == 1)[0]

    # Penalty scale derived from the largest diagonal entries so it sits
    # well away from the physical frequency range.
    k_diag_max = np.max(np.abs(np.diag(K)))
    m_diag_max = np.max(np.abs(np.diag(M)))
    scale = np.sqrt(k_diag_max / m_diag_max)

    # Wipe the restrained rows and columns...
    K[restrained, :] = 0
    K[:, restrained] = 0
    M[restrained, :] = 0
    M[:, restrained] = 0

    # ...then pin their diagonals with the penalty pair.
    K[restrained, restrained] = scale / tol
    M[restrained, restrained] = scale * tol

    return K, M
213
+
214
+
215
+
216
+ ################## Analysis Functions ##################
217
def calculate_energies(beam_type, ve_beam, ue_beam, alpha_e_beam, ve_rod,
                       ue_rod, x, h_e, E_e, J_e, A_e, ro_e, G, k_shear):
    """
    Symbolic potential and kinetic energies of the beam and rod elements.

    The beam strain energy uses the Euler-Bernoulli curvature when
    ``beam_type == "bernoulli"`` and otherwise adds the Timoshenko shear
    term (correction factor ``k_shear`` with shear modulus ``G[0]``).
    Kinetic energies integrate the squared displacement fields.

    Returns:
        (pot_beam, kin_beam, pot_rod, kin_rod) as sympy expressions.
    """
    eps_beam = sp.diff(ue_beam, x)

    if beam_type == "bernoulli":
        chi_beam = sp.diff(sp.diff(ve_beam, x), x)
        strain_density = E_e * J_e * chi_beam**2 + E_e * A_e * eps_beam**2
    else:
        gamma_beam = sp.diff(ve_beam, x) - alpha_e_beam
        chi_beam = sp.diff(alpha_e_beam, x)
        # Note that k_shear and G must be changed in case they are not constant
        strain_density = (E_e * J_e * chi_beam**2 + E_e * A_e * eps_beam**2
                          + k_shear * G[0] * A_e * gamma_beam**2)

    pot_beam = 1 / 2 * sp.integrate(strain_density, (x, 0, h_e))
    kin_beam = 1 / 2 * ro_e * A_e * sp.integrate(ve_beam**2 + ue_beam**2, (x, 0, h_e))

    eps_rod = sp.diff(ue_rod, x)
    pot_rod = 1 / 2 * sp.integrate(E_e * A_e * eps_rod**2, (x, 0, h_e))
    kin_rod = 1 / 2 * ro_e * A_e * sp.integrate(ve_rod**2 + ue_rod**2, (x, 0, h_e))

    return pot_beam, kin_beam, pot_rod, kin_rod
245
+
246
+
247
def calculate_displacement_equations(x, xi, h_e, beta_e, qe, a_arr, b_arr, c_arr, d_arr, e_arr,
                                     beam_type, use_lagrangian: bool = True):
    """
    Calculate the beam displacement equations for beam (bernoulli or
    timoshenko) and rod.

    NOTE(review): the ``use_lagrangian`` branch looks broken — it passes
    ``u_beam``/``v_beam``/``alpha_beam`` to define_equilibrium_langrange
    before they are ever assigned, and then lambdifies ``ve_beam`` etc.
    which are only defined in the Newton branch (NameError at runtime).
    ``alpha_e_beam`` in the final return is likewise only defined in the
    Newton branch.  Confirm the intended data flow before use.

    Returns:
        Displacement functions for the beam in the u and v directions
    """
    # Compute local displacements
    v1, u1, v2, u2 = compute_v_u(qe, beta_e)

    # Define beam displacement equations
    if use_lagrangian:
        # NOTE(review): u_beam, v_beam, alpha_beam are used before assignment.
        v_beam, u_beam, theta_beam, v_rod, u_rod = define_equilibrium_langrange(beam_type, u_beam, v_beam, alpha_beam,
                                                                                xi, h_e, v1, u1, v2, u2, qe)
        # Lambdify ve_beam and ue_beam
        # NOTE(review): ve_beam/ue_beam/ve_rod/ue_rod are not defined in this
        # branch (the names produced above are v_beam/u_beam/...).
        ve_beam_func = lambdify((xi, qe, h_e, beta_e), ve_beam, "numpy")
        ue_beam_func = lambdify((xi, qe, h_e, beta_e), ue_beam, "numpy")
        ve_rod_func = lambdify((xi, qe, h_e, beta_e), ve_rod, "numpy")
        ue_rod_func = lambdify((xi, qe, h_e, beta_e), ue_rod, "numpy")
    else: # Newton Interpolation
        if beam_type == "bernoulli":
            u_beam = define_newton_equation(x, a_arr)
            v_beam = define_newton_equation(x, c_arr)
            alpha_beam = sp.Expr(0)
        else:
            u_beam = define_newton_equation(x, a_arr)
            v_beam = define_newton_equation(x, b_arr)
            alpha_beam = define_newton_equation(x, e_arr)

        u_rod = define_newton_equation(x, b_arr)
        v_rod = define_newton_equation(x, d_arr)

        # Define equilibrium equations
        equations = define_equilibrium_equations(beam_type, [u_beam, v_beam, alpha_beam],
                                                 x, h_e, v1, u1, v2, u2, qe)
        equations_rod = define_rod_equations(u_rod, v_rod, x, h_e, v1, u1, v2, u2)

        # Solve for the interpolation coefficients and substitute them back
        if beam_type == "bernoulli":
            sol = sp.solve(equations, a_arr + c_arr)
            alpha_e_beam = sp.Expr(0)
        else:
            sol = sp.solve(equations, a_arr + b_arr + e_arr)
            alpha_e_beam = alpha_beam.subs(sol)

        ve_beam = v_beam.subs(sol)
        ue_beam = u_beam.subs(sol)

        sol_rod = sp.solve(equations_rod, b_arr + d_arr)
        ve_rod = v_rod.subs(sol_rod)
        ue_rod = u_rod.subs(sol_rod)

        # Lambdify ve_beam and ue_beam
        ve_beam_func = lambdify((x, qe, h_e, beta_e), ve_beam, "numpy")
        ue_beam_func = lambdify((x, qe, h_e, beta_e), ue_beam, "numpy")

        ve_rod_func = lambdify((x, qe, h_e, beta_e), ve_rod, "numpy")
        ue_rod_func = lambdify((x, qe, h_e, beta_e), ue_rod, "numpy")

    return ve_beam_func, ue_beam_func, ve_beam, ue_beam, ve_rod_func, ue_rod_func, ve_rod, ue_rod, alpha_e_beam
309
+
310
+
311
def construct_lambdified_matrices(n_par_ele, pot_beam, kin_beam, pot_rod, kin_rod, qe, h_e, A_e, E_e, J_e, beta_e, ro_e):
    """
    Derive the element K and M matrices from the energy expressions.

    Entry (i, j) of each matrix is the second derivative of the respective
    energy with respect to qe[i] and qe[j] (Hessian of the quadratic form);
    the matrices are then lambdified for fast numeric evaluation.

    Returns:
        (K_beam, M_beam, K_rod, M_rod) symbolic matrices followed by their
        lambdified counterparts, in that order.
    """
    def _hessian(energy):
        # Hessian of a quadratic energy with respect to the DOF vector qe.
        H = sp.Matrix.zeros(n_par_ele)
        for i in range(n_par_ele):
            for j in range(n_par_ele):
                H[i, j] = sp.diff(sp.diff(energy, qe[i]), qe[j])
        return H

    K_beam = _hessian(pot_beam)
    M_beam = _hessian(kin_beam)
    K_rod = _hessian(pot_rod)
    M_rod = _hessian(kin_rod)

    # Stiffness matrices depend on geometry/material only; mass adds density.
    stiff_args = (h_e, A_e, E_e, J_e, beta_e)
    mass_args = stiff_args + (ro_e,)
    K_beam_func = lambdify(stiff_args, K_beam)
    M_beam_func = lambdify(mass_args, M_beam)
    K_rod_func = lambdify(stiff_args, K_rod)
    M_rod_func = lambdify(mass_args, M_rod)

    return K_beam, M_beam, K_rod, M_rod, K_beam_func, M_beam_func, K_rod_func, M_rod_func
338
+
339
+
340
def assemble_global_matrices(n_par_ele: int, n_par_tot: int, n_ele_tot: int, K_beam_func: Callable,
                             M_beam_func: Callable, K_rod_func: Callable, M_rod_func: Callable,
                             h: np.ndarray, A: np.ndarray, E: np.ndarray, J: np.ndarray, beta: np.ndarray,
                             ro: np.ndarray, pel: np.ndarray, n_rods: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Assemble the global stiffness and mass matrices from the element matrices.

    Elements ``0 .. n_ele_tot - n_rods - 1`` are beams and the remaining
    ``n_rods`` are rods; each element's local matrices are scattered into the
    global ones through the connectivity table ``pel`` (1-based DOF numbers).

    Args:
        n_par_ele: DOFs per element (kept for interface stability).
        n_par_tot: Total number of global DOFs.
        n_ele_tot: Total number of elements.
        K_beam_func, M_beam_func: Lambdified beam element matrix functions.
        K_rod_func, M_rod_func: Lambdified rod element matrix functions.
        h, A, E, J, beta, ro: Per-element geometry/material arrays.
        pel: Element-to-global-DOF connectivity (1-based indices).
        n_rods: Number of rod elements (stored at the end of the arrays).

    Returns:
        (K, M) as float32 numpy arrays of shape (n_par_tot, n_par_tot).
        (Fixes: annotations previously used ``sp.lambdify`` — a function —
        as a type and ``sp.Symbol`` for the numeric arrays; the identical
        beam and rod assembly loops are also unified.)
    """
    K = np.zeros((n_par_tot, n_par_tot))
    M = np.zeros((n_par_tot, n_par_tot))

    beam_indices = np.arange(n_ele_tot - n_rods)
    rod_indices = np.arange(n_ele_tot - n_rods, n_ele_tot)
    logging.debug(f"Beam indices: {beam_indices} \nRod indices: {rod_indices}")

    # Beam and rod assembly are identical apart from the element matrix
    # functions, so one loop handles both element families.
    for indices, k_func, m_func in ((beam_indices, K_beam_func, M_beam_func),
                                    (rod_indices, K_rod_func, M_rod_func)):
        for e in indices:
            Ke = k_func(h[e], A[e], E[e], J[e], beta[e])
            Me = m_func(h[e], A[e], E[e], J[e], beta[e], ro[e])
            idx = pel[e, :] - 1  # pel is 1-based; shift to 0-based indexing
            K[np.ix_(idx, idx)] += Ke
            M[np.ix_(idx, idx)] += Me

    return K.astype(np.float32), M.astype(np.float32)
380
+
381
+
382
def compute_eigenvalues_and_eigenvectors(K: np.array, M: np.array, method: str = 'numpy',
                                         filter_numerical_stability: bool = False,
                                         threshold: float = 1e-10) -> Tuple[np.array, np.array]:
    """
    Solve the generalized eigenproblem K phi = lambda M phi.

    Eigenvalues (lambda = w**2, squared natural frequencies) are returned in
    descending order together with their mass-normalized eigenvectors (mode
    shapes), i.e. phi_i^T M phi_i = 1.

    Args:
        K: Stiffness matrix
        M: Mass matrix
        method: 'scipy' for scipy.linalg.eigh or 'numpy' for np.linalg.eig;
            any other value falls back to scipy with a logged warning
        filter_numerical_stability: Boolean to indicate if filtering should be applied
        threshold: Threshold for filtering small eigenvalues for numerical stability

    Returns:
        The real part of the eigenvalues (frequency) and the normalized eigenvectors (modes of vibration)
    """
    if method == 'numpy':
        lamb, phis = np.linalg.eig(np.linalg.inv(M) @ K)
    else:
        if method != 'scipy':
            # Consistency fix: warn through logging like the rest of the
            # module instead of printing to stdout.
            logging.warning("Invalid method %r. Defaulting to scipy.", method)
        lamb, phis = scipy.linalg.eigh(K, M)

    # Sort in descending order and discard spurious imaginary parts.
    idx = np.argsort(lamb)[::-1]
    lamb_r = np.real(lamb[idx])
    phis_r = np.real(phis[:, idx])

    if filter_numerical_stability:
        # Drop near-zero/negative eigenvalues for numerical stability.
        valid_indices = lamb_r > threshold
        lamb_r = lamb_r[valid_indices]
        phis_r = phis_r[:, valid_indices]

    # Mass-normalize each eigenvector: phi / sqrt(phi^T M phi).
    n_par_tot = len(lamb_r)
    phis_norm = np.zeros((phis_r.shape[0], n_par_tot))

    for i in range(n_par_tot):
        c = np.sqrt(np.dot(phis_r[:, i].T, M @ phis_r[:, i]))
        phis_norm[:, i] = phis_r[:, i] / c

    # Sanity check: phi^T M phi should equal 1 for every normalized mode.
    verification = np.array([np.dot(phis_norm[:, i].T, M @ phis_norm[:, i]) for i in range(n_par_tot)])
    if not np.allclose(verification, 1):
        logging.warning("Verification failed for eigenvectors. Results may be inaccurate.")
        logging.debug("Verification results for each eigenvector (should all be 1):\n%s", verification)

    verification_sum = np.sum(verification)
    if not np.isclose(verification_sum, n_par_tot):
        logging.warning("Sum of verification values is not equal to number of eigenvectors. Results may be inaccurate.")
        logging.debug("Sum of verification values (should be equal to number of eigenvectors): %s", verification_sum)

    return lamb_r, phis_norm
439
+
440
+
441
def get_mode_indices(lamb_r: np.array, phis_norm: np.array,
                     n_plots: int) -> Tuple[np.array, np.array]:
    """
    Select the ``n_plots`` modes with the longest periods.

    Periods are computed as T = 2*pi/sqrt(lambda); non-finite or
    non-positive values are discarded before selection.

    Args:
        lamb_r: Real eigenvalues (squared natural frequencies).
        phis_norm: Normalized eigenvectors (filtered alongside the periods).
        n_plots: Number of modes to select.

    Returns:
        (index_modes, sorted_period, period): indices of the selected modes
        in descending order of period, all valid periods sorted ascending,
        and the raw array of valid periods.
    """
    period = 2 * np.pi / np.sqrt(lamb_r)

    logging.debug("Number of invalid periods: %s", np.sum(~np.isfinite(period)))
    logging.debug("Number of negative periods: %s", np.sum(period < 0))

    # Keep only finite, strictly positive periods.
    keep = np.isfinite(period) & (period > 0)
    period = period[keep]
    phis_norm = phis_norm[:, keep]

    sorted_period = np.sort(period)

    # argpartition isolates the n_plots largest periods in O(n); the final
    # argsort orders just those few by descending period.
    index_modes = np.argpartition(period, -n_plots)[-n_plots:]
    index_modes = index_modes[np.argsort(period[index_modes])][::-1]

    return index_modes, sorted_period, period
477
+
478
+
479
def calculate_global_displacements(Qglo_pel_curr, beta_e, h_e, x, xi, f_arr,
                                   g_arr, w_arr, r_arr, beam_type,
                                   X_old, Y_old, use_lagrangian: bool = False) -> Tuple:
    """
    Calculate the global displacements by solving the local symbolic
    equilibrium equations.

    NOTE(review): this relies on calculate_displacement_equations, whose
    lagrangian branch appears broken; here ``g_arr`` is passed for BOTH the
    c_arr and e_arr coefficient slots of that function — confirm this
    sharing is intentional.

    Returns:
        The symbolic deformed-coordinate expressions for beams and rods and
        their lambdified counterparts.
    """

    # Positions 2, 3, 6, 7 of the sibling's return are the solved symbolic
    # ve_beam, ue_beam, ve_rod, ue_rod expressions.
    _, _, v_beam, u_beam, _, _, v_rod, u_rod, _ = calculate_displacement_equations(x, xi, h_e, beta_e, Qglo_pel_curr, f_arr, r_arr, g_arr, w_arr, g_arr,
                                                                                   beam_type, use_lagrangian)

    # Rotate the local displacement fields back into the global frame.
    u_glo_beam = u_beam * sp.cos(beta_e) - v_beam * sp.sin(beta_e)
    v_glo_beam = u_beam * sp.sin(beta_e) + v_beam * sp.cos(beta_e)
    u_glo_rod = u_rod * sp.cos(beta_e) - v_rod * sp.sin(beta_e)
    v_glo_rod = u_rod * sp.sin(beta_e) + v_rod * sp.cos(beta_e)


    # Deformed coordinates: undeformed position along the element axis plus
    # the global displacement (xi is the normalized coordinate, x the
    # physical one).
    if use_lagrangian:
        X_new_beam = X_old + xi * h_e * sp.cos(beta_e) + u_glo_beam
        Y_new_beam = Y_old + xi * h_e * sp.sin(beta_e) + v_glo_beam
        X_new_rod = X_old + xi * h_e * sp.cos(beta_e) + u_glo_rod
        Y_new_rod = Y_old + xi * h_e * sp.sin(beta_e) + v_glo_rod
    else:
        X_new_beam = X_old + x * sp.cos(beta_e) + u_glo_beam
        Y_new_beam = Y_old + x * sp.sin(beta_e) + v_glo_beam
        X_new_rod = X_old + x * sp.cos(beta_e) + u_glo_rod
        Y_new_rod = Y_old + x * sp.sin(beta_e) + v_glo_rod


    args = (X_old, Y_old, beta_e, h_e) + tuple(Qglo_pel_curr)
    if use_lagrangian:
        X_new_beam_func = sp.lambdify((xi,) + args, X_new_beam, "numpy")
        Y_new_beam_func = sp.lambdify((xi,) + args, Y_new_beam, "numpy")
        X_new_rod_func = sp.lambdify((xi,) + args, X_new_rod, "numpy")
        Y_new_rod_func = sp.lambdify((xi,) + args, Y_new_rod, "numpy")
    else:
        X_new_beam_func = sp.lambdify((x,) + args, X_new_beam, "numpy")
        Y_new_beam_func = sp.lambdify((x,) + args, Y_new_beam, "numpy")
        X_new_rod_func = sp.lambdify((x,) + args, X_new_rod, "numpy")
        Y_new_rod_func = sp.lambdify((x,) + args, Y_new_rod, "numpy")

    return X_new_beam, Y_new_beam, X_new_rod, Y_new_rod, X_new_beam_func, Y_new_beam_func, X_new_rod_func, Y_new_rod_func
526
+
527
+
528
+
529
+ ################## Verifications functions ##################
530
+ def print_matrix(matrix: np.array, width: int = 9, precision: int = 1, row_labels: Optional[List[str]] = None,
531
+ col_labels: Optional[List[str]] = None) -> None:
532
+ """
533
+ Print a matrix in a more readable format.
534
+
535
+ Args:
536
+ matrix: The matrix to print
537
+ width: The width of each column
538
+ precision: The number of decimal places to show
539
+ row_labels (Optional[List[str]], optional): Row labels. Defaults to numbering from 1 to n.
540
+ col_labels (Optional[List[str]], optional): Column labels. Defaults to numbering from 1 to n.
541
+ """
542
+ if row_labels is None:
543
+ row_labels = range(1, len(matrix) + 1)
544
+ if col_labels is None:
545
+ col_labels = range(1, len(matrix[0]) + 1)
546
+
547
+ # Header row
548
+ print(" " * width, end="")
549
+ for label in col_labels:
550
+ print(f"{label:>{width}}", end="")
551
+ print()
552
+
553
+ # Matrix rows
554
+ for i, row in enumerate(matrix):
555
+ print(f"{row_labels[i]:>{width}}", end="")
556
+ for val in row:
557
+ # Ensure the value is treated as a float for formatting
558
+ try:
559
+ formatted_val = f"{float(val):.{precision}e}"
560
+ except ValueError:
561
+ # If conversion to float fails, print as is
562
+ formatted_val = str(val)
563
+ print(f"{formatted_val:>{width}}", end="")
564
+ print()
565
+
566
+
567
+ import numpy as np
568
+
569
+ def check_matrix(matrix: np.ndarray, atol: float = 1e-8) -> None:
570
+ """
571
+ Check if a matrix is symmetric, well-conditioned, positive definite, and diagonally dominant.
572
+
573
+ Args:
574
+ matrix: The matrix to check
575
+ atol: The absolute tolerance for the condition check
576
+ """
577
+ # Symmetry check
578
+ if np.allclose(matrix, matrix.T, atol=atol):
579
+ print("Matrix is symmetric.")
580
+ else:
581
+ print("Matrix is not symmetric.")
582
+
583
+ # Conditioning check
584
+ try:
585
+ cond_number = np.linalg.cond(matrix)
586
+ if cond_number < 1 / atol:
587
+ print(f"Matrix is well-conditioned (Condition number: {cond_number:.2e}).")
588
+ else:
589
+ print(f"Matrix is ill-conditioned (Condition number: {cond_number:.2e}).")
590
+ except np.linalg.LinAlgError:
591
+ print("Condition number could not be computed (possibly singular matrix).")
592
+
593
+ # Positive definiteness check
594
+ try:
595
+ eigenvalues = np.linalg.eigvalsh(matrix)
596
+ if np.any(eigenvalues < 0):
597
+ print("Matrix is not positive definite.")
598
+ else:
599
+ print("Matrix is positive definite.")
600
+ except np.linalg.LinAlgError:
601
+ print("Eigenvalues could not be computed.")
602
+
603
+ # Diagonal dominance check
604
+ row_sums = np.sum(np.abs(matrix), axis=1) - np.abs(np.diag(matrix))
605
+ diagonal_elements = np.abs(np.diag(matrix))
606
+ is_dominant = diagonal_elements >= row_sums
607
+ num_wrong = np.size(matrix, 0) - np.sum(is_dominant)
608
+
609
+ if num_wrong == 0:
610
+ print("Matrix is diagonally dominant.")
611
+ else:
612
+ print(f"Number of rows not diagonally dominant: {num_wrong}")