Upload 20 files
Browse files- models/__init__.py +1 -0
- models/wgan_gp.py +114 -0
- requirements.txt +7 -0
- test.py +151 -0
- train.py +202 -0
- train_utils/__init__.py +1 -0
- train_utils/preprocess_s1.py +156 -0
- truss_generator.ipynb +0 -0
- truss_generator.py +301 -0
- truss_optimized.ipynb +0 -0
- utils/cython/setup.py +34 -0
- utils/frame_constraints.py +32 -0
- utils/frame_helpers.py +382 -0
- utils/simple_beam_analysis.py +128 -0
- utils/simple_beam_constraints.py +69 -0
- utils/simple_beam_helper.py +449 -0
- utils/truss_constraints.py +60 -0
- utils/truss_element_assembly.py +211 -0
- utils/truss_geometric.py +496 -0
- utils/truss_helpers.py +612 -0
models/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
models/wgan_gp.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# wgan_gp.py
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
class Generator(nn.Module):
    """WGAN-GP generator with structured output heads.

    From a latent vector plus a conditioning vector it produces:
    node coordinates in [-1, 1], soft node-existence probabilities,
    a masked soft adjacency, and the raw edge / node logits.
    """

    def __init__(self, latent_dim=128, nmax=121, cond_dim=4):  # cond: n_nodes, n_edges, height, spacing
        super().__init__()
        hidden = 512
        self.fc = nn.Sequential(
            nn.Linear(latent_dim + cond_dim, hidden),
            nn.LeakyReLU(0.2, True),
            nn.Linear(hidden, hidden),
            nn.LeakyReLU(0.2, True),
        )
        # One linear head per structured output.
        self.out_nodes = nn.Linear(hidden, nmax * 2)     # (x, y) per node slot
        self.out_nmask = nn.Linear(hidden, nmax)         # node-existence logits
        self.out_edges = nn.Linear(hidden, nmax * nmax)  # edge logits, symmetrized in forward

        self.nmax = nmax

    def forward(self, z, cond):
        """Return (nodes, node_mask, A_soft, edge_logits, node_logits)."""
        N = self.nmax
        feat = self.fc(torch.cat([z, cond], dim=1))

        nodes = torch.tanh(self.out_nodes(feat)).view(-1, N, 2)
        nlog = self.out_nmask(feat).view(-1, N)
        elog = self.out_edges(feat).view(-1, N, N)

        # Symmetrize the edge logits and zero the self-loop diagonal.
        elog = 0.5 * (elog + elog.transpose(-1, -2))
        diag = torch.arange(N)
        elog[:, diag, diag] = 0

        node_mask = torch.sigmoid(nlog)
        # An edge can only be "on" when both of its endpoints exist.
        pair_mask = node_mask.unsqueeze(-1) * node_mask.unsqueeze(-2)
        A_soft = torch.sigmoid(elog) * pair_mask
        return nodes, node_mask, A_soft, elog, nlog
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Discriminator(nn.Module):
    """Graph-aware WGAN-GP critic.

    Embeds node coordinates, mixes them with one soft-adjacency message
    pass, masks and mean-pools over existing nodes, then scores the
    pooled feature together with the conditioning vector.
    """

    def __init__(self, nmax=121, cond_dim=4, hid=256):
        super().__init__()
        self.node_mlp = nn.Sequential(nn.Linear(2, hid), nn.LeakyReLU(0.2, True),
                                      nn.Linear(hid, hid), nn.LeakyReLU(0.2, True))
        # NOTE(review): edge_mlp is never used in forward(); kept so existing
        # checkpoints (state_dicts) keep loading — confirm before removing.
        self.edge_mlp = nn.Sequential(nn.Linear(2*hid+1, hid), nn.LeakyReLU(0.2, True),
                                      nn.Linear(hid, hid), nn.LeakyReLU(0.2, True))
        self.head = nn.Sequential(nn.Linear(2*hid + 1 + cond_dim, hid),
                                  nn.LeakyReLU(0.2, True),
                                  nn.Linear(hid, 1))
        self.nmax = nmax

    def forward(self, nodes, A_soft, node_mask, cond):
        """Score a batch of graphs; returns a [B, 1] critic value.

        nodes: [B, N, 2], A_soft: [B, N, N], node_mask: [B, N], cond: [B, cond_dim]
        """
        mask_col = node_mask.unsqueeze(-1)            # [B, N, 1]
        emb = self.node_mlp(nodes) * mask_col         # zero out absent nodes
        messages = torch.matmul(A_soft, emb)          # [B, N, hid] neighbor aggregate
        degree = A_soft.mean(dim=-1, keepdim=True)    # [B, N, 1] soft-degree feature
        per_node = torch.cat([emb, messages, degree], dim=-1) * mask_col
        # Masked mean pool over node slots (epsilon guards empty graphs).
        pooled = per_node.sum(dim=1) / (node_mask.sum(dim=1, keepdim=True) + 1e-6)
        return self.head(torch.cat([pooled, cond], dim=-1))
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def gradient_penalty(discriminator, real_data, fake_data, lambda_gp=10):
    """WGAN-GP gradient penalty over structured inputs.

    real_data / fake_data are tuples (nodes, node_mask, adjacency, cond).
    Each component gets its own random interpolation coefficient; the
    critic's gradient norm over ALL components is driven toward 1.
    """
    device = real_data[0].device
    batch = real_data[0].size(0)

    def _mix(idx, alpha_shape):
        # Convex mix between real and fake for one component, grads tracked.
        alpha = torch.rand(*alpha_shape, device=device)
        mixed = alpha * real_data[idx] + (1 - alpha) * fake_data[idx]
        return mixed.requires_grad_(True)

    nodes_i = _mix(0, (batch, 1, 1))
    mask_i = _mix(1, (batch, 1))
    adj_i = _mix(2, (batch, 1, 1))
    cond_i = _mix(3, (batch, 1))

    scores = discriminator(nodes_i, adj_i, mask_i, cond_i)
    grads = torch.autograd.grad(
        outputs=scores,
        inputs=[nodes_i, mask_i, adj_i, cond_i],
        grad_outputs=torch.ones_like(scores),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )
    # Concatenate all per-component gradients into one vector per sample.
    flat = torch.cat([g.reshape(batch, -1) for g in grads], dim=1)
    return lambda_gp * ((flat.norm(2, dim=1) - 1) ** 2).mean()
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def aux_losses(nodes, node_mask, elog, nodes_real, node_mask_real, A_real):
    """Masked supervised reconstruction losses against the real graph.

    Returns (L_nodes, L_mask, L_edges):
      L_nodes — mean squared coordinate error over real nodes only;
      L_mask  — BCE between predicted and real node existence;
      L_edges — BCE on edge logits over the strict upper triangle,
                restricted to pairs of real nodes.
    """
    real_mask = node_mask_real
    pair_mask = (real_mask.unsqueeze(-1) * real_mask.unsqueeze(-2)).bool()

    # Coordinate loss, averaged over existing nodes only.
    sq_err = ((nodes - nodes_real) ** 2).sum(dim=-1)
    L_nodes = (sq_err * real_mask).sum() / (real_mask.sum() + 1e-6)

    # Existence loss. NOTE(review): node_mask is already a probability, so it
    # is mapped back to logits before BCE-with-logits; mathematically this
    # equals plain BCE on the clamped probabilities.
    prob = node_mask.clamp(1e-6, 1 - 1e-6)
    L_mask = F.binary_cross_entropy_with_logits(
        torch.logit(prob), node_mask_real, reduction='mean')

    # Edge loss over unique (i < j) pairs of real nodes.
    upper = torch.triu(torch.ones_like(A_real), diagonal=1).bool()
    select = pair_mask & upper
    L_edges = F.binary_cross_entropy_with_logits(
        elog[select], (A_real * 1.0)[select], reduction='mean')
    return L_nodes, L_mask, L_edges
|
requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
--extra-index-url https://download.pytorch.org/whl/cu126
|
| 2 |
+
torch
|
| 3 |
+
torchvision
|
| 4 |
+
matplotlib
|
| 5 |
+
scipy
|
| 6 |
+
cython
|
| 7 |
+
sympy
|
test.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import numpy as np
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
from models.wgan_gp import Generator
|
| 6 |
+
from train_utils.preprocess_s1 import preprocess_s1
|
| 7 |
+
|
| 8 |
+
def reconstruct_sample(sample_np, metadata, full_output_dim, index=None):
    """Rebuild (nodal_coord, ele_nod, pel) from one flattened, padded sample.

    The flat layout is [node coords | element connectivity | element props],
    padded to metadata['max_nodes'] / metadata['max_elements']. When `index`
    is given and per-sample metadata exists, the true counts for that sample
    are used; otherwise the padded maxima are used. Edges referencing
    out-of-range nodes are dropped together with their pel rows.
    """
    max_nodes = metadata['max_nodes']
    max_elements = metadata['max_elements']

    # Resolve the actual node / element counts for this sample.
    actual_nodes, actual_elements = max_nodes, max_elements
    if index is not None:
        per_sample = metadata.get('per_sample_metadata', [])
        if index < len(per_sample):
            actual_nodes = per_sample[index]['n_nod_tot']
            actual_elements = per_sample[index]['n_ele_tot']

    # Section boundaries inside the flat vector.
    coords_end = max_nodes * 2
    edges_end = coords_end + max_elements * 2
    props_end = edges_end + max_elements * 4

    # Slice each section, trim padding, and reshape.
    nodal_coord = sample_np[:coords_end][:actual_nodes * 2].reshape(-1, 2)
    ele_nod = sample_np[coords_end:edges_end][:actual_elements * 2].reshape(-1, 2).astype(int)
    pel = sample_np[edges_end:props_end][:actual_elements * 4].reshape(-1, 4)

    # Keep only edges whose endpoints are valid node indices.
    keep = ((ele_nod >= 0) & (ele_nod < actual_nodes)).all(axis=1)
    return nodal_coord, ele_nod[keep], pel[keep]
|
| 46 |
+
|
| 47 |
+
def plot_truss(nodal_coord, ele_nod, title="Generated Truss", ax=None):
    """
    Plot truss structure.

    Args:
        nodal_coord: (n_nodes, 2) array of (x, y) node positions.
        ele_nod: iterable of 2-element node-index pairs, one per member.
        title (str): Axes title.
        ax: Optional matplotlib Axes; a new figure/axes is created when None.

    Returns:
        The matplotlib Axes the truss was drawn on.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 6))

    # Plot nodes
    ax.scatter(nodal_coord[:, 0], nodal_coord[:, 1], c='blue', s=50, label='Nodes')

    # Plot edges; malformed pairs or out-of-range node indices are skipped silently
    for e in ele_nod:
        if len(e) == 2 and e[0] < len(nodal_coord) and e[1] < len(nodal_coord):
            x1, y1 = nodal_coord[e[0]]
            x2, y2 = nodal_coord[e[1]]
            ax.plot([x1, x2], [y1, y2], 'k-', lw=1.5, alpha=0.7)

    ax.set_aspect('equal')
    ax.set_title(title)
    ax.grid(alpha=0.3)
    ax.legend()
    return ax
|
| 69 |
+
|
| 70 |
+
def evaluate_and_visualize(checkpoints_path, n_samples=9, device='cpu'):
    """
    Load best generator, generate samples, visualize, and show metrics.

    Args:
        checkpoints_path (str): Folder containing generator.pth, metadata.npy
            and epoch_losses.npy as written by train.train_wgan_gp.
        n_samples (int): Number of real/generated samples to plot side by side.
        device (str): Torch device string for loading and generation.
    """
    device = torch.device(device)

    # Load metadata and models
    metadata = np.load(os.path.join(checkpoints_path, 'metadata.npy'), allow_pickle=True).item()
    generator_state = torch.load(os.path.join(checkpoints_path, 'generator.pth'), map_location=device)
    epoch_losses = np.load(os.path.join(checkpoints_path, 'epoch_losses.npy'), allow_pickle=True)

    # NOTE(review): models/wgan_gp.Generator takes (latent_dim, nmax, cond_dim)
    # — there is no 'output_dim' parameter, so this call will raise TypeError.
    # This function looks stale against the structured generator; confirm and update.
    generator = Generator(latent_dim=128, output_dim=metadata['total_dim']).to(device)
    generator.load_state_dict(generator_state)
    generator.eval()

    print("Generator loaded. Metadata:", {k: v for k, v in metadata.items() if k != 'per_sample_metadata' and k != 'npz_files'})

    # Load real data for comparison
    real_data, _ = preprocess_s1(normalize_type=None)
    real_samples = real_data[:n_samples]

    # Generate fake samples
    with torch.no_grad():
        z = torch.randn(n_samples, 128, device=device)
        # NOTE(review): the structured Generator's forward requires (z, cond)
        # and returns a 5-tuple, not a flat tensor — .cpu() here would fail too.
        fake_samples = generator(z).cpu().numpy()

    # Plot real vs generated
    fig, axes = plt.subplots(2, n_samples, figsize=(4*n_samples, 8))
    axes = axes.flatten() if n_samples > 1 else [axes]

    for i in range(n_samples):
        # Real
        nodal_real, ele_nod_real, _ = reconstruct_sample(real_samples[i], metadata, metadata['total_dim'], i)
        plot_truss(nodal_real, ele_nod_real, f"Real {i+1}", ax=axes[i])

        # Generated
        nodal_fake, ele_nod_fake, _ = reconstruct_sample(fake_samples[i], metadata, metadata['total_dim'], i)
        plot_truss(nodal_fake, ele_nod_fake, f"Gen {i+1}", ax=axes[i + n_samples])

    plt.tight_layout()
    plt.show()

    # Training metrics plot
    losses = epoch_losses
    epochs = [loss['epoch'] for loss in losses]
    d_losses = [loss['d_loss'] for loss in losses]
    g_losses = [loss['g_loss'] for loss in losses]

    fig2, ax1 = plt.subplots(figsize=(10, 5))
    ax1.plot(epochs, d_losses, label='Discriminator Loss', c='red')
    ax1.plot(epochs, g_losses, label='Generator Loss', c='blue')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Loss')
    ax1.set_title('Training Losses')
    ax1.legend()
    ax1.grid(alpha=0.3)
    plt.show()

    print("Training complete! Final epoch losses:")
    final = losses[-1] if losses else {'d_loss': 0, 'g_loss': 0}
    print(f"Final D Loss: {final['d_loss']:.4f}, G Loss: {final['g_loss']:.4f}")

    # Generated metrics (using batch of fakes for quick eval)
    # NOTE(review): compute_sequence_lengths is imported but never used below.
    from train_utils.preprocess_s1 import compute_sequence_lengths
    fake_metrics = []
    for sample_np in fake_samples[:min(5, n_samples)]:  # Small batch
        fake_tensor = torch.tensor([sample_np], dtype=torch.float32)
        # compute_graph_metrics is defined later in this module; that is fine
        # at call time since the module is fully loaded before __main__ runs.
        metrics = compute_graph_metrics(fake_tensor, metadata)
        fake_metrics.append(metrics)

    avg_metrics = {k: np.mean([m[k] for m in fake_metrics]) for k in fake_metrics[0]}
    print(f"Generated Metrics: {avg_metrics}")
|
| 142 |
+
|
| 143 |
+
def compute_graph_metrics(fake_samples, metadata):
    """
    Quick graph metrics for generated samples.

    Thin wrapper delegating to the implementation in train.py so both
    scripts share a single metric definition.
    """
    from train import compute_graph_metrics as _impl  # local import avoids a cycle at import time
    return _impl(fake_samples, metadata)
|
| 149 |
+
|
| 150 |
+
# Script entry point: visualize checkpointed generator output vs. real data.
if __name__ == '__main__':
    evaluate_and_visualize('models/checkpoints', n_samples=6)
|
train.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import torch.optim as optim
|
| 4 |
+
import numpy as np
|
| 5 |
+
from torch.utils.data import DataLoader, TensorDataset
|
| 6 |
+
from models.wgan_gp import Generator, Discriminator, gradient_penalty, aux_losses
|
| 7 |
+
from train_utils.preprocess_s1 import preprocess_s1
|
| 8 |
+
import networkx as nx
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def connectivity_penalty(fake_samples, metadata):
    """
    Penalize generated trusses if disconnected (few valid edges per node).

    For each sample, counts how many of the max_nodes slots are NOT referenced
    by any (integer-cast) entry of the element-connectivity block, and sums
    that count over the batch.

    NOTE(review): fake_samples is detached and converted to numpy below, so
    the returned tensor carries no gradient — adding it to the generator loss
    has no training effect. It also assumes the flat
    [nodes | ele_nod | pel] layout of preprocess_s1, while train_wgan_gp
    currently passes [nodes | adjacency]; confirm the caller before relying
    on this term.
    """
    batch_size = fake_samples.size(0)
    max_nodes = metadata['max_nodes']
    penalty = 0.0
    for i in range(batch_size):
        sample = fake_samples[i].detach().cpu().numpy()
        node_block = sample[:max_nodes * 2]  # sliced for layout clarity; not used below
        ele_block = sample[max_nodes * 2 : max_nodes * 2 + metadata['max_elements'] * 2]
        node_ids = np.unique(ele_block.astype(int))
        valid_nodes = [n for n in node_ids if 0 <= n < max_nodes]
        # One penalty unit per node slot never referenced by an edge.
        penalty += (max_nodes - len(valid_nodes))
    return torch.tensor(penalty, dtype=torch.float32, device=fake_samples.device)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def compute_graph_metrics(nodes, A_soft, node_mask):
    """
    Evaluate connectivity and degree stats from generated graphs.

    Thresholds the soft adjacency and node mask at 0.5, builds one networkx
    graph per batch item over the surviving nodes, and returns batch-mean
    values for 'connectivity' (number of connected components, 0 for empty
    graphs), 'degree_mean', and 'degree_var'.
    """
    coords = nodes.detach().cpu().numpy()
    adj = (A_soft.detach().cpu().numpy() > 0.5).astype(int)
    mask = node_mask.detach().cpu().numpy()
    batch = coords.shape[0]

    comps, deg_means, deg_vars = [], [], []
    for b in range(batch):
        alive = np.where(mask[b] > 0.5)[0]
        graph = nx.Graph()
        graph.add_nodes_from(alive)
        # Add each undirected edge once (i < j) between surviving nodes.
        for i in alive:
            for j in alive:
                if i < j and adj[b, i, j] > 0:
                    graph.add_edge(i, j)
        if len(graph.nodes) > 0:
            degrees = [d for _, d in graph.degree()]
            comps.append(nx.number_connected_components(graph))
            deg_means.append(np.mean(degrees))
            deg_vars.append(np.var(degrees))
        else:
            comps.append(0)
            deg_means.append(0)
            deg_vars.append(0)
    return {
        'connectivity': np.mean(comps) if comps else 0,
        'degree_mean': np.mean(deg_means) if deg_means else 0,
        'degree_var': np.mean(deg_vars) if deg_vars else 0,
    }
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def train_wgan_gp(device='cuda' if torch.cuda.is_available() else 'cpu',
                  n_epochs=100, batch_size=16, latent_dim=128,
                  n_critic=3, lr_g=2e-4, lr_d=1e-4,
                  lambda_gp=10, lambda_connect=0.0,
                  save_path='models/checkpoints'):
    """
    Train WGAN-GP with structured Generator and Discriminator.
    Includes keyboard interrupt save and best-model checkpointing.

    Args:
        device (str): Torch device for training.
        n_epochs (int): Number of epochs.
        batch_size (int): Mini-batch size.
        latent_dim (int): Generator latent dimension.
        n_critic (int): Discriminator updates per generator update.
        lr_g, lr_d (float): Adam learning rates for generator / discriminator.
        lambda_gp (float): Gradient penalty weight.
        lambda_connect (float): Weight for the connectivity penalty (0 disables).
        save_path (str): Folder for checkpoints, metadata, and loss history.
    """
    device = torch.device(device)

    # Load data
    data_array, metadata = preprocess_s1()
    dataset = TensorDataset(torch.tensor(data_array, dtype=torch.float32))
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    nmax = metadata['max_nodes']
    max_elems = metadata['max_elements']

    # Init models
    cond_dim = 4  # [normed n_nodes, n_elems, height, spacing]
    generator = Generator(latent_dim=latent_dim, nmax=nmax, cond_dim=cond_dim).to(device)
    discriminator = Discriminator(nmax=nmax, cond_dim=cond_dim).to(device)

    # betas=(0.0, 0.99) is a common WGAN-GP choice (low first moment).
    opt_g = optim.Adam(generator.parameters(), lr=lr_g, betas=(0.0, 0.99))
    opt_d = optim.Adam(discriminator.parameters(), lr=lr_d, betas=(0.0, 0.99))

    best_g_loss = float('inf')
    epoch_losses = []
    os.makedirs(save_path, exist_ok=True)

    def make_cond(meta_batch, bs):
        """Create conditioning vector matching current batch size."""
        # NOTE(review): meta_batch is always the global metadata dict, so the
        # first two entries are constant 1.0 for every batch; height/spacing
        # are random and not tied to the real samples — confirm this is the
        # intended conditioning scheme.
        n_nodes = meta_batch['max_nodes'] / metadata['max_nodes']
        n_elems = meta_batch['max_elements'] / metadata['max_elements']
        height = np.random.uniform(0.2, 1.0)
        spacing = np.random.uniform(0.2, 1.0)
        cond = np.array([n_nodes, n_elems, height, spacing], dtype=np.float32)
        cond = np.tile(cond, (bs, 1))
        return torch.tensor(cond, device=device)

    try:
        print(f"Starting training on {len(dataset)} samples...")
        for epoch in range(n_epochs):
            epoch_d_loss, epoch_g_loss = 0, 0

            for real_flat, in dataloader:
                real_flat = real_flat.to(device)
                bs = real_flat.size(0)
                cond = make_cond(metadata, bs)

                # Extract structured tensors
                nodes_real = real_flat[:, : nmax * 2].view(bs, nmax, 2)
                # A node "exists" iff its padded (x, y) is not exactly (0, 0).
                node_mask_real = (nodes_real.abs().sum(-1) > 0).float()

                # Build adjacency from ele_nod
                # NOTE(review): this per-element Python loop is O(bs * max_elems)
                # with .item() calls each iteration — a known hotspot if the
                # dataset grows; could be vectorized with index_put_.
                ele_block = real_flat[:, nmax * 2 : nmax * 2 + max_elems * 2]
                A_real = torch.zeros(bs, nmax, nmax, device=device)
                for b in range(bs):
                    for i in range(0, max_elems * 2, 2):
                        n1 = int(ele_block[b, i].item())
                        n2 = int(ele_block[b, i + 1].item())
                        if 0 <= n1 < nmax and 0 <= n2 < nmax:
                            A_real[b, n1, n2] = 1
                            A_real[b, n2, n1] = 1
                A_real = A_real * (node_mask_real.unsqueeze(-1) * node_mask_real.unsqueeze(-2))
                A_real[:, torch.arange(nmax), torch.arange(nmax)] = 0  # zero diagonal per batch

                # NOTE(review): cond was already created above; this second call
                # discards it and re-rolls the random height/spacing.
                cond = make_cond(metadata, bs)

                # ---- Train Discriminator ----
                for _ in range(n_critic):
                    z = torch.randn(bs, latent_dim, device=device)
                    nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
                    d_real = discriminator(nodes_real, A_real, node_mask_real, cond)
                    # Fakes are detached so only the critic is updated here.
                    d_fake = discriminator(nodes_f.detach(), A_f.detach(), nmask_f.detach(), cond)
                    gp = gradient_penalty(discriminator,
                                          (nodes_real, node_mask_real, A_real, cond),
                                          (nodes_f.detach(), nmask_f.detach(), A_f.detach(), cond),
                                          lambda_gp)
                    loss_d = -(d_real.mean() - d_fake.mean()) + gp
                    opt_d.zero_grad()
                    loss_d.backward()
                    opt_d.step()

                # ---- Train Generator ----
                z = torch.randn(bs, latent_dim, device=device)
                nodes_f, nmask_f, A_f, elog_f, nlog_f = generator(z, cond)
                d_fake = discriminator(nodes_f, A_f, nmask_f, cond)
                adv_loss = -d_fake.mean()

                # Supervised reconstruction terms against the current real batch.
                L_nodes, L_mask, L_edges = aux_losses(nodes_f, nmask_f, elog_f,
                                                      nodes_real, node_mask_real, A_real)
                loss_g = adv_loss + 10 * L_nodes + 1 * L_mask + 5 * L_edges

                if lambda_connect > 0:
                    # NOTE(review): connectivity_penalty expects the flat
                    # [nodes | ele_nod | pel] layout, not [nodes | adjacency],
                    # and detaches its input (no gradient) — see its docstring.
                    flat_fake = torch.cat([nodes_f.view(bs, -1),
                                           A_f.view(bs, -1)], dim=1)
                    loss_g += lambda_connect * connectivity_penalty(flat_fake, metadata)

                opt_g.zero_grad()
                loss_g.backward()
                opt_g.step()

                epoch_d_loss += loss_d.item()
                epoch_g_loss += loss_g.item()

            # ---- Logging and checkpointing ----
            epoch_d_loss /= len(dataloader)
            epoch_g_loss /= len(dataloader)
            epoch_losses.append({'epoch': epoch + 1,
                                 'd_loss': epoch_d_loss,
                                 'g_loss': epoch_g_loss})

            if (epoch + 1) % 5 == 0:
                # Metrics come from the LAST batch's fakes only, not the epoch.
                metrics = compute_graph_metrics(nodes_f, A_f, nmask_f)
                print(f"[{epoch+1}/{n_epochs}] D Loss: {epoch_d_loss:.2f}, "
                      f"G Loss: {epoch_g_loss:.2f}, "
                      f"DegMean: {metrics['degree_mean']:.2f}, "
                      f"Conn: {metrics['connectivity']:.2f}")

            # Checkpoint whenever the mean generator loss improves.
            if epoch_g_loss < best_g_loss:
                best_g_loss = epoch_g_loss
                torch.save(generator.state_dict(), os.path.join(save_path, 'generator.pth'))
                torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator.pth'))
                np.save(os.path.join(save_path, 'metadata.npy'), metadata)
                print(f"✅ New best G loss {best_g_loss:.2f} — models saved.")

        # Final save
        np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
        print(f"Training complete. Models saved to {save_path}.")

    except KeyboardInterrupt:
        # Persist everything accumulated so far so a Ctrl-C is not a total loss.
        print("\n⚠️ Training interrupted by user. Saving current state...")
        os.makedirs(save_path, exist_ok=True)
        np.save(os.path.join(save_path, 'epoch_losses.npy'), epoch_losses)
        torch.save(generator.state_dict(), os.path.join(save_path, 'generator_interrupted.pth'))
        torch.save(discriminator.state_dict(), os.path.join(save_path, 'discriminator_interrupted.pth'))
        np.save(os.path.join(save_path, 'metadata.npy'), metadata)
        print(f"🟡 Interrupted state saved to {save_path}.")
        raise  # Re-raise to exit
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# Script entry point: run training on GPU when available, else CPU.
if __name__ == '__main__':
    train_wgan_gp(device='cuda' if torch.cuda.is_available() else 'cpu')
|
train_utils/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
train_utils/preprocess_s1.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# preprocess_s1.py
|
| 2 |
+
import numpy as np
|
| 3 |
+
import os
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from scipy.stats import zscore
|
| 6 |
+
|
| 7 |
+
def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
    """
    Compute and print min/max sequence lengths (nodes, elements) across all trusses in the dataset.
    Uses only min/max n_div files per mode for efficiency.

    Args:
        dataset_folder (str): Path to the dataset folder containing .npz files.
        show_details (bool): When True, also print the keys/shapes of one example file.

    Returns:
        Dict with keys 'min_nodes', 'max_nodes', 'min_elements', 'max_elements'.

    Raises:
        ValueError: If the folder contains no .npz files.
    """
    def _parse(fname):
        """Return (mode, n_div) for 'truss_<mode>_<n_div>_<id>.npz', else None."""
        parts = fname[:-4].rsplit('_', 2)  # e.g. ['truss_pratt', '15', '39']
        if len(parts) == 3 and parts[0].startswith('truss_'):
            return parts[0][6:], int(parts[1])
        return None

    npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
    if not npz_files:
        raise ValueError(f"No .npz files found in '{dataset_folder}'.")

    # First pass: compute min/max n_div per mode
    min_max_div = defaultdict(lambda: (float('inf'), float('-inf')))
    for f in npz_files:
        parsed = _parse(f)
        if parsed:
            mode, n_div = parsed
            min_d, max_d = min_max_div[mode]
            min_max_div[mode] = (min(min_d, n_div), max(max_d, n_div))

    # Second pass: collect the files sitting at each mode's min and max n_div
    min_div_files = defaultdict(list)
    max_div_files = defaultdict(list)
    for f in npz_files:
        parsed = _parse(f)
        if parsed:
            mode, n_div = parsed
            if n_div == min_max_div[mode][0]:
                min_div_files[mode].append(f)
            if n_div == min_max_div[mode][1]:
                max_div_files[mode].append(f)

    # Compute overall min/max sequence lengths by loading one min/max file per mode
    min_n_nod = float('inf')
    max_n_nod = 0
    min_n_ele = float('inf')
    max_n_ele = 0
    for mode in sorted(min_max_div):
        # Smallest subdivision file bounds the minimum counts.
        if min_div_files[mode]:
            data_min = np.load(os.path.join(dataset_folder, min_div_files[mode][0]))
            min_n_nod = min(min_n_nod, int(data_min['n_nod_tot']))
            min_n_ele = min(min_n_ele, int(data_min['n_ele_tot']))
            data_min.close()
        # Largest subdivision file bounds the maximum counts.
        if max_div_files[mode]:
            data_max = np.load(os.path.join(dataset_folder, max_div_files[mode][0]))
            max_n_nod = max(max_n_nod, int(data_max['n_nod_tot']))
            max_n_ele = max(max_n_ele, int(data_max['n_ele_tot']))
            data_max.close()

    print(f"Overall min sequence lengths: nodes={min_n_nod}, elements={min_n_ele}")
    print(f"Overall max sequence lengths: nodes={max_n_nod}, elements={max_n_ele}")

    # Additionally print out for one structure type how the keys inside look like
    if show_details:
        example_mode = next(iter(max_div_files))
        example_file = max_div_files[example_mode][0]
        example_data = np.load(os.path.join(dataset_folder, example_file))
        print(f"\nExample data keys from '{example_file}': {example_data.files}")
        for key in example_data.files:
            print(f"  - {key}: shape {example_data[key].shape}, dtype {example_data[key].dtype}")
        example_data.close()

    return {
        'min_nodes': min_n_nod,
        'max_nodes': max_n_nod,
        'min_elements': min_n_ele,
        'max_elements': max_n_ele
    }
|
| 85 |
+
|
| 86 |
+
def pad_to_length(array, max_len):
    """
    Pad a 1D array with zeros up to max_len (or truncate if longer).

    The output keeps the input's dtype, so integer connectivity arrays are
    not silently promoted to float64 (the previous behavior of np.zeros).

    Args:
        array (np.ndarray): 1D input array.
        max_len (int): Target length.

    Returns:
        np.ndarray: Array of length max_len with the input's dtype.
    """
    if len(array) < max_len:
        padded = np.zeros(max_len, dtype=array.dtype)
        padded[:len(array)] = array
        return padded
    else:
        return array[:max_len]  # Truncate if larger (shouldn't happen)
|
| 96 |
+
|
| 97 |
+
def preprocess_s1(dataset_folder="dataset", normalize_type=None):
    """
    Preprocesses truss dataset into flattened, padded vectors for Stage 1 GAN.

    Each .npz sample is turned into a single flat vector
    [nodal coordinates | element-node connectivity | element properties],
    with every part zero-padded to the dataset-wide maximum length.

    Args:
        dataset_folder (str): Folder with .npz files.
        normalize_type (str or None): 'min_max', 'z_score', or None (no normalization).

    Returns:
        np.ndarray: Shape (n_samples, total_dim), normalized if specified.
        dict: Preprocessing metadata.

    Raises:
        ValueError: If normalize_type is not one of the supported options.
    """
    # Dataset-wide padding targets from a scan of the folder.
    lengths = compute_sequence_lengths(dataset_folder)
    max_nodes = lengths['max_nodes']
    max_elements = lengths['max_elements']
    # nodal (x, y) + ele_nod (2 node ids) + pel (4 props per element)
    total_dim = max_nodes * 2 + max_elements * 2 + max_elements * 4

    print(f"Max nodes: {max_nodes}, max elements: {max_elements}, total dim: {total_dim}")

    npz_files = [name for name in os.listdir(dataset_folder) if name.endswith('.npz')]
    samples = []
    for name in npz_files:
        with np.load(os.path.join(dataset_folder, name)) as data:
            flat_parts = [
                pad_to_length(data['nodal_coord'].flatten(), max_nodes * 2),
                pad_to_length(data['ele_nod'].flatten(), max_elements * 2),
                pad_to_length(data['pel'].flatten(), max_elements * 4),
            ]
        samples.append(np.concatenate(flat_parts))

    data_array = np.array(samples)  # (n_samples, total_dim)

    # Optional normalization: whole-matrix min/max or per-feature z-score.
    if normalize_type == 'min_max':
        lo = data_array.min()
        hi = data_array.max()
        data_array = (data_array - lo) / (hi - lo)
    elif normalize_type == 'z_score':
        data_array = zscore(data_array, axis=0)  # Per feature
    elif normalize_type is None:
        pass  # Already normalized in generator
    else:
        raise ValueError(f"Unknown normalize_type: {normalize_type}")

    metadata = {
        'max_nodes': max_nodes,
        'max_elements': max_elements,
        'total_dim': total_dim,
        'n_samples': len(samples),
        'normalize_type': normalize_type
    }
    return data_array, metadata
|
truss_generator.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
truss_generator.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import math
|
| 3 |
+
import random
|
| 4 |
+
import logging
|
| 5 |
+
import numpy as np
|
| 6 |
+
import sympy as sp
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
from sympy import Matrix, lambdify
|
| 9 |
+
from utils.truss_geometric import *
|
| 10 |
+
from utils.truss_constraints import *
|
| 11 |
+
from utils.truss_helpers import *
|
| 12 |
+
from utils.simple_beam_helper import calculate_simple_essential_elements, calculate_simple_element_node
|
| 13 |
+
from sympy.utilities.codegen import codegen
|
| 14 |
+
from collections import defaultdict
|
| 15 |
+
|
| 16 |
+
# Set up logging
|
| 17 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import os
|
| 21 |
+
import logging
|
| 22 |
+
|
| 23 |
+
# Assume the following functions are defined as provided in the original code:
|
| 24 |
+
# calculate_max_height, try_angles, calculate_bridge (modified), calculate_essential_elements,
|
| 25 |
+
# calculate_element_node, boundary_conditions, truss_design, calculate_element_properties
|
| 26 |
+
# Also assume nodal_coords, pel_ele, fill_ele_nod are defined elsewhere in your codebase.
|
| 27 |
+
|
| 28 |
+
# Default values for dimensions and parameters (based on context)
n_dim = 2      # spatial dimensions (planar truss)
n_par_nod = 2  # parameters (DOFs) per node

# Default material properties (example values for steel-like; adjust as needed)
width_properties = {'beam': 0.3, 'column': 0.4, 'rod': 0.1} # in m
height_properties = {'beam': 0.35, 'column': 0.4, 'rod': 0.1} # in m
unit_weight_properties = {'beam': 78.5, 'column': 78.5, 'rod': 78.5} # kN/m^3
elastic_mod_properties = {'beam': 2e8, 'column': 2e8, 'rod': 2e8} # kN/m^2
shear_mod = 8e7 # kN/m^2

# Parameters for dataset generation
span = 1.0 # Normalized span
truss_modes = ["pratt", "howe", "warren"]
n_div_range = range(2, 61, 1)    # divisions along the span: 2, 3, ..., 60
angle_range = range(30, 61, 1)   # diagonal angle in degrees: 30, 31, ..., 60
dataset_folder = "dataset"
os.makedirs(dataset_folder, exist_ok=True)

num_generated = 0

# One sample per (truss type, division count, angle) combination.
for truss_mode in truss_modes:
    for n_div in n_div_range:
        for angle in angle_range:
            skip_rod = []
            # Calculate geometry using modified calculate_bridge with n_div
            height, spacing, diag = calculate_bridge(span, angle=angle, n_div=n_div, truss_mode=truss_mode)

            # Recalculate skip_rod for design: a first pass with no skipped
            # rods sizes the structure, truss_design then decides which rods
            # to skip, and a second pass recomputes the final element counts.
            n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_essential_elements(
                span, spacing, truss_mode, skip_rod)
            skip_rod = truss_design(n_bot_beams, n_rods, truss_mode)
            n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_essential_elements(
                span, spacing, truss_mode, skip_rod)

            # Generate nodes and elements
            nodal_coord, par, pel, ele_nod, n_par_tot = calculate_element_node(span, spacing, height, n_dim,
                                                                               n_par_nod, truss_mode, skip_rod)

            # Separate nodal coordinates
            X = nodal_coord[:, 0]
            Y = nodal_coord[:, 1]

            # Boundary conditions (assuming non-simple modes)
            W = boundary_conditions(n_bot_beams, n_par_nod, n_nod_tot, supports=["pin", "roller"])

            # Element properties, filled in by calculate_element_properties below.
            h = np.zeros(n_ele_tot, dtype=np.float32)
            J = np.zeros(n_ele_tot, dtype=np.float32)
            A = np.zeros(n_ele_tot, dtype=np.float32)
            beta = np.zeros(n_ele_tot, dtype=np.float32)
            ro = np.zeros(n_ele_tot, dtype=np.float32)
            E = np.zeros(n_ele_tot, dtype=np.float32)
            J, A, h, beta, ro, E, G = calculate_element_properties(n_ele_tot, n_columns, n_beams, diag, spacing, height,
                                                                   J, A, h, beta, ro, E, X, Y, ele_nod, shear_mod,
                                                                   width_properties, height_properties,
                                                                   unit_weight_properties, elastic_mod_properties, truss_mode)

            # Save to NPZ file (compresses multiple arrays; suitable for GNN/Transformer input)
            # Extensible: Add more arrays (e.g., new properties) as keys later without breaking format
            filename = f"{dataset_folder}/truss_{truss_mode}_{n_div}_{angle}.npz"
            np.savez(filename,
                     nodal_coord=nodal_coord,
                     pel=pel,
                     ele_nod=ele_nod,
                     W=W,
                     J=J,
                     A=A,
                     h=h,
                     beta=beta,
                     ro=ro,
                     E=E,
                     G=G,
                     par=par, # Nodal-param relation
                     X=X,
                     Y=Y,
                     height=height,
                     spacing=spacing,
                     diag=diag,
                     truss_mode=truss_mode, # String saved as array of chars
                     n_div=n_div,
                     angle=angle,
                     n_columns=n_columns,
                     n_nod_tot=n_nod_tot,
                     n_rods=n_rods,
                     n_beams=n_beams,
                     n_ele_tot=n_ele_tot,
                     n_bot_beams=n_bot_beams,
                     skip_rod=np.array(skip_rod) # For reproducibility
                     )
            num_generated += 1

print(f"Generated and saved {num_generated} truss configurations to '{dataset_folder}'.")
|
| 121 |
+
|
| 122 |
+
def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
    """
    Compute and print min/max sequence lengths (nodes, elements) across the dataset.

    Filenames are expected as 'truss_<mode>_<n_div>_<angle>.npz'. For
    efficiency only one file per mode at the smallest and at the largest
    n_div is actually loaded.

    Args:
        dataset_folder (str): Path to the dataset folder containing .npz files.
        show_details (bool): If True, also print the keys of one example file.

    Returns:
        Dict with keys 'min_nodes', 'max_nodes', 'min_elements', 'max_elements'.

    Raises:
        ValueError: If the folder contains no .npz files.
    """
    npz_files = [name for name in os.listdir(dataset_folder) if name.endswith('.npz')]
    if not npz_files:
        raise ValueError(f"No .npz files found in '{dataset_folder}'.")

    def _parse(name):
        # 'truss_pratt_15_39.npz' -> ('pratt', 15); None for unrelated files.
        pieces = name[:-4].rsplit('_', 2)
        if len(pieces) == 3 and pieces[0].startswith('truss_'):
            return pieces[0][6:], int(pieces[1])
        return None

    # Pass 1: min/max n_div observed per truss mode.
    min_max_div = defaultdict(lambda: (float('inf'), float('-inf')))
    for name in npz_files:
        parsed = _parse(name)
        if parsed is None:
            continue
        mode, n_div = parsed
        lo, hi = min_max_div[mode]
        min_max_div[mode] = (min(lo, n_div), max(hi, n_div))

    # Pass 2: collect the candidate files at those extremes.
    min_div_files = defaultdict(list)
    max_div_files = defaultdict(list)
    for name in npz_files:
        parsed = _parse(name)
        if parsed is None:
            continue
        mode, n_div = parsed
        lo, hi = min_max_div[mode]
        if n_div == lo:
            min_div_files[mode].append(name)
        if n_div == hi:
            max_div_files[mode].append(name)

    # Load one representative file per extreme per mode and fold in its counts.
    min_n_nod = float('inf')
    max_n_nod = 0
    min_n_ele = float('inf')
    max_n_ele = 0
    for mode in sorted(min_max_div):
        if min_div_files[mode]:
            with np.load(os.path.join(dataset_folder, min_div_files[mode][0])) as data_min:
                min_n_nod = min(min_n_nod, int(data_min['n_nod_tot']))
                min_n_ele = min(min_n_ele, int(data_min['n_ele_tot']))
        if max_div_files[mode]:
            with np.load(os.path.join(dataset_folder, max_div_files[mode][0])) as data_max:
                max_n_nod = max(max_n_nod, int(data_max['n_nod_tot']))
                max_n_ele = max(max_n_ele, int(data_max['n_ele_tot']))

    print(f"Overall min sequence lengths: nodes={min_n_nod}, elements={min_n_ele}")
    print(f"Overall max sequence lengths: nodes={max_n_nod}, elements={max_n_ele}")

    # Additionally show, for one structure type, what the stored keys look like.
    if show_details:
        example_mode = next(iter(max_div_files))
        example_file = max_div_files[example_mode][0]
        with np.load(os.path.join(dataset_folder, example_file)) as example_data:
            print(f"\nExample data keys from '{example_file}': {example_data.files}")
            for key in example_data.files:
                print(f" - {key}: shape {example_data[key].shape}, dtype {example_data[key].dtype}")

    return {
        'min_nodes': min_n_nod,
        'max_nodes': max_n_nod,
        'min_elements': min_n_ele,
        'max_elements': max_n_ele
    }
|
| 208 |
+
|
| 209 |
+
def load_and_visualize_random_truss(dataset_folder="dataset", num_samples=1, save_fig=False):
    """
    Load random truss(es) from the dataset folder and visualize them.

    Picks num_samples .npz files at random, draws each truss (nodes colored by
    bottom/top chord, elements colored by beam/column/rod) into a shared grid
    of subplots, and returns the raw loaded data for further use.
    Arranges plots in figures of 3 per column-row (or 2 if divisible by 2 but not 3).

    Args:
        dataset_folder (str): Path to the dataset folder containing .npz files.
        num_samples (int): Number of random trusses to load and plot (default: 1).
        save_fig (bool): If True, saves each multi-plot figure to dataset_folder.

    Returns:
        List of dicts with loaded data for each sample (one dict per file,
        keyed by the .npz array names).

    Raises:
        ValueError: If the folder contains no .npz files.
    """
    npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
    if not npz_files:
        raise ValueError(f"No .npz files found in '{dataset_folder}'.")

    samples = []
    # Random selection: shuffle the full list, then keep the first num_samples.
    random.shuffle(npz_files)
    npz_files = npz_files[:num_samples]

    # Determine layout per figure: prefer 3 columns, fall back to 2, then 1.
    if num_samples % 3 == 0 or num_samples > 3:
        n_cols = 3
    elif num_samples % 2 == 0:
        n_cols = 2
    else:
        n_cols = 1
    n_rows = math.ceil(num_samples / n_cols)

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(5*n_cols, 3.5*n_rows))
    # plt.subplots returns a scalar Axes for 1x1 layouts; normalize to 1D array.
    axes = np.atleast_1d(axes).flatten()

    for i, filename in enumerate(npz_files):
        filepath = os.path.join(dataset_folder, filename)
        data = np.load(filepath)

        nodal_coord = data['nodal_coord']
        ele_nod = data['ele_nod']
        truss_mode = str(data['truss_mode'])
        n_div = int(data['n_div'])
        angle = float(data['angle'])

        # Element counts drive the beam/column/rod coloring below.
        n_beams = int(data['n_beams'])
        n_columns = int(data['n_columns'])
        n_rods = int(data['n_rods'])
        n_ele_tot = int(data['n_ele_tot'])

        ax = axes[i]
        # Span is normalized to 1.0, so x stays within [0, 1] plus margin.
        ax.set_xlim(-0.05, 1.05)
        ax.set_ylim(-0.05, max(nodal_coord[:,1]) * 1.1)
        ax.set_aspect('equal')
        ax.set_title(f"{truss_mode} (n_div={n_div}, angle={angle:.0f}°)")
        ax.grid(True, alpha=0.3)

        # Plot nodes: the bottom chord is identified by y ≈ 0.
        bottom_mask = np.abs(nodal_coord[:,1]) < 1e-6
        ax.scatter(nodal_coord[bottom_mask, 0], nodal_coord[bottom_mask, 1],
                   c='blue', s=45, label='Bottom Nodes')
        ax.scatter(nodal_coord[~bottom_mask, 0], nodal_coord[~bottom_mask, 1],
                   c='red', s=45, label='Top Nodes')

        # Plot elements. Ordering assumption: beams first, then columns, then
        # rods. NOTE(review): ele_nod is used directly to index nodal_coord,
        # so it appears to hold 0-based node indices here — confirm against
        # calculate_element_node.
        for j in range(n_ele_tot):
            node1, node2 = ele_nod[j]
            x1, y1 = nodal_coord[node1]
            x2, y2 = nodal_coord[node2]
            if j < n_beams:
                ax.plot([x1, x2], [y1, y2], 'g-', lw=2, label='Beams' if j == 0 else "")
            elif j < n_beams + n_columns:
                ax.plot([x1, x2], [y1, y2], 'k-', lw=3, label='Columns' if j == n_beams else "")
            else:
                ax.plot([x1, x2], [y1, y2], 'purple', ls='--', lw=1.5, label='Rods' if j == n_beams + n_columns else "")

        ax.legend(loc='upper right', fontsize=8)
        # Materialize the NpzFile into a plain dict so it outlives the handle.
        samples.append({k: data[k] for k in data.files})

    # Hide any unused axes in the last, partially filled row.
    for j in range(num_samples, len(axes)):
        axes[j].axis('off')

    plt.tight_layout()

    if save_fig:
        out_path = os.path.join(dataset_folder, f"random_truss_grid_{num_samples}.png")
        plt.savefig(out_path, dpi=150, bbox_inches='tight')
        print(f"Saved figure to {out_path}")

    plt.show()
    return samples
|
| 299 |
+
|
| 300 |
+
# Report dataset-wide sequence-length statistics and dump one example file's keys.
compute_sequence_lengths(dataset_folder="dataset", show_details=True)
# Visualize 9 random trusses (3x3 grid) and save the figure to the dataset folder.
samples = load_and_visualize_random_truss(num_samples=9, save_fig=True)
|
truss_optimized.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
utils/cython/setup.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Build script for the `assemble_matrices` Cython extension.
# Compiles assemble_matrices.pyx together with the generated C helpers
# (K_beam_func.c / X_beam_func.c) into one extension module.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
import os

# Get the absolute path of the current file so the build works regardless
# of the directory setup.py is invoked from.
current_dir = os.path.dirname(os.path.abspath(__file__))

# Define the sources: the Cython entry point plus pre-generated C files.
sources = [
    os.path.join(current_dir, 'assemble_matrices.pyx'),
    os.path.join(current_dir, 'K_beam_func.c'),
    os.path.join(current_dir, 'X_beam_func.c'),
]

# Verify files exist. Missing files are only warned about here; the actual
# build below will fail if they are truly absent.
for file_path in sources:
    if not os.path.exists(file_path):
        print(f"Warning: File not found: {file_path}")

# Define the extension. NumPy headers are needed because the .pyx uses the
# NumPy C API; /Ox is MSVC's full-optimization flag, -O3 the GCC/Clang one.
extensions = [
    Extension(
        "assemble_matrices",
        sources,
        include_dirs=[np.get_include(), current_dir],
        extra_compile_args=["/Ox"] if os.name == 'nt' else ["-O3"],
    )
]

setup(
    name="assemble_matrices",
    ext_modules=cythonize(extensions),
)
|
utils/frame_constraints.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Frame constants
|
| 2 |
+
N_columns = 2
|
| 3 |
+
N_floors = 5
|
| 4 |
+
N_nod_tot = (N_floors + 1) * N_columns
|
| 5 |
+
N_par_nod = 3
|
| 6 |
+
N_par_tot = N_nod_tot * N_par_nod
|
| 7 |
+
N_ele_tot = N_floors * (2 * N_columns - 1)
|
| 8 |
+
N_nod_ele = 2
|
| 9 |
+
N_par_ele = N_par_nod * N_nod_ele
|
| 10 |
+
N_tot_bound = 6
|
| 11 |
+
N_plots = 4
|
| 12 |
+
|
| 13 |
+
# Number of points for plotting
|
| 14 |
+
N_discritizations = 10
|
| 15 |
+
|
| 16 |
+
# Distance between nodes in meters
|
| 17 |
+
X_dist = 4
|
| 18 |
+
Y_dist = 3
|
| 19 |
+
|
| 20 |
+
# Columns 40x40 and beams 30x35 in cm
|
| 21 |
+
width_beam = 0.3
|
| 22 |
+
height_beam = 0.35
|
| 23 |
+
width_column = 0.4
|
| 24 |
+
height_column = 0.4
|
| 25 |
+
|
| 26 |
+
# Horizontal load on columns and angle in kN and degrees
|
| 27 |
+
po = 100
|
| 28 |
+
theta = 0
|
| 29 |
+
|
| 30 |
+
# Unit weight and elastic modulus in kN/m^3 and kN/m^2
|
| 31 |
+
unit_weight = 78.5
|
| 32 |
+
elastic_mod = 21*10**7
|
utils/frame_helpers.py
ADDED
|
@@ -0,0 +1,382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import sympy as sp
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from sympy import Matrix, lambdify
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def calculate_X_positions(indices, N_columns, X_dist):
    """
    Calculate the X positions of the nodes.

    Nodes are numbered 1..N_nod_tot row by row, so the column of node ``i``
    is given by ``i % N_columns``, with the wrap-around value 0 denoting the
    right-most column.

    Args:
        indices (np.ndarray): 1-based node indices (1 to N_nod_tot).
        N_columns (int): Number of columns.
        X_dist (int): Horizontal distance between adjacent columns.

    Returns:
        np.ndarray: X position of each node.
    """
    # Columns 1..N_columns-1 map to 0, X_dist, 2*X_dist, ...; the last column
    # (i % N_columns == 0) comes out negative and is patched below.
    # (Removed a dead `X = np.zeros(...)` that was immediately overwritten.)
    X = ((indices % N_columns) - 1) * X_dist
    np.putmask(X, X < 0, (N_columns - 1) * X_dist)
    return X
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def calculate_Y_positions(indices, N_columns, Y_dist):
    """
    Calculate the Y positions of the nodes.

    Nodes are numbered 1..N_nod_tot row by row, N_columns per level, so the
    floor of node ``i`` is ``ceil(i / N_columns - 1)`` (0 for the ground row).

    Args:
        indices (np.ndarray): 1-based node indices (1 to N_nod_tot).
        N_columns (int): Number of columns.
        Y_dist (int): Storey height.

    Returns:
        np.ndarray: Y position of each node.
    """
    # Floor number of each node; ground-row nodes produce values < 1
    # (including -0.0 from np.ceil) and are clamped to floor 0.
    # (Removed a dead `Y = np.zeros(...)` that was immediately overwritten.)
    h_assigner = np.ceil(indices / N_columns - 1)
    h_assigner[h_assigner < 1] = 0
    Y = Y_dist * h_assigner
    return Y
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def calculate_element_node_indices(N_floors, N_columns):
    """
    Calculate the element-node connectivity of the frame.

    Per floor the layout is: N_columns vertical column elements first,
    followed by N_columns - 1 horizontal beam elements. Node numbers are
    1-based, N_columns nodes per level.

    Args:
        N_floors (int): Number of floors in the frame
        N_columns (int): Number of columns

    Returns:
        Numpy array of shape (N_floors * (2*N_columns - 1), 2) with the
        1-based start/end node of each element.
    """
    elements_per_floor = 2 * N_columns - 1
    ele_nod = np.zeros((N_floors * elements_per_floor, 2), dtype=int)

    for floor in range(N_floors):
        base = floor * elements_per_floor
        # Vertical columns: connect each node of this level to the one above.
        for col in range(N_columns):
            lower = col + 1 + floor * N_columns
            ele_nod[base + col, 0] = lower
            ele_nod[base + col, 1] = lower + N_columns
        # Horizontal beams: connect adjacent nodes of the level above.
        for col in range(N_columns - 1):
            left = (floor + 1) * N_columns + col + 1
            ele_nod[base + N_columns + col, 0] = left
            ele_nod[base + N_columns + col, 1] = left + 1

    return ele_nod
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def calculate_element_length(N_ele_tot, N_columns, X_dist, Y_dist):
    """
    Calculate the length of the elements.

    Element ordering follows calculate_element_node_indices: each floor
    contributes N_columns vertical columns (length Y_dist) followed by
    N_columns - 1 horizontal beams (length X_dist).

    Args:
        N_ele_tot (int): Number of elements in the frame
        N_columns (int): Number of columns
        X_dist (int): Distance between columns
        Y_dist (int): Height of the columns

    Returns:
        Numpy array of element lengths
    """
    # Position of each element within its floor (0-based).
    # NOTE: the previous implementation used `i % (N_columns + 1)`, which
    # happens to match the layout only for N_columns == 2; this form matches
    # the element ordering produced by calculate_element_node_indices for any
    # column count, and is identical for N_columns == 2.
    per_floor = 2 * N_columns - 1
    pos_in_floor = np.arange(N_ele_tot) % per_floor

    # First N_columns slots per floor are verticals, the rest horizontals.
    return np.where(pos_in_floor < N_columns, Y_dist, X_dist).astype(float)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
#### Heavy functions
|
| 98 |
+
def initialize_symbols(N_par_ele):
    """
    Create and return the symbolic variables used in the calculations.

    Args:
        N_par_ele (int): Number of parameters per element

    Returns:
        Tuple of sympy symbols: spatial coordinate x, element length/angle,
        the element parameter array qe (q1..qN), shape-function coefficients,
        section/material symbols, modal displacement symbols, and old
        coordinates.
    """
    # Spatial coordinate and element geometry symbols.
    x, h_e, beta_e, beta_curr = sp.symbols('x h_e beta_e beta_curr')
    # Generalized element coordinates q1..qN.
    qe = sp.Array([sp.Symbol('q{}'.format(k)) for k in range(1, N_par_ele + 1)])
    # Polynomial coefficients of the axial (a*) and bending (c*) shape functions.
    a0, a1, c0, c1, c2, c3 = sp.symbols('a0 a1 c0 c1 c2 c3')
    # Section, material and time-related symbols.
    A_e, E_e, J_e, ro_e, T, fo_E = sp.symbols('A_e E_e J_e ro_e T fo_E')
    # Global modal displacements of the six element parameters.
    (Qglo_pel_curr1_mode, Qglo_pel_curr2_mode, Qglo_pel_curr3_mode,
     Qglo_pel_curr4_mode, Qglo_pel_curr5_mode, Qglo_pel_curr6_mode) = sp.symbols(
        'Qglo_pel_curr1_mode Qglo_pel_curr2_mode Qglo_pel_curr3_mode '
        'Qglo_pel_curr4_mode Qglo_pel_curr5_mode Qglo_pel_curr6_mode')
    X_old, Y_old = sp.symbols('X_old Y_old')

    return (x, h_e, beta_e, beta_curr, qe, a0, a1, c0, c1, c2, c3, A_e, E_e, J_e, ro_e,
            T, fo_E, Qglo_pel_curr1_mode, Qglo_pel_curr2_mode, Qglo_pel_curr3_mode,
            Qglo_pel_curr4_mode, Qglo_pel_curr5_mode, Qglo_pel_curr6_mode, X_old, Y_old)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def calculate_energies(x, qe, h_e, beta_e, E_e, J_e, A_e, ro_e, ve_beam, ue_beam):
    """
    Calculate the potential and kinetic energy of a beam element.

    Args:
        x (symbol): Symbolic variable for the x-coordinate
        qe (list): Symbolic element parameters (unused directly; ve/ue depend on them)
        h_e (symbol): Length of the beam element
        beta_e (symbol): Angle of the beam element
        E_e (symbol): Young's modulus of the beam material
        J_e (symbol): Moment of inertia of the beam cross-section
        A_e (symbol): Cross-sectional area of the beam
        ro_e (symbol): Density of the beam material
        ve_beam (sympy expression): Transverse displacement field of the beam
        ue_beam (sympy expression): Axial displacement field of the beam

    Returns:
        Pot_beam (sympy expression): Potential (strain) energy of the beam
        Kin_beam (sympy expression): Kinetic-energy form of the beam
        chi_beam (sympy expression): Bending curvature (v'')
        eps_beam (sympy expression): Axial strain (u')
    """
    # Curvature is the second spatial derivative of the transverse field;
    # axial strain is the first derivative of the axial field.
    chi_beam = sp.diff(ve_beam, x, 2)
    eps_beam = sp.diff(ue_beam, x)

    # Strain energy: bending (E*J*chi^2) plus axial (E*A*eps^2) contributions,
    # integrated over the element length.
    bending_plus_axial = E_e * J_e * chi_beam**2 + E_e * A_e * eps_beam**2
    Pot_beam = 1 / 2 * sp.integrate(bending_plus_axial, (x, 0, h_e))
    # Kinetic-energy form: distributed mass ro*A times the squared fields.
    Kin_beam = 1 / 2 * ro_e * A_e * sp.integrate(ve_beam**2 + ue_beam**2, (x, 0, h_e))

    return Pot_beam, Kin_beam, chi_beam, eps_beam
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def calculate_beam_displacement_equations(x, h_e, beta_e, qe, a0, a1, c0, c1, c2, c3):
    """
    Calculate the beam displacement equations.

    Builds the cubic (transverse) and linear (axial) shape functions of a
    2-node beam element, solves their coefficients from the six nodal
    conditions expressed in the element's local frame, and returns both the
    symbolic fields and fast numeric (lambdified) versions.

    Args:
        x, h_e, beta_e: Symbols for local coordinate, element length, element angle.
        qe: Symbolic element parameter array q1..q6 (global nodal DOFs).
        a0, a1: Coefficient symbols of the linear axial field.
        c0, c1, c2, c3: Coefficient symbols of the cubic transverse field.

    Returns:
        ve_beam_func, ue_beam_func: numpy-callable displacement functions of
            (x, qe, h_e, beta_e); ve_beam, ue_beam: the symbolic expressions.
    """
    # Compute v1, u1, v2, u2: rotate the global nodal translations (qe[0:2],
    # qe[3:5]) into the element's local transverse (v) / axial (u) directions.
    v1 = -qe[0] * sp.sin(beta_e) + qe[1] * sp.cos(beta_e)
    u1 = qe[0] * sp.cos(beta_e) + qe[1] * sp.sin(beta_e)
    v2 = -qe[3] * sp.sin(beta_e) + qe[4] * sp.cos(beta_e)
    u2 = qe[3] * sp.cos(beta_e) + qe[4] * sp.sin(beta_e)

    # Define beam displacement equations: linear axial, cubic transverse ansatz.
    u_beam = a0 + a1 * x
    v_beam = c0 + c1 * x + c2 * x**2 + c3 * x**3

    # Define the six nodal conditions: transverse displacement and slope at
    # both ends (qe[2], qe[5] are the nodal rotations) plus axial displacement
    # at both ends.
    equations = [
        v_beam.subs(x, 0) - v1,
        sp.diff(v_beam, x).subs(x, 0) - qe[2],
        v_beam.subs(x, h_e) - v2,
        sp.diff(v_beam, x).subs(x, h_e) - qe[5],
        u_beam.subs(x, 0) - u1,
        u_beam.subs(x, h_e) - u2
    ]

    # Solve for the shape-function coefficients and substitute them back so
    # the fields depend only on (x, qe, h_e, beta_e).
    sol = sp.solve(equations, (c0, c1, c2, c3, a0, a1))
    ve_beam = v_beam.subs(sol)
    ue_beam = u_beam.subs(sol)

    # Lambdify ve_beam and ue_beam for fast numeric evaluation.
    ve_beam_func = lambdify((x, qe, h_e, beta_e), ve_beam, "numpy")
    ue_beam_func = lambdify((x, qe, h_e, beta_e), ue_beam, "numpy")

    return ve_beam_func, ue_beam_func, ve_beam, ue_beam
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def assemble_global_matrices(N_par_ele, N_par_tot, N_ele_tot, Pot_beam, Kin_beam, qe, h,
                             A, E, J, beta, ro, pel, h_e, A_e, E_e, J_e, beta_e, ro_e, x):
    """
    Assemble the global stiffness and mass matrices by assembling the
    symbolic element stiffness and mass matrices and converting them to
    numeric arrays.

    The element matrices are obtained as second derivatives of the element
    potential/kinetic energy with respect to the element parameters qe, and
    are lambdified once so the per-element evaluation is numeric.

    Args:
        N_par_ele (int): Number of parameters (DOFs) per element.
        N_par_tot (int): Total number of parameters (DOFs) in the system.
        N_ele_tot (int): Total number of elements.
        Pot_beam, Kin_beam: Symbolic element energies in terms of qe.
        qe: Symbolic element parameter array.
        h, A, E, J, beta, ro (np.ndarray): Per-element numeric properties.
        pel (np.ndarray): Element-to-global DOF map; 1-based (note the -1 below).
        h_e, A_e, E_e, J_e, beta_e, ro_e, x: The matching sympy symbols.

    Returns:
        Numeric arrays of the global stiffness and mass matrices
    """
    K_beam = np.zeros((N_par_ele, N_par_ele), dtype=object)
    M_beam = np.zeros((N_par_ele, N_par_ele), dtype=object)

    # Compute K_beam and M_beam: K_ij = d^2(Pot)/dq_i dq_j, M_ij = d^2(Kin)/dq_i dq_j,
    # each lambdified into a numeric function of the element properties.
    for i in range(N_par_ele):
        for j in range(N_par_ele):
            K_beam[i][j] = sp.lambdify((x, h_e, A_e, E_e, J_e, beta_e, ro_e),
                                       sp.diff(sp.diff(Pot_beam, qe[i]), qe[j]), 'numpy')
            M_beam[i][j] = sp.lambdify((x, h_e, A_e, E_e, J_e, beta_e, ro_e),
                                       sp.diff(sp.diff(Kin_beam, qe[i]), qe[j]), 'numpy')

    # Initialize element stiffness matrix (Ke) and global stiffness matrix (K)
    K = np.zeros((N_par_tot, N_par_tot))
    M = np.zeros((N_par_tot, N_par_tot))

    # Compute Ke, Me and assemble K, M using NumPy operations. The x argument
    # is passed as 0 — the integrated energies no longer depend on x.
    # pel holds 1-based global DOF numbers, hence the -1 when indexing.
    for e in range(N_ele_tot):
        for i in range(N_par_ele):
            for j in range(N_par_ele):
                K[pel[e, i]-1, pel[e, j]-1] += K_beam[i, j](0, h[e], A[e], E[e], J[e], beta[e], ro[e])
                M[pel[e, i]-1, pel[e, j]-1] += M_beam[i, j](0, h[e], A[e], E[e], J[e], beta[e], ro[e])

    return K, M
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def apply_boundary_conditions(N_par_tot, N_nod_tot, N_par_nod, w, K, M):
    """
    Apply boundary conditions to the stiffness and mass matrices in place.

    DOFs flagged with 1 in ``w`` are restrained: their rows and columns are
    zeroed, and the diagonal is set to 1 (stiffness) or 1e-30 (mass) so the
    matrices remain invertible without affecting the free DOFs.

    Returns:
        The stiffness and mass matrices with the boundary conditions applied
        (the inputs are modified in place and also returned).
    """
    restrained = np.flatnonzero(w == 1)

    # Zero the restrained rows/columns, then pin the diagonal entries.
    for matrix, diag_value in ((K, 1.0), (M, 1e-30)):
        matrix[restrained, :] = 0
        matrix[:, restrained] = 0
        matrix[restrained, restrained] = diag_value

    return K, M
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def compute_eigenvalues_and_eigenvectors(K, M):
    """
    Compute the eigenvalues and eigenvectors of the generalized problem
    K·phi = lambda·M·phi, solved as the standard problem on inv(M)·K.

    Eigenpairs are returned sorted by descending eigenvalue, with each
    eigenvector mass-normalized so that phi.T @ M @ phi == 1.

    Returns:
        The sorted eigenvalues (squared frequencies) and the mass-normalized
        eigenvectors (modes of vibration), one mode per column.
    """
    eigvals, eigvecs = np.linalg.eig(np.linalg.inv(M) @ K)

    # Descending order of eigenvalue.
    order = np.argsort(eigvals)[::-1]
    eigvals_sorted = eigvals[order]
    vecs_sorted = eigvecs[:, order]

    # Mass-normalize each mode shape.
    # NOTE(review): the target array is real, so any residual imaginary part
    # from np.linalg.eig is dropped on assignment — confirm this is intended.
    n_modes = len(eigvals)
    modes = np.zeros((n_modes, n_modes))
    for m in range(n_modes):
        shape = vecs_sorted[:, m]
        modes[:, m] = shape / np.sqrt(shape.T @ (M @ shape))

    return eigvals_sorted, modes
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def get_mode_indices(lamb_r, phis_norm, N_plots):
    """
    Select the N_plots modes with the largest periods (top contributors).

    Args:
        lamb_r: Eigenvalues (squared circular frequencies).
        phis_norm: Normalized eigenvectors (unused here; kept for interface).
        N_plots: How many modes to select.

    Returns:
        Indices of the N_plots largest periods, in descending order of
        period, and the full period array.
    """
    # T = 2*pi/omega with omega = sqrt(lambda).
    period = 2 * np.pi / np.sqrt(lamb_r)

    # All indices sorted by descending period; keep the leading N_plots.
    index_modes = np.argsort(period)[::-1][:N_plots]

    return index_modes, period
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def calculate_global_displacements(Qglo_pel_curr1_mode, Qglo_pel_curr2_mode, Qglo_pel_curr3_mode, Qglo_pel_curr4_mode,
                                   Qglo_pel_curr5_mode, Qglo_pel_curr6_mode, beta_curr, h_e):
    """
    Calculate the global displacements by solving the local symbolic
    equilibrium equations.

    Args (all sympy symbols):
        Qglo_pel_curr1_mode..Qglo_pel_curr6_mode: element nodal DOFs in
            global axes — presumably (u_i, v_i, theta_i, u_j, v_j, theta_j);
            TODO confirm ordering against caller.
        beta_curr: element inclination angle.
        h_e: element length.

    Returns:
        Lambda functions for the global displacements
    """
    # Define symbols
    x, f0, f1, g0, g1, g2, g3, X_old, Y_old = sp.symbols('x f0 f1 g0 g1 g2 g3 X_old Y_old')

    # Define local displacements: rotate the global nodal DOFs into the
    # element's local frame.
    u_loc_i = Qglo_pel_curr1_mode * sp.cos(beta_curr) + Qglo_pel_curr2_mode * sp.sin(beta_curr)
    v_loc_i = -Qglo_pel_curr1_mode * sp.sin(beta_curr) + Qglo_pel_curr2_mode * sp.cos(beta_curr)
    u_loc_j = Qglo_pel_curr4_mode * sp.cos(beta_curr) + Qglo_pel_curr5_mode * sp.sin(beta_curr)
    v_loc_j = -Qglo_pel_curr4_mode * sp.sin(beta_curr) + Qglo_pel_curr5_mode * sp.cos(beta_curr)

    # Define beam displacements: linear axial field, cubic transverse field.
    u_beam = f1 * x + f0
    v_beam = g3 * x**3 + g2 * x**2 + g1 * x + g0

    # Define equilibrium equations: match field values and slopes to the
    # nodal DOFs at the element ends x = 0 and x = h_e.
    equations = [
        v_beam.subs(x, 0) - v_loc_i,
        sp.diff(v_beam, x).subs(x, 0) - Qglo_pel_curr3_mode,
        v_beam.subs(x, h_e) - v_loc_j,
        sp.diff(v_beam, x).subs(x, h_e) - Qglo_pel_curr6_mode,
        u_beam.subs(x, 0) - u_loc_i,
        u_beam.subs(x, h_e) - u_loc_j
    ]

    # Solve the equations
    sol = sp.solve(equations, (f0, f1, g0, g1, g2, g3))

    # Assign the solution
    # NOTE(review): this relies on sol being a dict whose values iterate in
    # the (f0, f1, g0, g1, g2, g3) order passed to solve — verify on the
    # sympy version in use.
    f0, f1, g0, g1, g2, g3 = sol.values()
    u_beam = u_beam.subs(sol)
    v_beam = v_beam.subs(sol)


    # Define new coordinates using the same expressions as in the original code:
    # undeformed position along the element axis plus the rotated displacements.
    X_new_expr = X_old + x * sp.cos(beta_curr) + u_beam * sp.cos(beta_curr) - v_beam * sp.sin(beta_curr)
    Y_new_expr = Y_old + x * sp.sin(beta_curr) + u_beam * sp.sin(beta_curr) + v_beam * sp.cos(beta_curr)

    # Substitute the solution into the expressions
    # (u_beam/v_beam were already substituted above, so this is a safeguard).
    X_new_expr_sub = X_new_expr.subs(sol)
    Y_new_expr_sub = Y_new_expr.subs(sol)

    # Convert X_new and Y_new to lambda functions
    X_new_sub_func = lambdify(
        (x, X_old, Y_old, beta_curr, Qglo_pel_curr1_mode, Qglo_pel_curr2_mode, Qglo_pel_curr3_mode, Qglo_pel_curr4_mode, Qglo_pel_curr5_mode, Qglo_pel_curr6_mode, h_e),
        X_new_expr_sub,
        "numpy",
    )

    Y_new_sub_func = lambdify(
        (x, X_old, Y_old, beta_curr, Qglo_pel_curr1_mode, Qglo_pel_curr2_mode, Qglo_pel_curr3_mode, Qglo_pel_curr4_mode, Qglo_pel_curr5_mode, Qglo_pel_curr6_mode, h_e),
        Y_new_expr_sub,
        "numpy",
    )

    return X_new_sub_func, Y_new_sub_func
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def print_matrix(matrix, width=8, precision=3, row_labels=None, col_labels=None):
    """
    Pretty-print a 2-D matrix with aligned, right-justified row and column
    labels. Labels default to 1-based indices; values are printed as fixed-
    point floats with the given field width and precision.
    """
    if row_labels is None:
        row_labels = range(1, matrix.shape[0] + 1)
    if col_labels is None:
        col_labels = range(1, matrix.shape[1] + 1)

    # Header row: blank corner cell followed by the column labels.
    header = " " * width + "".join(f"{label:{width}}" for label in col_labels)
    print(header)

    # One line per matrix row: label cell then the formatted values.
    for label, row in zip(row_labels, matrix):
        cells = "".join(f"{value:{width}.{precision}f}" for value in row)
        print(f"{label:{width}}{cells}")
|
utils/simple_beam_analysis.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
def define_forces(W: np.ndarray, node_idxs: List, force_list: List,
                  n_par_nod: int, node_pos: Optional[List] = None):
    """
    Define the forces acting on the beam. Index is 0-based.

    Builds a flat external-force vector (one float32 entry per DOF), placing
    each group of n_par_nod force components at the DOFs of the given node
    indices (or explicit positions, if node_pos is supplied), then zeroes
    the restrained DOFs flagged with 1 in W.
    """
    n_loaded = len(force_list) // n_par_nod
    if len(node_idxs) != n_loaded:
        raise ValueError("# of forces/n_par_nod != # nodes.")
    if node_pos and len(node_pos) != n_loaded:
        raise ValueError("# of forces/n_par_nod != # nodes positions.")

    ext_forces = np.zeros(W.shape[0], dtype=np.float32)

    # node_pos, when given, overrides node_idxs as the placement targets.
    targets = node_pos if node_pos else node_idxs
    for i, target in enumerate(targets):
        dof_start = target * n_par_nod
        ext_forces[dof_start:dof_start + n_par_nod] = \
            force_list[i * n_par_nod:(i + 1) * n_par_nod]

    # Apply boundary conditions: restrained DOFs (W == 1) carry no load.
    return ext_forces * (1 - W)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
################## Direct Stiffness Method for 2D Bernoulli Beam ##################
|
| 34 |
+
def direct_assemble_global_matrices(n_par_ele: int, n_par_tot: int, n_ele_tot: int,
                                    ele_nod: np.ndarray, h: np.ndarray,
                                    E: np.ndarray, J: np.ndarray, A: np.ndarray,
                                    beta: np.ndarray, pel: np.ndarray) -> np.ndarray:
    """
    Assemble the global stiffness matrix for 2D Bernoulli beam elements dynamically.

    Args:
        n_par_ele: Number of parameters per element (6 for 2 nodes with 3 DOFs each)
        n_par_tot: Total number of DOFs in the system
        n_ele_tot: Total number of elements
        ele_nod: Element-node connectivity array (n_ele_tot x 2)
        h: Element lengths array
        E: Young's modulus array
        J: Second moment of inertia array
        A: Cross-sectional area array
        beta: Element angles array (in radians)
        pel: DOF mapping array (n_ele_tot x n_par_ele), 1-based

    Returns:
        K: Global stiffness matrix of size (n_par_tot x n_par_tot)
    """
    K = np.zeros((n_par_tot, n_par_tot), dtype=np.float64)

    for ele in range(n_ele_tot):
        length = h[ele]
        axial = E[ele] * A[ele] / length          # EA/L term
        bend = E[ele] * J[ele]                     # flexural rigidity EI
        b3 = bend / length**3
        b2 = bend / length**2
        b1 = bend / length

        # Local (element-frame) Bernoulli beam stiffness, 6x6.
        k_local = np.array([
            [ axial,       0,      0, -axial,       0,      0],
            [     0,  12 * b3, 6 * b2,      0, -12 * b3, 6 * b2],
            [     0,   6 * b2, 4 * b1,      0,  -6 * b2, 2 * b1],
            [-axial,       0,      0,  axial,       0,      0],
            [     0, -12 * b3, -6 * b2,     0,  12 * b3, -6 * b2],
            [     0,   6 * b2, 2 * b1,      0,  -6 * b2, 4 * b1],
        ])

        # Build the 6x6 transformation from two 3x3 rotation blocks.
        c = np.cos(beta[ele])
        s = np.sin(beta[ele])
        rot = np.array([[c, s, 0.0], [-s, c, 0.0], [0.0, 0.0, 1.0]])
        T = np.zeros((6, 6))
        T[:3, :3] = rot
        T[3:, 3:] = rot

        # Rotate into global coordinates and scatter into K (pel is 1-based).
        dof = pel[ele, :] - 1
        K[np.ix_(dof, dof)] += T.T @ k_local @ T

    return K
|
| 101 |
+
|
| 102 |
+
def direct_apply_boundary_conditions(K: np.ndarray, W: np.ndarray) -> np.ndarray:
    """
    Apply boundary conditions to global stiffness matrix using W array.
    W[i] = 1 means DOF i is restrained, W[i] = 0 means DOF i is free.

    The restrained rows/columns are zeroed and the diagonal is set to the
    largest diagonal magnitude of the ORIGINAL matrix (a penalty value that
    keeps the system well conditioned). The input matrix is not modified.

    Args:
        K: Global stiffness matrix
        W: Boundary condition array (1 for restrained, 0 for free DOFs)

    Returns:
        K: Modified stiffness matrix with boundary conditions applied
    """
    K_bc = K.copy()
    # Penalty taken from the original diagonal, before any zeroing.
    penalty = np.max(np.abs(np.diag(K)))

    fixed = np.flatnonzero(W == 1)
    K_bc[fixed, :] = 0
    K_bc[:, fixed] = 0
    K_bc[fixed, fixed] = penalty

    return K_bc
|
utils/simple_beam_constraints.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Module-level material and geometry constants for the simple-beam model.
# Units: lengths in m, loads in kN, unit weights in kN/m^3, moduli in kN/m^2.

# General parameters
span = 1.4  # in m
angle = 0  # in degrees
n_dim = 2  # Number of dimensions
n_nod_ele = 2  # Number of nodes per element
n_par_nod = 3  # Number of parameters (DOFs) per node
n_par_ele = n_par_nod * n_nod_ele  # Number of parameters per element

# Plot settings
n_discritizations = 10  # Number of points for plotting
n_plots = 4  # Number of plots for the bridge's truss

# For paper verification, steel beams are 4x4 cm and concrete beams are 12x27 in cm
width_beam = 0.04
height_beam = 0.04
width_beam_conc = 0.12
height_beam_conc = 0.27

width_column = 0.4
height_column = 0.4

width_rod = 0.1
height_rod = 0.1

# Horizontal load on columns and angle in kN and degrees
po = 100
theta = 0

# Unit weight and elastic modulus in kN/m^3 and kN/m^2
unit_weight_steel = 78.5  # Steel 78.5 typically
elastic_mod = 183*10**6  # Steel 210 GPA typically
elastic_mod_rod = 210*10**6
# NOTE(review): elastic_mod_rod is assigned again below (21*10**7); both
# evaluate to 2.1e8 kN/m^2, and the later assignment is the one in effect.

# Compressive 51 Mpa and tensile 3.6 Mpa
# Unit weight and elastic modulus in kN/m^3 and kN/m^2
unit_weight_conc = 18.7  # Steel 78.5, concrete 1870
elastic_mod_conc = 24*10**6  # Steel 210, concrete 24
elastic_mod_rod = 21*10**7  # duplicate of the assignment above (same value)

# Shear modulus in kN/m^2
shear_mod = 8*10**6
k_shear = 0.9  # shear correction factor



################## HASHMAPS ##################

# put them all in a hashmap for easy access by element kind
# ("beam" / "column" / "rod")
width_properties = {}
height_properties = {}
unit_weight_properties = {}
elastic_mod_properties = {}

width_properties["beam"] = width_beam
width_properties["column"] = width_column
width_properties["rod"] = width_rod

height_properties["beam"] = height_beam
height_properties["column"] = height_column
height_properties["rod"] = height_rod

unit_weight_properties["beam"] = unit_weight_steel
unit_weight_properties["column"] = unit_weight_steel
unit_weight_properties["rod"] = unit_weight_steel

elastic_mod_properties["beam"] = elastic_mod
elastic_mod_properties["column"] = elastic_mod
elastic_mod_properties["rod"] = elastic_mod_rod
|
| 69 |
+
|
utils/simple_beam_helper.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import numpy as np
|
| 3 |
+
from typing import List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
# Get the logger
|
| 6 |
+
logger = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
def calculate_simple_essential_elements(span: float, spacing: Optional[float] = 0,
                                        truss_mode: str = "simple", beam_partition: int = 1,
                                        col_placements: Optional[List[float]] = None,
                                        skip_col: Optional[List[int]] = None,
                                        cantilever_sides: int = 1,
                                        add_extra_pt: bool = False) -> Tuple[int, int, int, int, int, int]:
    """
    Calculate the number of columns, nodes, rods, beams, and total elements
    for a simple (optionally cantilevered) beam.

    Args:
        span (float): The total length of the beam.
        spacing (float): Distance between supports; defaults to 1 when falsy.
        truss_mode (str): "simple" or a cantilever variant ("simple_cant").
        beam_partition (int): Number of divisions per beam segment.
        col_placements (List[float], optional): Custom support positions.
        skip_col (List[int], optional): Indices of supports to skip.
        cantilever_sides (int): Number of cantilevered ends (capped at 2).
        add_extra_pt (bool): Whether to insert one extra mid-structure node.

    Returns:
        Tuple[int, int, int, int, int, int]: (n_columns, n_nod_tot, n_rods,
        n_beams, n_ele_tot, n_bot_beams) — rods and bottom beams are always
        0 for simple beams.
    """
    if not spacing:
        print("Spacing was not provided. Defaulting to 1. Hence a simply supported beam.")
        spacing = 1.0
    assert span % spacing == 0, "Spacing does not divide the span evenly."

    n_columns = int(span // spacing) + 1
    n_beams = (n_columns - 1) * beam_partition

    # Cantilever variants add extra beam segments beyond the supports.
    if truss_mode != "simple":
        if cantilever_sides > 2:
            print("Cantilever sides cannot be more than 2. Defaulting to 2.")
            cantilever_sides = 2
        n_beams += cantilever_sides * beam_partition

    n_nod_tot = n_beams + 1  # one more node than beams

    # Reject an extra point that would land exactly on the center node.
    if add_extra_pt and span / 2 == n_nod_tot / 2 * spacing:
        print("Extra point cannot be added at the center. Defaulting to False.")
        add_extra_pt = False

    if skip_col:
        n_nod_tot -= len(skip_col)

    if add_extra_pt:
        n_nod_tot += 1
        n_beams += 1

    # n_rods and n_bot_beams are always 0 for simple beams; elements == beams.
    return n_columns, n_nod_tot, 0, n_beams, n_beams, 0
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def calculate_simple_element_node(span: float, spacing: Union[float, List[float]], n_dim: int,
                                  n_par_nod: int, truss_mode: str = "simple",
                                  beam_partition: int = 1,
                                  col_placements: Optional[List[float]] = None,
                                  skip_col: Optional[List[int]] = None,
                                  cantilever_sides: int = 1,
                                  add_extra_pt: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]:
    """
    Calculate nodal coordinates, the node-parameter relation, and the
    element-node relationships for a simple beam.

    This orchestrates the helper functions in this module: element counts,
    nodal coordinates, the DOF numbering table, the parameter-element map,
    and the element-node map.

    Args:
        span (float): The total length of the beam.
        spacing (float or List[float]): Distance between nodes or list of node positions.
        n_dim (int): Number of dimensions (usually 2).
        n_par_nod (int): Number of parameters (DOFs) per node.
        truss_mode (str): The type of beam ("simple", "simple_cant").
        beam_partition (int): Number of divisions in the beam.
        col_placements (List[float], optional): Custom support positions.
        skip_col (List[int], optional): Indices of columns to skip.
        cantilever_sides (int): Number of cantilevered ends.
        add_extra_pt (bool): Whether to insert one extra node.

    Returns:
        Tuple of (nodal coordinates, parameter matrix, parameter-element
        relation, element-node relation, total parameter count).
    """
    # Sizes first: how many nodes/beams this configuration yields.
    (n_columns, n_nod_tot, n_rods, n_beams,
     n_ele_tot, n_bot_beams) = calculate_simple_essential_elements(
        span, spacing, truss_mode, beam_partition, col_placements,
        skip_col, cantilever_sides, add_extra_pt)

    n_par_tot = n_nod_tot * n_par_nod

    # Geometry: where each node sits along the x axis.
    nodal_coord = nodal_coords_simple(n_nod_tot, n_dim, spacing, span, n_beams,
                                      col_placements, skip_col, beam_partition,
                                      truss_mode, cantilever_sides, add_extra_pt)

    # DOF numbering: sequential 1-based parameters, n_par_nod per node.
    par = np.arange(1, n_par_tot + 1).reshape(n_nod_tot, n_par_nod)

    # Connectivity tables derived from the DOF numbering.
    pel = pel_ele_simple(par, n_beams, n_par_nod)
    ele_nod = fill_ele_nod_simple(n_beams, n_par_nod, pel)

    return nodal_coord, par, pel, ele_nod, n_par_tot
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def nodal_coords_simple(n_nod_tot: int, n_dim: int, spacing: Union[float, List[float]],
|
| 110 |
+
span: float, n_beams: int,
|
| 111 |
+
col_placements: Optional[List[float]] = None,
|
| 112 |
+
skip_col: Optional[List[int]] = None, beam_partition: int = 1,
|
| 113 |
+
truss_mode: str = "simple", cantilever_sides: int = 2,
|
| 114 |
+
add_extra_pt: bool = False) -> np.ndarray:
|
| 115 |
+
"""
|
| 116 |
+
Generate nodal coordinates for a simple beam.
|
| 117 |
+
|
| 118 |
+
Args:
|
| 119 |
+
n_nod_tot (int): Total number of nodes.
|
| 120 |
+
n_dim (int): Number of dimensions.
|
| 121 |
+
spacing (float or List[float]): Spacing between nodes or list of node positions.
|
| 122 |
+
span (float): Total length of the beam.
|
| 123 |
+
col_placements (List[float], optional): Custom positions of the columns (supports).
|
| 124 |
+
skip_col (List[int], optional): Indices of columns to skip.
|
| 125 |
+
beam_partition (int): Number of divisions in the beam.
|
| 126 |
+
truss_mode (str): The type of beam ("simple", "simple_cant").
|
| 127 |
+
cantilever_sides (int): Number of cantilever sides. It will default to left side then right.
|
| 128 |
+
|
| 129 |
+
Returns:
|
| 130 |
+
np.ndarray: The nodal coordinates.
|
| 131 |
+
"""
|
| 132 |
+
if cantilever_sides > 2:
|
| 133 |
+
print("Cantilever sides cannot be more than 2. Defaulting to 2.")
|
| 134 |
+
cantilever_sides = 2
|
| 135 |
+
|
| 136 |
+
if col_placements:
|
| 137 |
+
node_positions = col_placements
|
| 138 |
+
else:
|
| 139 |
+
if isinstance(spacing, list):
|
| 140 |
+
# Generate node positions from spacing list
|
| 141 |
+
node_positions = [0]
|
| 142 |
+
for s in spacing:
|
| 143 |
+
node_positions.append(node_positions[-1] + s)
|
| 144 |
+
else:
|
| 145 |
+
# Evenly spaced nodes
|
| 146 |
+
if truss_mode != "simple":
|
| 147 |
+
# extra_span = (n_beams - cantilever_sides * beam_partition) * spacing
|
| 148 |
+
# extra_nodes = (cantilever_sides * beam_partition) - (cantilever_sides - 1)
|
| 149 |
+
if not add_extra_pt:
|
| 150 |
+
node_positions = [i * (span + span * cantilever_sides) / n_beams for i in range(n_nod_tot)]
|
| 151 |
+
else:
|
| 152 |
+
node_positions = [i * (span + span * cantilever_sides) / n_beams for i in range(n_nod_tot-1)]
|
| 153 |
+
node_positions = node_positions[:n_nod_tot//2] + [span + span * cantilever_sides] + node_positions[n_nod_tot//2:]
|
| 154 |
+
|
| 155 |
+
else:
|
| 156 |
+
if not add_extra_pt:
|
| 157 |
+
node_positions = [i * span / n_beams for i in range(n_nod_tot)]
|
| 158 |
+
else:
|
| 159 |
+
node_positions = [i * span / n_beams for i in range(n_nod_tot-1)]
|
| 160 |
+
node_positions = node_positions[:n_nod_tot//2] + [span] + node_positions[n_nod_tot//2:]
|
| 161 |
+
|
| 162 |
+
# Apply skip_col if provided
|
| 163 |
+
if skip_col:
|
| 164 |
+
node_positions = [pos for idx, pos in enumerate(node_positions) if idx not in skip_col]
|
| 165 |
+
|
| 166 |
+
nodal_coord = np.zeros((n_nod_tot, n_dim))
|
| 167 |
+
|
| 168 |
+
logging.debug("In the nodal_coord_simple function we have: n_nod_tot: %d, \n\
|
| 169 |
+
node_positions: %s, n_beams: %d", n_nod_tot, node_positions, n_beams)
|
| 170 |
+
for i in range(n_nod_tot):
|
| 171 |
+
nodal_coord[i, 0] = node_positions[i]
|
| 172 |
+
nodal_coord[i, 1] = 0
|
| 173 |
+
|
| 174 |
+
return nodal_coord
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def pel_ele_simple(par: np.ndarray, n_beams: int, n_par_nod: int) -> np.ndarray:
    """
    Generate the parameter-element numbering relation for a simple beam with
    2 nodes per element: element i owns the parameters of nodes i and i+1.

    Args:
        par (np.ndarray): Parameter matrix, one row of DOF numbers per node.
        n_beams (int): Number of beams (elements).
        n_par_nod (int): Number of parameters per node.

    Returns:
        np.ndarray: The parameter-element numbering relation,
        shape (n_beams, 2 * n_par_nod).
    """
    # Stack each node's DOF row next to its successor's row.
    return np.hstack((par[:n_beams], par[1:n_beams + 1])).astype(int)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def fill_ele_nod_simple(n_beams: int, n_par_nod: int, pel: np.ndarray) -> np.ndarray:
    """
    Generate the element-node relationships for a simple beam by recovering
    the 0-based node index from each element's first and second DOF groups.

    Args:
        n_beams (int): Number of beams (elements).
        n_par_nod (int): Number of parameters per node.
        pel (np.ndarray): Parameter-element numbering relation (1-based DOFs).

    Returns:
        np.ndarray: The element-node relationships, shape (n_beams, 2).
    """
    ele_nod = np.zeros((n_beams, 2), dtype=int)
    logging.debug("In the fill_ele_nod_simple function we have: n_beams: %d, \n\
                  ele_nod: %s, pel: %s", n_beams, ele_nod, pel)
    # Node index = (first DOF of that node - 1) // DOFs-per-node.
    ele_nod[:, 0] = (pel[:n_beams, 0] - 1) // n_par_nod
    ele_nod[:, 1] = (pel[:n_beams, n_par_nod] - 1) // n_par_nod

    return ele_nod
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def boundary_conditions_simple(spacing: int, n_par_nod: int, n_nod_tot: int, n_columns: int, col_placements: List,
                               cantilever_sides: int, beam_partition: int, truss_mode: str = "simple",
                               default_support: str = "roller", supports: Optional[List] = None) -> np.ndarray:
    """
    Generate boundary conditions for a simple beam or cantilever.
    If custom column placements are provided, it uses those; otherwise,
    it defaults to evenly spaced supports. The function ensures that there are enough supports
    by defaulting to the specified default support type if necessary.

    Args:
        spacing (int): Distance between nodes.
        n_par_nod (int): Number of parameters per node (e.g., 3 for x, y, rotation).
        n_nod_tot (int): Total number of nodes.
        n_columns (int): Number of columns (supports).
        col_placements (List): Custom positions of the columns (supports).
        cantilever_sides (int): Number of cantilevered ends.
        beam_partition (int): Number of divisions in the beam.
        truss_mode (str, optional): The type of beam ("simple", "simple_cant"). Defaults to "simple".
        default_support (str, optional): Default support type if not enough supports are provided. Defaults to "roller".
        supports (List, optional): List of support types (e.g., ["pin", "roller"]).
            Defaults to ["pin", "roller"] (None sentinel avoids a mutable default).

    Returns:
        np.ndarray: Flat 0/1 array of length n_nod_tot * n_par_nod; 1 marks a restrained DOF.
    """
    # FIX: use a None sentinel instead of a mutable default argument.
    if supports is None:
        supports = ["pin", "roller"]

    # Map a support name to the per-node restrained-DOF pattern.
    def support_dof(support, n_par_nod):
        temp = np.zeros(n_par_nod, dtype=int)
        if support == "roller":
            temp[1] = 1          # vertical only
        elif support == "pin":
            temp[:2] = 1         # horizontal + vertical
        else:
            temp[:] = 1          # anything else: fully fixed
        return temp

    # TODO: Implement
    adj_nod = 0      # Adjust the starting node (skip cantilevered nodes)
    adj_nod_end = 0  # Adjust the ending node
    adj_mode = 1 if truss_mode == "simple" else 0
    if truss_mode != "simple":
        adj_nod = beam_partition
        if cantilever_sides >= 2:
            adj_nod_end = beam_partition - 1

    if col_placements:
        if len(supports) < len(col_placements):
            print("Not enough supports provided. Defaulting to pin and roller.")
            supports = [supports[0]]
            # BUG FIX: extend with a list of support names — the previous
            # code extended with the *characters* of the default_support
            # string ("roller" -> 'r','o','l',...), which support_dof then
            # treated as unknown names and fully clamped those nodes.
            supports.extend([default_support] * (len(col_placements) - 1))
    elif len(supports) < n_columns:
        print(f"Not enough supports provided for boundary conditions. \
              Defaulting to pin then {default_support} for {n_columns - 1} columns.")
        supports = [supports[0]]
        supports.extend([default_support] * (n_columns - 1))

    temp = np.zeros((n_nod_tot * n_par_nod), dtype=int)
    counter = 0
    # Counter for incrementing supports
    # Range starts from the first non-cantilever node till the last non-cantilever node
    # Movement is based on the original beam subtracted by the number of columns
    try:
        for i in range(adj_nod, n_nod_tot - adj_nod_end, max(n_nod_tot - adj_nod - adj_nod_end - len(supports) + adj_mode, 1)):
            temp[i * n_par_nod:(i + 1) * n_par_nod] = support_dof(supports[counter], n_par_nod)
            counter += 1
    except IndexError:
        logging.error(f"Check the last non-cantilever node or support indexes \n\
            adj_nod: {adj_nod}, n_nod_tot: {n_nod_tot}, adj_nod_end: {adj_nod_end}, n_columns: {n_columns} supports: {supports}")

    return temp
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def insert_simple_node(node_pos: float, x_pos: np.ndarray, n_par_nod: int,
                       length_arr: np.ndarray, same_property_list: List[np.ndarray],
                       node_list: List[np.ndarray], W: np.ndarray, n_ele_tot: int,
                       n_par_tot: int, n_nod_tot: int) -> Tuple:
    """
    Insert a new node into the existing structure.

    The node is placed at ``node_pos`` along the beam axis. Node positions,
    element lengths, per-element property arrays and node/parameter relation
    arrays are all updated to account for the extra node and element.

    Args:
        node_pos: x-coordinate of the node to insert.
        x_pos: Sorted 1-D array of existing node x-coordinates.
        n_par_nod: Number of parameters (DoFs) per node.
        length_arr: Element lengths between consecutive nodes.
        same_property_list: Arrays with one row per element whose new row
            should copy the row just before the insertion point (the new
            element inherits its left neighbour's properties).
        node_list: Arrays of node/parameter index relations that grow by one
            freshly-numbered row at the end.
            NOTE(review): node_list[0] appears to be the pel
            (parameter-element) array — confirm against callers.
        W: Boundary-condition flag array with n_par_nod entries per node.
        n_ele_tot: Current total number of elements.
        n_par_tot: Current total number of parameters.
        n_nod_tot: Current total number of nodes.

    Returns:
        Tuple of the updated x_pos, length_arr, unpacked same_property_list
        entries, unpacked node_list entries, W, n_ele_tot, n_par_tot and
        n_nod_tot.
    """
    # No-op when a node already sits exactly at the requested position.
    if node_pos in x_pos:
        print("Attempted node insertion. Node already exists")
        return (x_pos, length_arr, *same_property_list, *node_list, W, n_ele_tot, n_par_tot, n_nod_tot)

    # One extra node splits one element into two: +1 element, +1 node.
    n_ele_tot += 1
    n_nod_tot += 1
    n_par_tot += n_par_nod
    # Index that keeps x_pos sorted after insertion.
    node_idx = np.searchsorted(x_pos, node_pos)
    x_pos = np.insert(x_pos, node_idx, node_pos)
    # The new node starts with all-zero (free) boundary-condition flags.
    W = np.insert(W, node_idx * n_par_nod, np.zeros(n_par_nod, dtype=np.int32), axis=0)

    # Split the old element length around the new node: insert the right-hand
    # part, then overwrite the left neighbour with the left-hand part.
    # NOTE(review): assumes the new node is strictly interior
    # (0 < node_idx < len(x_pos) - 1); inserting at either end would index
    # out of range or wrap around — confirm callers guarantee this.
    length_arr = np.insert(length_arr, node_idx, x_pos[node_idx+1] - node_pos)
    length_arr[node_idx - 1] = node_pos - x_pos[node_idx - 1]

    def insert_node(node_idx, node_arr, n_par_nod, increment: int = 0):
        # increment == 0: duplicate the row before the insertion point in place.
        # increment != 0: append a row continuing the index sequence of the
        # last row (each entry advanced by its column offset).
        if not increment:
            node_arr = np.insert(node_arr, node_idx, node_arr[node_idx - 1], axis=0)
        else:
            node_arr = np.append(node_arr, [node_arr[-1] + np.arange(1, len(node_arr[-1]) + 1)], axis=0)
        return node_arr

    # Property arrays: the new element inherits its left neighbour's row.
    for i in range(len(same_property_list)):
        same_property_list[i] = insert_node(node_idx, same_property_list[i], n_par_nod)

    # Relation arrays: extend with a freshly-numbered row at the end.
    for i in range(len(node_list)):
        node_list[i] = insert_node(node_idx, node_list[i], n_par_nod, increment=1)

    # Fix pel
    # Rebuild the appended row from the second-to-last row's right half,
    # shifted by one node's worth of parameters, so the new element connects
    # consecutive nodes.
    last_pel = node_list[0][-2][n_par_nod:]
    new_pel = last_pel + n_par_nod
    node_list[0][-1] = np.concatenate((last_pel, new_pel))

    return (x_pos, length_arr, *same_property_list, *node_list, W, n_ele_tot, n_par_tot, n_nod_tot)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
# Run some test cases if its the main module
if __name__ == "__main__":
    # Test case 1 with beam partition
    test_case = 1
    span = 25
    spacing = 12.5
    n_dim = 2
    n_par_nod = 3
    truss_mode = "simple"
    beam_partition = 2
    col_placements = None
    skip_col = None
    cantilever_sides = 2

    n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_simple_essential_elements(
        span, spacing, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print(
        f"There are {n_columns} columns, {n_nod_tot} total nodes, {n_beams} beams and {n_ele_tot} total elements (excluding columns)"
    )

    nodal_coord, par, pel, ele_nod, n_par_tot = calculate_simple_element_node(
        span, spacing, n_dim, n_par_nod, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print("Nodal Coordinates:\n", nodal_coord)
    print("Parameter Matrix:\n", par)
    print("Parameter-Element Relationship:\n", pel)
    print("Element-Node Relationship:\n", ele_nod)
    print("Total Parameters:", n_par_tot)

    print(f"Test {test_case} test done successfully\n\n")

    # Test case 2 without beam partition
    test_case = 2
    span = 25
    spacing = 25
    n_dim = 2
    n_par_nod = 2
    truss_mode = "simple"
    beam_partition = 1
    col_placements = None
    skip_col = None

    n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_simple_essential_elements(
        span, spacing, truss_mode, beam_partition, col_placements, skip_col)
    print(
        f"There are {n_columns} columns, {n_nod_tot} total nodes, {n_beams} beams and {n_ele_tot} total elements (excluding columns)"
    )

    # NOTE(review): cantilever_sides still holds 2 from test case 1 here even
    # though truss_mode is "simple" — confirm this is intentional.
    nodal_coord, par, pel, ele_nod, n_par_tot = calculate_simple_element_node(
        span, spacing, n_dim, n_par_nod, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print("Nodal Coordinates:\n", nodal_coord)
    print("Parameter Matrix:\n", par)
    print("Parameter-Element Relationship:\n", pel)
    print("Element-Node Relationship:\n", ele_nod)
    print("Total Parameters:", n_par_tot)

    print(f"Test {test_case} test done successfully\n\n")


    # Test case 3 without beam partition simple cant
    test_case = 3
    span = 25
    spacing = 25
    n_dim = 2
    n_par_nod = 2
    truss_mode = "simple_cant"
    beam_partition = 1
    col_placements = None
    skip_col = None
    cantilever_sides = 2

    n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_simple_essential_elements(
        span, spacing, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print(
        f"There are {n_columns} columns, {n_nod_tot} total nodes, {n_beams} beams and {n_ele_tot} total elements (excluding columns)"
    )

    nodal_coord, par, pel, ele_nod, n_par_tot = calculate_simple_element_node(
        span, spacing, n_dim, n_par_nod, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print("Nodal Coordinates:\n", nodal_coord)
    print("Parameter Matrix:\n", par)
    print("Parameter-Element Relationship:\n", pel)
    print("Element-Node Relationship:\n", ele_nod)
    print("Total Parameters:", n_par_tot)

    print(f"Test {test_case} test done successfully\n\n")


    # Test case 4 with beam partition simple cant
    test_case = 4  # Fix: was mislabelled as 3, so the success message lied.
    span = 25
    spacing = 25
    n_dim = 2
    n_par_nod = 2
    truss_mode = "simple_cant"
    beam_partition = 2
    col_placements = None
    skip_col = None
    cantilever_sides = 1

    n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = calculate_simple_essential_elements(
        span, spacing, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print(
        f"There are {n_columns} columns, {n_nod_tot} total nodes, {n_beams} beams and {n_ele_tot} total elements (excluding columns)"
    )

    nodal_coord, par, pel, ele_nod, n_par_tot = calculate_simple_element_node(
        span, spacing, n_dim, n_par_nod, truss_mode, beam_partition, col_placements, skip_col, cantilever_sides)
    print("Nodal Coordinates:\n", nodal_coord)
    print("Parameter Matrix:\n", par)
    print("Parameter-Element Relationship:\n", pel)
    print("Element-Node Relationship:\n", ele_nod)
    print("Total Parameters:", n_par_tot)

    print(f"Test {test_case} test done successfully\n\n")
utils/truss_constraints.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# General parameters
span = 50            # Total span in m
angle = 0            # Diagonal angle in degrees
n_dim = 2            # Number of dimensions
n_nod_ele = 2        # Number of nodes per element
n_par_nod = 3        # Number of parameters per node
n_par_ele = n_par_nod * n_nod_ele  # Number of parameters per element

# Plot settings
n_discritizations = 10  # Number of points for plotting
n_plots = 4             # Number of plots for bridge's truss

# Cross sections in m: columns 40x40, beams 30x35, rods 10x10 (cm)
width_beam = 0.3
height_beam = 0.35

width_column = 0.4
height_column = 0.4

width_rod = 0.1
height_rod = 0.1

# Horizontal load on columns (kN) and its angle (degrees)
po = 100
theta = 0

# Unit weight and elastic modulus in kN/m^3 and kN/m^2
unit_weight_steel = 78.5
elastic_mod = 21 * 10**7
elastic_mod_rod = 21 * 10**7

# Shear modulus in kN/m^2 and shear correction factor
shear_mod = 8 * 10**6
k_shear = 0.9



################## HASHMAPS ##################

# Per-member-type property lookups for easy access.
width_properties = {
    'beam': width_beam,
    'column': width_column,
    'rod': width_rod,
}

height_properties = {
    'beam': height_beam,
    'column': height_column,
    'rod': height_rod,
}

unit_weight_properties = {member: unit_weight_steel for member in ('beam', 'column', 'rod')}

elastic_mod_properties = {
    'beam': elastic_mod,
    'column': elastic_mod,
    'rod': elastic_mod_rod,
}
|
utils/truss_element_assembly.py
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
from typing import List, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def nodal_coords(n_nod_tot: int, n_dim: int, n_columns: int, spacing: float,
                 height: float, n_bot_beams: int, truss_mode: str) -> np.ndarray:
    """
    Compute the coordinates of every node in the assembly.

    Args:
        n_nod_tot (int): Total number of nodes.
        n_dim (int): Number of dimensions.
        n_columns (int): Number of columns.
        spacing (float): Spacing between columns (length of beams).
        height (float): Height of the columns.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss ("warren", "simple",
            "simple_cant", or other — currently pratt and howe as other).

    Returns:
        np.ndarray: (n_nod_tot, n_dim) array of nodal coordinates.
    """
    coords = np.zeros((n_nod_tot, n_dim), dtype=float)

    if truss_mode in ("simple", "simple_cant"):
        # Simple beam: all nodes lie on one line at y = 0.
        for idx in range(n_nod_tot):
            coords[idx] = (idx * spacing, 0)
    elif truss_mode != "warren":
        # Pratt / howe: bottom chord plus top nodes above interior columns.
        for idx in range(n_columns + 2):
            coords[idx] = (idx * spacing, 0)
            if 0 < idx < n_columns + 1:
                coords[idx + n_columns + 1] = (idx * spacing, height)
    else:
        # Warren: top nodes offset half a panel from the bottom chord.
        for idx in range(n_nod_tot - n_bot_beams):
            coords[idx] = (idx * spacing, 0)
            if idx > 0:
                coords[idx + n_bot_beams] = (idx * spacing - spacing / 2, height)

    return coords
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def pel_ele(par: np.ndarray, n_columns: int, n_beams: int, n_rods: int,
            n_par_nod: int, n_nod_tot: int, n_ele_tot: int, n_bot_beams: int,
            truss_mode: str, skip_rod: Optional[List[int]] = None) -> np.ndarray:
    """
    Calculate the parameter-element numbering relation.
    Built from Beam -> Column -> Rods (considering skipped rods).

    Args:
        par (np.ndarray): Parameters for the elements.
        n_columns (int): Number of columns.
        n_beams (int): Number of beams.
        n_rods (int): Number of rods.
        n_par_nod (int): Number of parameters per node.
        n_nod_tot (int): Total number of nodes.
        n_ele_tot (int): Total number of elements.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss ("warren", "pratt", etc.).
        skip_rod (List[int], optional): Rods to skip, 0-indexed. Defaults to
            no skipped rods. (Fixed: previously a mutable default argument.)

    Returns:
        np.ndarray: The parameter-element numbering relation.
    """
    if skip_rod is None:
        skip_rod = []

    if "simple" in truss_mode:
        # Simple beams have no columns or rods: the relation is just the beams.
        pel = beam_pars(par, n_beams, n_par_nod, n_nod_tot, n_bot_beams, truss_mode)
    else:
        pel = np.zeros((n_ele_tot - len(skip_rod), 2 * n_par_nod), dtype=int)

        # Calculate the element parameter relations: beams, then columns
        # (non-warren only), then rods.
        pel[:n_beams] = beam_pars(par, n_beams, n_par_nod, n_nod_tot, n_bot_beams, truss_mode)
        if truss_mode != "warren":
            pel[n_beams:n_beams + n_columns] = column_pars(par, n_columns, n_par_nod)
        pel[n_beams + n_columns:] = rod_pars(par, n_rods, n_par_nod, n_nod_tot, n_bot_beams, truss_mode, skip_rod)

    return pel
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def beam_pars(par: np.ndarray, n_beams: int, n_par_nod: int,
              n_nod_tot: int, n_bot_beams: int,
              truss_mode: str) -> np.ndarray:
    """
    Calculate the relevant beam DoFs.

    Args:
        par (np.ndarray): Parameters for the elements.
        n_beams (int): Number of beams.
        n_par_nod (int): Number of parameters per node.
        n_nod_tot (int): Total number of nodes.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss.

    Returns:
        np.ndarray: (n_beams, 2 * n_par_nod) array of beam DoFs.
    """
    beams = np.zeros((n_beams, 2 * n_par_nod))

    if "simple" in truss_mode:
        # Each beam connects node i to node i + 1.
        for beam_idx in range(n_beams):
            beams[beam_idx, :n_par_nod] = par[beam_idx]
            beams[beam_idx, n_par_nod:] = par[beam_idx + 1]
        return beams

    # Bottom chord: consecutive bottom nodes.
    bot = n_bot_beams
    beams[:bot, :n_par_nod] = par[:bot]
    beams[:bot, n_par_nod:] = par[1:bot + 1]

    # Top chord: consecutive top nodes.
    beams[bot:, :n_par_nod] = par[bot + 1:n_nod_tot - 1]
    beams[bot:, n_par_nod:] = par[bot + 2:]

    return beams
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def column_pars(par: np.ndarray, n_columns: int, n_par_nod: int) -> np.ndarray:
    """
    Calculate the relevant column DoFs.

    Args:
        par (np.ndarray): Parameters for the elements.
        n_columns (int): Number of columns.
        n_par_nod (int): Number of parameters per node.

    Returns:
        np.ndarray: (n_columns, 2 * n_par_nod) array of column DoFs.
    """
    columns = np.zeros((n_columns, 2 * n_par_nod))
    for col in range(n_columns):
        bottom = col + 1                 # bottom-chord node of this column
        top = bottom + n_columns + 1     # matching top-chord node
        columns[col, :n_par_nod] = par[bottom]
        columns[col, n_par_nod:] = par[top]

    return columns
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def rod_pars(par: np.ndarray, n_rods: int, n_par_nod: int, n_nod_tot: int,
             n_bot_beams: int, truss_mode: str, skip_rod: Optional[List[int]] = None) -> np.ndarray:
    """
    Calculate the relevant rod DoFs.

    Args:
        par (np.ndarray): Parameters for the elements.
        n_rods (int): Number of rods.
        n_par_nod (int): Number of parameters per node.
        n_nod_tot (int): Total number of nodes.
        n_bot_beams (int): Number of bottom beams.
        truss_mode (str): Mode of the truss ("warren" or other).
        skip_rod (List[int], optional): The rods to skip, from left to right.
            Defaults to no skipped rods. (Fixed: previously a mutable default
            argument.)

    Returns:
        np.ndarray: (n_rods - len(skip_rod), 2 * n_par_nod) array of rod DoFs.
    """
    if skip_rod is None:
        skip_rod = []

    rods = np.zeros((n_rods - len(skip_rod), 2 * n_par_nod))

    count = 0   # Running rod index, including skipped rods
    skips = 0   # How many rods have been skipped so far

    # Two rods per top node; each connects a bottom node to a top node.
    for i in range(n_nod_tot - n_bot_beams - 1):
        for j in range(2):
            if count not in skip_rod:
                # Top end: the top node above this panel.
                rods[count - skips][n_par_nod:] = par[i + n_bot_beams + 1]
                # Bottom end: non-warren trusses connect the second rod one
                # extra node to the right.
                if j > 0 and truss_mode != "warren":
                    rods[count - skips][:n_par_nod] = par[2 * j + i]
                else:
                    rods[count - skips][:n_par_nod] = par[j + i]
            else:
                skips += 1
            count += 1

    return rods
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def fill_ele_nod(n_ele_tot: int, n_par_nod: int, pel: np.ndarray,
                 skip_rod: Optional[List[int]] = None) -> np.ndarray:
    """
    Fill the element-nodal matrix.

    Args:
        n_ele_tot (int): Total number of elements.
        n_par_nod (int): Number of parameters per node.
        pel (np.ndarray): Parameter-element numbering relation.
        skip_rod (List[int], optional): Rods to skip. Defaults to no skipped
            rods. (Fixed: previously a mutable default argument.)

    Returns:
        np.ndarray: (n_ele_tot - len(skip_rod), 2) element-nodal matrix.
    """
    if skip_rod is None:
        skip_rod = []

    ele_nod = np.zeros((n_ele_tot - len(skip_rod), 2), dtype=int)

    # The first parameter of each node is node_index * n_par_nod, so integer
    # division recovers the node index at each end of the element.
    ele_nod[:, 0] = pel[:, 0] // n_par_nod
    ele_nod[:, 1] = pel[:, n_par_nod] // n_par_nod

    return ele_nod
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# If main is called, run the test
if __name__ == "__main__":
    n_par_nod = 3
    par = np.arange(1, 7 * n_par_nod + 1).reshape(7, n_par_nod)
    n_beams = 5
    truss_mode = "warren"
    n_bot_beams = 3
    n_nod_tot = 7
    n_rods = 6
    n_dim = 3
    n_columns = 2
    skip_rod = []  # Fix: was previously undefined, causing a NameError below.

    if truss_mode == "warren":
        n_columns = 0
    n_ele_tot = n_beams + n_columns + n_rods

    print(par)
    pel = pel_ele(par, n_columns, n_beams, n_rods, n_par_nod, n_nod_tot, n_ele_tot, n_bot_beams, truss_mode, skip_rod)

    print(pel)
|
utils/truss_geometric.py
ADDED
|
@@ -0,0 +1,496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import numpy as np
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from typing import Dict, Tuple, List, Optional
|
| 5 |
+
from utils.truss_element_assembly import nodal_coords, pel_ele, fill_ele_nod
|
| 6 |
+
|
| 7 |
+
# Allowed range for the diagonal angle (radians).
MIN_ANGLE = np.pi / 6
MAX_ANGLE = np.pi / 3

# Get the logger
logger = logging.getLogger(__name__)


################## Geometric Functions ##################
def calculate_max_height(span: float, angle: float, spacing: float = 0,
                         tol_round: Optional[List] = None) -> Tuple[float, float]:
    """
    Calculate the maximum height and spacing for the given span and angle.
    If spacing is given, no span/spacing ratio is respected.

    Args:
        span: The total length of the bridge.
        angle: The angle of the diagonal rods in radians.
        spacing: Pre-defined spacing between columns. If 0, it will be calculated.
        tol_round: Decimal places to round to — first value for the height,
            second for the spacing. More angles are found with higher values.

    Returns:
        A tuple containing the height and spacing if valid, otherwise (-1, 0).
    """
    if tol_round is None:
        tol_round = [3, 1]

    if not MIN_ANGLE <= angle <= MAX_ANGLE:
        # Use the module logger (was the root logger) for consistency.
        logger.warning("The angle should be between %d and %d. "
                       "Defaulting to 45 degrees.", np.degrees(MIN_ANGLE), np.degrees(MAX_ANGLE))
        angle = np.radians(45)

    if spacing:
        height = round(spacing * np.tan(angle), tol_round[0])
        return height, spacing

    for i in range(15, 21):
        height = round(span / i, tol_round[0])
        spacing = round(height / np.tan(angle), tol_round[1])
        # Fix: for short (e.g. normalized) spans the rounded spacing can
        # collapse to 0.0, which previously raised ZeroDivisionError in
        # `span % spacing`. Skip such candidates.
        if spacing == 0:
            continue
        if span % spacing == 0:
            return height, spacing

    return -1, 0
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def try_angles(span: float, base_angle: int, lower_limit: float = 30, upper_limit: float = 60,
               tol_round: Optional[List] = None) -> Tuple[float, float, float]:
    """
    Try to find a valid height and spacing by adjusting the angle.

    Args:
        span: The total length of the bridge.
        base_angle: The base angle of the diagonal rods in degrees.
        lower_limit: Lowest angle (degrees) to consider.
        upper_limit: Highest angle (degrees) to consider.
        tol_round: Rounding tolerances forwarded to calculate_max_height.

    Returns:
        A tuple with the height, spacing, and adjusted angle in degrees if
        found, otherwise (-1, -1, -1).
    """
    if tol_round is None:
        tol_round = [3, 1]

    # The widest distance from the base angle to either limit bounds the search.
    max_shift = int(max(abs(base_angle - lower_limit), abs(base_angle - upper_limit)))

    for offset in range(max_shift):
        for direction in (1, -1):
            candidate = base_angle + direction * offset
            height, spacing = calculate_max_height(span, np.radians(candidate), tol_round=tol_round)

            # A non-zero spacing means a suitable height was found.
            if spacing:
                return height, spacing, candidate
    return -1, -1, -1
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def calculate_bridge(span: float, angle: int = 45, n_div: int = None, spacing: float = 0,
                     truss_mode: str = "pratt", lower_limit: int = 30,
                     upper_limit: int = 60, tol_round: Optional[List] = None) -> Tuple[float, float, float]:
    """
    Calculate the height of the bridge, spacing of columns, and diagonal length of rods.
    If n_div is provided, spacing is set as span / n_div and height is adjusted accordingly (priority: n_div > spacing > angle search).
    Span can be normalized to 1 for dataset generation.

    Args:
        span (float): The total length of the bridge (e.g., 1 for normalized).
        angle (float): The angle of the diagonal rods in degrees. Default is 45 degrees.
        n_div (int, optional): Number of divisions (panels/segments). If provided, sets spacing = span / n_div.
        spacing (float, optional): Pre-defined spacing. Used if n_div is None.
        truss_mode (str): The mode of the truss bridge (warren, pratt, howe). Defaults to pratt.
        lower_limit (int): Lowest angle (degrees) tried by the angle search.
        upper_limit (int): Highest angle (degrees) tried by the angle search.
        tol_round (List, optional): Rounding tolerances forwarded to the helpers.

    Returns:
        (float, float, float): The height of the bridge, the distance between columns,
                               and the length of the diagonal elements (rods).

    Raises:
        ValueError: If n_div is given but not positive.
        RuntimeError: If no suitable height could be found.
    """
    space_divisor = 1
    if truss_mode == "warren":
        space_divisor = 2  # Since nodes are in the middle of the beams

    # Fix: used_angle was only bound in the angle-search branch, so the
    # comparison below raised UnboundLocalError on the n_div/spacing paths.
    used_angle = angle

    if n_div is not None:
        if n_div <= 0:
            raise ValueError("n_div must be a positive integer.")
        spacing = span / n_div
        angle_rad = np.radians(angle)
        height = (spacing / space_divisor) * np.tan(angle_rad)  # Consistent angle across modes
    elif spacing:
        angle_rad = np.radians(angle)
        height, spacing = calculate_max_height(span, angle_rad, spacing, tol_round=tol_round)
    else:
        logging.info("Trying to find a suitable height and spacing based on the angle.")
        height, spacing, used_angle = try_angles(span, angle, lower_limit,
                                                upper_limit, tol_round=tol_round)
        used_angle = round(used_angle, 2)

    if height == -1:
        raise RuntimeError("A suitable height for the bridge could not be found. Please adjust the span")

    if angle != used_angle:
        logging.warning("Adjusted angle to %.2f degrees to find a solution.", used_angle)

    diag = np.sqrt((spacing / space_divisor)**2 + height**2)
    return height, spacing, diag
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def calculate_essential_elements(span: float, spacing: float, truss_mode: str = "pratt",
                                 skip_rod: Optional[List] = None) -> Tuple[int, int, int, int, int, int]:
    """
    Calculate the number of columns, nodes, rods, beams and total elements.

    Args:
        span (float): The total length of the bridge.
        spacing (float): The distance between columns.
        truss_mode (str): The mode of the truss bridge. Defaults to pratt.
        skip_rod (List, optional): Rods to leave out of the counts.

    Returns:
        (int, int, int, int, int, int): n_columns, n_nod_tot, n_rods, n_beams,
        n_ele_tot, n_bot_beams.
    """
    skip_rod = skip_rod if skip_rod is not None else []

    # Number of panels along the span; round() guards against float error.
    ratio = int(round(span / spacing))

    if truss_mode == "warren":
        n_columns = 0
        n_beams = int(ratio * 2 - 1)
        n_bot_beams = int(np.ceil(n_beams / 2))
        n_rods = n_beams + 1
    else:
        n_columns = ratio - 1
        n_beams = int(ratio * 2 - 2)
        n_bot_beams = int(n_beams // 2 + 1)
        n_rods = n_beams

    n_rods -= len(skip_rod)
    n_nod_tot = n_beams + 2
    n_ele_tot = n_columns + n_rods + n_beams

    return n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def calculate_simple_elements(span: float, spacing: float, truss_mode: str, col_placements: Optional[List] = None,
                              skip_col: Optional[List] = None, beam_partition: int = 1) -> Tuple[int, int, int, int, int, int]:
    """
    Calculate the number of columns and beams for a simple bridge, along with their spacing.

    Args:
        span (float): The total length of the bridge.
        spacing (float): The distance between nodes (columns) in the bridge.
        truss_mode (str): The mode of the truss bridge (simple, simple_cant).
        col_placements (List, optional): Explicit column positions; overrides
            the spacing-based column count.
        skip_col (List, optional): Columns to leave out of the count.
        beam_partition (int): Sub-beams per span segment; must divide spacing.

    Returns:
        (int, int, int, int, int, int): The number of columns, nodes, rods
        (always 0 for simple bridges), beams, total elements, and bottom
        beams (always 0 for simple bridges).

    Raises:
        ValueError: If beam partitioning is combined with custom column
            placements, or the partition does not evenly divide the spacing.
    """
    n_columns = len(col_placements) if col_placements else int(span // spacing) + 1
    if skip_col:
        n_columns -= len(skip_col)

    # Cantilevered bridges carry one extra beam.
    extra_beams = 1 if truss_mode != "simple" else 0

    if beam_partition > 1:
        if col_placements:
            raise ValueError("Cannot partition beams with custom column placements.")
        if spacing // beam_partition < spacing / beam_partition:
            raise ValueError("Beam partition should be a divisor of the spacing.")
        n_beams = (n_columns - 1 + extra_beams) * beam_partition
    else:
        print("Defaulting partition to 1.")
        n_beams = n_columns - 1 + extra_beams

    n_nod_tot = n_beams + 2
    n_ele_tot = n_columns + n_beams

    return n_columns, n_nod_tot, 0, n_beams, n_ele_tot, 0
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def calculate_element_node(span: float, spacing: float, height: float, n_dim: int,
                           n_par_nod: int, truss_mode: str = "pratt",
                           skip_rod: Optional[List] = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]:
    """
    Build the nodal coordinates, node-parameter map and element connectivity
    for a truss bridge.

    Args:
        span: Total length of the bridge.
        spacing: Distance between nodes (columns).
        height: Height of the bridge.
        n_dim: Number of spatial dimensions (usually 2).
        n_par_nod: Number of parameters per node (usually 2: x and y).
        truss_mode: Truss layout name (default "pratt").
        skip_rod: Optional list of rod indices to omit.

    Returns:
        Tuple ``(nodal_coord, par, pel, ele_nod, n_par_tot)`` where
        ``nodal_coord`` holds the node coordinates, ``par`` the 1-based
        node-to-parameter numbering, ``pel`` the element-to-parameter map,
        ``ele_nod`` the element-to-node map, and ``n_par_tot`` the total
        parameter count.
    """
    skip_rod = [] if skip_rod is None else skip_rod

    # Element/node counts for the requested truss layout.
    counts = calculate_essential_elements(span, spacing, truss_mode)
    n_columns, n_nod_tot, n_rods, n_beams, n_ele_tot, n_bot_beams = counts
    n_par_tot = n_nod_tot * n_par_nod

    # Node positions for this layout.
    nodal_coord = nodal_coords(n_nod_tot, n_dim, n_columns, spacing, height,
                               n_bot_beams, truss_mode)

    # 1-based node-to-parameter numbering, one row per node.
    par = np.arange(1, n_par_tot + 1).reshape(n_nod_tot, n_par_nod)

    # Element-to-parameter and element-to-node relationships.
    pel = pel_ele(par, n_columns, n_beams, n_rods, n_par_nod, n_nod_tot,
                  n_ele_tot, n_bot_beams, truss_mode, skip_rod)
    ele_nod = fill_ele_nod(n_ele_tot, n_par_nod, pel, skip_rod)

    return nodal_coord, par, pel, ele_nod, n_par_tot
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def calculate_element_properties(n_ele_tot: int, n_columns: int, n_beams: int, diag: float, spacing: float,
                                 height: float, J: np.array, A: np.array, h: np.array, beta: np.array,
                                 ro: np.array, E: np.array, X: np.array, Y: np.array, ele_nod: List,
                                 shear_mod: int, width_properties: Dict, height_properties: Dict,
                                 unit_weight_properties: Dict, elastic_mod_properties: Dict,
                                 truss_mode: str, beam_partition: int = 1) -> Tuple[np.array, np.array, np.array, np.array, np.array, np.array, np.array]:
    """
    Calculate the properties of the elements in the truss bridge.

    Fills the pre-allocated property arrays in place, ordered as
    beams first, then columns, then rods (matching the element numbering).
    The ``*_properties`` dicts are keyed by 'beam', 'column' and 'rod'.

    Returns:
        J, A, h, beta, ro, E: Numpy arrays of the moments of inertia, areas and heights, angles,
        unit weights and elastic moduli of the elements. With J in m^4, A in m^2 and h in m, beta
        in radians, ro in kN/m^3 and E in kN/m^2.
        G is a float32 array filled with ``shear_mod`` for every element.

    Raises:
        ValueError: If any element ends up with a zero property (likely a
            missing entry in one of the property dicts).
    """
    # Calculate the areas (rectangular sections: width * height)
    area_beam = width_properties['beam'] * height_properties['beam']
    area_column = width_properties['column'] * height_properties['column']
    area_rod = width_properties['rod'] * height_properties['rod']

    # Calculate the moments of inertia (rectangular section: w^3 * h / 12)
    inertia_beam = width_properties['beam']**3 * height_properties['beam'] / 12
    inertia_column = width_properties['column']**3 * height_properties['column'] / 12
    inertia_rod = width_properties['rod']**3 * height_properties['rod'] / 12

    # Calculate the properties of the elements (beams occupy the first
    # n_beams slots of every array)
    J[:n_beams] = inertia_beam
    A[:n_beams] = area_beam
    if "simple" in truss_mode:
        # Simple bridges may split each span segment into beam_partition pieces
        h[:n_beams] = spacing / beam_partition
    else:
        h[:n_beams] = spacing
    ro[:n_beams] = unit_weight_properties['beam']
    E[:n_beams] = elastic_mod_properties['beam']

    # Columns and rods only exist when there are elements beyond the beams
    if n_beams != n_ele_tot:
        J[n_beams:n_beams + n_columns] = inertia_column
        A[n_beams:n_beams + n_columns] = area_column
        h[n_beams:n_beams + n_columns] = height
        ro[n_beams:n_beams + n_columns] = unit_weight_properties['column']
        E[n_beams:n_beams + n_columns] = elastic_mod_properties['column']

        J[n_beams + n_columns:] = inertia_rod
        A[n_beams + n_columns:] = area_rod
        h[n_beams + n_columns:] = diag
        ro[n_beams + n_columns:] = unit_weight_properties['rod']
        E[n_beams + n_columns:] = elastic_mod_properties['rod']

    # Debugging for any errors
    if np.any(J == 0) or np.any(A == 0) or np.any(h == 0) or np.any(ro == 0) or np.any(E == 0):
        raise ValueError("There are materials with no property")

    # Element angle from the x-projection only: beta = arccos(dx / length).
    # NOTE(review): Y is never used here, and arccos returns values in [0, pi],
    # so downward-sloping elements cannot be distinguished from upward ones —
    # confirm whether the sign of dy should enter (e.g. via arctan2).
    for i, _ in enumerate(beta):
        dx = X[ele_nod[i, 1]] - X[ele_nod[i, 0]]
        if abs(h[i]) < 1e-10:  # Avoid division by zero
            beta[i] = 0
        else:
            # Ensure value is within [-1, 1] for arccos
            cos_val = np.clip(dx / abs(h[i]), -1.0, 1.0)
            beta[i] = np.arccos(cos_val)

    # Constant shear modulus for every element
    G = np.full(len(E), shear_mod, dtype=np.float32)

    # Downcast everything to float32 for the downstream numeric assembly
    h = h.astype(np.float32)
    A = A.astype(np.float32)
    E = E.astype(np.float32)
    J = J.astype(np.float32)
    beta = beta.astype(np.float32)
    ro = ro.astype(np.float32)

    return J, A, h, beta, ro, E, G
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def boundary_conditions(n_bot_beams: int, n_par_nod: int, n_nod_tot: int,
                        supports: Optional[List[str]] = None) -> np.ndarray:
    """
    Build the restrained-DOF mask for the truss bridge supports.

    Returns:
        Integer array of length ``n_nod_tot * n_par_nod`` where entry i is 1
        if the i-th parameter is restrained and 0 otherwise. Defaults to a
        pin at node 0 and a roller at node ``n_bot_beams``; any support label
        other than "pin"/"roller" is treated as fully fixed.
    """
    if supports is None:
        supports = ["pin", "roller"]

    def _restrained(kind: str) -> np.ndarray:
        # Per-node restraint pattern: roller -> vertical DOF only,
        # pin -> both translations, anything else -> every DOF (fixed).
        mask = np.zeros(n_par_nod, dtype=int)
        if kind == "roller":
            mask[1] = 1
        elif kind == "pin":
            mask[:2] = 1
        else:
            mask[:] = 1
        return mask

    # First support sits at node 0, second at node n_bot_beams.
    bc = np.zeros(n_nod_tot * n_par_nod, dtype=np.int32)
    bc[:n_par_nod] = _restrained(supports[0])
    start = n_par_nod * n_bot_beams
    bc[start:start + n_par_nod] = _restrained(supports[1])

    return bc
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def truss_design(n_bot_beams: int, n_rods: int,
                 truss_mode: str = "pratt") -> np.ndarray:
    """
    Select which rods to skip to realize the requested truss design.

    Returns:
        A list of rod indices to skip for "pratt"/"howe" designs, or an
        empty numpy array for any other mode (no rods skipped).
    """
    truss_mode = truss_mode.lower()

    def _alternating(bot, rods, start=3, mid=0):
        # Skip every other rod on each half of the span.
        left = np.arange(start, bot, 2)
        right = np.arange(bot + mid, rods, 2)

        # Howe layouts must keep the final rod: drop it from the right-hand
        # run if it landed there, to avoid invalid indexing downstream.
        if truss_mode == "howe" and right.size > 0 and right[-1] == rods - 1:
            right = right[:-1]
        return np.concatenate((left, right)).tolist()

    if truss_mode == "pratt":
        return _alternating(n_bot_beams - 1, n_rods - 1, 2)

    if truss_mode == "howe":
        return _alternating(n_bot_beams - 1, n_rods, 1, 1)

    return np.array([])
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def col_pos(W: np.ndarray, n_par_nod: int, X: np.ndarray, Y: Optional[np.ndarray] = None) -> np.ndarray:
    """
    Collect the (x, y) coordinates of every supported (column) node.

    Args:
        W: Restrained-DOF mask (1 where a DOF is restrained, 0 otherwise).
        n_par_nod: Number of parameters per node.
        X: X coordinates of the nodes.
        Y: Optional Y coordinates of the nodes; defaults to y = 0.

    Returns:
        Tuple of numpy arrays ``(x_col, y_col)`` with the column coordinates.
    """
    xs = []
    ys = []

    for node, x_val in enumerate(X):
        dof_slice = W[node * n_par_nod:(node + 1) * n_par_nod]
        if 1 not in dof_slice:
            continue  # no restrained DOF -> no column at this node
        xs.append(x_val)
        ys.append(Y[node] if Y is not None else 0)

    return np.array(xs), np.array(ys)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
################## Plotting Functions ##################
|
| 413 |
+
def plot_elements(ax, truss_mode, ele_nod, X, Y, h, beta):
    """
    Plot the structural elements (1D or 2D) on the given axes.

    Each element is drawn as a blue line labeled with its index; nodes are
    drawn as red circles. "simple" modes are rendered along the x-axis only.

    Args:
        ax: Matplotlib axes object.
        truss_mode: String indicating the truss mode ("simple" for 1D, others for 2D).
        ele_nod: Numpy array of element-node connectivity.
        X: Numpy array of X coordinates of nodes.
        Y: Numpy array of Y coordinates of nodes.
        h: Numpy array of element lengths.
        beta: Numpy array of element angles in radians.
    """
    if "simple" in truss_mode:
        # Plot each element as a horizontal line (1D: y is always 0)
        for i in range(len(ele_nod)):
            x1 = X[ele_nod[i, 0]]  # Start node X
            length = h[i]
            x2 = x1 + length  # End is start plus the element length

            ax.plot([x1, x2], [0, 0], 'b')  # Plot element
            ax.text((x1 + x2) / 2, 0, str(i), verticalalignment='bottom')  # Label element

        ax.plot(X, np.zeros_like(X), 'ro')  # Plot nodes as red circles on the x-axis
        ax.set_title('1D Element Plot')
    else:
        # Plot each element based on angle beta (end = start + length along beta)
        for i in range(len(ele_nod)):
            x1, y1 = X[ele_nod[i, 0]], Y[ele_nod[i, 0]]  # Start node coordinates
            length = h[i]
            x2 = x1 + length * np.cos(beta[i])
            y2 = y1 + length * np.sin(beta[i])

            ax.plot([x1, x2], [y1, y2], 'b')  # Plot element
            ax.text((x1 + x2) / 2, (y1 + y2) / 2, str(i))  # Label element at midpoint

        ax.plot(X, Y, 'ro')  # Plot nodes
        ax.set_title('2D Element Plot')
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def plot_supports(ax, X_col, Y_col, W, n_par_nod, X):
    """
    Plot supports (roller, pin, fixed) on the structure.

    The marker is chosen from the number of restrained DOFs at each support
    node: 1 -> roller (circle), 2 -> pin (triangle), 3 -> fixed (hatched line).

    Args:
        ax: Matplotlib axes object.
        X_col: Numpy array of X coordinates where supports are located.
        Y_col: Numpy array of Y coordinates where supports are located.
        W: Numpy array representing boundary conditions (1 for restrained, 0 for free).
        n_par_nod: Number of parameters per node.
        X: Numpy array of X coordinates of all nodes (used to locate each support).
    """
    # NOTE(review): relies on a module-level `plt` (matplotlib.pyplot) import
    # and assumes every X_col value matches exactly one entry of X.
    for i, _ in enumerate(X_col):
        x = X_col[i]
        y = Y_col[i]

        # Find the node index of this support by exact coordinate match
        idx = np.where(X == x)[0][0]
        # Count of restrained DOFs determines the support symbol
        support_type = sum(W[idx * n_par_nod:(idx + 1) * n_par_nod])

        if support_type == 1:
            # Larger circle for roller
            circle_radius = 0.03  # Increased size
            circle = plt.Circle((x, y - circle_radius), circle_radius, color='k', fill=True)
            ax.add_patch(circle)
        elif support_type == 2:
            # Larger triangle for pin
            triangle_base = 0.1
            triangle_height = 0.05

            tri_x = [x - triangle_base/2, x + triangle_base/2, x]
            tri_y = [y - triangle_height, y - triangle_height, y]
            ax.fill(tri_x, tri_y, 'k')
        elif support_type == 3:
            # Fixed support with lines (base line plus short hatching strokes)
            line_height = 0.015
            line_width = 0.05

            ax.plot([x - line_width/2, x + line_width/2],
                    [y - line_height, y - line_height], 'k', linewidth=2)
            for j in range(5):
                gap = line_width / 4
                ax.plot([x - line_width/2 + j * gap, x - line_width/2 + j * gap],
                        [y - line_height, y - line_height - 0.02], 'k')
|
utils/truss_helpers.py
ADDED
|
@@ -0,0 +1,612 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import numpy as np
|
| 3 |
+
import sympy as sp
|
| 4 |
+
import scipy.linalg
|
| 5 |
+
from typing import Tuple, List, Optional
|
| 6 |
+
from sympy import Matrix, lambdify
|
| 7 |
+
import multiprocessing as mp
|
| 8 |
+
|
| 9 |
+
# Get the logger
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
################## Basic Functions ##################
|
| 13 |
+
def initialize_symbols(n_par_ele: int) -> Tuple:
    """
    Create and return the symbolic variables shared by the element derivations.

    Args:
        n_par_ele: Number of parameters per element (sizes the qe and
            Qglo_pel_curr symbol vectors).

    Returns:
        Tuple of sympy symbols / symbol vectors, in the fixed order the
        callers unpack them.
    """
    # Geometry and local-coordinate symbols
    x, xi, h_e, beta_e, beta_curr = sp.symbols('x xi h_e beta_e beta_curr')
    # Section, material and load symbols
    A_e, E_e, J_e, ro_e, T, fo_E = sp.symbols('A_e E_e J_e ro_e T fo_E')
    # Element displacement vector
    qe = sp.symbols(f'qe:{n_par_ele}')

    # Interpolation coefficient vectors
    a_arr = sp.symbols('a:2')  # axial
    b_arr = sp.symbols('b:2')  # rods
    d_arr = sp.symbols('d:2')  # rods
    c_arr = sp.symbols('c:4')  # transversal
    e_arr = sp.symbols('e:3')  # timoshenko rotation

    # Symbols for global displacements
    Qglo_pel_curr = sp.symbols(f'Qglo_pel_curr:{n_par_ele}')
    w_arr = sp.symbols('w:2')
    r_arr = sp.symbols('r:2')
    f_arr = sp.symbols('f:2')
    g_arr = sp.symbols('g:4')
    X_old, Y_old = sp.symbols('X_old Y_old')

    return (x, xi, h_e, beta_e, beta_curr, qe, a_arr, b_arr,
            c_arr, d_arr, e_arr, A_e, E_e, J_e, ro_e, T, fo_E, X_old, Y_old,
            Qglo_pel_curr, w_arr, r_arr, f_arr, g_arr)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def define_newton_equation(x: sp.Symbol, coeffs: List[sp.symbols]) -> sp.Expr:
    """
    Build a polynomial in ``x`` from a list of coefficients.

    Args:
        x (sp.Symbol): The polynomial variable.
        coeffs (List[sp.Symbol]): Coefficients, lowest power first
            (``coeffs[i]`` multiplies ``x**i``).

    Returns:
        sp.Expr: The polynomial ``coeffs[0] + coeffs[1]*x + ...``.
    """
    poly = 0
    for power, coeff in enumerate(coeffs):
        poly = poly + coeff * x ** power
    return poly
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def define_langrange_equation(xi: sp.Symbol, he: sp.Symbol,
                              type_beam: Optional[int] = 1) -> Tuple[sp.Matrix, sp.Matrix]:
    """
    Return the interpolation shape functions for a beam or rod element.

    Beam elements use Hermite cubic shape functions; rods use linear ones.

    Args:
        xi: Normalized local coordinate (0..1).
        he: Element length (scales the rotational beam shapes).
        type_beam: 1 -> beam shapes (4x1 matrix), anything else -> rod
            shapes (2x1 matrix).

    Returns:
        Column matrix of shape functions.
    """
    if type_beam == 1:
        # Hermite cubics: end translations and length-scaled end rotations.
        return sp.Matrix([
            1 - 3 * xi**2 + 2 * xi**3,
            he * (xi - 2 * xi**2 + xi**3),
            3 * xi**2 - 2 * xi**3,
            he * (xi**3 - xi**2),
        ])

    # Linear rod shape functions.
    return sp.Matrix([1 - xi, xi])
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def compute_v_u(qe: List[sp.symbols], beta_e: sp.Symbol
                ) -> Tuple[sp.Expr, sp.Expr, sp.Expr, sp.Expr]:
    """
    Rotate the global end displacements into the local element frame.

    Args:
        qe (List[sp.Symbol]): Element displacement vector (global frame);
            translations at indices 0, 1 (node 1) and 3, 4 (node 2).
        beta_e (sp.Symbol): Element orientation angle.

    Returns:
        Tuple[sp.Expr, sp.Expr, sp.Expr, sp.Expr]: Local transverse and
        axial displacements ``(v1, u1, v2, u2)``.
    """
    sin_b = sp.sin(beta_e)
    cos_b = sp.cos(beta_e)

    # Node 1: transverse (v) and axial (u) components
    v1 = qe[1] * cos_b - qe[0] * sin_b
    u1 = qe[0] * cos_b + qe[1] * sin_b
    # Node 2
    v2 = qe[4] * cos_b - qe[3] * sin_b
    u2 = qe[3] * cos_b + qe[4] * sin_b

    return v1, u1, v2, u2
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def define_equilibrium_langrange(beam_type: str, u_beam: sp.Expr, v_beam: sp.Expr, alpha_beam: sp.Expr,
                                 xi: sp.Symbol, h_e: sp.Symbol, v1: sp.Expr, u1: sp.Expr, v2: sp.Expr,
                                 u2: sp.Expr, qe: List[sp.symbols]) -> Tuple[sp.Expr, sp.Expr, sp.Expr, sp.Expr, sp.Expr]:
    """
    Build the beam and rod displacement fields from Lagrange/Hermite shape
    functions.

    Args:
        beam_type: "bernoulli" for Euler-Bernoulli kinematics, anything
            else for Timoshenko (independent rotation field).
        u_beam, v_beam, alpha_beam: Placeholders kept for signature
            compatibility; the fields are rebuilt here from shape functions.
        xi: Normalized local coordinate.
        h_e: Element length.
        v1, u1, v2, u2: Local end displacements (from ``compute_v_u``).
        qe: Element displacement symbols (end rotations at qe[2] and qe[5]).

    Returns:
        Tuple ``(v_beam, u_beam, theta_beam, v_rod, u_rod)`` of symbolic
        displacement fields (``theta_beam`` is zero for Bernoulli beams).
    """
    N_beam_shape = define_langrange_equation(xi, h_e, type_beam=1)
    N_rod_shape = define_langrange_equation(xi, h_e, type_beam=0)

    # Through-thickness coordinate of the axial field. Fix: the original
    # referenced an undefined name `z`, raising NameError at runtime.
    z = sp.symbols('z')
    # Fix: sp.Expr(0) creates a malformed expression node; use a proper zero.
    theta_beam = sp.S.Zero

    # Rod fields: linear interpolation between the two end displacements.
    v_rod = N_rod_shape[0] * v1 + N_rod_shape[1] * v2
    u_rod = N_rod_shape[0] * u1 + N_rod_shape[1] * u2

    if beam_type == "bernoulli":
        # Transverse field from Hermite cubics; axial field includes the
        # fiber term -z * dv/dxi of Euler-Bernoulli kinematics.
        v_beam = N_beam_shape[0]*v1 + N_beam_shape[1]*qe[2] + N_beam_shape[2]*v2 + N_beam_shape[3]*qe[5]
        u_beam = N_rod_shape[0]*u1 + N_rod_shape[1]*u2 - z * sp.diff(v_beam, xi)
    else:  # Timoshenko: rotation interpolated independently of the slope
        v_beam = N_beam_shape[0]*v1 + N_beam_shape[1]*qe[2] + N_beam_shape[2]*v2 + N_beam_shape[3]*qe[5]
        theta_beam = N_beam_shape[0]*qe[2] + N_beam_shape[2]*qe[5]
        # Fix: the second axial term used N_rod_shape[0] twice (typo);
        # the u2 contribution must use N_rod_shape[1].
        u_beam = N_rod_shape[0]*u1 + N_rod_shape[1]*u2 - z * theta_beam

    return v_beam, u_beam, theta_beam, v_rod, u_rod
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def define_equilibrium_equations(beam_type: str, expressions: List[sp.Expr],
                                 x: sp.Symbol, h_e: sp.Symbol, v1: sp.Expr, u1: sp.Expr, v2: sp.Expr,
                                 u2: sp.Expr, qe: List[sp.symbols]) -> List[sp.Expr]:
    """
    Build the boundary residuals that tie the beam interpolation fields to
    the end displacements.

    Args:
        beam_type: "bernoulli" or anything else for Timoshenko.
        expressions: ``[u_beam, v_beam, alpha_beam]`` candidate fields.

    Returns:
        List[sp.Expr]: Six residual expressions (zero at the solution).
    """
    u_field, v_field = expressions[0], expressions[1]
    at_start = lambda expr: expr.subs(x, 0)
    at_end = lambda expr: expr.subs(x, h_e)

    if beam_type == "bernoulli":
        # Euler-Bernoulli: end rotations are the slope of the transverse field.
        slope = sp.diff(v_field, x)
        return [
            at_start(v_field) - v1,
            at_start(slope) - qe[2],
            at_end(v_field) - v2,
            at_end(slope) - qe[5],
            at_start(u_field) - u1,
            at_end(u_field) - u2,
        ]

    # Timoshenko: rotation is its own field, matched directly at the ends.
    rot_field = expressions[2]
    return [
        at_start(u_field) - u1,
        at_end(u_field) - u2,
        at_start(v_field) - v1,
        at_end(v_field) - v2,
        at_start(rot_field) - qe[2],
        at_end(rot_field) - qe[5],
    ]
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def define_rod_equations(u_rod: sp.Expr, v_rod: sp.Expr, x: sp.Symbol,
                         h_e: sp.Symbol, v1: sp.Expr, u1: sp.Expr, v2: sp.Expr,
                         u2: sp.Expr) -> List[sp.Expr]:
    """
    Build the boundary residuals that tie the rod interpolation fields to
    the end displacements.

    Returns:
        List[sp.Expr]: Four residual expressions (zero at the solution).
    """
    # (evaluation point, field, target end displacement), in the fixed
    # order downstream solvers expect.
    conditions = [
        (0, v_rod, v1),
        (0, u_rod, u1),
        (h_e, v_rod, v2),
        (h_e, u_rod, u2),
    ]
    return [field.subs(x, point) - target for point, field, target in conditions]
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def apply_boundary_conditions(K: np.array, M: np.array, W: np.array,
                              tol: float = 1e-5) -> Tuple[np.array, np.array]:
    """
    Apply the boundary conditions to the stiffness and mass matrices
    (penalty method), modifying them in place.

    Constrained rows/columns are zeroed and their diagonal entries replaced
    by a large stiffness penalty and a tiny mass penalty.

    Returns:
        The stiffness and mass matrices with the boundary conditions applied.
    """
    constrained = np.where(W == 1)[0]

    # Penalty magnitude derived from the extreme diagonal entries.
    # NOTE(review): the original named the second value "min_m" but computed
    # it with np.max over M's diagonal — confirm whether np.min was intended.
    k_diag_peak = np.max(np.abs(np.diag(K)))
    m_diag_peak = np.max(np.abs(np.diag(M)))
    penalty = np.sqrt(k_diag_peak / m_diag_peak)

    # Wipe the constrained rows and columns entirely.
    K[constrained, :] = 0
    K[:, constrained] = 0
    M[constrained, :] = 0
    M[:, constrained] = 0

    # Restore the diagonal with the penalty values (fancy indexing hits
    # exactly the (i, i) entries).
    K[constrained, constrained] = penalty / tol
    M[constrained, constrained] = penalty * tol

    return K, M
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
################## Analysis Functions ##################
|
| 217 |
+
def calculate_energies(beam_type, ve_beam, ue_beam, alpha_e_beam, ve_rod,
                       ue_rod, x, h_e, E_e, J_e, A_e, ro_e, G, k_shear):
    """
    Compute the symbolic potential and kinetic energies of beams and rods.

    Returns:
        Tuple ``(pot_beam, kin_beam, pot_rod, kin_rod)`` of symbolic
        energy expressions integrated over the element length.
    """
    def _kinetic(v_field, u_field):
        # Kinetic energy: 1/2 * rho * A * integral(v^2 + u^2) over the element.
        return 1 / 2 * ro_e * A_e * sp.integrate(v_field**2 + u_field**2, (x, 0, h_e))

    eps_beam = sp.diff(ue_beam, x)
    if beam_type == "bernoulli":
        # Curvature is the second derivative of the transverse field.
        chi_beam = sp.diff(ve_beam, x, 2)
        pot_beam = 1 / 2 * sp.integrate(E_e * J_e * chi_beam**2 + E_e * A_e * eps_beam**2, (x, 0, h_e))
    else:
        # Timoshenko: independent rotation adds a shear-deformation term.
        gamma_beam = sp.diff(ve_beam, x) - alpha_e_beam
        chi_beam = sp.diff(alpha_e_beam, x)

        # Note that k_shear and G must be changed in case they are not constant
        pot_beam = 1 / 2 * sp.integrate(E_e * J_e * chi_beam**2 + E_e * A_e * eps_beam**2 + k_shear * G[0] * A_e * gamma_beam**2, (x, 0, h_e))

    kin_beam = _kinetic(ve_beam, ue_beam)

    eps_rod = sp.diff(ue_rod, x)
    pot_rod = 1 / 2 * sp.integrate(E_e * A_e * eps_rod**2, (x, 0, h_e))
    kin_rod = _kinetic(ve_rod, ue_rod)

    return pot_beam, kin_beam, pot_rod, kin_rod
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def calculate_displacement_equations(x, xi, h_e, beta_e, qe, a_arr, b_arr, c_arr, d_arr, e_arr,
                                     beam_type, use_lagrangian: bool = True):
    """
    Calculate the beam displacement equations for beam (bernoulli or
    timoshenko) and rod.

    With ``use_lagrangian=False``, fields are assumed polynomial (Newton
    form) and the interpolation coefficients are solved from the end
    conditions; with ``use_lagrangian=True``, shape functions are used
    directly.

    Returns:
        Displacement functions (lambdified) and symbolic expressions for
        the beam and rod fields in the u and v directions, plus the beam
        rotation field.
    """
    # Compute local displacements
    v1, u1, v2, u2 = compute_v_u(qe, beta_e)

    # Define beam displacement equations
    if use_lagrangian:
        # NOTE(review): u_beam, v_beam and alpha_beam are read here before any
        # assignment (they are not parameters of this function), so this
        # branch raises NameError as written — confirm the intended wiring.
        v_beam, u_beam, theta_beam, v_rod, u_rod = define_equilibrium_langrange(beam_type, u_beam, v_beam, alpha_beam,
                                                                                xi, h_e, v1, u1, v2, u2, qe)
        # Lambdify ve_beam and ue_beam
        # NOTE(review): ve_beam/ue_beam are undefined in this branch (only
        # v_beam/u_beam exist), and alpha_e_beam is never assigned before the
        # return below — this path cannot currently run to completion.
        ve_beam_func = lambdify((xi, qe, h_e, beta_e), ve_beam, "numpy")
        ue_beam_func = lambdify((xi, qe, h_e, beta_e), ue_beam, "numpy")
        ve_rod_func = lambdify((xi, qe, h_e, beta_e), ve_rod, "numpy")
        ue_rod_func = lambdify((xi, qe, h_e, beta_e), ue_rod, "numpy")
    else:  # Newton Interpolation
        # Candidate polynomial fields; coefficient vectors differ per theory.
        if beam_type == "bernoulli":
            u_beam = define_newton_equation(x, a_arr)
            v_beam = define_newton_equation(x, c_arr)
            # NOTE(review): sp.Expr(0) builds a malformed expression node;
            # sp.S.Zero is the conventional zero — confirm before changing.
            alpha_beam = sp.Expr(0)
        else:
            u_beam = define_newton_equation(x, a_arr)
            v_beam = define_newton_equation(x, b_arr)
            alpha_beam = define_newton_equation(x, e_arr)

        u_rod = define_newton_equation(x, b_arr)
        v_rod = define_newton_equation(x, d_arr)

        # Define equilibrium equations
        equations = define_equilibrium_equations(beam_type, [u_beam, v_beam, alpha_beam],
                                                 x, h_e, v1, u1, v2, u2, qe)
        equations_rod = define_rod_equations(u_rod, v_rod, x, h_e, v1, u1, v2, u2)

        # Solve the end conditions for the interpolation coefficients
        if beam_type == "bernoulli":
            sol = sp.solve(equations, a_arr + c_arr)
            alpha_e_beam = sp.Expr(0)
        else:
            sol = sp.solve(equations, a_arr + b_arr + e_arr)
            alpha_e_beam = alpha_beam.subs(sol)

        # Substitute the solved coefficients back into the fields
        ve_beam = v_beam.subs(sol)
        ue_beam = u_beam.subs(sol)

        sol_rod = sp.solve(equations_rod, b_arr + d_arr)
        ve_rod = v_rod.subs(sol_rod)
        ue_rod = u_rod.subs(sol_rod)

        # Lambdify ve_beam and ue_beam
        ve_beam_func = lambdify((x, qe, h_e, beta_e), ve_beam, "numpy")
        ue_beam_func = lambdify((x, qe, h_e, beta_e), ue_beam, "numpy")

        ve_rod_func = lambdify((x, qe, h_e, beta_e), ve_rod, "numpy")
        ue_rod_func = lambdify((x, qe, h_e, beta_e), ue_rod, "numpy")

    return ve_beam_func, ue_beam_func, ve_beam, ue_beam, ve_rod_func, ue_rod_func, ve_rod, ue_rod, alpha_e_beam
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def construct_lambdified_matrices(n_par_ele, pot_beam, kin_beam, pot_rod, kin_rod, qe, h_e, A_e, E_e, J_e, beta_e, ro_e):
    """
    Derive the symbolic element stiffness and mass matrices as second
    derivatives of the energies, and lambdify them for numeric evaluation.

    Returns:
        ``(K_beam, M_beam, K_rod, M_rod, K_beam_func, M_beam_func,
        K_rod_func, M_rod_func)`` — symbolic matrices followed by their
        lambdified counterparts.
    """
    def _second_derivative_matrix(energy):
        # Entry (i, j) is d^2(energy)/dqe[i] dqe[j] — the Hessian of the
        # (quadratic) energy with respect to the element DOFs.
        return sp.Matrix(n_par_ele, n_par_ele,
                         lambda i, j: sp.diff(energy, qe[i], qe[j]))

    K_beam = _second_derivative_matrix(pot_beam)
    M_beam = _second_derivative_matrix(kin_beam)
    K_rod = _second_derivative_matrix(pot_rod)
    M_rod = _second_derivative_matrix(kin_rod)

    # Lambdified evaluators: stiffness takes geometry/section args, mass
    # additionally takes the unit weight.
    stiffness_args = (h_e, A_e, E_e, J_e, beta_e)
    mass_args = (h_e, A_e, E_e, J_e, beta_e, ro_e)
    K_beam_func = lambdify(stiffness_args, K_beam)
    M_beam_func = lambdify(mass_args, M_beam)
    K_rod_func = lambdify(stiffness_args, K_rod)
    M_rod_func = lambdify(mass_args, M_rod)

    return K_beam, M_beam, K_rod, M_rod, K_beam_func, M_beam_func, K_rod_func, M_rod_func
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def assemble_global_matrices(n_par_ele: int, n_par_tot: int, n_ele_tot: int, K_beam_func,
                             M_beam_func, K_rod_func, M_rod_func,
                             h, A, E, J, beta,
                             ro, pel, n_rods: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Assemble the global stiffness and mass matrices from the lambdified
    element-matrix functions.

    The first (n_ele_tot - n_rods) elements are beams, the remaining n_rods
    elements are rods. Element connectivity `pel` is 1-based and converted
    to 0-based indices before scatter-adding into the global matrices.

    Args:
        n_par_ele: DOFs per element (kept for API compatibility; unused here).
        n_par_tot: Total number of global DOFs.
        n_ele_tot: Total number of elements (beams + rods).
        K_beam_func, M_beam_func: Lambdified beam element matrix functions.
        K_rod_func, M_rod_func: Lambdified rod element matrix functions.
        h, A, E, J, beta, ro: Per-element property arrays, indexed by element.
        pel: Element-to-global-DOF connectivity array (1-based).
        n_rods: Number of rod elements, placed at the end of the element list.

    Returns:
        Tuple (K, M) of float32 global stiffness and mass matrices.
    """
    # Accumulate in float64 (NumPy default) and cast once at the end,
    # matching the original behavior.
    K = np.zeros((n_par_tot, n_par_tot))
    M = np.zeros((n_par_tot, n_par_tot))

    # Pre-compute beam and rod indices
    beam_indices = np.arange(n_ele_tot - n_rods)
    rod_indices = np.arange(n_ele_tot - n_rods, n_ele_tot)
    logging.debug(f"Beam indices: {beam_indices} \nRod indices: {rod_indices}")

    def _accumulate(indices, K_func, M_func):
        # Scatter-add each element's local K/M into the global matrices.
        for e in indices:
            Ke = K_func(h[e], A[e], E[e], J[e], beta[e])
            Me = M_func(h[e], A[e], E[e], J[e], beta[e], ro[e])
            idx = pel[e, :] - 1  # connectivity is 1-based; adjust to 0-based
            K[np.ix_(idx, idx)] += Ke
            M[np.ix_(idx, idx)] += Me

    _accumulate(beam_indices, K_beam_func, M_beam_func)
    _accumulate(rod_indices, K_rod_func, M_rod_func)

    return K.astype(np.float32), M.astype(np.float32)
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def compute_eigenvalues_and_eigenvectors(K: np.array, M: np.array, method: str = 'numpy',
                                         filter_numerical_stability: bool = False,
                                         threshold: float = 1e-10) -> Tuple[np.array, np.array]:
    """
    Compute the eigenvalues (λ: w**2 natural frequencies) and
    eigenvectors (ϕ: mode shape) of the generalized problem K ϕ = λ M ϕ.

    Args:
        K: Stiffness matrix
        M: Mass matrix
        method: 'scipy' for scipy.linalg.eigh or 'numpy' for np.linalg.eig
        filter_numerical_stability: If True, drop eigenpairs with λ <= threshold
        threshold: Threshold for filtering small eigenvalues for numerical stability

    Returns:
        The real part of the eigenvalues (sorted descending) and the
        mass-normalized eigenvectors (ϕᵀ M ϕ = 1), as columns.
    """
    if method == 'numpy':
        # Solve M⁻¹ K via a linear solve instead of forming the explicit
        # inverse — numerically better conditioned, same result.
        lamb, phis = np.linalg.eig(np.linalg.solve(M, K))
    else:
        if method != 'scipy':
            print("Invalid method. Defaulting to scipy.")
        lamb, phis = scipy.linalg.eigh(K, M)

    # Sort eigenpairs in descending order of eigenvalue; keep real parts only.
    idx = np.argsort(lamb)[::-1]
    lamb_r = np.real(lamb[idx])
    phis_r = np.real(phis[:, idx])

    if filter_numerical_stability:
        # Drop near-zero / negative eigenvalues for numerical stability.
        valid_indices = lamb_r > threshold
        lamb_r = lamb_r[valid_indices]
        phis_r = phis_r[:, valid_indices]

    # Mass-normalize each eigenvector: ϕᵢ / sqrt(ϕᵢᵀ M ϕᵢ), vectorized over
    # all columns at once instead of a per-column Python loop.
    n_par_tot = len(lamb_r)
    norms = np.sqrt(np.einsum('ij,ij->j', phis_r, M @ phis_r))
    phis_norm = phis_r / norms

    # Verify ϕᵢᵀ M ϕᵢ == 1 for every normalized eigenvector.
    verification = np.einsum('ij,ij->j', phis_norm, M @ phis_norm)
    if not np.allclose(verification, 1):
        logging.warning("Verification failed for eigenvectors. Results may be inaccurate.")
    logging.debug("Verification results for each eigenvector (should all be 1):\n%s", verification)

    verification_sum = np.sum(verification)
    if not np.isclose(verification_sum, n_par_tot):
        logging.warning("Sum of verification values is not equal to number of eigenvectors. Results may be inaccurate.")
    logging.debug("Sum of verification values (should be equal to number of eigenvectors): %s", verification_sum)

    return lamb_r, phis_norm
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def get_mode_indices(lamb_r: np.array, phis_norm: np.array,
                     n_plots: int) -> Tuple[np.array, np.array, np.array]:
    """
    Compute vibration periods from the eigenvalues and select the indices of
    the n_plots modes with the largest periods (lowest frequencies).

    Non-finite or non-positive periods are discarded before selection. If
    n_plots exceeds the number of valid periods, it is clamped (previously
    this raised in np.argpartition).

    Args:
        lamb_r: Real part of the eigenvalues (w**2)
        phis_norm: Normalized eigenvectors (filtered locally; NOT returned —
            callers needing the filtered modes must re-apply the mask)
        n_plots: Number of modes to select

    Returns:
        index_modes: Indices (into the filtered `period` array) of the largest
            n_plots periods, in descending order of period.
        sorted_period: The valid periods sorted ascending.
        period: The valid periods in their original (filtered) order.
    """
    # T = 2π / ω with ω = sqrt(λ); invalid λ produce nan/inf filtered below.
    period = 2 * np.pi / np.sqrt(lamb_r)

    # Show how many invalid periods are there
    logging.debug("Number of invalid periods: %s", np.sum(~np.isfinite(period)))
    logging.debug("Number of negative periods: %s", np.sum(period < 0))

    # Ensure periods are all real numbers and valid
    valid_indices = np.isfinite(period) & (period > 0)
    period = period[valid_indices]
    phis_norm = phis_norm[:, valid_indices]

    # Sort periods for better representation
    sorted_period = np.sort(period)

    # Clamp so argpartition cannot be asked for more entries than exist.
    n_plots = min(n_plots, period.size)
    if n_plots == 0:
        return np.array([], dtype=int), sorted_period, period

    # Find the indices of the largest n_plots periods
    index_modes = np.argpartition(period, -n_plots)[-n_plots:]

    # Sort index_modes so that the modes are in descending order of period
    index_modes = index_modes[np.argsort(period[index_modes])][::-1]

    return index_modes, sorted_period, period
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def calculate_global_displacements(Qglo_pel_curr, beta_e, h_e, x, xi, f_arr,
                                   g_arr, w_arr, r_arr, beam_type,
                                   X_old, Y_old, use_lagrangian: bool = False) -> Tuple:
    """
    Calculate the global displacements by solving the local symbolic
    equilibrium equations.

    Rotates the local beam/rod displacement fields (u, v) into the global
    frame through the element angle beta_e, then builds the deformed
    coordinates by adding the rigid offset from (X_old, Y_old) along the
    element axis plus the global displacement.

    Args:
        Qglo_pel_curr: Current global DOF values for this element (iterable;
            appended to the lambdify argument tuple).
        beta_e: Element orientation angle symbol.
        h_e: Element length symbol.
        x: Physical axial coordinate symbol (used when use_lagrangian=False).
        xi: Normalized axial coordinate symbol (used when use_lagrangian=True).
        f_arr, g_arr, w_arr, r_arr: Arrays forwarded to
            calculate_displacement_equations.
        beam_type: Beam formulation selector, forwarded unchanged.
        X_old, Y_old: Undeformed element start coordinates (symbols).
        use_lagrangian: If True, parametrize with xi scaled by h_e instead of x.

    Returns:
        Lambda functions for the global displacements
        (symbolic X/Y expressions for beam and rod, plus their lambdified
        forms; the lambdified functions take (coordinate, X_old, Y_old,
        beta_e, h_e, *Qglo_pel_curr)).
    """

    # NOTE(review): g_arr is passed twice here and the positional order
    # (f_arr, r_arr, g_arr, w_arr, g_arr) differs from this function's own
    # parameter order (f_arr, g_arr, w_arr, r_arr) — verify against the
    # signature of calculate_displacement_equations; w_arr or r_arr may have
    # been intended for one of these slots.
    _, _, v_beam, u_beam, _, _, v_rod, u_rod, _ = calculate_displacement_equations(x, xi, h_e, beta_e, Qglo_pel_curr, f_arr, r_arr, g_arr, w_arr, g_arr,
                                                                                   beam_type, use_lagrangian)

    # Define global displacements
    # Rotate local (u, v) into the global frame by the element angle beta_e.
    u_glo_beam = u_beam * sp.cos(beta_e) - v_beam * sp.sin(beta_e)
    v_glo_beam = u_beam * sp.sin(beta_e) + v_beam * sp.cos(beta_e)
    u_glo_rod = u_rod * sp.cos(beta_e) - v_rod * sp.sin(beta_e)
    v_glo_rod = u_rod * sp.sin(beta_e) + v_rod * sp.cos(beta_e)


    # Define new coordinates
    # Deformed position = start point + undeformed offset along the element
    # axis + global displacement. With use_lagrangian, the axial coordinate is
    # the normalized xi scaled by the element length h_e.
    if use_lagrangian:
        X_new_beam = X_old + xi * h_e * sp.cos(beta_e) + u_glo_beam
        Y_new_beam = Y_old + xi * h_e * sp.sin(beta_e) + v_glo_beam
        X_new_rod = X_old + xi * h_e * sp.cos(beta_e) + u_glo_rod
        Y_new_rod = Y_old + xi * h_e * sp.sin(beta_e) + v_glo_rod
    else:
        X_new_beam = X_old + x * sp.cos(beta_e) + u_glo_beam
        Y_new_beam = Y_old + x * sp.sin(beta_e) + v_glo_beam
        X_new_rod = X_old + x * sp.cos(beta_e) + u_glo_rod
        Y_new_rod = Y_old + x * sp.sin(beta_e) + v_glo_rod


    # Lambdify with the axial coordinate first, then geometry and DOF values.
    args = (X_old, Y_old, beta_e, h_e) + tuple(Qglo_pel_curr)
    if use_lagrangian:
        X_new_beam_func = sp.lambdify((xi,) + args, X_new_beam, "numpy")
        Y_new_beam_func = sp.lambdify((xi,) + args, Y_new_beam, "numpy")
        X_new_rod_func = sp.lambdify((xi,) + args, X_new_rod, "numpy")
        Y_new_rod_func = sp.lambdify((xi,) + args, Y_new_rod, "numpy")
    else:
        X_new_beam_func = sp.lambdify((x,) + args, X_new_beam, "numpy")
        Y_new_beam_func = sp.lambdify((x,) + args, Y_new_beam, "numpy")
        X_new_rod_func = sp.lambdify((x,) + args, X_new_rod, "numpy")
        Y_new_rod_func = sp.lambdify((x,) + args, Y_new_rod, "numpy")

    return X_new_beam, Y_new_beam, X_new_rod, Y_new_rod, X_new_beam_func, Y_new_beam_func, X_new_rod_func, Y_new_rod_func
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
################## Verifications functions ##################
|
| 530 |
+
def print_matrix(matrix: np.array, width: int = 9, precision: int = 1, row_labels: Optional[List[str]] = None,
                 col_labels: Optional[List[str]] = None) -> None:
    """
    Pretty-print a matrix with right-aligned, scientific-notation cells.

    Args:
        matrix: The matrix to print.
        width: Width (characters) of each column.
        precision: Decimal places used in the scientific notation.
        row_labels: Labels for the rows; defaults to 1..n.
        col_labels: Labels for the columns; defaults to 1..n.
    """
    row_labels = range(1, len(matrix) + 1) if row_labels is None else row_labels
    col_labels = range(1, len(matrix[0]) + 1) if col_labels is None else col_labels

    def _fmt(value):
        # Values that cannot be coerced to float are printed verbatim.
        try:
            return f"{float(value):.{precision}e}"
        except ValueError:
            return str(value)

    # Header: blank corner cell followed by the column labels.
    print(" " * width + "".join(f"{label:>{width}}" for label in col_labels))

    # One output line per matrix row: its label, then the formatted cells.
    for i, row in enumerate(matrix):
        cells = "".join(f"{_fmt(v):>{width}}" for v in row)
        print(f"{row_labels[i]:>{width}}" + cells)
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
import numpy as np
|
| 568 |
+
|
| 569 |
+
def check_matrix(matrix: np.ndarray, atol: float = 1e-8) -> None:
    """
    Report (via print) whether a matrix is symmetric, well-conditioned,
    positive definite, and diagonally dominant.

    Args:
        matrix: The (square) matrix to check.
        atol: Absolute tolerance for the symmetry check; its reciprocal is
            also used as the condition-number threshold.
    """
    # Symmetry check
    if np.allclose(matrix, matrix.T, atol=atol):
        print("Matrix is symmetric.")
    else:
        print("Matrix is not symmetric.")

    # Conditioning check
    try:
        cond_number = np.linalg.cond(matrix)
        if cond_number < 1 / atol:
            print(f"Matrix is well-conditioned (Condition number: {cond_number:.2e}).")
        else:
            print(f"Matrix is ill-conditioned (Condition number: {cond_number:.2e}).")
    except np.linalg.LinAlgError:
        print("Condition number could not be computed (possibly singular matrix).")

    # Positive definiteness check: all eigenvalues must be strictly positive.
    # (The previous test `np.any(eigenvalues < 0)` wrongly classified
    # positive SEMI-definite matrices — zero eigenvalues — as definite.)
    try:
        eigenvalues = np.linalg.eigvalsh(matrix)
        if np.all(eigenvalues > 0):
            print("Matrix is positive definite.")
        else:
            print("Matrix is not positive definite.")
    except np.linalg.LinAlgError:
        print("Eigenvalues could not be computed.")

    # Diagonal dominance check: |a_ii| >= sum of |off-diagonal| per row.
    row_sums = np.sum(np.abs(matrix), axis=1) - np.abs(np.diag(matrix))
    diagonal_elements = np.abs(np.diag(matrix))
    is_dominant = diagonal_elements >= row_sums
    num_wrong = np.size(matrix, 0) - np.sum(is_dominant)

    if num_wrong == 0:
        print("Matrix is diagonally dominant.")
    else:
        print(f"Number of rows not diagonally dominant: {num_wrong}")
|