# Injection_Molding_Design/model_inverse.py
import torch
import numpy as np
import matplotlib.pyplot as plt
from Dataset import Dataset
# DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
DEVICE = torch.device('cpu')
# Set global plotting parameters
plt.rcParams.update({'font.size': 14,
'figure.figsize': (10, 8),
'lines.linewidth': 2,
'lines.markersize': 6,
'axes.grid': True,
'axes.labelsize': 16,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
'figure.autolayout': True
})
def set_seed(seed=42):
    """Seed NumPy and PyTorch RNGs for reproducible runs."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
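# Note on set_seed: seeding alone is generally enough on CPU, but full
# run-to-run determinism on GPU may additionally require
# torch.backends.cudnn.deterministic = True and
# torch.backends.cudnn.benchmark = False.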
class NeuralNetwork(torch.nn.Module):
    """Fully connected feed-forward network with optional dropout."""
    def __init__(self, layer_sizes, dropout_rate=0.0, activation=torch.nn.ReLU):
        super().__init__()
        if dropout_rate > 0:
            self.dropout_layer = torch.nn.Dropout(dropout_rate)
        self.layer_sizes = layer_sizes
        self.layers = torch.nn.ModuleList()
        # Hidden layers: Linear followed by the activation function.
        for i in range(len(layer_sizes) - 2):
            self.layers.append(torch.nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            self.layers.append(activation())
        # Output layer: Linear only, no activation.
        self.layers.append(torch.nn.Linear(layer_sizes[-2], layer_sizes[-1]))
        self.init_weights()
    def init_weights(self):
        """Xavier-initialize the weights and zero the biases of all linear layers."""
        for layer in self.layers:
            if isinstance(layer, torch.nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)
                torch.nn.init.zeros_(layer.bias)
    def forward(self, x, train=True):
        for layer in self.layers:
            x = layer(x)
            # Apply dropout only after hidden activations, never after the
            # final linear layer, so the regression output is not randomly scaled.
            if train and hasattr(self, 'dropout_layer') and not isinstance(layer, torch.nn.Linear):
                x = self.dropout_layer(x)
        return x
    def predict(self, x, train=False):
        """Forward pass in eval mode with gradients disabled."""
        self.eval()
        with torch.no_grad():
            return self.forward(x, train)
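# Construction sketch (hypothetical sizes): NeuralNetwork([8, 64, 64, 3], dropout_rate=0.1)
# builds Linear(8, 64) -> ReLU -> Linear(64, 64) -> ReLU -> Linear(64, 3),
# with dropout applied after each ReLU during training; the first entry of
# layer_sizes is the input width, the last is the output width, and the
# middle entries are hidden-layer widths.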
def train_neural_network(model, inputs, outputs, optimizer, epochs=1000, lr_scheduler=None):
    """Train `model` on (inputs, outputs) with a mean-squared-error loss."""
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        predictions = model(inputs)
        loss = torch.mean(torch.square(predictions - outputs))  # MSE
        loss.backward()
        optimizer.step()
        if lr_scheduler:
            lr_scheduler.step()
        if epoch % 100 == 0:
            print(f'Epoch {epoch}, Loss: {loss.item():.6f}, Learning Rate: {optimizer.param_groups[0]["lr"]}')
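# Minimal training sketch, assuming `X` and `Y` are pre-normalized float32
# tensors on DEVICE (names hypothetical):
#   net = NeuralNetwork([X.shape[1], 64, 64, Y.shape[1]]).to(DEVICE)
#   opt = torch.optim.Adam(net.parameters(), lr=1e-3)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=500, gamma=0.5)
#   train_neural_network(net, X, Y, opt, epochs=2000, lr_scheduler=sched)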
def load_model(model_path):
    """Rebuild a NeuralNetwork from a checkpoint and prepare it for inference."""
    checkpoint = torch.load(model_path, map_location=DEVICE)
    model_config = checkpoint['model_config']
    model = NeuralNetwork(model_config['layer_sizes'], dropout_rate=model_config['dropout_rate'])
    model.load_state_dict(checkpoint['model_state_dict'])
    print(f"Model loaded from {model_path}")
    model.to(DEVICE)
    model.eval()
    return model
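# The checkpoint is assumed to have been saved by the training script as
#   torch.save({'model_config': {'layer_sizes': [...], 'dropout_rate': 0.0},
#               'model_state_dict': model.state_dict()}, model_path)
# so that 'model_config' carries the arguments NeuralNetwork.__init__ expects.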
def inverse_design(gate_loc, matrix, fiber, fiber_vf, y_target, n_restarts=10, epochs=100, use_lbfgs=False, feasibility_samples=0):
    """Search for process parameters whose predicted response matches `y_target`.

    The four free design variables are optimized in an unconstrained latent
    space and mapped into their physical bounds via a sigmoid; the best result
    over `n_restarts` random initializations is returned.
    """
    model = load_model('./model_checkpoint.pth')
    data = Dataset()
    mat_type = data.material_map.get(matrix, 0.0)
    fiber_type = data.fiber_map.get(fiber, 0.0)
    # Targets are compared in physical (denormalized) units, so y_target is
    # used directly.
    y_target_tensor = torch.tensor(y_target, dtype=torch.float32)
    # Cast normalization statistics to float32 so they match the model's weights.
    input_mean = torch.tensor(data.input_mean, dtype=torch.float32)
    input_std = torch.tensor(data.input_std, dtype=torch.float32)
    output_mean = torch.tensor(data.output_mean, dtype=torch.float32)
    output_std = torch.tensor(data.output_std, dtype=torch.float32)
    # `weights` sets the relative importance of each target output; each row of
    # `bounds` is [lower, upper] for one of the four free design variables.
    weights = torch.tensor([1.0, 1.0, 1.0], dtype=torch.float32)
    bounds = torch.tensor([[1., 100.], [1., 10.], [1., 100.], [1., 100.]], dtype=torch.float32)
    best = {"loss": float('inf'), "input": None, "output": None}
    for restart in range(n_restarts):
        # Latent z is unconstrained; a sigmoid maps it into the physical bounds.
        z = torch.randn(4, requires_grad=True)
        if use_lbfgs:
            # L-BFGS runs its whole iteration budget inside a single step() call.
            optimizer = torch.optim.LBFGS([z], lr=0.1, max_iter=epochs, line_search_fn="strong_wolfe")
            steps = 1
        else:
            optimizer = torch.optim.Adam([z], lr=0.001)
            steps = epochs
        for step in range(steps):
            def closure():
                var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
                optimizer.zero_grad()
                # Fixed context (gate, materials) concatenated with the free variables.
                input_raw = torch.cat([torch.tensor([gate_loc, mat_type, fiber_type, fiber_vf]), var]).unsqueeze(0)
                input_norm = (input_raw - input_mean) / input_std
                output_pred = model(input_norm, train=False)
                output_pred = (output_pred * output_std) + output_mean
                loss = torch.sum(weights * (output_pred - y_target_tensor) ** 2)
                loss.backward()
                return loss
            if use_lbfgs:
                loss = optimizer.step(closure)
            else:
                loss = closure()
                optimizer.step()
            if (step + 1) % 200 == 0:
                print(f'Restart {restart + 1}, Step {step + 1}, Loss: {loss.item():.6f}, grad: {z.grad.norm().item():.6f}')
        # Score this restart's solution in physical units and keep the best.
        with torch.no_grad():
            var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
            input_raw = torch.cat([torch.tensor([gate_loc, mat_type, fiber_type, fiber_vf]), var])
            input_norm = (input_raw - input_mean) / input_std
            output_pred = model.predict(input_norm)
            output_pred = data.denormalize_output(output_pred.numpy())
            final_loss = np.sum(weights.numpy() * (output_pred - y_target) ** 2).item()
            if final_loss < best["loss"]:
                best["loss"] = final_loss
                best["input"] = var.detach().cpu().numpy()
                best["output"] = output_pred
    return best
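# The mapping var = lo + (hi - lo) * sigmoid(z) keeps every candidate strictly
# inside its bounds while the optimizer works on the unconstrained latent z.
# Its gradient, d(var)/dz = (hi - lo) * sigmoid(z) * (1 - sigmoid(z)), vanishes
# as var approaches either bound, which is one reason multiple random restarts
# help the search escape saturated initializations.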
if __name__ == "__main__":
    # set_seed(5324)
    # Perform inverse design for a single target response.
    import time
    start_time = time.time()
    best = inverse_design(gate_loc=1, matrix='PA6', fiber='CF', fiber_vf=0.4,
                          y_target=np.array([0.45, 9.03, 1.87]),
                          n_restarts=5, epochs=100, use_lbfgs=True)
    end_time = time.time()
    print(f"Inverse design completed in {end_time - start_time:.2f} seconds.")
    print("Best Input:", best["input"])
    print("Best Output:", best["output"])