Rui Wan committed on
Commit
6977ea8
·
1 Parent(s): c7147bd

upload model

Browse files
Data/.~lock.240_simulations_DS.xlsx# ADDED
@@ -0,0 +1 @@
 
 
1
+ ,wan6,precision,27.01.2026 21:00,/home/wan6/snap/onlyoffice-desktopeditors/890/.local/share/onlyoffice;
Dataset.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
+ np.random.seed(42)
5
+ epsilon = 1e-8
6
+
7
class Dataset:
    """In-memory view of the 240-simulation spreadsheet for one material sheet.

    Responsibilities:
      * read the sheet into a DataFrame,
      * integer-encode the categorical columns (matrix, fiber, combination),
      * rescale fiber weight percent to a 0-1 fraction,
      * precompute z-score mean/std over the whole sheet for the input and
        output columns.
    """

    def __init__(self, mat_name='FRP'):
        """Load sheet `mat_name` and prepare encodings and statistics."""
        filename = './Data/240_simulations_DS.xlsx'
        # header=1: column names live on the second spreadsheet row.
        self.df = pd.read_excel(filename, sheet_name=mat_name, header=1)
        # normalize data
        self.input_columns = [
            'Gate Location',
            'Matrix',
            'Fiber Type',
            'Fiber wt%(Volume Fractions)',
            # 'Fiber-Matrix Combination',
            'Packing Pressure (MPa)',
            'Packing Time (s)',
            'Mold Temperature (°C)',
            'Injection Speed (cm^3/s)'
        ]
        self.output_columns = ['Alpha_angle_deg(ABS)', 'Beta_angle_deg(ABS)', 'Gamma_angle_deg(ABS)']

        # Categorical-to-integer encodings applied in place to the DataFrame.
        self.material_map = {'PA6': 0, 'PP': 1}
        self.fiber_map = {'CF': 0, 'GF': 1}
        self.combination_map = {'PA6-CF-10': 0, 'PA6-CF-15': 1, 'PA6-CF-30': 2, 'PA6-CF-40': 3,
                                'PA6-GF-15': 4, 'PA6-GF-30': 5, 'PA6-GF-40': 6, 'PA6-GF-50': 7,
                                'PP-CF-10': 8, 'PP-CF-15': 9, 'PP-CF-30': 10, 'PP-CF-40': 11,
                                'PP-GF-15': 12, 'PP-GF-30': 13, 'PP-GF-40': 14, 'PP-GF-50': 15}
        self.df['Matrix'] = self.df['Matrix'].map(self.material_map)
        self.df['Fiber Type'] = self.df['Fiber Type'].map(self.fiber_map)
        self.df['Fiber-Matrix Combination'] = self.df['Fiber-Matrix Combination'].map(self.combination_map)
        # Convert weight percent (e.g. 40) to a fraction (0.40).
        self.df['Fiber wt%(Volume Fractions)'] = self.df['Fiber wt%(Volume Fractions)'] / 100.0

        # z-score statistics over ALL rows; epsilon guards a zero std.
        # NOTE(review): pandas .std() uses ddof=1, while main_injection.py
        # recomputes train-only stats with numpy (ddof=0) — confirm which
        # convention is intended before mixing the two.
        self.input_mean = self.df[self.input_columns].mean().to_numpy(dtype=np.float32)
        self.input_std = self.df[self.input_columns].std().to_numpy(dtype=np.float32) + epsilon
        self.output_mean = self.df[self.output_columns].mean().to_numpy(dtype=np.float32)
        self.output_std = self.df[self.output_columns].std().to_numpy(dtype=np.float32) + epsilon

    def get_input(self, normalize=False):
        """Return input columns as a float32 array; optionally z-scored."""
        data = self.df[self.input_columns].to_numpy(dtype=np.float32)
        if normalize:
            data = self.normalize_input(data)
        return data

    def get_output(self, normalize=False):
        """Return output (angle) columns as a float32 array; optionally z-scored."""
        data = self.df[self.output_columns].to_numpy(dtype=np.float32)
        if normalize:
            data = self.normalize_output(data)
        return data

    def __str__(self):
        # Convenience: show the first few rows when printing the object.
        return str(self.df.head())

    def normalize_input(self, input_data):
        """z-score inputs with the sheet-wide statistics."""
        return (input_data - self.input_mean) / self.input_std

    def normalize_output(self, output_data):
        """z-score outputs with the sheet-wide statistics."""
        return (output_data - self.output_mean) / self.output_std

    def denormalize_input(self, normalized_input):
        """Invert normalize_input."""
        return normalized_input * self.input_std + self.input_mean

    def denormalize_output(self, normalized_output):
        """Invert normalize_output."""
        return normalized_output * self.output_std + self.output_mean
69
+
70
if __name__ == "__main__":
    # Smoke test: load the spreadsheet and report the raw array shapes.
    ds = Dataset()

    # Example usage
    raw_inputs = ds.get_input(normalize=False)
    raw_outputs = ds.get_output(normalize=False)

    print("Input shape:", raw_inputs.shape)
    print("Output shape:", raw_outputs.shape)
__pycache__/Dataset.cpython-312.pyc ADDED
Binary file (5.6 kB). View file
 
__pycache__/model.cpython-312.pyc ADDED
Binary file (3.11 kB). View file
 
app.py CHANGED
@@ -3,12 +3,13 @@
3
  import streamlit as st
4
  import pandas as pd
5
  import altair as alt
6
- import plotly.express as px
7
  from PIL import Image # Used to open and handle image files
8
  import matplotlib
9
  import matplotlib.pyplot as plt
10
  import numpy as np
11
 
 
12
 
13
  #######################
14
  # Page configuration
@@ -277,19 +278,21 @@ if st.session_state.AM_design_button_clicked == True:
277
 
278
  st.dataframe(data1, hide_index=True, width=600)
279
 
 
 
280
  data2 = pd.DataFrame({
281
- 'Packing pressure (MPa)': [24],
282
- 'Packing time (s)': [110],
283
- 'Mold temperature (C)': [29],
284
- 'Injection speed (cm^3/s)': [69]
285
  })
286
  st.dataframe(data2, hide_index=True, width=600)
287
 
288
- data3 = pd.DataFrame({
289
- 'Maximum angle A': [1.2],
290
- 'Maximum angle B': [2.2],
291
- 'Maximum angle C': [1.0]})
292
- st.dataframe(data3, hide_index=True, width=600)
293
 
294
 
295
 
 
3
  import streamlit as st
4
  import pandas as pd
5
  import altair as alt
6
+ # import plotly.express as px
7
  from PIL import Image # Used to open and handle image files
8
  import matplotlib
9
  import matplotlib.pyplot as plt
10
  import numpy as np
11
 
12
+ from model_inverse import inverse_design
13
 
14
  #######################
15
  # Page configuration
 
278
 
279
  st.dataframe(data1, hide_index=True, width=600)
280
 
281
+ best = inverse_design(gate_loc=1, matrix='PP', fiber='GF', fiber_vf=0.1, y_target=np.array([angleA, angleB, angleC]), n_restarts=5, epochs=100, use_lbfgs=True)
282
+
283
  data2 = pd.DataFrame({
284
+ 'Packing pressure (MPa)': best["input"][0],
285
+ 'Packing time (s)': best["input"][1],
286
+ 'Mold temperature (C)': best["input"][2],
287
+ 'Injection speed (cm^3/s)': best["input"][3]
288
  })
289
  st.dataframe(data2, hide_index=True, width=600)
290
 
291
+ # data3 = pd.DataFrame({
292
+ # 'Maximum angle A': [1.2],
293
+ # 'Maximum angle B': [2.2],
294
+ # 'Maximum angle C': [1.0]})
295
+ # st.dataframe(data3, hide_index=True, width=600)
296
 
297
 
298
 
main_injection.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ from Dataset import Dataset
5
+ from model import NeuralNetwork
6
+
7
+ DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
8
+ # Set global plotting parameters
9
+ plt.rcParams.update({'font.size': 14,
10
+ 'figure.figsize': (10, 8),
11
+ 'lines.linewidth': 2,
12
+ 'lines.markersize': 6,
13
+ 'axes.grid': True,
14
+ 'axes.labelsize': 16,
15
+ 'legend.fontsize': 14,
16
+ 'xtick.labelsize': 14,
17
+ 'ytick.labelsize': 14,
18
+ 'figure.autolayout': True
19
+ })
20
+
21
def set_seed(seed=42):
    """Make the NumPy and PyTorch random streams reproducible."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed_all(seed)
26
+
27
def train_neural_network(model, inputs, outputs, optimizer, epochs=1000, lr_scheduler=None):
    """Full-batch MSE training loop.

    Steps the optimizer (and optional scheduler) each epoch and logs the
    loss and current learning rate every 100 epochs.
    """
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        residual = model(inputs) - outputs
        loss = torch.mean(torch.square(residual))
        loss.backward()
        optimizer.step()

        if lr_scheduler:
            lr_scheduler.step()

        if epoch % 100 == 0:
            print(f'Epoch {epoch}, Loss: {loss.item()}, Learning Rate: {optimizer.param_groups[0]["lr"]}')
41
+
42
def main():
    """End-to-end pipeline: load data, fit a baseline and an MLP, evaluate,
    plot test predictions, and save a checkpoint to ./model_checkpoint.pth."""
    set_seed(42)
    dataset = Dataset(mat_name='FRP')
    # Load raw data; normalize using train-only statistics to avoid leakage.
    inputs = dataset.get_input(normalize=False)
    outputs = dataset.get_output(normalize=False)

    # Train/val/test split for early stopping and unbiased test.
    n = len(inputs)
    perm = np.random.permutation(n)
    n_train = int(0.8 * n)
    n_val = int(0.1 * n)
    idx_train = perm[:n_train]
    idx_val = perm[n_train:n_train + n_val]
    idx_test = perm[n_train + n_val:]

    # Fit normalization on train split only.
    input_mean = inputs[idx_train].mean(axis=0)
    input_std = inputs[idx_train].std(axis=0) + 1e-8
    output_mean = outputs[idx_train].mean(axis=0)
    output_std = outputs[idx_train].std(axis=0) + 1e-8

    inputs_norm = (inputs - input_mean) / input_std
    outputs_norm = (outputs - output_mean) / output_std

    inputs_train = torch.tensor(inputs_norm[idx_train], dtype=torch.float32).to(DEVICE)
    outputs_train = torch.tensor(outputs_norm[idx_train], dtype=torch.float32).to(DEVICE)

    inputs_val = torch.tensor(inputs_norm[idx_val], dtype=torch.float32).to(DEVICE)
    outputs_val = torch.tensor(outputs_norm[idx_val], dtype=torch.float32).to(DEVICE)

    inputs_test = torch.tensor(inputs_norm[idx_test], dtype=torch.float32).to(DEVICE)
    outputs_test = torch.tensor(outputs_norm[idx_test], dtype=torch.float32).to(DEVICE)

    # Linear regression baseline on normalized data.
    # Bias column of ones appended so lstsq fits an intercept too.
    X_train = np.concatenate([inputs_norm[idx_train], np.ones((len(idx_train), 1), dtype=np.float32)], axis=1)
    Y_train = outputs_norm[idx_train]
    coef, _, _, _ = np.linalg.lstsq(X_train, Y_train, rcond=None)

    def linear_predict(x_norm):
        # Apply the fitted least-squares map (with intercept) to new rows.
        X = np.concatenate([x_norm, np.ones((len(x_norm), 1), dtype=np.float32)], axis=1)
        return X @ coef

    val_pred_lr = linear_predict(inputs_norm[idx_val])
    test_pred_lr = linear_predict(inputs_norm[idx_test])
    val_mse_lr = np.mean((val_pred_lr - outputs_norm[idx_val]) ** 2)
    test_mse_lr = np.mean((test_pred_lr - outputs_norm[idx_test]) ** 2)
    print(f'Linear baseline - Val Loss: {val_mse_lr:.6f}, Test Loss: {test_mse_lr:.6f}')

    # Smaller model to reduce overfitting on small data.
    layer_sizes = [inputs.shape[1]] + [32] * 2 + [outputs.shape[1]]
    dropout_rate = 0.2
    model = NeuralNetwork(layer_sizes, dropout_rate=dropout_rate, activation=torch.nn.ReLU).to(DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.9)

    # Create a proper dataset that keeps input-output pairs together
    train_dataset = torch.utils.data.TensorDataset(inputs_train, outputs_train)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)

    # Train the model
    epochs = 10000
    best_val = float('inf')
    best_state = None
    # Stop after `patience` epochs with no val-loss improvement > 1e-5.
    patience = 800
    patience_left = patience
    for epoch in range(epochs):
        model.train()
        for inputs_batch, outputs_batch in train_loader:
            inputs_batch = inputs_batch.to(DEVICE)
            outputs_batch = outputs_batch.to(DEVICE)
            optimizer.zero_grad()
            predictions = model(inputs_batch)
            loss = torch.mean(torch.square(predictions - outputs_batch))
            loss.backward()
            optimizer.step()

        if lr_scheduler:
            lr_scheduler.step()

        if epoch % 500 == 0:
            model.eval()
            with torch.no_grad():
                train_pred = model(inputs_train)
                train_loss = torch.mean(torch.square(train_pred - outputs_train))
                val_pred = model(inputs_val)
                val_loss = torch.mean(torch.square(val_pred - outputs_val))
                print(f'Epoch {epoch}, Train Loss: {train_loss.item():.6f}, Val Loss: {val_loss.item():.6f}')

        # Early stopping on validation loss (checked every epoch).
        model.eval()
        with torch.no_grad():
            val_pred = model(inputs_val)
            val_loss = torch.mean(torch.square(val_pred - outputs_val))
            if val_loss.item() < best_val - 1e-5:
                best_val = val_loss.item()
                # Snapshot weights; cloned so later training can't mutate them.
                best_state = {k: v.clone() for k, v in model.state_dict().items()}
                patience_left = patience
            else:
                patience_left -= 1
                if patience_left <= 0:
                    print(f'Early stopping at epoch {epoch}. Best val loss: {best_val:.6f}')
                    break

    # Restore the best-on-validation weights before evaluating.
    if best_state is not None:
        model.load_state_dict(best_state)

    # MC Dropout inference for predictive mean/uncertainty.
    def mc_dropout_predict(model, x, n_samples=50):
        model.train()  # keep dropout active
        preds = []
        with torch.no_grad():
            for _ in range(n_samples):
                preds.append(model(x).unsqueeze(0))
        preds = torch.cat(preds, dim=0)
        return preds.mean(dim=0), preds.std(dim=0)

    predictions, pred_std = mc_dropout_predict(model, inputs_test, n_samples=50)
    test_loss = torch.mean(torch.square(predictions - outputs_test))
    print(f'Test Loss: {test_loss.item()}. Samples: {idx_test}')

    x = np.arange(0, len(idx_test))

    # Map normalized values back to degrees for reporting/plotting.
    outputs_test = outputs_test.cpu().numpy() * output_std + output_mean
    predictions = predictions.cpu().numpy() * output_std + output_mean
    pred_std = pred_std.cpu().numpy() * output_std
    print(f'Predictive STD (A, B, C): {pred_std.mean(axis=0)}')

    plt.figure(figsize=(10, 6))
    plt.plot(x, outputs_test[:, 0], color='b', linestyle='--', label='True A')
    plt.plot(x, predictions[:, 0], color='b', linestyle='-', label='Predicted A')
    plt.plot(x, outputs_test[:, 1], color='r', linestyle='--', label='True B')
    plt.plot(x, predictions[:, 1], color='r', linestyle='-', label='Predicted B')
    plt.plot(x, outputs_test[:, 2], color='g', linestyle='--', label='True C')
    plt.plot(x, predictions[:, 2], color='g', linestyle='-', label='Predicted C')
    plt.gca().xaxis.set_major_locator(plt.MaxNLocator(integer=True))
    plt.xlabel('Sample Index')
    # Label ticks with 1-based original row indices of the test samples.
    plt.xticks(ticks=range(len(idx_test)), labels=idx_test + 1)
    plt.ylabel('Angle (Degrees)')
    plt.title('Angle Prediction')
    plt.legend(loc='upper right')
    plt.savefig('angle_prediction.png')

    # MSE
    mse = np.mean((predictions - outputs_test) ** 2, axis=0)
    print(f'Mean Squared Error for A: {mse[0]:.6f}, B: {mse[1]:.6f}, C: {mse[2]:.6f}')

    # R 2 score
    ss_ress = np.sum((outputs_test - predictions) ** 2, axis=0)
    ss_tots = np.sum((outputs_test - np.mean(outputs_test, axis=0)) ** 2, axis=0)
    r2_scores = 1 - ss_ress / ss_tots
    print(f'R² Score for A: {r2_scores[0]:.6f}, B: {r2_scores[1]:.6f}, C: {r2_scores[2]:.6f}')

    # Error

    # Save the model
    model_save_path = './model_checkpoint.pth'
    model_config = {'layer_sizes': layer_sizes,
                    'dropout_rate': dropout_rate
                    }
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'model_config': model_config
    }
    torch.save(checkpoint, model_save_path)
209
+
210
def load_model(model_path):
    """Rebuild a NeuralNetwork from a checkpoint written by main()."""
    checkpoint = torch.load(model_path)
    cfg = checkpoint['model_config']
    # Reconstruct the architecture from the saved config, then load weights.
    model = NeuralNetwork(cfg['layer_sizes'], dropout_rate=cfg['dropout_rate'], activation=torch.nn.ReLU).to(DEVICE)
    model.load_state_dict(checkpoint['model_state_dict'])
    print(f"Model loaded from {model_path}")
    return model
217
+
218
+
219
if __name__ == "__main__":
    # Train, evaluate, and checkpoint the forward model.
    main()

    # model = load_model('./model_checkpoint.pth').to(torch.device('cpu'))
model.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
class NeuralNetwork(torch.nn.Module):
    """Fully-connected network: (Linear, activation) pairs plus a linear head.

    When `dropout_rate > 0`, dropout is applied after each activation while
    the module is in training mode; `predict` runs a deterministic
    eval-mode, no-grad forward pass.
    """

    def __init__(self, layer_sizes, dropout_rate=0.0, activation=torch.nn.ReLU):
        super().__init__()

        if dropout_rate > 0:
            self.dropout_layer = torch.nn.Dropout(dropout_rate)

        self.layer_sizes = layer_sizes
        self.layers = torch.nn.ModuleList()
        n_hidden = len(layer_sizes) - 2
        for idx in range(n_hidden):
            self.layers.append(torch.nn.Linear(layer_sizes[idx], layer_sizes[idx + 1]))
            self.layers.append(activation())
        # Output head: no activation after the final Linear.
        self.layers.append(torch.nn.Linear(layer_sizes[-2], layer_sizes[-1]))

        self.init_weights()

    def init_weights(self):
        """Xavier-normal weights and zero biases on every Linear layer."""
        for module in self.layers:
            if not isinstance(module, torch.nn.Linear):
                continue
            torch.nn.init.xavier_normal_(module.weight)
            module.bias.data.fill_(0.0)

    def forward(self, x):
        for module in self.layers:
            x = module(x)
            # Use the module's train/eval mode to control dropout; dropout
            # follows activations only, never a Linear layer's output.
            wants_dropout = (self.training
                             and hasattr(self, 'dropout_layer')
                             and not isinstance(module, torch.nn.Linear))
            if wants_dropout:
                x = self.dropout_layer(x)

        return x

    def predict(self, x):
        """Deterministic inference: switch to eval mode, disable grad."""
        self.eval()
        with torch.no_grad():
            return self.forward(x)
model_checkpoint.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79577a07df4872d6efa8b5c87ff739110bcdea92e4e1f5fc4e3c61a7c4a9d29a
3
+ size 9153
model_inverse.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ from Dataset import Dataset
5
+
6
+ # DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
7
+ DEVICE = torch.device('cpu')
8
+
9
+ # Set global plotting parameters
10
+ plt.rcParams.update({'font.size': 14,
11
+ 'figure.figsize': (10, 8),
12
+ 'lines.linewidth': 2,
13
+ 'lines.markersize': 6,
14
+ 'axes.grid': True,
15
+ 'axes.labelsize': 16,
16
+ 'legend.fontsize': 14,
17
+ 'xtick.labelsize': 14,
18
+ 'ytick.labelsize': 14,
19
+ 'figure.autolayout': True
20
+ })
21
+
22
def set_seed(seed=42):
    """Seed NumPy and all PyTorch RNGs (CPU and, if present, CUDA)."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # Cover every CUDA device, not just the current one.
        torch.cuda.manual_seed_all(seed)
27
+
28
class NeuralNetwork(torch.nn.Module):
    """Inference-side copy of model.NeuralNetwork (same layer layout, so
    checkpoints saved by main_injection.py load unchanged).

    Fix: `forward` previously applied dropout after *every* layer whenever
    `train=True` — including after the final output Linear — whereas
    model.py's version applies dropout only after activation layers. The
    two implementations now agree; the `train` flag and all signatures are
    unchanged, so existing callers (which pass train=False for inference)
    behave identically.
    """

    def __init__(self, layer_sizes, dropout_rate=0.0, activation=torch.nn.ReLU):
        """Build (Linear, activation) pairs plus a final Linear head.

        Args:
            layer_sizes: sizes including input and output, e.g. [8, 32, 32, 3].
            dropout_rate: post-activation dropout probability (0 disables it).
            activation: activation module class inserted after each hidden Linear.
        """
        super(NeuralNetwork, self).__init__()

        if dropout_rate > 0:
            self.dropout_layer = torch.nn.Dropout(dropout_rate)

        self.layer_sizes = layer_sizes
        self.layers = torch.nn.ModuleList()
        for i in range(len(layer_sizes) - 2):
            self.layers.append(torch.nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            self.layers.append(activation())
        self.layers.append(torch.nn.Linear(layer_sizes[-2], layer_sizes[-1]))

        self.init_weights()

    def init_weights(self):
        """Xavier-normal weights, zero biases on every Linear layer."""
        for layer in self.layers:
            if isinstance(layer, torch.nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)
                layer.bias.data.fill_(0.0)

    def forward(self, x, train=True):
        """Forward pass; `train=True` enables dropout after activations.

        Dropout is never applied after a Linear layer, and in particular
        never to the network's output.
        """
        for layer in self.layers:
            x = layer(x)
            if train and hasattr(self, 'dropout_layer') and not isinstance(layer, torch.nn.Linear):
                x = self.dropout_layer(x)

        return x

    def predict(self, x, train=False):
        """Deterministic inference: eval mode, no grad, dropout off by default."""
        self.eval()
        with torch.no_grad():
            return self.forward(x, train)
64
+
65
def train_neural_network(model, inputs, outputs, optimizer, epochs=1000, lr_scheduler=None):
    """Train `model` on one full batch per epoch with an MSE objective.

    Logs loss and learning rate every 100 epochs; steps the optional
    scheduler once per epoch.
    """
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        diff = model(inputs) - outputs
        loss = torch.mean(torch.square(diff))
        loss.backward()
        optimizer.step()
        if lr_scheduler:
            lr_scheduler.step()
        if epoch % 100 == 0:
            print(f'Epoch {epoch}, Loss: {loss.item()}, Learning Rate: {optimizer.param_groups[0]["lr"]}')
79
+
80
+
81
def load_model(model_path):
    """Load a NeuralNetwork checkpoint onto DEVICE, ready for inference."""
    checkpoint = torch.load(model_path, map_location=DEVICE)
    cfg = checkpoint['model_config']
    net = NeuralNetwork(cfg['layer_sizes'], dropout_rate=cfg['dropout_rate'])
    net.load_state_dict(checkpoint['model_state_dict'])
    print(f"Model loaded from {model_path}")

    # Inference configuration: move to device, freeze train-mode behavior.
    net.to(DEVICE)
    net.eval()
    return net
91
+
92
def inverse_design(gate_loc, matrix, fiber, fiber_vf, y_target, n_restarts=10, epochs=100, use_lbfgs=False, feasibility_samples=0):
    """Search for process parameters whose predicted angles match `y_target`.

    Optimizes an unconstrained latent vector z; sigmoid(z) maps it into box
    bounds for the four free variables (packing pressure, packing time,
    mold temperature, injection speed) while the categorical/material
    inputs stay fixed. Runs `n_restarts` random restarts and returns the
    best solution found.

    Args:
        gate_loc: gate-location code fed to the model unchanged.
        matrix: matrix name ('PA6'/'PP'); unknown names fall back to 0.0.
        fiber: fiber name ('CF'/'GF'); unknown names fall back to 0.0.
        fiber_vf: fiber fraction (already scaled, e.g. 0.4).
        y_target: np.array of three target angles (degrees).
        n_restarts: number of random re-initializations of z.
        epochs: L-BFGS max_iter (use_lbfgs=True) or Adam step count.
        use_lbfgs: choose L-BFGS with strong-Wolfe line search over Adam.
        feasibility_samples: accepted but unused.  # NOTE(review): dead parameter

    Returns:
        dict with 'loss' (weighted squared error in degrees), 'input'
        (best 4 free variables, np.ndarray), and 'output' (predicted angles).
    """
    model = load_model('./model_checkpoint.pth')

    data = Dataset()
    mat_type = data.material_map.get(matrix, 0.0)
    fiber_type = data.fiber_map.get(fiber, 0.0)

    # NOTE(review): y_target_norm is computed but never used below — the
    # loss is formed in denormalized (degree) space instead. Confirm and remove.
    y_target_norm = data.normalize_output(y_target)  # (A1, B1, C1, Stress)
    y_target_tensor = torch.tensor(y_target, dtype=torch.float32)
    input_mean = torch.tensor(data.input_mean)
    input_std = torch.tensor(data.input_std)
    output_mean = torch.tensor(data.output_mean)
    output_std = torch.tensor(data.output_std)

    # Equal weighting of the three angle errors.
    weights = torch.tensor([1.0, 1.0, 1.0], dtype=torch.float32)
    # Box bounds [low, high] per free variable.
    bounds = torch.tensor([[1., 100.], [1., 10.], [1., 100.], [1., 100.]], dtype=torch.float32)
    best = {"loss": float('inf'), "input": None, "output": None}

    for restart in range(n_restarts):
        # Fresh random latent per restart; gradients flow back through sigmoid.
        z = torch.randn(4, requires_grad=True)

        if use_lbfgs:
            # L-BFGS iterates internally, so one outer step suffices.
            optimizer = torch.optim.LBFGS([z], lr=0.1, max_iter=epochs, line_search_fn="strong_wolfe")
            steps = 1
        else:
            optimizer = torch.optim.Adam([z], lr=0.001)
            steps = epochs

        for step in range(steps):
            def closure():
                # Map latent into bounds, assemble the full model input,
                # and measure weighted squared error in degree space.
                var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
                optimizer.zero_grad()
                input_raw = torch.cat([torch.tensor([gate_loc, mat_type, fiber_type, fiber_vf]), var]).unsqueeze(0)
                input_norm = (input_raw - input_mean) / input_std
                output_pred = model(input_norm, train=False)
                output_pred = (output_pred * output_std) + output_mean
                loss = torch.sum(weights * (output_pred - y_target_tensor) ** 2)
                loss.backward()
                return loss

            if use_lbfgs:
                loss = optimizer.step(closure)
            else:
                loss = closure()
                optimizer.step()

            if (step + 1) % 200 == 0:
                print(f'Restart {restart + 1}, Step {step + 1}, Loss: {loss.item():.6f}, grad: {z.grad.norm().item():.6f}')

        # Evaluate this restart's solution and keep the best across restarts.
        with torch.no_grad():
            var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
            input_raw = torch.cat([torch.tensor([gate_loc, mat_type, fiber_type, fiber_vf]), var])
            input_norm = (input_raw - input_mean) / input_std
            output_pred = model.predict(input_norm)
            output_pred = data.denormalize_output(output_pred.numpy())
            final_loss = np.sum(weights.numpy() * (output_pred - y_target) ** 2).item()
            if final_loss < best["loss"]:
                best["loss"] = final_loss
                best["input"] = var.detach().cpu().numpy()
                best["output"] = output_pred

    return best
155
+
156
+
157
if __name__ == "__main__":
    # set_seed(5324)
    # train the inverse model over springback data
    # inverse_model()

    # perform inverse design
    # Demo run: find process parameters for a PA6-CF-40 part and time it.
    import time
    start_time = time.time()
    best = inverse_design(gate_loc=1, matrix='PA6', fiber='CF', fiber_vf=0.4, y_target=np.array([0.45, 9.03, 1.87]), n_restarts=5, epochs=100, use_lbfgs=True)
    end_time = time.time()
    time_elapsed = (end_time - start_time)
    print(f"Inverse design completed in {time_elapsed:.2f} seconds.")
    print("Best Input:", best["input"])
    print("Best Output:", best["output"])
+