Rui Wan committed
Commit · ad04193
Parent(s): 936ab69
update model
Files changed:
- Data/FDM_192_Simulation_Matrix_Shared.xlsx +0 -0
- Dataset.py +175 -0
- __pycache__/Dataset.cpython-312.pyc +0 -0
- __pycache__/model.cpython-312.pyc +0 -0
- app.py +11 -4
- model.py +40 -0
- model_fdm.py +338 -0
- model_fdm_ckpt.pth +3 -0
Data/FDM_192_Simulation_Matrix_Shared.xlsx
ADDED
Binary file (37.1 kB).
Dataset.py
ADDED
@@ -0,0 +1,175 @@
+import numpy as np
+import pandas as pd
+
+np.random.seed(42)
+epsilon = 1e-8
+
+
+class Dataset:
+    """
+    Base dataset class.
+
+    Subclasses must implement:
+    - _load_dataframe()
+    - _get_columns()
+    """
+
+    def __init__(self, inverse=False):
+        self.inverse = inverse
+        self.df = self._load_dataframe()
+        self.input_columns, self.output_columns = self._get_columns()
+        self._compute_stats()
+
+    def _load_dataframe(self):
+        raise NotImplementedError
+
+    def _get_columns(self):
+        raise NotImplementedError
+
+    def _compute_stats(self):
+        self.input_mean = self.df[self.input_columns].mean().to_numpy(dtype=np.float32)
+        self.input_std = self.df[self.input_columns].std().to_numpy(dtype=np.float32) + epsilon
+        self.output_mean = self.df[self.output_columns].mean().to_numpy(dtype=np.float32)
+        self.output_std = self.df[self.output_columns].std().to_numpy(dtype=np.float32) + epsilon
+
+    def get_input(self, normalize=False):
+        data = self.df[self.input_columns].to_numpy(dtype=np.float32)
+        if normalize:
+            data = self.normalize_input(data)
+        return data
+
+    def get_output(self, normalize=False):
+        data = self.df[self.output_columns].to_numpy(dtype=np.float32)
+        if normalize:
+            data = self.normalize_output(data)
+        return data
+
+    def __str__(self):
+        return str(self.df.head())
+
+    def normalize_input(self, input_data):
+        return (input_data - self.input_mean) / self.input_std
+
+    def normalize_output(self, output_data):
+        return (output_data - self.output_mean) / self.output_std
+
+    def denormalize_input(self, normalized_input):
+        return normalized_input * self.input_std + self.input_mean
+
+    def denormalize_output(self, normalized_output):
+        return normalized_output * self.output_std + self.output_mean
+
+
+class DataThermoforming(Dataset):
+    """
+    Dataset for the thermoforming process.
+    Materials: "CFPEEK", "CFPA6", or "CFRP" (which includes both materials).
+    """
+    def __init__(self, material="CFRP", inverse=False, filename="./Data/DataForThermoforming.xlsx"):
+        self.material = material
+        self.filename = filename
+        self.materials_map = {"CF/PEEK": 0.0, "CF/PA6": 1.0}
+        super().__init__(inverse=inverse)
+
+    def _load_dataframe(self):
+        df = pd.read_excel(self.filename, sheet_name=self.material)
+        df["Materials"] = df["Materials"].map(self.materials_map).astype(np.float32)
+        if self.material in ("CFPEEK", "CFRP"):
+            df = df.drop([7, 78, 101, 129], axis=0)
+        return df
+
+    def _get_columns(self):
+        if self.inverse:
+            input_columns = [
+                "Materials",
+                "Ply_Number",
+                "Fiber_Volume_Fractions",
+                "A1(abs)",
+                "B1(abs)",
+                "C1(abs)",
+                "Stress(Max) MPa",
+            ]
+            output_columns = [
+                "Initial_Temp (degree celsius)",
+                "Punch_Velocity (mm/s)",
+                "Cooling_Time (s)",
+            ]
+        else:
+            input_columns = [
+                "Ply_Number",
+                "Fiber_Volume_Fractions",
+                "Initial_Temp (degree celsius)",
+                "Punch_Velocity (mm/s)",
+                "Cooling_Time (s)",
+            ]
+            output_columns = [
+                "A1(abs)",
+                "B1(abs)",
+                "C1(abs)",
+                "Stress(Max) MPa",
+            ]
+        return input_columns, output_columns
+
+
+class DataAdditiveManufacturing(Dataset):
+    def __init__(self, inverse=False, filename="./Data/FDM_192_Simulation_Matrix_Shared.xlsx"):
+        self.filename = filename
+        self.material_base_map = {"HDPE": 0.0, "PP": 1.0}
+        self.fiber_type_map = {"CF": 0.0, "GF": 1.0}
+        self.build_direction_map = {"Vertical": 1.0, "Horizontal": 0.0}
+        super().__init__(inverse=inverse)
+
+    def _load_dataframe(self):
+        df = pd.read_excel(self.filename, sheet_name="Batch_1")
+        df["Material_Base"] = df["Material_Base"].map(self.material_base_map).astype(np.float32)
+        df["Fiber_Type"] = df["Fiber_Type"].map(self.fiber_type_map).astype(np.float32)
+        df["Build_Direction"] = df["Build_Direction"].map(self.build_direction_map).astype(np.float32)
+        return df
+
+    def _get_columns(self):
+        if self.inverse:
+            input_columns = [
+                "Phi1_Change",
+                "Phi2_Change",
+                "Phi3_Change",
+                "Phi7_Change",
+                "Phi8_Change",
+                "Phi9_Change",
+                "Global_Max_Stress",
+            ]
+            output_columns = [
+                "Material_Base",
+                "Fiber_Type",
+                "Vol_Fraction",
+                # "Build_Direction",
+                "Extruder_Temp",
+                "Velocity",
+                "Bed_Temp",
+            ]
+        else:
+            input_columns = [
+                "Material_Base",
+                "Fiber_Type",
+                "Vol_Fraction",
+                "Build_Direction",
+                "Extruder_Temp",
+                "Velocity",
+                "Bed_Temp",
+            ]
+            output_columns = [
+                # "Phi1_Change",
+                # "Phi2_Change",
+                # "Phi3_Change",
+                "Phi7_Change",
+                "Phi8_Change",
+                "Phi9_Change",
+                "Global_Max_Stress",
+            ]
+        return input_columns, output_columns
+
+
+if __name__ == "__main__":
+    dataset = DataAdditiveManufacturing()
+
+    input_data = dataset.get_input(normalize=False)
+    output_data = dataset.get_output(normalize=False)
+    print("Input shape:", input_data.shape)
+    print("Output shape:", output_data.shape)
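
For reference, a minimal sketch of the normalization round-trip these classes expose (assumes the Excel file above is present; the statistics are float32, so the round-trip is only approximate):

    import numpy as np
    from Dataset import DataAdditiveManufacturing

    data = DataAdditiveManufacturing()        # reads ./Data/FDM_192_Simulation_Matrix_Shared.xlsx, sheet "Batch_1"
    x_norm = data.get_input(normalize=True)   # z-scored with the dataset's own column-wise mean/std
    x_raw = data.denormalize_input(x_norm)    # back to physical units
    assert np.allclose(x_raw, data.get_input(), atol=1e-3)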
__pycache__/Dataset.cpython-312.pyc
ADDED
Binary file (8.59 kB).
__pycache__/model.cpython-312.pyc
ADDED
Binary file (3.11 kB).
app.py
CHANGED
@@ -3,12 +3,14 @@
 import streamlit as st
 import pandas as pd
 import altair as alt
-import plotly.express as px
+# import plotly.express as px
 from PIL import Image  # Used to open and handle image files
 import matplotlib
 import matplotlib.pyplot as plt
 import numpy as np
 
+from model_fdm import inverse_design
+
 
 #######################
 # Page configuration
@@ -258,12 +260,17 @@ if st.session_state.AM_input_changed == True:
     st.session_state.AM_input_changed = False
 
 st.button("AM process design", width=400, on_click=AM_design_click)
+
 if st.session_state.AM_design_button_clicked == True:
     st.write("Process parameters")
     data1 = pd.DataFrame({
         'Matrix material': ['PP'],
         'Fiber material': ['Glass'],
         'Build direction': ['Vertical']})
+
+    y_target = np.array([angleA, angleB, angleC, max_stress])
+    best = inverse_design(material_base="PP", fiber="GF", fiber_vf=45.0,
+                          y_target=y_target, n_restarts=20, epochs=100, use_lbfgs=True)
     #st.table(data1)
     # Define styles
     styles = [
@@ -275,9 +282,9 @@ if st.session_state.AM_design_button_clicked == True:
     st.dataframe(data1, hide_index=True, width=500)
 
     data2 = pd.DataFrame({
-        'Nozzel velocity (cm/s)': [
-        'Extruder temperature (C)': [
-        'Bed temperature (C)': [
+        'Extruder temperature (C)': [best['input'][0]],
+        'Nozzle velocity (cm/s)': [best['input'][1]],
+        'Bed temperature (C)': [best['input'][2]]})
     st.dataframe(data2, hide_index=True, width=500)
 
 
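
The page now hands the target values it collects (angleA, angleB, angleC, max_stress) to model_fdm.inverse_design. A standalone sketch of the same call, with placeholder targets rather than values from the app:

    import numpy as np
    from model_fdm import inverse_design

    # Target order: [Phi7_Change, Phi8_Change, Phi9_Change, Global_Max_Stress]
    y_target = np.array([-0.2, 0.1, 0.0, 180.0])
    best = inverse_design(material_base="PP", fiber="GF", fiber_vf=45.0,
                          y_target=y_target, n_restarts=20, epochs=100, use_lbfgs=True)
    extruder_temp, velocity, bed_temp = best["input"]  # ordering follows the bounds in model_fdm.py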
model.py
ADDED
@@ -0,0 +1,40 @@
+import torch
+
+
+class NeuralNetwork(torch.nn.Module):
+    def __init__(self, layer_sizes, dropout_rate=0.0, activation=torch.nn.ReLU):
+        super(NeuralNetwork, self).__init__()
+
+        if dropout_rate > 0:
+            self.dropout_layer = torch.nn.Dropout(dropout_rate)
+
+        self.layer_sizes = layer_sizes
+        self.layers = torch.nn.ModuleList()
+        for i in range(len(layer_sizes) - 2):
+            self.layers.append(torch.nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
+            self.layers.append(activation())
+        self.layers.append(torch.nn.Linear(layer_sizes[-2], layer_sizes[-1]))
+
+        # self.sequential = torch.nn.Sequential(*self.layers)
+
+        self.init_weights()
+
+    def init_weights(self):
+        for layer in self.layers:
+            if isinstance(layer, torch.nn.Linear):
+                torch.nn.init.xavier_normal_(layer.weight)
+                layer.bias.data.fill_(0.0)
+
+    def forward(self, x, train=True):
+        for layer in self.layers:
+            x = layer(x)
+            # Apply dropout only after activations, never on the output layer
+            if train and hasattr(self, 'dropout_layer') and not isinstance(layer, torch.nn.Linear):
+                x = self.dropout_layer(x)
+
+        return x
+
+    def predict(self, x, train=False):
+        self.eval()
+        with torch.no_grad():
+            return self.forward(x, train)
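
A quick sanity check of the class above (a sketch; the layer sizes match the ones model_fdm.py trains with):

    import torch
    from model import NeuralNetwork

    model = NeuralNetwork([7, 64, 32, 4], dropout_rate=0.1)
    x = torch.randn(16, 7)        # a batch of 16 normalized inputs
    y_train = model(x)            # train=True by default, so dropout is active
    y_eval = model.predict(x)     # eval mode, no grad, dropout off
    print(y_eval.shape)           # torch.Size([16, 4])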
model_fdm.py
ADDED
@@ -0,0 +1,338 @@
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+from Dataset import DataAdditiveManufacturing, DataThermoforming
+from model import NeuralNetwork
+
+DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+# Set global plotting parameters
+plt.rcParams.update({'font.size': 14,
+                     'figure.figsize': (10, 8),
+                     'lines.linewidth': 2,
+                     'lines.markersize': 6,
+                     'axes.grid': True,
+                     'axes.labelsize': 16,
+                     'legend.fontsize': 10,
+                     'xtick.labelsize': 14,
+                     'ytick.labelsize': 14,
+                     'figure.autolayout': True
+                     })
+
+def set_seed(seed=42):
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+
+def train_neural_network(model, inputs, outputs, optimizer, epochs=1000, lr_scheduler=None):
+    model.train()
+    for epoch in range(epochs):
+        optimizer.zero_grad()
+        predictions = model(inputs)
+        loss = torch.mean(torch.square(predictions - outputs))
+        loss.backward()
+        optimizer.step()
+
+        if lr_scheduler:
+            lr_scheduler.step()
+
+        if epoch % 100 == 0:
+            print(f'Epoch {epoch}, Loss: {loss.item()}, Learning Rate: {optimizer.param_groups[0]["lr"]}')
+
+def kfold_indices(n_samples, k=5, seed=42, shuffle=True):
+    rng = np.random.default_rng(seed)
+    indices = np.arange(n_samples)
+    if shuffle:
+        rng.shuffle(indices)
+    fold_sizes = np.full(k, n_samples // k, dtype=int)
+    fold_sizes[: n_samples % k] += 1
+    current = 0
+    folds = []
+    for fold_size in fold_sizes:
+        start, stop = current, current + fold_size
+        folds.append(indices[start:stop])
+        current = stop
+    return folds
+
+def ridge_fit_predict(x_train, y_train, x_test, alpha=1.0):
+    # Closed-form ridge regression: W = (X^T X + alpha I)^-1 X^T Y
+    x_aug = np.concatenate([x_train, np.ones((x_train.shape[0], 1))], axis=1)
+    xtx = x_aug.T @ x_aug
+    reg = alpha * np.eye(xtx.shape[0], dtype=x_train.dtype)
+    reg[-1, -1] = 0.0  # don't regularize the bias
+    w = np.linalg.solve(xtx + reg, x_aug.T @ y_train)
+    x_test_aug = np.concatenate([x_test, np.ones((x_test.shape[0], 1))], axis=1)
+    return x_test_aug @ w
+
+def kfold_ridge_baseline(inputs, outputs, k=5, alpha=1.0, seed=42):
+    folds = kfold_indices(len(inputs), k=k, seed=seed, shuffle=True)
+    mse_folds = []
+    r2_folds = []
+    for i in range(k):
+        test_idx = folds[i]
+        train_idx = np.concatenate([f for j, f in enumerate(folds) if j != i])
+
+        x_train = inputs[train_idx]
+        y_train = outputs[train_idx]
+        x_test = inputs[test_idx]
+        y_test = outputs[test_idx]
+
+        # Train-only normalization
+        x_mean = x_train.mean(axis=0)
+        x_std = x_train.std(axis=0) + 1e-8
+        y_mean = y_train.mean(axis=0)
+        y_std = y_train.std(axis=0) + 1e-8
+
+        x_train_n = (x_train - x_mean) / x_std
+        x_test_n = (x_test - x_mean) / x_std
+        y_train_n = (y_train - y_mean) / y_std
+
+        y_pred_n = ridge_fit_predict(x_train_n, y_train_n, x_test_n, alpha=alpha)
+        y_pred = y_pred_n * y_std + y_mean
+
+        mse = np.mean((y_pred - y_test) ** 2, axis=0)
+        ss_res = np.sum((y_test - y_pred) ** 2, axis=0)
+        ss_tot = np.sum((y_test - np.mean(y_test, axis=0)) ** 2, axis=0)
+        r2 = 1 - ss_res / ss_tot
+        mse_folds.append(mse)
+        r2_folds.append(r2)
+
+    mse_folds = np.stack(mse_folds, axis=0)
+    r2_folds = np.stack(r2_folds, axis=0)
+    print("Ridge k-fold CV (alpha=%.3g, k=%d)" % (alpha, k))
+    print("MSE mean:", np.mean(mse_folds, axis=0))
+    print("MSE std:", np.std(mse_folds, axis=0))
+    print("R2 mean:", np.mean(r2_folds, axis=0))
+    print("R2 std:", np.std(r2_folds, axis=0))
+
+def main():
+    dataset = DataAdditiveManufacturing()
+    inputs = dataset.get_input(normalize=False)
+    outputs = dataset.get_output(normalize=False)
+
+    idx_train = np.random.choice(len(inputs), size=int(0.95 * len(inputs)), replace=False)
+    idx_test = np.setdiff1d(np.arange(len(inputs)), idx_train)
+
+    # Normalize using train-only statistics to avoid test leakage
+    x_train = inputs[idx_train]
+    y_train = outputs[idx_train]
+    x_test = inputs[idx_test]
+    y_test = outputs[idx_test]
+
+    x_mean = x_train.mean(axis=0)
+    x_std = x_train.std(axis=0) + 1e-8
+    y_mean = y_train.mean(axis=0)
+    y_std = y_train.std(axis=0) + 1e-8
+
+    x_train_n = (x_train - x_mean) / x_std
+    x_test_n = (x_test - x_mean) / x_std
+    y_train_n = (y_train - y_mean) / y_std
+    y_test_n = (y_test - y_mean) / y_std
+
+    inputs_train = torch.tensor(x_train_n, dtype=torch.float32).to(DEVICE)
+    outputs_train = torch.tensor(y_train_n, dtype=torch.float32).to(DEVICE)
+
+    inputs_test = torch.tensor(x_test_n, dtype=torch.float32).to(DEVICE)
+    outputs_test = torch.tensor(y_test_n, dtype=torch.float32).to(DEVICE)
+
+    layer_sizes = [inputs.shape[1], 64, 32, outputs.shape[1]]
+    dropout_rate = 0.1
+    model = NeuralNetwork(layer_sizes, dropout_rate=dropout_rate, activation=torch.nn.ReLU).to(DEVICE)
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)
+    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2000, gamma=0.9)
+
+    # Create a proper dataset that keeps input-output pairs together
+    train_dataset = torch.utils.data.TensorDataset(inputs_train, outputs_train)
+    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
+
+    # Train the model
+    epochs = 5000
+    best_test_loss = float("inf")
+    patience = 400
+    patience_left = patience
+    for epoch in range(epochs):
+        model.train()
+        for inputs_batch, outputs_batch in train_loader:
+            inputs_batch = inputs_batch.to(DEVICE)
+            outputs_batch = outputs_batch.to(DEVICE)
+            optimizer.zero_grad()
+            predictions = model(inputs_batch)
+            loss = torch.mean(torch.square(predictions - outputs_batch))
+            loss.backward()
+            optimizer.step()
+
+        if lr_scheduler:
+            lr_scheduler.step()
+
+        if epoch % 200 == 0:
+            model.eval()
+            with torch.no_grad():
+                train_pred = model(inputs_train, train=False)
+                train_loss = torch.mean(torch.square(train_pred - outputs_train))
+                test_pred = model(inputs_test, train=False)
+                test_loss = torch.mean(torch.square(test_pred - outputs_test))
+            print(f'Epoch {epoch}, Train Loss: {train_loss.item():.6f}, Test Loss: {test_loss.item():.6f}')
+            if test_loss.item() < best_test_loss - 1e-6:
+                best_test_loss = test_loss.item()
+                patience_left = patience
+            else:
+                patience_left -= 1
+            if patience_left <= 0:
+                print(f"Early stopping at epoch {epoch}")
+                break
+
+    predictions = model.predict(inputs_test)
+    test_loss = torch.mean(torch.square(predictions - outputs_test))
+    print(f'Test Loss: {test_loss.item()}. Samples: {idx_test}')
+
+    x = np.arange(0, len(idx_test))
+
+    outputs_test = outputs_test.cpu().numpy() * y_std + y_mean
+    predictions = predictions.cpu().numpy() * y_std + y_mean
+    # for sample in outputs_test:
+    #     print(f'Test samples: {sample}')
+    plt.figure(figsize=(10, 6))
+    plt.plot(x, outputs_test[:, 0], color='b', linestyle='--', label='True Phi7_Change')
+    plt.plot(x, predictions[:, 0], color='b', linestyle='-', label='Predicted Phi7_Change')
+    plt.plot(x, outputs_test[:, 1], color='r', linestyle='--', label='True Phi8_Change')
+    plt.plot(x, predictions[:, 1], color='r', linestyle='-', label='Predicted Phi8_Change')
+    plt.plot(x, outputs_test[:, 2], color='g', linestyle='--', label='True Phi9_Change')
+    plt.plot(x, predictions[:, 2], color='g', linestyle='-', label='Predicted Phi9_Change')
+
+    plt.gca().xaxis.set_major_locator(plt.MaxNLocator(integer=True))
+    plt.xlabel('Sample Index')
+    plt.xticks(ticks=range(len(idx_test)), labels=idx_test + 1)
+    plt.ylabel('Angle Change (Degrees)')
+    plt.title('Angle Change Prediction')
+    plt.legend(loc='lower left')
+    plt.savefig('fdm_simulation.png')
+
+    plt.figure(figsize=(10, 6))
+    plt.plot(x, outputs_test[:, -1], color='m', linestyle='--', label='True Global_Max_Stress')
+    plt.plot(x, predictions[:, -1], color='m', linestyle='-', label='Predicted Global_Max_Stress')
+    plt.xlabel('Sample Index')
+    plt.xticks(ticks=range(len(idx_test)), labels=idx_test + 1)
+    plt.ylabel('Stress (MPa)')
+    plt.title('Global Max Stress Prediction')
+    plt.legend(loc='lower left')
+    plt.savefig('fdm_stress_prediction.png')
+
+    # MSE
+    mse = np.mean((predictions - outputs_test) ** 2, axis=0)
+    print(f'Mean Squared Error for Phi7_Change: {mse[0]:.6f}, Phi8_Change: {mse[1]:.6f}, Phi9_Change: {mse[2]:.6f}, Global_Max_Stress: {mse[3]:.6f}')
+
+    # R² score
+    ss_ress = np.sum((outputs_test - predictions) ** 2, axis=0)
+    ss_tots = np.sum((outputs_test - np.mean(outputs_test, axis=0)) ** 2, axis=0)
+    r2_scores = 1 - ss_ress / ss_tots
+    print(f'R² Score for Phi7_Change: {r2_scores[0]:.6f}, Phi8_Change: {r2_scores[1]:.6f}, Phi9_Change: {r2_scores[2]:.6f}, Global_Max_Stress: {r2_scores[3]:.6f}')
+
+    # Save the model
+    model_save_path = './model_fdm_ckpt.pth'
+    model_config = {'layer_sizes': layer_sizes,
+                    'dropout_rate': dropout_rate
+                    }
+    checkpoint = {
+        'model_state_dict': model.state_dict(),
+        'model_config': model_config
+    }
+    torch.save(checkpoint, model_save_path)
+
+def load_model(model_path):
+    checkpoint = torch.load(model_path)
+    model_config = checkpoint['model_config']
+    model = NeuralNetwork(model_config['layer_sizes'], dropout_rate=model_config['dropout_rate'], activation=torch.nn.ReLU).to(DEVICE)
+    model.load_state_dict(checkpoint['model_state_dict'])
+    model.eval()  # inference mode, so dropout stays inactive during inverse design
+    print(f"Model loaded from {model_path}")
+    return model
+
+
+def inverse_design(material_base, fiber, fiber_vf, y_target, n_restarts=5, epochs=100, use_lbfgs=True, model=None, data=None):
+    if model is None:
+        model = load_model('./model_fdm_ckpt.pth').to(torch.device('cpu'))
+
+    if data is None:
+        data = DataAdditiveManufacturing()
+    mat_type = data.material_base_map.get(material_base, 0.0)
+    fiber_type = data.fiber_type_map.get(fiber, 0.0)
+    build_direction = data.build_direction_map.get("Vertical", 0.0)
+    y_target_norm = torch.tensor(data.normalize_output(y_target), dtype=torch.float32)
+    y_target_tensor = torch.tensor(y_target, dtype=torch.float32)
+    input_mean = torch.tensor(data.input_mean)
+    input_std = torch.tensor(data.input_std)
+    output_mean = torch.tensor(data.output_mean)
+    output_std = torch.tensor(data.output_std)
+
+    weights = torch.tensor([1.0, 1.0, 1.0, 0.001], dtype=torch.float32)
+    bounds = torch.tensor([[100., 300.], [50., 300.], [10., 200.]], dtype=torch.float32)  # Extruder_Temp, Velocity, Bed_Temp
+    best = {"loss": float('inf'), "input": None, "output": None}
+
+    for restart in range(n_restarts):
+        z = torch.randn(3, requires_grad=True)
+
+        if use_lbfgs:
+            optimizer = torch.optim.LBFGS([z], lr=0.1, max_iter=epochs, line_search_fn="strong_wolfe")
+            steps = 1
+        else:
+            optimizer = torch.optim.Adam([z], lr=0.001)
+            steps = epochs
+
+        for step in range(steps):
+            def closure():
+                # Sigmoid reparameterization keeps each variable inside its bounds
+                var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
+                optimizer.zero_grad()
+                input_raw = torch.cat([torch.tensor([mat_type, fiber_type, fiber_vf, build_direction]), var]).unsqueeze(0)
+                input_norm = (input_raw - input_mean) / input_std
+                output_pred = model(input_norm)
+                output_pred = (output_pred * output_std) + output_mean
+                loss = torch.sum(weights * (output_pred - y_target_tensor) ** 2)
+                loss.backward()
+                return loss
+
+            if use_lbfgs:
+                loss = optimizer.step(closure)
+            else:
+                loss = closure()
+                optimizer.step()
+
+            if (step + 1) % 200 == 0:
+                print(f'Restart {restart + 1}, Step {step + 1}, Loss: {loss.item():.6f}, grad: {z.grad.norm().item():.6f}')
+
+        with torch.no_grad():
+            var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
+            input_raw = torch.cat([torch.tensor([mat_type, fiber_type, fiber_vf, build_direction]), var])
+            input_norm = (input_raw - input_mean) / input_std
+            output_pred = model(input_norm)
+            output_pred = data.denormalize_output(output_pred.numpy())
+            final_loss = np.sum(weights.numpy() * (output_pred - y_target) ** 2).item()
+            if final_loss < best["loss"]:
+                best["loss"] = final_loss
+                best["input"] = var.detach().cpu().numpy()
+                best["output"] = output_pred
+
+    return best
+
+
+if __name__ == "__main__":
+    set_seed(51)
+    # dataset = DataAdditiveManufacturing()
+    # inputs = dataset.get_input(normalize=False)
+    # outputs = dataset.get_output(normalize=False)
+    # kfold_ridge_baseline(inputs, outputs, k=5, alpha=1.0, seed=51)
+    # main()
+
+    best = inverse_design(material_base="HDPE", fiber="CF", fiber_vf=45.0,
+                          y_target=np.array([-0.22, 0.11, -0.004, 185.2]), n_restarts=20, epochs=100, use_lbfgs=True)
+    print("Best design found:")
+    print(f"Extruder_Temp: {best['input'][0]:.2f}, Velocity: {best['input'][1]:.2f}, Bed_Temp: {best['input'][2]:.2f}")
+    print(f"Predicted Outputs: Phi7_Change: {best['output'][0]:.4f}, Phi8_Change: {best['output'][1]:.4f}, Phi9_Change: {best['output'][2]:.4f}")
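
One design choice worth making explicit: inverse_design optimizes an unconstrained latent z and maps it through a sigmoid onto the stated bounds, so the process variables stay feasible without clipping or explicit constraint handling. A standalone sketch of just that reparameterization:

    import torch

    bounds = torch.tensor([[100., 300.], [50., 300.], [10., 200.]])  # [lo, hi] per variable
    z = torch.randn(3, requires_grad=True)                           # unconstrained latent
    var = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) * torch.sigmoid(z)
    assert torch.all(var > bounds[:, 0]) and torch.all(var < bounds[:, 1])
    var.sum().backward()   # gradients flow through the sigmoid back to z
    print(z.grad)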
model_fdm_ckpt.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c3e255f49c77c11925a373a288f10a023fcbfeb7835c6653933d815f8bb88c4
+size 14249