|
|
import pickle |
|
|
import numpy as np |
|
|
import pandas as pd |
|
|
import os |
|
|
import re |
|
|
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score |
|
|
|
|
|
|
|
|
# --- Run configuration ----------------------------------------------------
# Root directory under which every evaluation run gets its own subfolder.
base_results_dir = "Evaluation_results"


# Checkpoint identifiers (k-nearest-neighbour count and training epoch).
# Used only to build the output directory name below — they do not select
# which rollout file is loaded; pkl_path does that independently.
k_value = 60


epoch_num = 500


# Absolute path to the rollout pickle produced by the training pipeline.
# NOTE(review): machine-specific hard-coded path, and it embeds k=60 /
# epoch 500 redundantly with k_value/epoch_num above — keep them in sync,
# or consider taking this as a CLI argument.
pkl_path = '/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/output_shaft_extra/regDGCNN_seg/shaft_low_extra2_/EXPERIMENT_multi_feat_exponent_k60_500_shaft_low_extra2/Thu-Oct--9-10-04-58-2025/rollout/rollout_epoch_500.pkl'


# Per-run output directory: <base_results_dir>/k60_epoch500_... ;
# created up-front so all later saves can assume it exists.
run_name = f"k{k_value}_epoch{epoch_num}_exponent_multi_metrics_2"


run_dir = os.path.join(base_results_dir, run_name)


os.makedirs(run_dir, exist_ok=True)


print(f"Results for this run will be saved in: {run_dir}")
|
|
|
|
|
|
|
|
# Load the rollout pickle: a sequence of per-trajectory dicts holding
# ground-truth/predicted fatigue life tensors plus mesh geometry.
# NOTE(review): pickle.load executes arbitrary code from the file — only
# point pkl_path at trusted, self-generated rollouts.
try:
    with open(pkl_path, 'rb') as f:
        traj_ops = pickle.load(f)
except FileNotFoundError:
    print(f"Error: The file was not found at {pkl_path}")
    # Exit with a non-zero status so shells/CI see the failure.
    # (The previous bare exit() is a site-module convenience helper and
    # returned status 0, silently masking the error from callers.)
    raise SystemExit(1)
|
|
|
|
|
total_trajectories = len(traj_ops)


print(f"Loaded {total_trajectories} trajectory operations from the file.")


# One aggregate-metrics dict per test sample; written to a summary CSV later.
all_metrics = []


# Iterate the trajectories directly instead of indexing traj_ops[i]
# repeatedly; `i` is kept only for 1-based sample numbering in filenames.
for i, traj in enumerate(traj_ops):

    sample_id = i + 1

    # Fatigue life is stored in log10 space as torch tensors (possibly on
    # GPU) — move to host memory and flatten to 1-D numpy arrays.
    gt_life_log = traj['gt_fatigue_life'].cpu().numpy().flatten()
    pred_life_log = traj['pred_fatigue_life'].cpu().numpy().flatten()

    cells = traj['cells'].cpu().numpy()
    mesh_pos = traj['mesh_pos'].cpu().numpy()

    # Persist the mesh geometry so results can be visualised later without
    # re-loading the (large) rollout pickle.
    geometry_path = os.path.join(run_dir, f"geometry_sample_{sample_id}.npz")
    np.savez_compressed(geometry_path, mesh_pos=mesh_pos, cells=cells)
    print(f" -> Saved geometry to {geometry_path}")

    # Undo the log10 transform to recover fatigue life in physical units.
    gt_life_real = np.power(10.0, gt_life_log)
    pred_life_real = np.power(10.0, pred_life_log)

    abs_error = np.abs(gt_life_real - pred_life_real)
    # Epsilon guards against division by zero for near-zero ground truth.
    percentage_error = (abs_error / (gt_life_real + 1e-9)) * 100

    # Per-node table: log-space and real-space values plus error columns.
    df = pd.DataFrame({
        'node': np.arange(len(gt_life_log)),
        'gt_fatigue_life_log10': gt_life_log,
        'pred_fatigue_life_log10': pred_life_log,
        'gt_fatigue_life_real': gt_life_real,
        'pred_fatigue_life_real': pred_life_real,
        'absolute_error': abs_error,
        'percentage_error': percentage_error
    })

    # Aggregate regression metrics, computed in real (de-logged) space.
    mse = mean_squared_error(gt_life_real, pred_life_real)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(gt_life_real, pred_life_real)
    mape = np.mean(percentage_error)
    r2 = r2_score(gt_life_real, pred_life_real)

    all_metrics.append({
        "test_sample": sample_id,
        "MSE": mse,
        "RMSE": rmse,
        "MAE": mae,
        "MAPE": mape,
        "R2": r2,
    })

    # Per-sample node-level results CSV.
    csv_path = os.path.join(run_dir, f"prediction_sample{sample_id}.csv")

    df.to_csv(csv_path, index=False)

    print(f"Saved fatigue life evaluation to {csv_path}")
|
|
|
|
|
# Collect the per-sample aggregate metrics into one summary table and
# persist it alongside the per-node CSVs.
summary_df = pd.DataFrame(all_metrics)
summary_path = os.path.join(run_dir, "evaluation_summary.csv")
summary_df.to_csv(summary_path, index=False)
print(f"Saved summary metrics to {summary_path}")
|
|
|