# Uploaded by SachinSaud via huggingface_hub (commit 6ac44e1, verified).
import pickle
import numpy as np
import pandas as pd
import os
import re
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# --- 1. Define the base directory for all evaluation results ---
base_results_dir = "Evaluation_results"

# --- 2. Manually set the k-value and epoch number for this run ---
k_value = 60
epoch_num = 500
# --- You can change the two values above for each run ---

# Load the trajectory operations dictionary from the .pkl file.
# NOTE(review): machine-specific absolute path — consider promoting to a CLI arg.
pkl_path = '/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/output_shaft_extra/regDGCNN_seg/shaft_low_extra2_/EXPERIMENT_multi_feat_exponent_k60_500_shaft_low_extra2/Thu-Oct--9-10-04-58-2025/rollout/rollout_epoch_500.pkl'

# Create a unique, descriptive folder name based on the manual inputs.
run_name = f"k{k_value}_epoch{epoch_num}_exponent_multi_metrics_2"
run_dir = os.path.join(base_results_dir, run_name)
os.makedirs(run_dir, exist_ok=True)
print(f"Results for this run will be saved in: {run_dir}")

try:
    with open(pkl_path, 'rb') as f:
        # SECURITY NOTE: pickle.load can execute arbitrary code — only load
        # rollout files produced by your own training pipeline.
        traj_ops = pickle.load(f)
except FileNotFoundError:
    print(f"Error: The file was not found at {pkl_path}")
    # Fix: bare exit() returned status 0 on failure; report a nonzero code
    # so callers/shell scripts can detect the missing rollout file.
    raise SystemExit(1)

total_trajectories = len(traj_ops)
print(f"Loaded {total_trajectories} trajectory operations from the file.")
# Per-trajectory evaluation: for each test sample, save its mesh geometry,
# convert log10 fatigue-life values back to real cycle counts, compute
# node-wise errors plus summary regression metrics, and write one CSV each.
all_metrics = []
for i in range(total_trajectories):
    # Extract per-node tensors (fatigue life is stored as log10 values).
    # NOTE(review): entries are assumed to be torch tensors — .cpu().numpy().
    gt_life_log = traj_ops[i]['gt_fatigue_life'].cpu().numpy().flatten()
    pred_life_log = traj_ops[i]['pred_fatigue_life'].cpu().numpy().flatten()
    cells = traj_ops[i]['cells'].cpu().numpy()
    mesh_pos = traj_ops[i]['mesh_pos'].cpu().numpy()

    # Save the geometry to a separate, efficient .npz file.
    geometry_path = os.path.join(run_dir, f"geometry_sample_{i+1}.npz")
    np.savez_compressed(geometry_path, mesh_pos=mesh_pos, cells=cells)
    print(f" -> Saved geometry to {geometry_path}")

    # Convert back from log10 to real (non-log) fatigue-life values.
    gt_life_real = np.power(10.0, gt_life_log)
    pred_life_real = np.power(10.0, pred_life_log)

    # --- CALCULATE ERRORS (epsilon guards against division by zero) ---
    abs_error = np.abs(gt_life_real - pred_life_real)
    percentage_error = (abs_error / (gt_life_real + 1e-9)) * 100

    # Create a DataFrame for easy analysis and saving.
    df = pd.DataFrame({
        'node': np.arange(len(gt_life_log)),
        # Log values
        'gt_fatigue_life_log10': gt_life_log,
        'pred_fatigue_life_log10': pred_life_log,
        # Real (non-log) values
        'gt_fatigue_life_real': gt_life_real,
        'pred_fatigue_life_real': pred_life_real,
        # Errors in real values
        'absolute_error': abs_error,
        'percentage_error': percentage_error
    })

    # Metrics on real (non-log) fatigue life.
    mse = mean_squared_error(gt_life_real, pred_life_real)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(gt_life_real, pred_life_real)
    mape = np.mean(percentage_error)  # mean of node-wise percentage errors
    r2 = r2_score(gt_life_real, pred_life_real)
    all_metrics.append({
        "test_sample": i + 1,
        "MSE": mse,
        "RMSE": rmse,
        "MAE": mae,
        "MAPE": mape,
        "R2": r2
    })

    # Save each trajectory's node-wise results to its own CSV.
    csv_filename = f"prediction_sample{i+1}.csv"
    csv_path = os.path.join(run_dir, csv_filename)
    df.to_csv(csv_path, index=False)
    print(f"Saved fatigue life evaluation to {csv_path}")
# Persist one summary row per test sample (MSE/RMSE/MAE/MAPE/R2) alongside
# the per-trajectory CSVs in the run directory.
metrics_csv_path = os.path.join(run_dir, "evaluation_summary.csv")
metrics_df = pd.DataFrame(all_metrics)
metrics_df.to_csv(metrics_csv_path, index=False)
print(f"Saved summary metrics to {metrics_csv_path}")