# NOTE: the three lines below were Hugging Face web-page chrome captured in the
# export — "SachinSaud's picture / Upload folder using huggingface_hub /
# 6ac44e1 verified" — kept as a comment so the file remains valid Python.
import os
from pathlib import Path
import pickle
import time
import datetime
import argparse
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
from torch.optim.lr_scheduler import OneCycleLR
from utilities import common
from utilities.dataset import TrajectoryDataset
# from models import fatigue_model
from models import fatigue_model
from models import fatigue_eval
device = torch.device('cuda')
import torch.nn as nn
def squeeze_data_frame(data_frame):
    """Drop the leading batch dimension (size 1) from every tensor in the frame.

    Mutates *data_frame* in place and also returns it for convenience.
    Non-tensor entries are left untouched.
    """
    for key in data_frame:
        value = data_frame[key]
        # Only tensors carry the batch dimension added by the DataLoader.
        if isinstance(value, torch.Tensor):
            data_frame[key] = value.squeeze(0)
    return data_frame
def squeeze_data(data):
    """Return a new dict with the leading batch dimension dropped from tensors.

    Unlike squeeze_data_frame, the input mapping itself is not modified;
    non-tensor values are carried through unchanged.
    """
    transformed_data = {}
    for key, value in data.items():
        if isinstance(value, torch.Tensor):
            value = value.squeeze(0)
        transformed_data[key] = value
    return transformed_data
def pickle_save(path, data):
    """Serialize *data* to the file at *path* using pickle (binary mode)."""
    out_file = open(path, 'wb')
    try:
        pickle.dump(data, out_file)
    finally:
        out_file.close()
def pickle_load(path):
    """Deserialize and return the pickled object stored at *path*."""
    in_file = open(path, 'rb')
    try:
        return pickle.load(in_file)
    finally:
        in_file.close()
'''
# standard loss function for fatigue life prediction
def loss_fn(inputs, network_output, model):
"""
Calculates the L2 (Mean Squared Error) loss for position prediction.
The model predicts where particles will move next, and this function
measures how far off those predictions are from the actual next positions.
"""
# Extract current and target positions from input data
target_life = inputs['fatigue_life'].to(device) # ground-truth fatigue life taken from the input frame
#Later, `inputs` receives the data frame and `network_output` the return value of the model's
#forward() method, which torch.nn.Module lets us invoke directly as model(x).
#The model object is also passed in so that its output normalizer can be retrieved.
# Normalize the target values using the model's built-in normalizer
target_normalizer = model.get_output_life_normalizer()
target_life_normalized = target_normalizer(target_life)
# Extract the life prediction
life_prediction = network_output[:,:1]
# prepare an error vector of length B
diff_life = target_life_normalized - life_prediction
loss = torch.mean(diff_life ** 2)
return loss
'''
def loss_fn(inputs, network_output, w1=1.0, w2=1.0):
    """Weighted MSE loss on the scientific-notation decomposition of fatigue life.

    The ground-truth life is rewritten as life = a * 10**b, and the two output
    columns of the network are regressed against a and b separately:

        loss = w1 * MSE(pred_a, a) + w2 * MSE(pred_b, b)

    network_output: [num_nodes, 2] -> columns [a_pred, b_pred]
    inputs['fatigue_life']: [num_nodes] or [num_nodes, 1]; assumed strictly
    positive (log10 of a non-positive life would yield NaN/-inf) -- TODO confirm
    the dataset guarantees this.
    """
    # Move target to the training device and drop a trailing singleton dim.
    target_life = inputs['fatigue_life'].to(device).squeeze(-1)
    # Exponent b = floor(log10(life)); mantissa a = life / 10**b.
    true_b = torch.floor(torch.log10(target_life))
    true_a = target_life / torch.pow(10.0, true_b)
    # Network predictions for mantissa and exponent.
    pred_a, pred_b = network_output[:, 0], network_output[:, 1]
    # Plain mean-squared errors (identical to nn.MSELoss with 'mean' reduction).
    mantissa_loss = torch.mean((pred_a - true_a) ** 2)
    exponent_loss = torch.mean((pred_b - true_b) ** 2)
    return w1 * mantissa_loss + w2 * exponent_loss
'''
def loss_fn(inputs, network_output):
"""
Custom loss function for predicting fatigue life in scientific form:
fatigue_life = a * 10^b
network_output: [num_nodes, 2] β†’ columns: [a_pred, b_pred]
inputs['fatigue_life']: [num_nodes] or [num_nodes, 1]
"""
target_life = inputs['fatigue_life'].to(device).squeeze(-1) # [num_nodes]
# Predictions from network
pred_a = network_output[:, 0] # [num_nodes]
pred_b = network_output[:, 1] # [num_nodes]
# Reconstruct predicted fatigue life
pred_life = pred_a * (10 ** pred_b)
# Compute MSE on log10 scale for numerical stability
loss = torch.mean((torch.log10(pred_life) - torch.log10(target_life)) ** 2)
return loss
'''
def prepare_files_and_directories(output_dir, model_num, train_data_path, experiment):
    """Create the directory tree for one training run and return its subdirs.

    Layout:
        output_dir/model_num/dataset_name/EXPERIMENT_<experiment>/<timestamp>/
            checkpoint/  (saved model states)
            log/         (training metrics and logs)
            rollout/     (evaluation results)

    Returns:
        (checkpoint_dir, log_dir, rollout_dir) as path strings.
    """
    # Dataset name = file name truncated at the first dot. Path(...).name is
    # used instead of split("/") so the function also works with OS-native
    # separators (the original split broke on Windows-style paths).
    train_data = Path(train_data_path).name.split(".")[0]
    output_dir = os.path.join(output_dir, str(model_num), train_data, f"EXPERIMENT_{experiment}")
    # Unique timestamp for this run; '%c' contains spaces and colons, which
    # are replaced with dashes to stay filesystem-friendly.
    run_create_time = time.time()
    run_create_datetime = datetime.datetime.fromtimestamp(run_create_time).strftime('%c')
    run_create_datetime_datetime_dash = run_create_datetime.replace(" ", "-").replace(":", "-")
    # Main run directory (parents=True creates intermediate dirs as needed).
    run_dir = os.path.join(output_dir, run_create_datetime_datetime_dash)
    Path(run_dir).mkdir(parents=True, exist_ok=True)
    # Subdirectories for each kind of training output.
    checkpoint_dir = os.path.join(run_dir, 'checkpoint')  # model weights
    log_dir = os.path.join(run_dir, 'log')                # training metrics
    rollout_dir = os.path.join(run_dir, 'rollout')        # evaluation results
    for sub_dir in (checkpoint_dir, log_dir, rollout_dir):
        Path(sub_dir).mkdir(parents=True, exist_ok=True)
    return checkpoint_dir, log_dir, rollout_dir
def save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch, is_periodic = False):
    """Persist the current training state into *checkpoint_dir*.

    Writes the epoch marker, the model weights (via model.save_model), and the
    optimizer/scheduler state dicts. Periodic saves embed the epoch number in
    the model file name; rolling saves reuse a fixed name. Best-effort: any
    failure is printed rather than raised, so training continues.
    """
    base = Path(checkpoint_dir)
    try:
        # Epoch marker so a restart knows where it left off.
        torch.save({'epoch': epoch}, base / "epoch_checkpoint.pth")
        model_checkpoint_name = (
            f"epoch_{epoch + 1}_model_checkpoint" if is_periodic else "epoch_model_checkpoint"
        )
        model.save_model(str(base / model_checkpoint_name))
        # Optimizer/scheduler states are needed to resume the LR schedule exactly.
        torch.save(optimizer.state_dict(), base / "epoch_optimizer_checkpoint.pth")
        torch.save(scheduler.state_dict(), base / "epoch_scheduler_checkpoint.pth")
        print(f"Checkpoint saved for epoch {epoch+1}")
    except Exception as e:
        print(f"Error saving checkpoint for epoch {epoch+1}: {e}")
def main(args):
    """Run the full training/evaluation loop for the fatigue-life model.

    Builds the datasets, model, optimizer and LR scheduler from the parsed
    command-line *args*, trains for args.epochs epochs, tracks losses and
    learning rates, saves a "best" checkpoint whenever the epoch loss improves,
    and every 20 epochs (plus the first and last) checkpoints and evaluates on
    the validation split. All artifacts are written under the run directory
    created by prepare_files_and_directories().
    """
    # Shadows the module-level `device`; both are 'cuda'.
    device = torch.device('cuda')
    # OneCycleLR ratios: initial lr = max_lr / div_factor,
    # final lr = initial lr / final_div_factor.
    div_factor = args.peak_lr / args.base_lr  # e.g. 5e-3 / 1e-3 = 5.0
    final_div_factor = args.peak_lr / args.final_lr  # e.g. 5e-3 / 1e-4 = 50.0
    start_epoch = 0
    start_time = time.time()  # NOTE(review): recorded but never read afterwards
    end_epoch = args.epochs
    print(f"starting training from epoch {start_epoch} to {end_epoch}")
    # Datasets and loaders; validation always uses batch_size=1.
    train_dataset = TrajectoryDataset(args.train_data, split='train')
    val_dataset = TrajectoryDataset(args.val_data, split='val')
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False)
    # Model construction parameters; `k` is the neighbor count passed through
    # to the model (presumably the k-NN graph size -- confirm in fatigue_model).
    params = dict(field='world_pos', output_size=args.output_size, model=fatigue_model, evaluator=fatigue_eval, k=args.neighbor_k, input_size=args.input_size)
    core_model = 'regDGCNN_seg'  # NOTE(review): args.model_name exists but is not used here
    # Model wrapper around the chosen core network; model(x) invokes forward().
    model = fatigue_model.Model(params,core_model_name=core_model).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.base_lr, weight_decay=1e-3)
    if args.scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0 = args.T_0, T_mult = args.T_mult)
    else:
        # NOTE(review): max_lr is args.base_lr while div_factor/final_div_factor
        # are derived from args.peak_lr -- confirm whether max_lr=args.peak_lr
        # was intended (as written, the peak of the cycle is base_lr).
        scheduler = OneCycleLR(
            optimizer,
            max_lr=args.base_lr,
            epochs=(end_epoch-start_epoch),
            steps_per_epoch=len(train_dataloader),
            pct_start=args.pct_start,
            div_factor=div_factor,
            final_div_factor=final_div_factor,
            cycle_momentum=False
        )
    checkpoint_dir, log_dir, rollout_dir = prepare_files_and_directories(args.output_dir,core_model,args.train_data, args.experiment_id)
    # Per-epoch / per-step bookkeeping.
    epoch_training_losses = []
    epoch_learning_rate = []
    step_training_losses = []
    epoch_run_times = []
    # Mean validation MSE recorded at each evaluation point.
    epoch_eval_losses = []
    for epoch in range(start_epoch, end_epoch):
        print(f"\n=== Epoch {epoch + 1}/{end_epoch} ===")
        epoch_start_time = time.time()
        epoch_training_loss = 0.0
        print("---------------Training Started---------------")
        model.train()
        for data in train_dataloader:
            # Drop the DataLoader's leading batch dimension from every tensor.
            frame = squeeze_data_frame(data)
            output = model(frame,is_training=True)
            # Weighted mantissa/exponent MSE against the ground-truth fatigue life.
            loss = loss_fn(frame, output,w1=1.0, w2=1.0)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Scheduler is stepped per batch, not per epoch.
            scheduler.step()
            step_loss = loss.detach().cpu()
            step_training_losses.append(step_loss)
            epoch_training_loss += step_loss  # summed (not averaged) over the epoch
        # Record metrics for this epoch.
        epoch_training_losses.append(epoch_training_loss)
        epoch_learning_rate.append(optimizer.param_groups[0]['lr'])
        epoch_run_time = time.time() - epoch_start_time
        epoch_run_times.append(epoch_run_time)
        print(f"Epoch {epoch + 1} completed, Training loss: {epoch_training_loss:.6f}, Time taken: {epoch_run_time:.2f}s, Learning rate: {optimizer.param_groups[0]['lr']:.2e}")
        # Rebuilt every epoch; persisted below at evaluation points.
        loss_record = {
            'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item(),
            'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item(),
            'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item(),
            'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item(),
            'train_epoch_losses': epoch_training_losses,
            'all_step_train_losses':
            step_training_losses,
            'learning_rate': epoch_learning_rate,
            'epoch_run_times': epoch_run_times
        }
        # "Best" checkpoint whenever the epoch loss improved on the previous epoch
        # (a local comparison, not against the global minimum).
        if len(epoch_training_losses) >= 2 and epoch_training_losses[-1]<epoch_training_losses[-2]:
            model.save_model(str(Path(checkpoint_dir) / f"best_model_checkpoint_{epoch}"))
            torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "best_optimizer_checkpoint.pth"))
            torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "best_scheduler_checkpoint.pth"))
        # Evaluate every 20 epochs plus the first and last epoch.
        should_evaluate = (epoch + 1) % 20 == 0 or epoch == start_epoch or (epoch + 1) == end_epoch
        if should_evaluate:
            print(f"Saving checkpoint and evaluating at epoch {epoch + 1}...")
            # Persist the running training metrics for this evaluation point.
            temp_train_loss_file = Path(log_dir) / f'temp_train_loss_{epoch + 1}.pkl'
            pickle_save(str(temp_train_loss_file), loss_record)
            # Rolling (non-periodic) checkpoint.
            save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch)
            print("Running evaluation...")
            model.eval()
            trajectories = []  # predicted trajectories, one per validation sample
            mse_losses = []    # per-sample mean squared errors
            l1_losses = []     # per-sample mean absolute errors
            # Loss functions used only for evaluation metrics.
            mse_loss_fn = torch.nn.MSELoss()
            l1_loss_fn = torch.nn.L1Loss()
            with torch.no_grad():
                for data in val_loader:
                    data=squeeze_data(data)
                    # Prediction on this sample using the current weights.
                    prediction_trajectory = fatigue_eval.evaluate(model, data)
                    # Ground truth, with the batch dimension removed.
                    target = torch.squeeze(data['fatigue_life'].to(device), dim=0)
                    pred = prediction_trajectory['pred_fatigue_life']
                    mse_loss = mse_loss_fn(target, pred)
                    l1_loss = l1_loss_fn(target, pred)
                    mse_losses.append(mse_loss.cpu())
                    l1_losses.append(l1_loss.cpu())
                    trajectories.append(prediction_trajectory)
            # Mean validation MSE for this evaluation point.
            epoch_eval_losses.append(torch.mean(torch.stack(mse_losses)).item())
            # Save the predicted trajectories for offline inspection.
            rollout_file = Path(rollout_dir) / f"rollout_epoch_{epoch + 1}.pkl"
            pickle_save(str(rollout_file), trajectories)
            # Unlike the training loop, model.eval() disables dropout etc. and no
            # gradients are computed; the same validation set is scored at every
            # evaluation point so the curves are comparable across epochs.
            eval_loss_record = {
                'eval_total_mse_loss': torch.sum(torch.stack(mse_losses)).item(),
                'eval_total_l1_loss': torch.sum(torch.stack(l1_losses)).item(),
                'eval_mean_mse_loss': torch.mean(torch.stack(mse_losses)).item(),
                'eval_max_mse_loss': torch.max(torch.stack(mse_losses)).item(),
                'eval_min_mse_loss': torch.min(torch.stack(mse_losses)).item(),
                'eval_mean_l1_loss': torch.mean(torch.stack(l1_losses)).item(),
                'eval_max_l1_loss': torch.max(torch.stack(l1_losses)).item(),
                'eval_min_l1_loss': torch.min(torch.stack(l1_losses)).item(),
                'eval_mse_losses': mse_losses,
                'eval_l1_losses': l1_losses,
                'epoch_eval_losses': epoch_eval_losses,
            }
            eval_loss_file = Path(log_dir) / f'eval_loss_epoch_{epoch + 1}.pkl'
            pickle_save(str(eval_loss_file), eval_loss_record)
            print(f"Evaluation completed, Mean MSE Loss: {eval_loss_record['eval_mean_mse_loss']:.6f}, Mean L1 Loss: {eval_loss_record['eval_mean_l1_loss']:.6f}")
    print("\nTraining completed! Saving final results...")
    # Final aggregate of training metrics (guards against an empty run).
    final_loss_record = {
        'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_epoch_losses': epoch_training_losses,
        'all_step_train_losses': step_training_losses,
        'learning_rate': epoch_learning_rate,
        'epoch_run_times': epoch_run_times
    }
    # Final timing information and loss record.
    pickle_save(str(Path(log_dir) / 'epoch_run_times.pkl'), epoch_run_times)
    pickle_save(str(Path(log_dir) / 'final_train_loss.pkl'), final_loss_record)
    # Final model/optimizer/scheduler state.
    model.save_model(str(Path(checkpoint_dir) / "final_model_checkpoint"))
    torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "final_optimizer_checkpoint.pth"))
    torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "final_scheduler_checkpoint.pth"))
    return
if __name__ == "__main__":
    # Command-line configuration for a training run; defaults reproduce the
    # reference experiment on the shaft_low_extra2_ dataset.
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    # Data locations and output root.
    add('--train_data', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/datasets/extracted_data/shaft_low_extra2_.h5")
    add('--val_data', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/datasets/extracted_data/shaft_low_extra2_.h5")
    add('--output_dir', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/output_shaft_extra")
    # Optimization schedule.
    add('--batch_size', type=int, default=1)
    add('--epochs', type=int, default=1500)
    add('--base_lr', type=float, default=2e-4)
    add('--peak_lr', type=float, default=8e-4)
    add('--final_lr', type=float, default=5e-5)
    add('--pct_start', type=float, default=0.1)
    add('--T_0', type=int, default=10)
    add('--T_mult', type=int, default=2)
    add('--scheduler', type=str, default="cosine", choices=["cosine", "onecycle"])
    # Experiment bookkeeping and model hyperparameters.
    add('--experiment_id', type=str, default="multi_feat_exponent_k60_500_shaft_low_extra2")
    add('--neighbor_k', type=int, default=60)
    add('--model_name', type=str, default="regDGCNN_seg")
    add('--input_size', type=int, default=9)
    add('--output_size', type=int, default=2)
    args = parser.parse_args()
    main(args)