| | import os
|
| | from pathlib import Path
|
| | import pickle
|
| | import time
|
| | import datetime
|
| | import argparse
|
| |
|
| | import torch
|
| | from torch.nn.parallel import DistributedDataParallel as DDP
|
| | import numpy as np
|
| | from torch.optim.lr_scheduler import OneCycleLR
|
| |
|
| | from utilities import common
|
| | from utilities.dataset import TrajectoryDataset
|
| |
|
| |
|
| | from models import fatigue_model
|
| | from models import fatigue_eval
|
| |
|
| | device = torch.device('cuda')
|
| | import torch.nn as nn
|
| |
|
| |
|
def squeeze_data_frame(data_frame):
    """Drop the leading batch dimension (size 1) from every tensor in *data_frame*.

    Mutates the dict in place and returns it; non-tensor values are untouched.
    """
    for key in data_frame:
        value = data_frame[key]
        if torch.is_tensor(value):
            data_frame[key] = value.squeeze(0)
    return data_frame
|
| |
|
def squeeze_data(data):
    """Return a new dict with the leading size-1 dim squeezed off every tensor value.

    Unlike `squeeze_data_frame`, the input dict is not modified.
    """
    result = {}
    for key, value in data.items():
        if isinstance(value, torch.Tensor):
            result[key] = value.squeeze(0)
        else:
            result[key] = value
    return result
|
| |
|
def pickle_save(path, data):
    """Serialize *data* to the file at *path* using pickle."""
    with open(path, mode='wb') as handle:
        pickle.dump(data, handle)
|
| |
|
def pickle_load(path):
    """Read and return the pickled object stored at *path*."""
    with open(path, mode='rb') as handle:
        loaded = pickle.load(handle)
    return loaded
|
| |
|
| | '''
|
| | # standard loss function for fatigue life prediction
|
| | def loss_fn(inputs, network_output, model):
|
| | """
|
| | Calculates the L2 (Mean Squared Error) loss for position prediction.
|
| |
|
| | The model predicts where particles will move next, and this function
|
| | measures how far off those predictions are from the actual next positions.
|
| | """
|
| | # Extract current and target positions from input data
|
| | target_life = inputs['fatigue_life'].to(device) # it stores the traget life or the ground truth of the target life which was given to this method as a input
|
| | #letter the input will take frame as its parameter and network output will take the return of forward () method of the model cclass which can be called directly by model(x) which is a feature of pytorch model.nn class
|
| | #it also takes model object as inpput too so that the normalizer method can be called.
|
| | # Normalize the target values using the model's built-in normalizer
|
| | target_normalizer = model.get_output_life_normalizer()
|
| | target_life_normalized = target_normalizer(target_life)
|
| |
|
| | # Extract the life prediction
|
| | life_prediction = network_output[:,:1]
|
| |
|
| | # prepare an error vector of length B
|
| | diff_life = target_life_normalized - life_prediction
|
| |
|
| | loss = torch.mean(diff_life ** 2)
|
| |
|
| | return loss
|
| | '''
|
| |
|
| |
|
| |
|
def loss_fn(inputs, network_output, w1=1.0, w2=1.0):
    """
    Weighted MSE loss on the scientific-notation decomposition of fatigue life.

    The target life N is rewritten as N = a * 10**b with b = floor(log10(N))
    and a = N / 10**b; the network's two output columns are regressed against
    (a, b) independently.

    Args:
        inputs: dict containing 'fatigue_life' of shape [num_nodes] or [num_nodes, 1].
        network_output: [num_nodes, 2] tensor -> columns [a_pred, b_pred].
        w1: weight on the coefficient (mantissa) term.
        w2: weight on the exponent term.

    NOTE(review): log10 assumes strictly positive targets -- confirm upstream.
    Uses the module-level `device`.
    """
    target_life = inputs['fatigue_life'].to(device).squeeze(-1)

    # Decompose the ground truth into integer exponent and mantissa.
    exponent_true = torch.floor(torch.log10(target_life))
    mantissa_true = target_life / (10 ** exponent_true)

    mantissa_pred = network_output[:, 0]
    exponent_pred = network_output[:, 1]

    mse = nn.MSELoss()
    return w1 * mse(mantissa_pred, mantissa_true) + w2 * mse(exponent_pred, exponent_true)
|
| |
|
| | '''
|
| | def loss_fn(inputs, network_output):
|
| | """
|
| | Custom loss function for predicting fatigue life in scientific form:
|
| | fatigue_life = a * 10^b
|
network_output: [num_nodes, 2] → columns: [a_pred, b_pred]
|
| | inputs['fatigue_life']: [num_nodes] or [num_nodes, 1]
|
| | """
|
| | target_life = inputs['fatigue_life'].to(device).squeeze(-1) # [num_nodes]
|
| |
|
| | # Predictions from network
|
| | pred_a = network_output[:, 0] # [num_nodes]
|
| | pred_b = network_output[:, 1] # [num_nodes]
|
| |
|
| | # Reconstruct predicted fatigue life
|
| | pred_life = pred_a * (10 ** pred_b)
|
| |
|
| | # Compute MSE on log10 scale for numerical stability
|
| | loss = torch.mean((torch.log10(pred_life) - torch.log10(target_life)) ** 2)
|
| |
|
| | return loss
|
| | '''
|
| |
|
| |
|
def prepare_files_and_directories(output_dir, model_num, train_data_path, experiment):
    """
    Create an organized directory structure for saving training outputs.

    The structure will be:
        output_dir/model_num/dataset_name/EXPERIMENT_<experiment>/<timestamp>/
        ├── checkpoint/   (saved model states)
        ├── log/          (training metrics and logs)
        └── rollout/      (evaluation results)

    Args:
        output_dir: root directory for all runs.
        model_num: model identifier; becomes a path component.
        train_data_path: path to the training data file; the base name's text
            before the first '.' names the dataset sub-directory.
        experiment: experiment identifier, stored as "EXPERIMENT_<id>".

    Returns:
        Tuple of (checkpoint_dir, log_dir, rollout_dir) path strings.
    """
    # os.path.basename handles any OS path separator, unlike split("/").
    # Keep the split(".")[0] semantics: text before the FIRST dot.
    train_data = os.path.basename(train_data_path).split(".")[0]
    output_dir = os.path.join(output_dir, str(model_num), train_data, f"EXPERIMENT_{experiment}")

    # Time-stamped run directory name; '%c' is locale-dependent, so replace
    # spaces and colons, which are awkward/illegal in file names.
    run_create_time = time.time()
    run_create_datetime = datetime.datetime.fromtimestamp(run_create_time).strftime('%c')
    run_create_datetime_datetime_dash = run_create_datetime.replace(" ", "-").replace(":", "-")

    run_dir = os.path.join(output_dir, run_create_datetime_datetime_dash)

    checkpoint_dir = os.path.join(run_dir, 'checkpoint')
    log_dir = os.path.join(run_dir, 'log')
    rollout_dir = os.path.join(run_dir, 'rollout')

    # parents=True creates the whole chain (including run_dir);
    # exist_ok=True tolerates re-runs within the same second.
    for directory in (checkpoint_dir, log_dir, rollout_dir):
        Path(directory).mkdir(parents=True, exist_ok=True)

    return checkpoint_dir, log_dir, rollout_dir
|
| |
|
| |
|
def save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch, is_periodic = False):
    """
    Persist the current training state into *checkpoint_dir*: the epoch
    marker, the model weights (via model.save_model), and the optimizer
    and scheduler state dicts.

    When *is_periodic* is true the model file is tagged with the 1-based
    epoch number; otherwise a fixed file name is overwritten each call.
    Failures are reported to stdout but never raised (best-effort saving).
    """
    base = Path(checkpoint_dir)
    try:
        # Record which epoch this checkpoint belongs to.
        torch.save({'epoch': epoch}, base / "epoch_checkpoint.pth")

        name = f"epoch_{epoch + 1}_model_checkpoint" if is_periodic else "epoch_model_checkpoint"
        model.save_model(str(base / name))

        torch.save(optimizer.state_dict(), base / "epoch_optimizer_checkpoint.pth")
        torch.save(scheduler.state_dict(), base / "epoch_scheduler_checkpoint.pth")

        print(f"Checkpoint saved for epoch {epoch+1}")
    except Exception as e:
        print(f"Error saving checkpoint for epoch {epoch+1}: {e}")
|
| |
|
def main(args):
    """Train the fatigue-life model and periodically evaluate/checkpoint it.

    Expects *args* from the argparse parser at the bottom of this file.
    Requires CUDA (both here and via the module-level `device` used by
    `loss_fn`). Writes checkpoints, loss logs, and rollout pickles under the
    run directory created by `prepare_files_and_directories`.
    """
    device = torch.device('cuda')  # local name; shadows the module-level `device`

    # OneCycleLR factors: initial_lr = max_lr / div_factor,
    # min_lr = initial_lr / final_div_factor.
    # NOTE(review): below, OneCycleLR gets max_lr=args.base_lr while
    # div_factor is peak_lr/base_lr -- was max_lr=args.peak_lr intended?
    # Confirm before relying on the 'onecycle' scheduler path.
    div_factor = args.peak_lr / args.base_lr
    final_div_factor = args.peak_lr / args.final_lr

    start_epoch = 0
    start_time = time.time()

    end_epoch = args.epochs
    print(f"starting training from epoch {start_epoch} to {end_epoch}")

    # Project datasets/loaders; val is evaluated one trajectory at a time.
    train_dataset = TrajectoryDataset(args.train_data, split='train')
    val_dataset = TrajectoryDataset(args.val_data, split='val')

    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False)

    # Model hyper-parameters handed to the project Model wrapper.
    params = dict(field='world_pos', output_size=args.output_size, model=fatigue_model, evaluator=fatigue_eval, k=args.neighbor_k, input_size=args.input_size)

    core_model = 'regDGCNN_seg'

    model = fatigue_model.Model(params,core_model_name=core_model).to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=args.base_lr, weight_decay=1e-3)
    if args.scheduler == 'cosine':
        # NOTE(review): scheduler.step() is called per batch below, so T_0 is
        # measured in steps, not epochs -- confirm that matches intent.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0 = args.T_0, T_mult = args.T_mult)
    else:
        scheduler = OneCycleLR(
            optimizer,
            max_lr=args.base_lr,
            epochs=(end_epoch-start_epoch),
            steps_per_epoch=len(train_dataloader),
            pct_start=args.pct_start,
            div_factor=div_factor,
            final_div_factor=final_div_factor,
            cycle_momentum=False
        )

    checkpoint_dir, log_dir, rollout_dir = prepare_files_and_directories(args.output_dir,core_model,args.train_data, args.experiment_id)

    # Per-epoch / per-step bookkeeping.
    epoch_training_losses = []
    epoch_learning_rate = []
    step_training_losses = []
    epoch_run_times = []

    epoch_eval_losses = []

    for epoch in range(start_epoch, end_epoch):
        print(f"\n=== Epoch {epoch + 1}/{end_epoch} ===")
        epoch_start_time = time.time()
        epoch_training_loss = 0.0
        print("---------------Training Started---------------")
        model.train()
        for data in train_dataloader:
            frame = squeeze_data_frame(data)
            output = model(frame,is_training=True)
            loss = loss_fn(frame, output,w1=1.0, w2=1.0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()  # stepped per batch (see scheduler notes above)
            step_loss = loss.detach().cpu()
            step_training_losses.append(step_loss)
            epoch_training_loss += step_loss  # becomes a 0-d tensor after first add

        epoch_training_losses.append(epoch_training_loss)
        epoch_learning_rate.append(optimizer.param_groups[0]['lr'])
        epoch_run_time = time.time() - epoch_start_time
        epoch_run_times.append(epoch_run_time)

        print(f"Epoch {epoch + 1} completed, Training loss: {epoch_training_loss:.6f}, Time taken: {epoch_run_time:.2f}s, Learning rate: {optimizer.param_groups[0]['lr']:.2e}")

        # Cumulative summary rebuilt every epoch; dumped on evaluation epochs.
        loss_record = {
            'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item(),
            'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item(),
            'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item(),
            'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item(),
            'train_epoch_losses': epoch_training_losses,
            'all_step_train_losses': step_training_losses,
            'learning_rate': epoch_learning_rate,
            'epoch_run_times': epoch_run_times
        }

        # "Best" here means: training loss improved over the previous epoch.
        if len(epoch_training_losses) >= 2 and epoch_training_losses[-1]<epoch_training_losses[-2]:
            model.save_model(str(Path(checkpoint_dir) / f"best_model_checkpoint_{epoch}"))
            torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "best_optimizer_checkpoint.pth"))
            torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "best_scheduler_checkpoint.pth"))

        # Evaluate every 20 epochs, plus on the first and last epoch.
        should_evaluate = (epoch + 1) % 20 == 0 or epoch == start_epoch or (epoch + 1) == end_epoch

        if should_evaluate:
            print(f"Saving checkpoint and evaluating at epoch {epoch + 1}...")

            temp_train_loss_file = Path(log_dir) / f'temp_train_loss_{epoch + 1}.pkl'
            pickle_save(str(temp_train_loss_file), loss_record)

            save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch)

            print("Running evaluation...")
            model.eval()

            trajectories = []
            mse_losses = []
            l1_losses = []

            mse_loss_fn = torch.nn.MSELoss()
            l1_loss_fn = torch.nn.L1Loss()

            with torch.no_grad():
                for data in val_loader:
                    data=squeeze_data(data)

                    prediction_trajectory = fatigue_eval.evaluate(model, data)

                    target = torch.squeeze(data['fatigue_life'].to(device), dim=0)

                    pred = prediction_trajectory['pred_fatigue_life']

                    mse_loss = mse_loss_fn(target, pred)
                    l1_loss = l1_loss_fn(target, pred)

                    mse_losses.append(mse_loss.cpu())
                    l1_losses.append(l1_loss.cpu())

                    trajectories.append(prediction_trajectory)
            # Mean MSE across the whole validation set for this eval epoch.
            epoch_eval_losses.append(torch.mean(torch.stack(mse_losses)).item())

            rollout_file = Path(rollout_dir) / f"rollout_epoch_{epoch + 1}.pkl"
            pickle_save(str(rollout_file), trajectories)

            eval_loss_record = {
                'eval_total_mse_loss': torch.sum(torch.stack(mse_losses)).item(),
                'eval_total_l1_loss': torch.sum(torch.stack(l1_losses)).item(),
                'eval_mean_mse_loss': torch.mean(torch.stack(mse_losses)).item(),
                'eval_max_mse_loss': torch.max(torch.stack(mse_losses)).item(),
                'eval_min_mse_loss': torch.min(torch.stack(mse_losses)).item(),
                'eval_mean_l1_loss': torch.mean(torch.stack(l1_losses)).item(),
                'eval_max_l1_loss': torch.max(torch.stack(l1_losses)).item(),
                'eval_min_l1_loss': torch.min(torch.stack(l1_losses)).item(),
                'eval_mse_losses': mse_losses,
                'eval_l1_losses': l1_losses,
                'epoch_eval_losses': epoch_eval_losses,
            }

            eval_loss_file = Path(log_dir) / f'eval_loss_epoch_{epoch + 1}.pkl'
            pickle_save(str(eval_loss_file), eval_loss_record)

            print(f"Evaluation completed, Mean MSE Loss: {eval_loss_record['eval_mean_mse_loss']:.6f}, Mean L1 Loss: {eval_loss_record['eval_mean_l1_loss']:.6f}")

    print("\nTraining completed! Saving final results...")

    final_loss_record = {
        'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_epoch_losses': epoch_training_losses,
        'all_step_train_losses': step_training_losses,
        'learning_rate': epoch_learning_rate,
        'epoch_run_times': epoch_run_times
    }

    pickle_save(str(Path(log_dir) / 'epoch_run_times.pkl'), epoch_run_times)
    pickle_save(str(Path(log_dir) / 'final_train_loss.pkl'), final_loss_record)

    model.save_model(str(Path(checkpoint_dir) / "final_model_checkpoint"))
    torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "final_optimizer_checkpoint.pth"))
    torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "final_scheduler_checkpoint.pth"))
    return
|
| |
|
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Data and output locations.
    parser.add_argument('--train_data', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/datasets/extracted_data/shaft_low_extra2_.h5")
    parser.add_argument('--val_data', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/datasets/extracted_data/shaft_low_extra2_.h5")
    parser.add_argument('--output_dir', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life/output_shaft_extra")
    # Training schedule.
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--epochs', type=int, default=1500)
    # Learning-rate settings: base_lr feeds AdamW; peak_lr/final_lr only
    # shape the OneCycleLR div factors in main().
    parser.add_argument('--base_lr', type=float, default=2e-4)
    parser.add_argument('--peak_lr', type=float, default=8e-4)
    parser.add_argument('--final_lr', type=float, default=5e-5)
    parser.add_argument('--pct_start', type=float, default=0.1)
    # Cosine-warm-restarts parameters (used when --scheduler=cosine).
    parser.add_argument('--T_0', type=int, default=10)
    parser.add_argument('--T_mult', type=int, default=2)
    parser.add_argument('--scheduler', type=str, default="cosine", choices=["cosine", "onecycle"])
    parser.add_argument('--experiment_id', type=str, default="multi_feat_exponent_k60_500_shaft_low_extra2")
    # Model hyper-parameters: k-NN graph size and feature/output widths.
    parser.add_argument('--neighbor_k', type=int, default=60)
    parser.add_argument('--model_name', type=str, default="regDGCNN_seg")
    parser.add_argument('--input_size', type=int, default=9)
    parser.add_argument('--output_size', type=int, default=2)
    args = parser.parse_args()
    main(args)