import os from pathlib import Path import pickle import time import datetime import argparse import torch from torch.nn.parallel import DistributedDataParallel as DDP import numpy as np from torch.optim.lr_scheduler import OneCycleLR from utilities import common from utilities.dataset import TrajectoryDataset # from models import fatigue_model from models import fatigue_model from models import fatigue_eval device = torch.device('cuda') import torch.nn as nn def squeeze_data_frame(data_frame): for k, v in data_frame.items(): if isinstance(v, torch.Tensor): # This check solves the problem data_frame[k] = torch.squeeze(v, 0) return data_frame def squeeze_data(data): # CORRECT: Only squeezes tensors, leaves other types alone transformed_data = {key: value.squeeze(0) if isinstance(value, torch.Tensor) else value for key, value in data.items()} return transformed_data def pickle_save(path, data): with open(path, 'wb') as f: pickle.dump(data, f) def pickle_load(path): with open(path, 'rb') as f: return pickle.load(f) ''' # standard loss function for fatigue life prediction def loss_fn(inputs, network_output, model): """ Calculates the L2 (Mean Squared Error) loss for position prediction. The model predicts where particles will move next, and this function measures how far off those predictions are from the actual next positions. """ # Extract current and target positions from input data target_life = inputs['fatigue_life'].to(device) # it stores the traget life or the ground truth of the target life which was given to this method as a input #letter the input will take frame as its parameter and network output will take the return of forward () method of the model cclass which can be called directly by model(x) which is a feature of pytorch model.nn class #it also takes model object as inpput too so that the normalizer method can be called. 
# Normalize the target values using the model's built-in normalizer target_normalizer = model.get_output_life_normalizer() target_life_normalized = target_normalizer(target_life) # Extract the life prediction life_prediction = network_output[:,:1] # prepare an error vector of length B diff_life = target_life_normalized - life_prediction loss = torch.mean(diff_life ** 2) return loss ''' def loss_fn(inputs, network_output, w1=1.0, w2=1.0): """ Custom loss function for predicting fatigue life in scientific form: fatigue_life = a * 10^b network_output: [num_nodes, 2] → columns: [a_pred, b_pred] inputs['fatigue_life']: [num_nodes] or [num_nodes, 1] """ # Move target to device and squeeze extra dimensions target_life = inputs['fatigue_life'].to(device).squeeze(-1) # [num_nodes] # Convert true fatigue life to scientific form (a * 10^b) true_b = torch.floor(torch.log10(target_life)) # exponent true_a = target_life / (10 ** true_b) # coefficient # Predictions from network pred_a = network_output[:, 0] # [num_nodes] pred_b = network_output[:, 1] # [num_nodes] # Compute separate MSE losses criterion = nn.MSELoss() loss_coeff = criterion(pred_a, true_a) loss_exp = criterion(pred_b, true_b) # Weighted sum of losses loss = w1 * loss_coeff + w2 * loss_exp return loss ''' def loss_fn(inputs, network_output): """ Custom loss function for predicting fatigue life in scientific form: fatigue_life = a * 10^b network_output: [num_nodes, 2] → columns: [a_pred, b_pred] inputs['fatigue_life']: [num_nodes] or [num_nodes, 1] """ target_life = inputs['fatigue_life'].to(device).squeeze(-1) # [num_nodes] # Predictions from network pred_a = network_output[:, 0] # [num_nodes] pred_b = network_output[:, 1] # [num_nodes] # Reconstruct predicted fatigue life pred_life = pred_a * (10 ** pred_b) # Compute MSE on log10 scale for numerical stability loss = torch.mean((torch.log10(pred_life) - torch.log10(target_life)) ** 2) return loss ''' def prepare_files_and_directories(output_dir, model_num, 
train_data_path, experiment): """ Creates a organized directory structure for saving training outputs. The structure will be: output_dir/model_num/dataset_name/timestamp/ ├── checkpoint/ (saved model states) ├── log/ (training metrics and logs) └── rollout/ (evaluation results) """ # Extract dataset name from the full path train_data = train_data_path.split("/")[-1].split(".")[0] output_dir = os.path.join(output_dir, str(model_num), train_data, f"EXPERIMENT_{experiment}") # Create a unique timestamp for this training run run_create_time = time.time() run_create_datetime = datetime.datetime.fromtimestamp(run_create_time).strftime('%c') # Replace spaces and colons with dashes to make it filesystem-friendly run_create_datetime_datetime_dash = run_create_datetime.replace(" ", "-").replace(":", "-") # Create the main run directory run_dir = os.path.join(output_dir, run_create_datetime_datetime_dash) Path(run_dir).mkdir(parents=True, exist_ok=True) # Create subdirectories for different types of outputs checkpoint_dir = os.path.join(run_dir, 'checkpoint') # For saving model weights log_dir = os.path.join(run_dir, 'log') # For training metrics rollout_dir = os.path.join(run_dir, 'rollout') # For evaluation results # Create all directories (parents=True creates intermediate dirs if needed) Path(checkpoint_dir).mkdir(parents=True, exist_ok=True) Path(log_dir).mkdir(parents=True, exist_ok=True) Path(rollout_dir).mkdir(parents=True, exist_ok=True) return checkpoint_dir, log_dir, rollout_dir def save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch, is_periodic = False): """ Saves the current training state to checkpoint files. 
""" try: # Save epoch info torch.save({'epoch': epoch}, Path(checkpoint_dir) / "epoch_checkpoint.pth") # Save model state with epoch number for periodic saves if is_periodic: model_checkpoint_name = f"epoch_{epoch + 1}_model_checkpoint" else: model_checkpoint_name = "epoch_model_checkpoint" model.save_model(str(Path(checkpoint_dir) / model_checkpoint_name)) # Save optimizer and scheduler states torch.save(optimizer.state_dict(), Path(checkpoint_dir) / "epoch_optimizer_checkpoint.pth") torch.save(scheduler.state_dict(), Path(checkpoint_dir) / "epoch_scheduler_checkpoint.pth") print(f"Checkpoint saved for epoch {epoch+1}") except Exception as e: print(f"Error saving checkpoint for epoch {epoch+1}: {e}") def main(args): device = torch.device('cuda') div_factor = args.peak_lr / args.base_lr # 5e-3 / 1e-3 = 5.0 final_div_factor = args.peak_lr / args.final_lr # 5e-3 / 1e-4 = 50.0 start_epoch = 0 start_time = time.time() end_epoch = args.epochs print(f"starting training from epoch {start_epoch} to {end_epoch}") train_dataset = TrajectoryDataset(args.train_data, split='train') val_dataset = TrajectoryDataset(args.val_data, split='val') train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True) val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False) params = dict(field='world_pos', output_size=args.output_size, model=fatigue_model, evaluator=fatigue_eval, k=args.neighbor_k, input_size=args.input_size) core_model = 'regDGCNN_seg' model = fatigue_model.Model(params,core_model_name=core_model).to(device) # create an object of the Model method of fatigue_module #now the model takes the input of dict prams, model name optimizer = torch.optim.AdamW(model.parameters(), lr=args.base_lr, weight_decay=1e-3) if args.scheduler == 'cosine': scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0 = args.T_0, T_mult = args.T_mult) else: scheduler = OneCycleLR( optimizer, max_lr=args.base_lr, 
epochs=(end_epoch-start_epoch),
            steps_per_epoch=len(train_dataloader),
            pct_start=args.pct_start,
            div_factor=div_factor,
            final_div_factor=final_div_factor,
            cycle_momentum=False  # AdamW has no momentum buffers to cycle
        )

    # Per-run output directories (checkpoints, logs, rollouts).
    checkpoint_dir, log_dir, rollout_dir = prepare_files_and_directories(args.output_dir,core_model,args.train_data, args.experiment_id)

    # Bookkeeping: per-epoch and per-step training metrics.
    epoch_training_losses = []
    epoch_learning_rate = []
    step_training_losses = []
    epoch_run_times = []
    #Save eval losses
    epoch_eval_losses = []

    for epoch in range(start_epoch, end_epoch):
        print(f"\n=== Epoch {epoch + 1}/{end_epoch} ===")
        epoch_start_time = time.time()
        epoch_training_loss = 0.0
        print("---------------Training Started---------------")
        model.train()
        for data in train_dataloader:
            # Drop the leading batch dimension from every tensor field.
            # NOTE(review): assumes the model consumes un-batched frames —
            # confirm against Model.forward.
            frame = squeeze_data_frame(data)
            # network_output = model.forward(frame); loss_fn compares it
            # against frame['fatigue_life'] (see loss_fn above).
            output = model(frame,is_training=True)
            loss = loss_fn(frame, output,w1=1.0, w2=1.0)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()  # stepped per batch (OneCycleLR convention)
            # Detach to CPU so stored losses don't keep the graph/GPU alive.
            step_loss = loss.detach().cpu()
            step_training_losses.append(step_loss)
            epoch_training_loss += step_loss
        # Record metrics for this epoch
        epoch_training_losses.append(epoch_training_loss)
        epoch_learning_rate.append(optimizer.param_groups[0]['lr'])
        epoch_run_time = time.time() - epoch_start_time
        epoch_run_times.append(epoch_run_time)
        print(f"Epoch {epoch + 1} completed, Training loss: {epoch_training_loss:.6f}, Time taken: {epoch_run_time:.2f}s, Learning rate: {optimizer.param_groups[0]['lr']:.2e}")
        # Aggregate training statistics (0-dim tensors stacked then reduced).
        loss_record = {
            'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item(),
            'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item(),
            'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item(),
            'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item(),
            'train_epoch_losses': epoch_training_losses,
            'all_step_train_losses': step_training_losses,
            'learning_rate': epoch_learning_rate,
            'epoch_run_times': epoch_run_times
        }
        # NOTE(review): source is truncated below this point — the condition
        # continues past the visible file (presumably a loss-trend check).
        if len(epoch_training_losses) >= 2 and epoch_training_losses[-1]