| import os
|
| from pathlib import Path
|
| import pickle
|
| import time
|
| import datetime
|
| import argparse
|
|
|
| import torch
|
| from torch.nn.parallel import DistributedDataParallel as DDP
|
| import numpy as np
|
| from torch.optim.lr_scheduler import OneCycleLR
|
|
|
| from utilities import common
|
| from utilities.dataset import TrajectoryDataset
|
|
|
|
|
| from models import dest_model as press_model
|
| from models import dest_eval as press_eval
|
|
|
|
|
# Global compute device shared by loss_fn and main.
# NOTE(review): hard-codes CUDA — on a CPU-only machine this fails as soon as
# a tensor is moved to it; consider a torch.cuda.is_available() fallback.
device = torch.device('cuda')
|
|
|
|
|
def squeeze_data_frame(data_frame):
    """Squeeze the leading (batch) dimension off every tensor, in place.

    Mutates *data_frame* and also returns it for convenient chaining.
    """
    for key in data_frame:
        data_frame[key] = data_frame[key].squeeze(0)
    return data_frame
|
|
|
def pickle_save(path, data):
    """Serialize *data* to the file at *path* using pickle."""
    with open(path, 'wb') as handle:
        pickle.dump(data, handle)
|
|
|
def pickle_load(path):
    """Deserialize and return the pickled object stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
|
|
|
def loss_fn(inputs, network_output, model):
    """
    Combined training loss: position-velocity MSE + von Mises stress MSE.

    The network predicts, per node, a normalized displacement (first three
    output channels) and a normalized von Mises stress (remaining channels).
    Position error is counted only for NORMAL nodes (full 3D error) and
    ROLLER nodes (x/y components only); the stress error is averaged over
    all nodes regardless of type.

    Args:
        inputs: data frame with per-node 'curr_pos', 'next_pos', 'next_von'
            and 'node_type' tensors.
        network_output: (num_nodes, >=4) tensor of normalized predictions.
        model: supplies the output normalizers that bring the targets into
            the same normalized space as the predictions.

    Returns:
        Scalar loss tensor (position term + stress term).
    """
    world_pos = inputs['curr_pos'].to(device)
    target_world_pos = inputs['next_pos'].to(device)
    target_von = inputs['next_von'].to(device)

    # Supervise the per-step displacement (velocity), not absolute position.
    target_pos_velocity = target_world_pos - world_pos

    # Normalize targets with the model's running statistics so they are
    # directly comparable to the (normalized) network output.
    world_pos_normalizer = model.get_output_pos_normalizer()
    target_pos_normalized = world_pos_normalizer(target_pos_velocity)

    von_normalizer = model.get_output_von_normalizer()
    target_von_normalized = von_normalizer(target_von)

    node_type = inputs['node_type'].to(device)
    node_type = node_type.view(-1)

    mask_normal = (node_type == common.NodeType.NORMAL.value)
    mask_roller = (node_type == common.NodeType.ROLLER.value)
    valid_mask = mask_normal | mask_roller

    pos_prediction = network_output[:, :3]
    von_prediction = network_output[:, 3:]

    diff_pos = target_pos_normalized - pos_prediction
    diff_von = target_von_normalized - von_prediction

    error = torch.zeros(diff_pos.shape[0], device=device)

    if mask_normal.any():
        # NORMAL nodes: full squared 3D displacement error.
        error[mask_normal] = torch.sum(diff_pos[mask_normal] ** 2, dim=1)

    if mask_roller.any():
        # ROLLER nodes: only the in-plane (x, y) components are penalized.
        diff_roller = diff_pos[mask_roller]  # index once, reuse both columns
        error[mask_roller] = diff_roller[:, 0] ** 2 + diff_roller[:, 1] ** 2

    if valid_mask.any():
        loss_pos = torch.mean(error[valid_mask])
    else:
        # No supervised nodes in this frame; contribute no position loss.
        loss_pos = torch.tensor(0.0, device=device)

    # Stress loss is averaged over every node, regardless of node type.
    loss_von = torch.mean(diff_von ** 2)

    loss = loss_pos + loss_von
    return loss
|
|
|
def prepare_files_and_directories(output_dir, model_num, train_data_path, experiment):
    """
    Create the output directory tree for one training run.

    The structure is:
        output_dir/model_num/dataset_name/EXPERIMENT_<id>/<timestamp>/
        ├── checkpoint/ (saved model states)
        ├── log/        (training metrics and logs)
        └── rollout/    (evaluation results)

    Args:
        output_dir: root directory for all runs.
        model_num: model identifier, used as a sub-directory name.
        train_data_path: path to the training data file; the file name up
            to its first '.' becomes the dataset sub-directory name.
        experiment: experiment identifier, appended as "EXPERIMENT_<id>".

    Returns:
        (checkpoint_dir, log_dir, rollout_dir) as path strings.
    """
    # Path(...).name is portable; splitting on "/" breaks on Windows paths.
    train_data = Path(train_data_path).name.split(".")[0]
    output_dir = os.path.join(output_dir, str(model_num), train_data, f"EXPERIMENT_{experiment}")

    # Timestamped run directory.  NOTE(review): '%c' is locale-dependent;
    # the replace() calls make it filesystem-safe, but a fixed format such
    # as '%Y-%m-%d-%H-%M-%S' would sort chronologically — kept as-is to
    # preserve the existing run-naming scheme.
    run_create_time = time.time()
    run_create_datetime = datetime.datetime.fromtimestamp(run_create_time).strftime('%c')
    run_create_datetime_datetime_dash = run_create_datetime.replace(" ", "-").replace(":", "-")

    run_dir = os.path.join(output_dir, run_create_datetime_datetime_dash)

    checkpoint_dir = os.path.join(run_dir, 'checkpoint')
    log_dir = os.path.join(run_dir, 'log')
    rollout_dir = os.path.join(run_dir, 'rollout')

    # parents=True also creates run_dir and every missing ancestor.
    for directory in (checkpoint_dir, log_dir, rollout_dir):
        Path(directory).mkdir(parents=True, exist_ok=True)

    return checkpoint_dir, log_dir, rollout_dir
|
|
|
def squeeze_data(data):
    """Return a new dict with the leading (batch) dimension squeezed off
    every tensor.  The input dict is left untouched."""
    squeezed = {}
    for key, value in data.items():
        squeezed[key] = value.squeeze(0)
    return squeezed
|
|
|
def save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch, is_periodic = False):
    """
    Persist the training state (epoch counter, model weights, optimizer
    and scheduler state) into *checkpoint_dir*.

    Periodic checkpoints get an epoch-numbered model file so they are never
    overwritten; otherwise a single rolling file name is reused.  Failures
    are printed but never raised, so a save error cannot kill training.
    """
    try:
        base = Path(checkpoint_dir)
        torch.save({'epoch': epoch}, base / "epoch_checkpoint.pth")

        model_checkpoint_name = (
            f"epoch_{epoch + 1}_model_checkpoint" if is_periodic else "epoch_model_checkpoint"
        )
        model.save_model(str(base / model_checkpoint_name))

        torch.save(optimizer.state_dict(), base / "epoch_optimizer_checkpoint.pth")
        torch.save(scheduler.state_dict(), base / "epoch_scheduler_checkpoint.pth")

        print(f"Checkpoint saved for epoch {epoch+1}")
    except Exception as e:
        print(f"Error saving checkpoint for epoch {epoch+1}: {e}")
|
|
|
def main(args):
    """
    Full training loop for the press model.

    Builds the train/val datasets and dataloaders, constructs the model,
    optimizer and LR scheduler from *args*, then trains for args.epochs
    epochs.  Every 20 epochs (plus the first and last epoch) it saves a
    checkpoint and evaluates on the validation set, writing loss records
    and rollout trajectories into the run's log/ and rollout/ directories.
    """
    device = torch.device('cuda')

    # OneCycleLR derives its endpoints from ratios:
    #   initial_lr = max_lr / div_factor
    #   min_lr     = initial_lr / final_div_factor
    div_factor = args.peak_lr / args.base_lr
    final_div_factor = args.peak_lr / args.final_lr

    start_epoch = 0
    end_epoch = args.epochs
    print(f"starting training from epoch {start_epoch} to {end_epoch}")

    train_dataset = TrajectoryDataset(args.train_data, split='train')
    val_dataset = TrajectoryDataset(args.val_data, split='val')

    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=True)

    params = dict(field='world_pos', output_size=args.output_size, model=press_model, evaluator=press_eval, k=args.neighbor_k, input_size=args.input_size)

    core_model = 'regDGCNN_seg'
    model = press_model.Model(params, core_model_name=core_model).to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=args.base_lr, weight_decay=1e-3)
    if args.scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=args.T_0, T_mult=args.T_mult)
    else:
        # Fix: max_lr must be the PEAK learning rate.  The previous code
        # passed args.base_lr here, which combined with
        # div_factor = peak/base made the cycle start at base**2/peak and
        # top out at base_lr instead of ever reaching args.peak_lr.
        scheduler = OneCycleLR(
            optimizer,
            max_lr=args.peak_lr,
            epochs=(end_epoch - start_epoch),
            steps_per_epoch=len(train_dataloader),
            pct_start=args.pct_start,
            div_factor=div_factor,
            final_div_factor=final_div_factor,
            cycle_momentum=False
        )

    checkpoint_dir, log_dir, rollout_dir = prepare_files_and_directories(args.output_dir, core_model, args.train_data, args.experiment_id)

    # Per-epoch bookkeeping, pickled into the log directory below.
    epoch_training_losses = []
    epoch_learning_rate = []
    step_training_losses = []
    epoch_run_times = []
    epoch_eval_losses = []

    for epoch in range(start_epoch, end_epoch):
        print(f"\n=== Epoch {epoch + 1}/{end_epoch} ===")
        epoch_start_time = time.time()
        epoch_training_loss = 0.0
        print("---------------Training Started---------------")
        model.train()
        for data in train_dataloader:
            frame = squeeze_data_frame(data)
            output = model(frame, is_training=True)
            loss = loss_fn(frame, output, model)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # NOTE(review): scheduler.step() runs once per BATCH.  That is
            # what OneCycleLR expects, but for CosineAnnealingWarmRestarts
            # it advances the restart clock by one "epoch" per batch
            # (restart every T_0 batches) — confirm this is intended.
            scheduler.step()
            step_loss = loss.detach().cpu()
            step_training_losses.append(step_loss)
            epoch_training_loss += step_loss

        epoch_training_losses.append(epoch_training_loss)
        epoch_learning_rate.append(optimizer.param_groups[0]['lr'])
        epoch_run_time = time.time() - epoch_start_time
        epoch_run_times.append(epoch_run_time)

        print(f"Epoch {epoch + 1} completed, Training loss: {epoch_training_loss:.6f}, Time taken: {epoch_run_time:.2f}s, Learning rate: {optimizer.param_groups[0]['lr']:.2e}")

        loss_record = {
            'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item(),
            'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item(),
            'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item(),
            'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item(),
            'train_epoch_losses': epoch_training_losses,
            'all_step_train_losses': step_training_losses,
            'learning_rate': epoch_learning_rate,
            'epoch_run_times': epoch_run_times
        }

        # Checkpoint + validate every 20 epochs, and on the first/last epoch.
        should_evaluate = (epoch + 1) % 20 == 0 or epoch == start_epoch or (epoch + 1) == end_epoch

        if should_evaluate:
            print(f"Saving checkpoint and evaluating at epoch {epoch + 1}...")

            temp_train_loss_file = Path(log_dir) / f'temp_train_loss_{epoch + 1}.pkl'
            pickle_save(str(temp_train_loss_file), loss_record)

            save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch)

            print("Running evaluation...")
            model.eval()  # model.train() at the top of the next epoch undoes this

            trajectories = []
            mse_losses = []
            l1_losses = []

            mse_loss_fn = torch.nn.MSELoss()
            l1_loss_fn = torch.nn.L1Loss()

            with torch.no_grad():
                for data in val_loader:
                    data = squeeze_data(data)

                    _, prediction_trajectory = press_eval.evaluate(model, data)

                    target_pos = torch.squeeze(data['next_pos'].to(device), dim=0)
                    target_von = torch.squeeze(data['next_von'].to(device), dim=0)

                    pred_pos = prediction_trajectory['pred_pos']
                    pred_von = prediction_trajectory['pred_von']

                    mse_loss_pos = mse_loss_fn(target_pos, pred_pos)
                    l1_loss_pos = l1_loss_fn(target_pos, pred_pos)
                    mse_loss_von = mse_loss_fn(target_von, pred_von)
                    l1_loss_von = l1_loss_fn(target_von, pred_von)

                    # Combined (position + von Mises stress) validation errors.
                    mse_loss = mse_loss_pos + mse_loss_von
                    l1_loss = l1_loss_pos + l1_loss_von

                    mse_losses.append(mse_loss.cpu())
                    l1_losses.append(l1_loss.cpu())
                    trajectories.append(prediction_trajectory)

            epoch_eval_losses.append(torch.mean(torch.stack(mse_losses)).item())

            rollout_file = Path(rollout_dir) / f"rollout_epoch_{epoch + 1}.pkl"
            pickle_save(str(rollout_file), trajectories)

            eval_loss_record = {
                'eval_total_mse_loss': torch.sum(torch.stack(mse_losses)).item(),
                'eval_total_l1_loss': torch.sum(torch.stack(l1_losses)).item(),
                'eval_mean_mse_loss': torch.mean(torch.stack(mse_losses)).item(),
                'eval_max_mse_loss': torch.max(torch.stack(mse_losses)).item(),
                'eval_min_mse_loss': torch.min(torch.stack(mse_losses)).item(),
                'eval_mean_l1_loss': torch.mean(torch.stack(l1_losses)).item(),
                'eval_max_l1_loss': torch.max(torch.stack(l1_losses)).item(),
                'eval_min_l1_loss': torch.min(torch.stack(l1_losses)).item(),
                'eval_mse_losses': mse_losses,
                'eval_l1_losses': l1_losses,
                'epoch_eval_losses': epoch_eval_losses
            }

            eval_loss_file = Path(log_dir) / f'eval_loss_epoch_{epoch + 1}.pkl'
            pickle_save(str(eval_loss_file), eval_loss_record)

            print(f"Evaluation completed, Mean MSE Loss: {eval_loss_record['eval_mean_mse_loss']:.6f}, Mean L1 Loss: {eval_loss_record['eval_mean_l1_loss']:.6f}")

    print("\nTraining completed! Saving final results...")

    # Guard with `if ...` so a zero-epoch run still writes a valid record.
    final_loss_record = {
        'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_epoch_losses': epoch_training_losses,
        'all_step_train_losses': step_training_losses,
        'learning_rate': epoch_learning_rate,
        'epoch_run_times': epoch_run_times
    }

    pickle_save(str(Path(log_dir) / 'epoch_run_times.pkl'), epoch_run_times)
    pickle_save(str(Path(log_dir) / 'final_train_loss.pkl'), final_loss_record)

    model.save_model(str(Path(checkpoint_dir) / "final_model_checkpoint"))
    torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "final_optimizer_checkpoint.pth"))
    torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "final_scheduler_checkpoint.pth"))
    return
|
|
|
if __name__ == "__main__":
    # Command-line interface: data locations, optimization hyper-parameters,
    # scheduler configuration and model hyper-parameters.
    arg_parser = argparse.ArgumentParser()

    # Data / output paths.
    arg_parser.add_argument('--train_data', type=str, default="/home/rachit/GlassForming/processed/train.h5")
    arg_parser.add_argument('--val_data', type=str, default="/home/rachit/GlassForming/processed/val.h5")
    arg_parser.add_argument('--output_dir', type=str, default="/home/rachit/GlassForming/new_final_code_output")

    # Optimization.
    arg_parser.add_argument('--batch_size', type=int, default=1)
    arg_parser.add_argument('--epochs', type=int, default=2000)
    arg_parser.add_argument('--base_lr', type=float, default=2e-4)
    arg_parser.add_argument('--peak_lr', type=float, default=8e-4)
    arg_parser.add_argument('--final_lr', type=float, default=5e-5)

    # Learning-rate scheduler (OneCycleLR / CosineAnnealingWarmRestarts).
    arg_parser.add_argument('--pct_start', type=float, default=0.1)
    arg_parser.add_argument('--T_0', type=int, default=10)
    arg_parser.add_argument('--T_mult', type=int, default=2)
    arg_parser.add_argument('--scheduler', type=str, default="cosine", choices=["cosine", "onecycle"])

    # Experiment / model hyper-parameters.
    arg_parser.add_argument('--experiment_id', type=str, default="400")
    arg_parser.add_argument('--neighbor_k', type=int, default=80)
    arg_parser.add_argument('--model_name', type=str, default="regDGCNN_seg")
    arg_parser.add_argument('--input_size', type=int, default=11)
    arg_parser.add_argument('--output_size', type=int, default=4)

    main(arg_parser.parse_args())