SachinSaud's picture
Upload folder using huggingface_hub
328a002 verified
import os
from pathlib import Path
import pickle
import time
import datetime
import argparse
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
from torch.optim.lr_scheduler import OneCycleLR
# from model import DGCNN, MagNet
from utilities import common
from utilities.dataset import TrajectoryDataset
# from models import press_model
from models import dest_model as press_model
from models import dest_eval as press_eval
device = torch.device('cuda')
def squeeze_data_frame(data_frame):
    """Remove the leading (batch) dimension from every tensor in the dict.

    Mutates `data_frame` in place and also returns it for convenience.
    Only a size-1 leading dimension is removed (torch.squeeze semantics).
    """
    for key in data_frame:
        data_frame[key] = data_frame[key].squeeze(0)
    return data_frame
def pickle_save(path, data):
    """Serialize `data` to the file at `path` using pickle."""
    with open(path, 'wb') as handle:
        pickle.dump(data, handle)
def pickle_load(path):
    """Deserialize and return the pickled object stored at `path`."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def loss_fn(inputs, network_output, model):
    """
    L2 training loss on normalized position velocity plus von Mises stress.

    The network predicts, per node, a normalized displacement (first 3
    output channels) and a von Mises stress value (remaining channels).
    Position error is averaged only over NORMAL and ROLLER nodes; ROLLER
    nodes contribute x/y error only. The stress term is an MSE over all
    nodes (no node-type masking, matching the original behavior).
    """
    # Targets: displacement between consecutive frames, and next-step stress.
    curr_pos = inputs['curr_pos'].to(device)
    next_pos = inputs['next_pos'].to(device)
    von_target = inputs['next_von'].to(device)
    velocity_target = next_pos - curr_pos

    # Normalize targets with the model's running normalizers for stable training.
    pos_target_norm = model.get_output_pos_normalizer()(velocity_target)
    von_target_norm = model.get_output_von_normalizer()(von_target)

    # Node-type masks: NORMAL nodes get full 3-D error, ROLLER nodes x/y only,
    # everything else (obstacles) is excluded from the position loss.
    node_type = inputs['node_type'].to(device).view(-1)
    is_normal = node_type == common.NodeType.NORMAL.value
    is_roller = node_type == common.NodeType.ROLLER.value
    is_valid = is_normal | is_roller

    # Split the network output into its position and stress heads.
    pred_pos = network_output[:, :3]
    pred_von = network_output[:, 3:]
    pos_residual = pos_target_norm - pred_pos
    von_residual = von_target_norm - pred_von

    # Per-node squared position error; excluded nodes stay at zero.
    per_node_error = torch.zeros(pos_residual.shape[0], device=device)
    if is_normal.any():
        per_node_error[is_normal] = (pos_residual[is_normal] ** 2).sum(dim=1)
    if is_roller.any():
        roller_residual = pos_residual[is_roller]
        per_node_error[is_roller] = roller_residual[:, 0] ** 2 + roller_residual[:, 1] ** 2

    # Mean over the non-obstacle nodes only; zero if no such nodes exist.
    if is_valid.any():
        loss_pos = per_node_error[is_valid].mean()
    else:
        loss_pos = torch.tensor(0.0, device=device)
    loss_von = (von_residual ** 2).mean()
    return loss_pos + loss_von
def prepare_files_and_directories(output_dir, model_num, train_data_path, experiment):
    """
    Build and create the output directory tree for one training run.

    Layout: output_dir/model_num/dataset_name/EXPERIMENT_<experiment>/<timestamp>/
    with 'checkpoint', 'log', and 'rollout' subdirectories inside the
    timestamped run directory. Returns the three subdirectory paths as strings.
    """
    # Dataset name = filename without extension (e.g. ".../train.h5" -> "train").
    dataset_name = train_data_path.split("/")[-1].split(".")[0]
    base_dir = os.path.join(output_dir, str(model_num), dataset_name, f"EXPERIMENT_{experiment}")

    # Timestamp via '%c' (locale-formatted), with spaces/colons swapped for
    # dashes so the name is filesystem-friendly.
    stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%c')
    stamp = stamp.replace(" ", "-").replace(":", "-")

    run_dir = Path(base_dir) / stamp
    checkpoint_dir = run_dir / 'checkpoint'  # model weights
    log_dir = run_dir / 'log'                # training metrics
    rollout_dir = run_dir / 'rollout'        # evaluation trajectories
    for directory in (run_dir, checkpoint_dir, log_dir, rollout_dir):
        directory.mkdir(parents=True, exist_ok=True)
    return str(checkpoint_dir), str(log_dir), str(rollout_dir)
def squeeze_data(data):
    """Return a new dict mapping each key to its tensor with the leading
    size-1 dimension squeezed away (the input dict is not modified)."""
    return {name: tensor.squeeze(0) for name, tensor in data.items()}
def save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch, is_periodic = False):
    """
    Persist the full training state (epoch marker, model, optimizer,
    scheduler) into `checkpoint_dir`.

    When `is_periodic` is True the model file name embeds the 1-based epoch
    number so periodic snapshots are not overwritten; otherwise a single
    rolling "epoch_model_checkpoint" name is reused. Failures are reported
    to stdout rather than raised (best-effort save).
    """
    ckpt = Path(checkpoint_dir)
    try:
        # Small marker file recording which epoch this state belongs to.
        torch.save({'epoch': epoch}, ckpt / "epoch_checkpoint.pth")
        model_checkpoint_name = (
            f"epoch_{epoch + 1}_model_checkpoint" if is_periodic
            else "epoch_model_checkpoint"
        )
        model.save_model(str(ckpt / model_checkpoint_name))
        torch.save(optimizer.state_dict(), ckpt / "epoch_optimizer_checkpoint.pth")
        torch.save(scheduler.state_dict(), ckpt / "epoch_scheduler_checkpoint.pth")
        print(f"Checkpoint saved for epoch {epoch+1}")
    except Exception as e:
        print(f"Error saving checkpoint for epoch {epoch+1}: {e}")
def main(args):
    """Train the model on trajectory data, evaluating and checkpointing
    every 20 epochs (plus the first and last epoch).

    Reads all hyperparameters from `args` (see the argparse block at the
    bottom of this file). Writes checkpoints, metric pickles, and rollout
    pickles under args.output_dir via prepare_files_and_directories.
    """
    device = torch.device('cuda')
    # OneCycleLR shape parameters: initial lr = max_lr / div_factor,
    # final lr = initial_lr / final_div_factor.
    # NOTE(review): with the current defaults this is 8e-4/2e-4 = 4 and
    # 8e-4/5e-5 = 16 — the old "5e-3 / 1e-3" figures were stale.
    div_factor = args.peak_lr / args.base_lr
    final_div_factor = args.peak_lr / args.final_lr
    start_epoch = 0
    start_time = time.time()  # NOTE(review): recorded but never used below
    end_epoch = args.epochs
    print(f"starting training from epoch {start_epoch} to {end_epoch}")
    train_dataset = TrajectoryDataset(args.train_data, split='train')
    val_dataset = TrajectoryDataset(args.val_data, split='val')
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    # Validation is always evaluated one trajectory at a time.
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=True)
    params = dict(field='world_pos', output_size=args.output_size, model=press_model, evaluator=press_eval, k=args.neighbor_k, input_size=args.input_size)
    # NOTE(review): hard-coded; args.model_name exists but is not used here.
    core_model = 'regDGCNN_seg'
    model = press_model.Model(params,core_model_name=core_model).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.base_lr, weight_decay=1e-3)
    if args.scheduler == 'cosine':
        # NOTE(review): scheduler.step() is called per batch below, so T_0 is
        # measured in steps, not epochs, for this scheduler — confirm intended.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0 = args.T_0, T_mult = args.T_mult)
    else:
        # NOTE(review): max_lr is args.base_lr, but div_factor was derived
        # from peak_lr/base_lr, so the actual peak will not equal peak_lr —
        # confirm this is intended.
        scheduler = OneCycleLR(
            optimizer,
            max_lr=args.base_lr,
            epochs=(end_epoch-start_epoch),
            steps_per_epoch=len(train_dataloader),
            pct_start=args.pct_start,
            div_factor=div_factor,
            final_div_factor=final_div_factor,
            cycle_momentum=False
        )
    checkpoint_dir, log_dir, rollout_dir = prepare_files_and_directories(args.output_dir,core_model,args.train_data, args.experiment_id)
    # Per-epoch / per-step bookkeeping, accumulated across the whole run.
    epoch_training_losses = []
    epoch_learning_rate = []
    step_training_losses = []
    epoch_run_times = []
    epoch_eval_losses = []
    for epoch in range(start_epoch, end_epoch):
        print(f"\n=== Epoch {epoch + 1}/{end_epoch} ===")
        epoch_start_time = time.time()
        epoch_training_loss = 0.0
        print("---------------Training Started---------------")
        model.train()
        for data in train_dataloader:
            frame = squeeze_data_frame(data)
            output = model(frame,is_training=True)
            loss = loss_fn(frame, output, model)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()  # per-batch lr update (see scheduler notes above)
            step_loss = loss.detach().cpu()
            step_training_losses.append(step_loss)
            epoch_training_loss += step_loss  # sum of step losses, not mean
        # Record metrics for this epoch
        epoch_training_losses.append(epoch_training_loss)
        epoch_learning_rate.append(optimizer.param_groups[0]['lr'])
        epoch_run_time = time.time() - epoch_start_time
        epoch_run_times.append(epoch_run_time)
        print(f"Epoch {epoch + 1} completed, Training loss: {epoch_training_loss:.6f}, Time taken: {epoch_run_time:.2f}s, Learning rate: {optimizer.param_groups[0]['lr']:.2e}")
        # Rebuilt every epoch but only written to disk on evaluation epochs.
        loss_record = {
            'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item(),
            'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item(),
            'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item(),
            'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item(),
            'train_epoch_losses': epoch_training_losses,
            'all_step_train_losses': step_training_losses,
            'learning_rate': epoch_learning_rate,
            'epoch_run_times': epoch_run_times
        }
        # Evaluate/checkpoint every 20 epochs, on the first epoch, and on the last.
        should_evaluate = (epoch + 1) % 20 == 0 or epoch == start_epoch or (epoch + 1) == end_epoch
        if should_evaluate:
            print(f"Saving checkpoint and evaluating at epoch {epoch + 1}...")
            # Save training metrics
            temp_train_loss_file = Path(log_dir) / f'temp_train_loss_{epoch + 1}.pkl'
            pickle_save(str(temp_train_loss_file), loss_record)
            # Save checkpoint
            save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch)
            print("Running evaluation...")
            model.eval()
            trajectories = []  # Store predicted trajectories
            mse_losses = []    # Mean Squared Error losses
            l1_losses = []     # Mean Absolute Error losses
            # Set up loss functions for evaluation
            mse_loss_fn = torch.nn.MSELoss()  # Mean Squared Error
            l1_loss_fn = torch.nn.L1Loss()    # Mean Absolute Error
            with torch.no_grad():
                for data in val_loader:
                    data=squeeze_data(data)
                    # Full-trajectory rollout from the project evaluator.
                    _, prediction_trajectory = press_eval.evaluate(model, data)
                    # Ground-truth targets for position and von Mises stress.
                    target_pos = torch.squeeze(data['next_pos'].to(device), dim=0)
                    target_von = torch.squeeze(data['next_von'].to(device), dim=0)
                    # Extract predictions from the trajectory
                    pred_pos = prediction_trajectory['pred_pos']
                    pred_von = prediction_trajectory['pred_von']
                    # Calculate losses
                    mse_loss_pos = mse_loss_fn(target_pos, pred_pos)
                    l1_loss_pos = l1_loss_fn(target_pos, pred_pos)
                    mse_loss_von = mse_loss_fn(target_von, pred_von)
                    l1_loss_von = l1_loss_fn(target_von, pred_von)
                    # Combine losses
                    mse_loss = mse_loss_pos + mse_loss_von
                    l1_loss = l1_loss_pos + l1_loss_von
                    # Store the results
                    mse_losses.append(mse_loss.cpu())
                    l1_losses.append(l1_loss.cpu())
                    trajectories.append(prediction_trajectory)
            epoch_eval_losses.append(torch.mean(torch.stack(mse_losses)).item())
            # Save evaluation trajectories
            rollout_file = Path(rollout_dir) / f"rollout_epoch_{epoch + 1}.pkl"
            pickle_save(str(rollout_file), trajectories)
            # Create comprehensive evaluation metrics
            eval_loss_record = {
                'eval_total_mse_loss': torch.sum(torch.stack(mse_losses)).item(),
                'eval_total_l1_loss': torch.sum(torch.stack(l1_losses)).item(),
                'eval_mean_mse_loss': torch.mean(torch.stack(mse_losses)).item(),
                'eval_max_mse_loss': torch.max(torch.stack(mse_losses)).item(),
                'eval_min_mse_loss': torch.min(torch.stack(mse_losses)).item(),
                'eval_mean_l1_loss': torch.mean(torch.stack(l1_losses)).item(),
                'eval_max_l1_loss': torch.max(torch.stack(l1_losses)).item(),
                'eval_min_l1_loss': torch.min(torch.stack(l1_losses)).item(),
                'eval_mse_losses': mse_losses,
                'eval_l1_losses': l1_losses,
                'epoch_eval_losses': epoch_eval_losses
            }
            # Save evaluation metrics
            eval_loss_file = Path(log_dir) / f'eval_loss_epoch_{epoch + 1}.pkl'
            pickle_save(str(eval_loss_file), eval_loss_record)
            print(f"Evaluation completed, Mean MSE Loss: {eval_loss_record['eval_mean_mse_loss']:.6f}, Mean L1 Loss: {eval_loss_record['eval_mean_l1_loss']:.6f}")
    print("\nTraining completed! Saving final results...")
    # Save final comprehensive training loss record
    final_loss_record = {
        'train_total_loss': torch.sum(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_mean_epoch_loss': torch.mean(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_max_epoch_loss': torch.max(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_min_epoch_loss': torch.min(torch.stack(epoch_training_losses)).item() if epoch_training_losses else 0,
        'train_epoch_losses': epoch_training_losses,
        'all_step_train_losses': step_training_losses,
        'learning_rate': epoch_learning_rate,
        'epoch_run_times': epoch_run_times
    }
    # Save final timing information
    pickle_save(str(Path(log_dir) / 'epoch_run_times.pkl'), epoch_run_times)
    pickle_save(str(Path(log_dir) / 'final_train_loss.pkl'), final_loss_record)
    # Save final model state
    model.save_model(str(Path(checkpoint_dir) / "final_model_checkpoint"))
    torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "final_optimizer_checkpoint.pth"))
    torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "final_scheduler_checkpoint.pth"))
    return
if __name__ == "__main__":
    # CLI definition as a (flag, kwargs) table; order matches --help output.
    _cli_options = [
        ('--train_data', dict(type=str, default="/home/rachit/GlassForming/processed/train.h5")),
        ('--val_data', dict(type=str, default="/home/rachit/GlassForming/processed/val.h5")),
        ('--output_dir', dict(type=str, default="/home/rachit/GlassForming/new_final_code_output")),
        ('--batch_size', dict(type=int, default=1)),
        ('--epochs', dict(type=int, default=2000)),
        ('--base_lr', dict(type=float, default=2e-4)),
        ('--peak_lr', dict(type=float, default=8e-4)),
        ('--final_lr', dict(type=float, default=5e-5)),
        ('--pct_start', dict(type=float, default=0.1)),
        ('--T_0', dict(type=int, default=10)),
        ('--T_mult', dict(type=int, default=2)),
        ('--scheduler', dict(type=str, default="cosine", choices=["cosine", "onecycle"])),
        ('--experiment_id', dict(type=str, default="400")),
        ('--neighbor_k', dict(type=int, default=80)),
        ('--model_name', dict(type=str, default="regDGCNN_seg")),
        ('--input_size', dict(type=int, default=11)),
        ('--output_size', dict(type=int, default=4)),
    ]
    parser = argparse.ArgumentParser()
    for flag, options in _cli_options:
        parser.add_argument(flag, **options)
    main(parser.parse_args())