# Uploaded with huggingface_hub (commit 328a002, verified)
import os
from pathlib import Path
import pickle
import time
import datetime
import argparse
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import OneCycleLR, CosineAnnealingWarmRestarts
# Make sure to use the dataset file that supports different modes
from utilities.dataset import TrajectoryDataset
from models import fatigue_model
from models import fatigue_eval # Re-importing for detailed evaluation
# Run on the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def squeeze_data_frame(data_frame):
    """Squeeze the leading batch dimension off every tensor in *data_frame*.

    The dictionary is modified in place (non-tensor entries are left alone)
    and also returned for convenience.
    """
    for key in data_frame:
        value = data_frame[key]
        if isinstance(value, torch.Tensor):
            data_frame[key] = value.squeeze(0)
    return data_frame
def squeeze_data(data):
    """Return a NEW dict containing only the tensors of *data*, each with
    dimension 0 squeezed.

    Unlike squeeze_data_frame, non-tensor entries are dropped and the input
    dictionary is not modified.
    """
    squeezed = {}
    for name, value in data.items():
        if isinstance(value, torch.Tensor):
            squeezed[name] = value.squeeze(0)
    return squeezed
def pickle_save(path, data):
    """Serialize *data* to the file at *path* using pickle."""
    with open(path, "wb") as handle:
        pickle.dump(data, handle)
def loss_fn_regressor(inputs, network_output, model):
    """Mean-squared-error loss between the model's normalized prediction and
    the normalized log fatigue-life target.

    Raises ValueError if the model's output normalizer has not been fitted
    (detected via its private `_acc_count` counter — TODO confirm this stays
    in sync with the normalizer implementation).
    """
    target_log_life = inputs['fatigue_life'].to(device)
    normalizer = model.get_output_life_normalizer()
    fitted = (normalizer is not None
              and hasattr(normalizer, '_acc_count')
              and normalizer._acc_count > 0)
    if not fitted:
        raise ValueError("Model's normalizer has not been fitted. Call model.fit_normalizer().")
    normalized_target = normalizer(target_log_life)
    normalized_prediction = network_output[:, :1]  # first output channel is the life prediction
    return torch.mean((normalized_target - normalized_prediction) ** 2)
def loss_fn_classifier(inputs, network_output, model):
    """Binary cross-entropy (with logits) loss for the LCF/HCF classifier.

    *model* is unused but kept so the signature matches loss_fn_regressor.
    """
    labels = inputs['fatigue_class'].to(device)
    logits = network_output[:, :1]  # first output channel carries the class logit
    return nn.BCEWithLogitsLoss()(logits, labels)
def prepare_files_and_directories(output_dir, model_type, core_model, train_data_path, experiment_id):
    """Create the per-run output tree and return its three leaf directories.

    Layout: output_dir/model_type/core_model/<dataset stem>/EXPERIMENT_<id>/<timestamp>/
    with `checkpoint`, `log`, and `rollout` subdirectories inside.

    Returns (checkpoint_dir, log_dir, rollout_dir) as strings.
    """
    dataset_name = Path(train_data_path).stem
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    run_dir = (Path(output_dir) / model_type / core_model / dataset_name
               / f"EXPERIMENT_{experiment_id}" / timestamp)
    leaves = [run_dir / name for name in ('checkpoint', 'log', 'rollout')]
    for leaf in leaves:
        leaf.mkdir(parents=True, exist_ok=True)
    return tuple(str(leaf) for leaf in leaves)
def save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch):
    """Write a periodic training-state checkpoint (epoch marker, model,
    optimizer state, scheduler state) into *checkpoint_dir*.

    Failures are reported to stdout but never raised, so a bad disk write
    cannot kill the training loop.
    """
    ckpt = Path(checkpoint_dir)
    try:
        torch.save({'epoch': epoch}, ckpt / "epoch_checkpoint.pth")
        model.save_model(str(ckpt / f"epoch_{epoch + 1}_model_checkpoint"))
        torch.save(optimizer.state_dict(), ckpt / "epoch_optimizer_checkpoint.pth")
        torch.save(scheduler.state_dict(), ckpt / "epoch_scheduler_checkpoint.pth")
        print(f"Periodic checkpoint saved for epoch {epoch+1}")
    except Exception as e:
        print(f"Error saving periodic checkpoint for epoch {epoch+1}: {e}")
def main(args):
    """Train a fatigue classifier or regressor end to end.

    Builds the datasets/dataloaders for the requested model type, fits the
    target normalizer (regressors only), runs the training loop with the
    selected LR scheduler, periodically evaluates on the validation set, and
    saves checkpoints, logs, rollouts, and the final model under
    args.output_dir.
    """
    # OneCycleLR shape: start at base_lr, peak at peak_lr, decay toward final_lr.
    div_factor = args.peak_lr / args.base_lr
    final_div_factor = args.peak_lr / args.final_lr
    start_epoch = 0
    end_epoch = args.epochs

    # Select the dataset mode and loss function for the requested model type.
    if args.model_type == 'classifier':
        print("--- Mode: Training CLASSIFIER ---")
        dataset_mode = 'classifier'
        loss_fn = loss_fn_classifier
    else:  # lcf_regressor or hcf_regressor
        print(f"--- Mode: Training {args.model_type.upper()} ---")
        dataset_mode = args.model_type
        loss_fn = loss_fn_regressor

    print(f"Starting training from epoch {start_epoch} to {end_epoch}")
    train_dataset = TrajectoryDataset(args.train_data, split='train', mode=dataset_mode)
    val_dataset = TrajectoryDataset(args.val_data, split='val', mode=dataset_mode)
    if len(train_dataset) == 0:
        print(f"ERROR: Training dataset for mode '{dataset_mode}' is empty. Cannot train.")
        return
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    # Validation is optional: skip evaluation entirely when no val samples exist.
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=True) if len(val_dataset) > 0 else None

    core_model = args.model_name
    params = dict(
        purpose=args.model_type,
        output_size=args.output_size,
        k=args.neighbor_k,
        input_size=args.input_size
    )
    model = fatigue_model.Model(params, core_model_name=core_model).to(device)
    # Regressors predict a normalized log-life, so the output normalizer must
    # be fitted on the training data before the first loss computation.
    if 'regressor' in args.model_type:
        model.fit_normalizer(train_dataloader)

    optimizer = torch.optim.AdamW(model.parameters(), lr=args.base_lr, weight_decay=1e-3)
    if args.scheduler == 'cosine':
        # NOTE(review): this scheduler is stepped once per BATCH in the loop
        # below, so T_0 / T_mult are effectively measured in batches, not
        # epochs — confirm that is the intent.
        scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=args.T_0, T_mult=args.T_mult)
    else:
        scheduler = OneCycleLR(
            optimizer,
            # Bug fix: max_lr must be the PEAK rate. With max_lr=args.base_lr
            # the cycle would start at base_lr**2 / peak_lr and peak at
            # base_lr, contradicting div_factor = peak_lr / base_lr above.
            max_lr=args.peak_lr,
            epochs=(end_epoch - start_epoch),
            steps_per_epoch=len(train_dataloader),
            pct_start=args.pct_start,
            div_factor=div_factor,
            final_div_factor=final_div_factor,
            cycle_momentum=False
        )

    checkpoint_dir, log_dir, rollout_dir = prepare_files_and_directories(
        args.output_dir, args.model_type, core_model, args.train_data, args.experiment_id)

    epoch_training_losses, epoch_learning_rate, step_training_losses, epoch_run_times, epoch_eval_losses = [], [], [], [], []
    for epoch in range(start_epoch, end_epoch):
        print(f"\n=== Epoch {epoch + 1}/{end_epoch} ===")
        epoch_start_time = time.time()
        model.train()
        epoch_training_loss = 0.0
        for data in train_dataloader:
            frame = squeeze_data_frame(data)
            output = model(frame, is_training=True)
            loss = loss_fn(frame, output, model)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()  # per-batch step (required for OneCycleLR)
            step_loss = loss.item()
            step_training_losses.append(step_loss)
            epoch_training_loss += step_loss
        epoch_training_losses.append(epoch_training_loss)
        epoch_learning_rate.append(optimizer.param_groups[0]['lr'])
        epoch_run_time = time.time() - epoch_start_time
        epoch_run_times.append(epoch_run_time)
        print(f"Epoch {epoch + 1} completed. Total Training Loss: {epoch_training_loss:.6f}, Time: {epoch_run_time:.2f}s, LR: {optimizer.param_groups[0]['lr']:.2e}")

        # Aggregate statistics over all epochs so far for the training log.
        epoch_losses_tensor = torch.tensor(epoch_training_losses)
        loss_record = {
            'train_total_loss': torch.sum(epoch_losses_tensor).item(),
            'train_mean_epoch_loss': torch.mean(epoch_losses_tensor).item(),
            'train_max_epoch_loss': torch.max(epoch_losses_tensor).item(),
            'train_min_epoch_loss': torch.min(epoch_losses_tensor).item(),
            'train_epoch_losses': epoch_training_losses,
            'all_step_train_losses': step_training_losses,
            'learning_rate': epoch_learning_rate,
            'epoch_run_times': epoch_run_times
        }

        # "Best" here means: training loss improved over the PREVIOUS epoch
        # (not a global minimum) — kept from the original training recipe.
        if len(epoch_training_losses) >= 2 and epoch_training_losses[-1] < epoch_training_losses[-2]:
            print("Training loss improved. Saving best model checkpoint...")
            model.save_model(str(Path(checkpoint_dir) / f"best_model_checkpoint_{epoch}"))
            torch.save(optimizer.state_dict(), str(Path(checkpoint_dir) / "best_optimizer_checkpoint.pth"))
            torch.save(scheduler.state_dict(), str(Path(checkpoint_dir) / "best_scheduler_checkpoint.pth"))

        # Evaluate every 20 epochs and on the final epoch (when a val set exists).
        should_evaluate = (val_loader is not None) and ((epoch + 1) % 20 == 0 or (epoch + 1) == end_epoch)
        if should_evaluate:
            print(f"Saving checkpoint and evaluating at epoch {epoch + 1}...")
            temp_train_loss_file = Path(log_dir) / f'temp_train_loss_{epoch + 1}.pkl'
            pickle_save(str(temp_train_loss_file), loss_record)
            save_checkpoint(checkpoint_dir, model, optimizer, scheduler, epoch)
            print("--- Running Evaluation ---")
            model.eval()
            eval_mse_losses, eval_l1_losses, eval_accuracies, trajectories = [], [], [], []
            with torch.no_grad():
                for data in val_loader:
                    frame = squeeze_data(data)
                    if 'regressor' in args.model_type:
                        prediction_trajectory = fatigue_eval.evaluate(model, frame)
                        trajectories.append(prediction_trajectory)
                        target = frame['fatigue_life'].to(device)
                        prediction = prediction_trajectory['pred_fatigue_life']
                        eval_mse_losses.append(nn.functional.mse_loss(prediction, target).cpu())
                        eval_l1_losses.append(nn.functional.l1_loss(prediction, target).cpu())
                    elif args.model_type == 'classifier':
                        output = model(frame, is_training=False)
                        target = frame['fatigue_class'].to(device)
                        preds = (output > 0).float()  # logit > 0 == positive class
                        eval_accuracies.append((preds == target).float().mean().cpu())
            if trajectories:
                rollout_file = Path(rollout_dir) / f"rollout_epoch_{epoch + 1}.pkl"
                pickle_save(str(rollout_file), trajectories)

            eval_loss_record = {}
            if eval_mse_losses:
                mse_stack = torch.stack(eval_mse_losses)
                l1_stack = torch.stack(eval_l1_losses)
                mean_mse = torch.mean(mse_stack).item()
                epoch_eval_losses.append(mean_mse)
                eval_loss_record.update({
                    'eval_total_mse_loss': torch.sum(mse_stack).item(),
                    'eval_total_l1_loss': torch.sum(l1_stack).item(),
                    'eval_mean_mse_loss': mean_mse,
                    'eval_max_mse_loss': torch.max(mse_stack).item(),
                    'eval_min_mse_loss': torch.min(mse_stack).item(),
                    'eval_mean_l1_loss': torch.mean(l1_stack).item(),
                    'eval_max_l1_loss': torch.max(l1_stack).item(),
                    'eval_min_l1_loss': torch.min(l1_stack).item(),
                    'eval_mse_losses': eval_mse_losses,
                    'eval_l1_losses': eval_l1_losses,
                    'epoch_eval_losses': epoch_eval_losses,
                })
                print(f"Evaluation: Mean MSE Loss (on log10): {eval_loss_record['eval_mean_mse_loss']:.6f}, Mean L1 Loss (on log10): {eval_loss_record['eval_mean_l1_loss']:.6f}")
            if eval_accuracies:
                mean_acc = torch.mean(torch.stack(eval_accuracies)).item()
                eval_loss_record['eval_mean_accuracy'] = mean_acc
                print(f"Evaluation: Mean Accuracy: {mean_acc:.4f}")
            eval_loss_file = Path(log_dir) / f'eval_loss_epoch_{epoch + 1}.pkl'
            pickle_save(str(eval_loss_file), eval_loss_record)

    print("\nTraining completed! Saving final results...")
    final_loss_record = {
        'train_epoch_losses': epoch_training_losses,
        'all_step_train_losses': step_training_losses,
        'learning_rate': epoch_learning_rate,
        'epoch_run_times': epoch_run_times
    }
    pickle_save(str(Path(log_dir) / 'final_train_loss.pkl'), final_loss_record)
    final_model_path = str(Path(checkpoint_dir) / "final_model_checkpoint")
    model.save_model(final_model_path)
    print("Done.")
def _build_arg_parser():
    """Construct the CLI parser for the fatigue-model training entry point."""
    parser = argparse.ArgumentParser(description="Train a fatigue life prediction model (classifier or regressor).")
    parser.add_argument('--model_type', type=str, required=True, choices=['classifier', 'lcf_regressor', 'hcf_regressor'], help='Specify which model to train.')
    # Data locations
    parser.add_argument('--train_data', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life_Combined_master/datasets/extracted_data/shaft_low_extra2_.h5")
    parser.add_argument('--val_data', type=str, default="/home/gd_user1/AnK/project_PINN/Project_Fatigue/Fatigue_Life_Combined_master/datasets/extracted_data/shaft_low_extra2_.h5")
    parser.add_argument('--output_dir', type=str, default="./output")
    # Optimization schedule
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--base_lr', type=float, default=2e-4)
    parser.add_argument('--peak_lr', type=float, default=8e-4)
    parser.add_argument('--final_lr', type=float, default=5e-5)
    parser.add_argument('--pct_start', type=float, default=0.1)
    parser.add_argument('--T_0', type=int, default=10)
    parser.add_argument('--T_mult', type=int, default=2)
    parser.add_argument('--scheduler', type=str, default="onecycle", choices=["cosine", "onecycle"])
    # Model / experiment configuration
    parser.add_argument('--experiment_id', type=str, default="ktrain_lcf_extra_multi_k80_500")
    parser.add_argument('--neighbor_k', type=int, default=80)
    parser.add_argument('--model_name', type=str, default="regDGCNN_seg")
    parser.add_argument('--input_size', type=int, default=9)
    parser.add_argument('--output_size', type=int, default=1, help="Should be 1 for both regressor and classifier.")
    return parser


if __name__ == "__main__":
    main(_build_arg_parser().parse_args())