import glob
import os
import re
import shutil
import time

import torch
import wandb
from accelerate import Accelerator
from tqdm import tqdm

from .logger import ModelLogger
from .training_module import DiffusionTrainingModule


def launch_training_task(
    accelerator: Accelerator,
    dataset: torch.utils.data.Dataset,
    model: DiffusionTrainingModule,
    model_logger: ModelLogger,
    learning_rate: float = 1e-5,
    weight_decay: float = 1e-2,
    num_workers: int = 1,
    save_steps: int = None,
    num_epochs: int = 1,
    args=None,
):
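    """Run the training loop: forward/backward, checkpointing, resume, and W&B logging.

    When ``args`` is provided, its fields override the corresponding keyword arguments.
    """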
    if args is not None:
        learning_rate = args.learning_rate
        weight_decay = args.weight_decay
        num_workers = args.dataset_num_workers
        save_steps = args.save_steps
        num_epochs = args.num_epochs
    # getattr falls back to the default when args is None, so these stay defined either way.
    wandb_project = getattr(args, "wandb_project", "diffusion_training")
    wandb_run_name = getattr(args, "wandb_run_name", None)
    max_checkpoints = getattr(args, "max_checkpoints", None)
    reset_training_progress = getattr(args, "reset_training_progress", False)

    optimizer = torch.optim.AdamW(model.trainable_modules(), lr=learning_rate, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        shuffle=True,
        collate_fn=lambda x: x[0],  # default batch_size=1; unwrap the single sample
        num_workers=num_workers,
        prefetch_factor=4 if num_workers > 0 else None,  # prefetch_factor is only valid with workers
        pin_memory=True,
    )

    model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)

    global_step = 0
    starting_epoch = 0
    resume_step = 0

    resume_from_checkpoint = getattr(args, "resume_from_checkpoint", None)
    if resume_from_checkpoint:
        accelerator.print(f"Resuming training from checkpoint: {resume_from_checkpoint}")
        accelerator.load_state(resume_from_checkpoint)

        if reset_training_progress:
            # Keep the restored weights and optimizer state, but restart the step/epoch counters.
            accelerator.print("Config: reset_training_progress=True. Starting a new training phase (Step 0, Epoch 0).")
        else:
            # Recover the global step from the folder name, e.g. "checkpoint-1500" -> 1500.
            match = re.search(r"checkpoint-(\d+)", resume_from_checkpoint)
            if match:
                global_step = int(match.group(1))
                steps_per_epoch = len(dataloader)
                starting_epoch = global_step // steps_per_epoch
                resume_step = global_step % steps_per_epoch
                model_logger.num_steps = global_step
                accelerator.print(f"Resuming at epoch {starting_epoch}, starting at step {resume_step} in the current epoch.")
            else:
                accelerator.print("Warning: Could not infer global_step from checkpoint folder name. Make sure it contains 'checkpoint-<step_number>'.")

    if accelerator.is_main_process:
        wandb.init(project=wandb_project, name=wandb_run_name, mode="offline")
    total_steps = num_epochs * len(dataloader)
    progress_bar = tqdm(
        initial=global_step,
        total=total_steps,
        desc="Steps",
        disable=not accelerator.is_main_process,
    )

    for epoch_id in range(starting_epoch, num_epochs):
        accelerator.print(f"Epoch {epoch_id}: begin")
        if resume_from_checkpoint and epoch_id == starting_epoch and resume_step > 0:
            # Skip the batches already consumed before the checkpoint was written.
            active_dataloader = accelerator.skip_first_batches(dataloader, resume_step)
        else:
            active_dataloader = dataloader
        for data in active_dataloader:
            with accelerator.accumulate(model):
                loss = model(data)
                accelerator.backward(loss)
                optimizer.step()
                model_logger.on_step_end(accelerator, model, save_steps)
                scheduler.step()
                # Zero gradients after the step, not before the forward pass:
                # zeroing at the top of the block discards the gradients collected
                # across micro-steps when gradient accumulation is enabled.
                optimizer.zero_grad()

            global_step += 1

            if save_steps is not None and global_step % save_steps == 0:
                save_path = os.path.join(model_logger.output_path, f"checkpoint-{global_step}")
                accelerator.print(f"Saving checkpoint at step {global_step}...")
                start_time = time.time()
                # save_state is a collective operation: every process must call it,
                # not just the main one, or distributed backends can hang.
                accelerator.save_state(save_path)
                accelerator.print(f"accelerator.save_state took {time.time() - start_time:.4f} s")

                if max_checkpoints is not None and max_checkpoints > 0:
                    accelerator.wait_for_everyone()
                    if accelerator.is_main_process:
                        # Rotate step checkpoints: keep only the newest `max_checkpoints`.
                        all_ckpts = []
                        for ckpt in glob.glob(os.path.join(model_logger.output_path, "checkpoint-[0-9]*")):
                            match = re.search(r"checkpoint-(\d+)$", ckpt)
                            if match and int(match.group(1)) <= global_step:
                                all_ckpts.append((int(match.group(1)), ckpt))
                        all_ckpts.sort()

                        while len(all_ckpts) > max_checkpoints:
                            _, oldest_ckpt = all_ckpts.pop(0)
                            try:
                                if os.path.exists(oldest_ckpt):
                                    shutil.rmtree(oldest_ckpt)
                                    print(f"Removed old checkpoint folder: {oldest_ckpt}")
                            except Exception as e:
                                print(f"Failed to remove checkpoint {oldest_ckpt}: {e}")

            if accelerator.is_main_process:
                loss_value = loss.item()
                wandb.log({
                    "loss": loss_value,
                    "epoch": epoch_id,
                    "global_step": global_step,
                })
                progress_bar.set_postfix(loss=f"{loss_value:.4f}")
                progress_bar.update(1)

        if save_steps is None:
            model_logger.on_epoch_end(accelerator, model, epoch_id)
            save_path = os.path.join(model_logger.output_path, f"checkpoint-epoch-{epoch_id}")
            accelerator.save_state(save_path)

            if max_checkpoints is not None and max_checkpoints > 0:
                accelerator.wait_for_everyone()
                if accelerator.is_main_process:
                    # Rotate epoch checkpoints the same way as step checkpoints.
                    all_epoch_ckpts = []
                    for ckpt in glob.glob(os.path.join(model_logger.output_path, "checkpoint-epoch-[0-9]*")):
                        match = re.search(r"checkpoint-epoch-(\d+)$", ckpt)
                        if match and int(match.group(1)) <= epoch_id:
                            all_epoch_ckpts.append((int(match.group(1)), ckpt))
                    all_epoch_ckpts.sort()

                    while len(all_epoch_ckpts) > max_checkpoints:
                        _, oldest_ckpt = all_epoch_ckpts.pop(0)
                        try:
                            if os.path.exists(oldest_ckpt):
                                shutil.rmtree(oldest_ckpt)
                                print(f"Removed old epoch checkpoint: {oldest_ckpt}")
                        except Exception as e:
                            print(f"Failed to remove epoch checkpoint {oldest_ckpt}: {e}")

        if accelerator.is_main_process:
            print(f"Epoch {epoch_id}: finished")
    progress_bar.close()
    model_logger.on_training_end(accelerator, model, save_steps)


def launch_data_process_task(
    accelerator: Accelerator,
    dataset: torch.utils.data.Dataset,
    model: DiffusionTrainingModule,
    model_logger: ModelLogger,
    num_workers: int = 8,
    args=None,
):
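    """Run the model over the dataset once and save each output as a .pth file.

    Each process writes into its own subfolder, keyed by ``accelerator.process_index``.
    """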
    if args is not None:
        num_workers = args.dataset_num_workers

    dataloader = torch.utils.data.DataLoader(dataset, shuffle=False, collate_fn=lambda x: x[0], num_workers=num_workers)
    model, dataloader = accelerator.prepare(model, dataloader)

    for data_id, data in enumerate(tqdm(dataloader)):
        with torch.no_grad():
            # One output folder per process, so ranks never collide on filenames.
            folder = os.path.join(model_logger.output_path, str(accelerator.process_index))
            os.makedirs(folder, exist_ok=True)
            save_path = os.path.join(folder, f"{data_id}.pth")
            data = model(data)
            torch.save(data, save_path)
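

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). `MyDataset` and `MyTrainingModule` are
# hypothetical placeholders, and the ModelLogger constructor arguments are
# assumptions; substitute the project's real Dataset, DiffusionTrainingModule
# subclass, and logger configuration.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator(gradient_accumulation_steps=4)
#   dataset = MyDataset(...)                 # hypothetical torch Dataset
#   model = MyTrainingModule(...)            # hypothetical DiffusionTrainingModule subclass
#   model_logger = ModelLogger(output_path="./checkpoints")  # assumed signature
#   launch_training_task(
#       accelerator, dataset, model, model_logger,
#       learning_rate=1e-5, save_steps=500, num_epochs=2,
#   )
# ---------------------------------------------------------------------------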