import os, torch, re, time, shutil, glob
from tqdm import tqdm
from accelerate import Accelerator
from .training_module import DiffusionTrainingModule
from .logger import ModelLogger
import wandb
def launch_training_task(
accelerator: Accelerator,
dataset: torch.utils.data.Dataset,
model: DiffusionTrainingModule,
model_logger: ModelLogger,
learning_rate: float = 1e-5,
weight_decay: float = 1e-2,
num_workers: int = 1,
save_steps: int = None,
num_epochs: int = 1,
args = None,
):
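    """Run the main training loop for `model` on `dataset` under Accelerate.

    Supports resuming from a saved state, offline wandb logging, periodic
    checkpointing every `save_steps` optimizer steps (or once per epoch when
    `save_steps` is None), and pruning old checkpoints down to `max_checkpoints`.
    """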
if args is not None:
learning_rate = args.learning_rate
weight_decay = args.weight_decay
num_workers = args.dataset_num_workers
save_steps = args.save_steps
num_epochs = args.num_epochs
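    # The optional settings below use getattr so their defaults also apply when args is None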
    wandb_project = getattr(args, "wandb_project", "diffusion_training")
    wandb_run_name = getattr(args, "wandb_run_name", None)
    max_checkpoints = getattr(args, "max_checkpoints", None)
    reset_training_progress = getattr(args, "reset_training_progress", False)
optimizer = torch.optim.AdamW(model.trainable_modules(), lr=learning_rate, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
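    # batch_size defaults to 1, so collate_fn simply unwraps the single sample; note prefetch_factor requires num_workers > 0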
    dataloader = torch.utils.data.DataLoader(dataset, shuffle=True, collate_fn=lambda x: x[0], num_workers=num_workers, prefetch_factor=4, pin_memory=True)
model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)
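    # Step/epoch counters used for checkpoint naming and for resuming mid-epoch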
global_step = 0
starting_epoch = 0
resume_step = 0
resume_from_checkpoint = getattr(args, "resume_from_checkpoint", None)
if resume_from_checkpoint:
accelerator.print(f"Resuming training from checkpoint: {resume_from_checkpoint}")
accelerator.load_state(resume_from_checkpoint)
if reset_training_progress:
            # New training phase: keep the model/optimizer state restored by load_state, but skip parsing the old step count
            accelerator.print("Config: reset_training_progress=True. Starting a new training phase (Step 0, Epoch 0).")
            # # Important: load_state overwrites the optimizer's learning rate with the old value; uncomment to force the newly passed learning rate instead
            # for param_group in optimizer.param_groups:
            #     param_group['lr'] = learning_rate
else:
            # Original resume behavior: parse the previous global_step from the checkpoint path
match = re.search(r"checkpoint-(\d+)", resume_from_checkpoint)
if match:
global_step = int(match.group(1))
steps_per_epoch = len(dataloader)
starting_epoch = global_step // steps_per_epoch
resume_step = global_step % steps_per_epoch
model_logger.num_steps = global_step
accelerator.print(f"Resuming at epoch {starting_epoch}, starting at step {resume_step} in the current epoch.")
else:
accelerator.print("Warning: Could not infer global_step from checkpoint folder name. Make sure it contains 'checkpoint-<step_number>'.")
if accelerator.is_main_process:
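        # wandb is initialized in offline mode; run `wandb sync` on the offline run directory to upload the logs later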
        wandb.init(project=wandb_project, name=wandb_run_name, mode="offline")
    total_steps = num_epochs * len(dataloader)
progress_bar = tqdm(
initial=global_step,
total=total_steps,
desc="Steps",
disable=not accelerator.is_main_process
)
    for epoch_id in range(starting_epoch, num_epochs):
        accelerator.print(f"Epoch {epoch_id} begin")
if resume_from_checkpoint and epoch_id == starting_epoch and resume_step > 0:
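            # Fast-forward the dataloader past the batches that were already consumed before the checkpoint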
active_dataloader = accelerator.skip_first_batches(dataloader, resume_step)
else:
active_dataloader = dataloader
for data in active_dataloader:
with accelerator.accumulate(model):
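                # Under gradient accumulation, accumulate() defers gradient synchronization until the final micro-batch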
optimizer.zero_grad()
# if dataset.load_from_cache:
# loss = model({}, inputs=data)
# else:
loss = model(data)
accelerator.backward(loss)
optimizer.step()
model_logger.on_step_end(accelerator, model, save_steps)
scheduler.step()
global_step += 1
# print(global_step,"step complete",save_steps)
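                # accelerator.save_state writes the full training state (model, optimizer, scheduler, RNG) for exact resumption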
if save_steps is not None and global_step % save_steps == 0:
if accelerator.is_main_process:
print(f"Saving checkpoint at step {global_step}...")
save_path = os.path.join(model_logger.output_path, f"checkpoint-{global_step}")
start_time = time.time()
accelerator.save_state(save_path)
end_time = time.time()
elapsed_time = end_time - start_time
                    print(f"accelerator.save_state took {elapsed_time:.4f} s")
                    if max_checkpoints is not None and max_checkpoints > 0:
                        # Make sure every process has finished saving before cleanup, to avoid deadlocks or corrupted files
                        accelerator.wait_for_everyone()
                        if accelerator.is_main_process:
                            # Match step-level checkpoint folders only (the epoch-level folders use a different prefix)
                            all_ckpts = glob.glob(os.path.join(model_logger.output_path, "checkpoint-[0-9]*"))
                            all_ckpts = [ckpt for ckpt in all_ckpts if (m := re.search(r'checkpoint-(\d+)$', ckpt)) and int(m.group(1)) <= global_step]
                            # Sort by the numeric suffix in ascending order (oldest first)
                            all_ckpts.sort(key=lambda x: int(re.search(r'checkpoint-(\d+)$', x).group(1)))
                            while len(all_ckpts) > max_checkpoints:
                                oldest_ckpt = all_ckpts.pop(0)
                                # Remove the (large) saved-state folder
                                try:
                                    if os.path.exists(oldest_ckpt):
                                        shutil.rmtree(oldest_ckpt)
                                        print(f"Removed old checkpoint folder: {oldest_ckpt}")
                                except Exception as e:
                                    print(f"Failed to remove checkpoint {oldest_ckpt}: {e}")
if accelerator.is_main_process:
wandb.log({
"loss": loss.item(),
"epoch": epoch_id,
"global_step": global_step
})
progress_bar.set_postfix(loss=f"{loss.item():.4f}")
progress_bar.update(1)
if save_steps is None:
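            # No step-based save cadence configured: checkpoint once at the end of each epoch instead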
model_logger.on_epoch_end(accelerator, model, epoch_id)
save_path = os.path.join(model_logger.output_path, f"checkpoint-epoch-{epoch_id}")
accelerator.save_state(save_path)
if max_checkpoints is not None and max_checkpoints > 0:
accelerator.wait_for_everyone()
if accelerator.is_main_process:
all_epoch_ckpts = glob.glob(os.path.join(model_logger.output_path, "checkpoint-epoch-[0-9]*"))
all_epoch_ckpts = [ckpt for ckpt in all_epoch_ckpts if int(re.search(r'checkpoint-epoch-(\d+)$', ckpt).group(1)) <= epoch_id]
all_epoch_ckpts.sort(key=lambda x: int(re.search(r'checkpoint-epoch-(\d+)$', x).group(1)))
while len(all_epoch_ckpts) > max_checkpoints:
oldest_ckpt = all_epoch_ckpts.pop(0)
try:
if os.path.exists(oldest_ckpt):
shutil.rmtree(oldest_ckpt)
                                print(f"Removed old epoch checkpoint: {oldest_ckpt}")
except Exception as e:
                            print(f"Failed to remove epoch checkpoint {oldest_ckpt}: {e}")
if accelerator.is_main_process:
print(f"epoch {epoch_id+1}: finished")
progress_bar.close()
model_logger.on_training_end(accelerator, model, save_steps)
def launch_data_process_task(
accelerator: Accelerator,
dataset: torch.utils.data.Dataset,
model: DiffusionTrainingModule,
model_logger: ModelLogger,
num_workers: int = 8,
args = None,
):
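    """Run `model` over `dataset` once with gradients disabled and cache each processed
    sample to `<output_path>/<process_index>/<data_id>.pth`.
    """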
if args is not None:
num_workers = args.dataset_num_workers
dataloader = torch.utils.data.DataLoader(dataset, shuffle=False, collate_fn=lambda x: x[0], num_workers=num_workers)
model, dataloader = accelerator.prepare(model, dataloader)
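    # accelerator.prepare shards the dataloader across processes, so each rank writes its own slice under its process_index folder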
for data_id, data in enumerate(tqdm(dataloader)):
with accelerator.accumulate(model):
with torch.no_grad():
folder = os.path.join(model_logger.output_path, str(accelerator.process_index))
os.makedirs(folder, exist_ok=True)
                save_path = os.path.join(folder, f"{data_id}.pth")
data = model(data)
torch.save(data, save_path)
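
# Minimal usage sketch (comment only, for orientation). `MyDataset` and `MyTrainingModule`
# are assumed placeholder names and the ModelLogger constructor signature is an assumption;
# the real dataset and DiffusionTrainingModule subclass come from the surrounding project.
#
# from accelerate import Accelerator
#
# accelerator = Accelerator(gradient_accumulation_steps=1)
# dataset = MyDataset(...)                            # any torch.utils.data.Dataset
# model = MyTrainingModule(...)                       # a DiffusionTrainingModule subclass
# model_logger = ModelLogger(output_path="./output")  # assumes an output_path keyword
# launch_training_task(
#     accelerator, dataset, model, model_logger,
#     learning_rate=1e-5, save_steps=500, num_epochs=1,
# )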