from causvid.data import ODERegressionDataset, ODERegressionLMDBDataset
from causvid.ode_regression import ODERegression
from causvid.models import get_block_class
from collections import defaultdict
from causvid.util import (
    launch_distributed_job,
    set_seed, init_logging_folder,
    fsdp_wrap, cycle,
    fsdp_state_dict,
    barrier
)
import torch.distributed as dist
from omegaconf import OmegaConf
import argparse
import torch
import wandb
import time
import os
|
|
class Trainer:
    def __init__(self, config):
        self.config = config

        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

        # Step 1: Set up the distributed training environment
        # (rank, world size, device, dtype).
        launch_distributed_job()
        global_rank = dist.get_rank()
        self.world_size = dist.get_world_size()

        self.dtype = torch.bfloat16 if config.mixed_precision else torch.float32
        self.device = torch.cuda.current_device()
        self.is_main_process = global_rank == 0

        # If no seed is given, draw one on rank 0 and broadcast it so every rank
        # agrees, then offset by rank so each process gets a distinct stream.
        if config.seed == 0:
            random_seed = torch.randint(0, 10000000, (1,), device=self.device)
            dist.broadcast(random_seed, src=0)
            config.seed = random_seed.item()

        set_seed(config.seed + global_rank)
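
        # Create the logging / checkpoint output folder on the main process.
        # NOTE: this is a reconstruction sketch; the exact signature and return
        # values of init_logging_folder are assumptions. save() below requires
        # self.output_path to be set on the main process.
        if self.is_main_process:
            self.output_path, self.wandb_folder = init_logging_folder(config)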
|
|
        # Step 2: Build the ODE-regression distillation model and shard its
        # generator and text encoder with FSDP.
        if config.distillation_loss == "ode":
            self.distillation_model = ODERegression(config, device=self.device)
        else:
            raise ValueError("Invalid distillation loss type")

        self.distillation_model.generator = fsdp_wrap(
            self.distillation_model.generator,
            sharding_strategy=config.sharding_strategy,
            mixed_precision=config.mixed_precision,
            wrap_strategy=config.generator_fsdp_wrap_strategy,
            transformer_module=(
                get_block_class(config.generator_fsdp_transformer_module),
            ) if config.generator_fsdp_wrap_strategy == "transformer" else None
        )
        self.distillation_model.text_encoder = fsdp_wrap(
            self.distillation_model.text_encoder,
            sharding_strategy=config.sharding_strategy,
            mixed_precision=config.mixed_precision,
            wrap_strategy=config.text_encoder_fsdp_wrap_strategy,
            transformer_module=(
                get_block_class(config.text_encoder_fsdp_transformer_module),
            ) if config.text_encoder_fsdp_wrap_strategy == "transformer" else None
        )

        # Only parameters that require gradients (the generator's) are optimized.
        self.generator_optimizer = torch.optim.AdamW(
            [param for param in self.distillation_model.generator.parameters()
             if param.requires_grad],
            lr=config.lr,
            betas=(config.beta1, config.beta2)
        )

        # Step 3: Build the dataloader; DistributedSampler gives each rank its
        # own shard of the precomputed ODE pairs, and cycle() makes it infinite.
        dataset = ODERegressionLMDBDataset(
            config.data_path, max_pair=getattr(config, "max_pair", int(1e8)))
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset, shuffle=True, drop_last=True)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=config.batch_size, sampler=sampler, num_workers=8)
        self.dataloader = cycle(dataloader)

        self.step = 0
        self.max_grad_norm = 10.0
        self.previous_time = None
|
|
    def save(self):
        # Gather the full (unsharded) generator state dict and write it from rank 0.
        print("Start gathering distributed model states...")
        generator_state_dict = fsdp_state_dict(
            self.distillation_model.generator)
        state_dict = {
            "generator": generator_state_dict
        }

        if self.is_main_process:
            os.makedirs(os.path.join(self.output_path,
                        f"checkpoint_model_{self.step:06d}"), exist_ok=True)
            torch.save(state_dict, os.path.join(self.output_path,
                       f"checkpoint_model_{self.step:06d}", "model.pt"))
            print("Model saved to", os.path.join(self.output_path,
                  f"checkpoint_model_{self.step:06d}", "model.pt"))
|
|
    def train_one_step(self):
        self.distillation_model.eval()  # eval() disables dropout-style randomness; gradients still flow

        # Step 1: Draw the next batch of prompts and precomputed ODE latents.
        batch = next(self.dataloader)
        text_prompts = batch["prompts"]
        ode_latent = batch["ode_latent"].to(
            device=self.device, dtype=self.dtype)

        # Step 2: Encode the text prompts; the text encoder is not trained here,
        # so no gradients are tracked.
        with torch.no_grad():
            conditional_dict = self.distillation_model.text_encoder(
                text_prompts=text_prompts)

        # Step 3: Compute the ODE-regression loss for the generator.
        generator_loss, log_dict = self.distillation_model.generator_loss(
            ode_latent=ode_latent,
            conditional_dict=conditional_dict
        )

        unnormalized_loss = log_dict["unnormalized_loss"]
        timestep = log_dict["timestep"]

        # Gather the per-sample losses and timesteps from all ranks for logging.
        if self.world_size > 1:
            gathered_unnormalized_loss = torch.zeros(
                [self.world_size, *unnormalized_loss.shape],
                dtype=unnormalized_loss.dtype, device=self.device)
            gathered_timestep = torch.zeros(
                [self.world_size, *timestep.shape],
                dtype=timestep.dtype, device=self.device)

            dist.all_gather_into_tensor(
                gathered_unnormalized_loss, unnormalized_loss)
            dist.all_gather_into_tensor(gathered_timestep, timestep)
        else:
            gathered_unnormalized_loss = unnormalized_loss
            gathered_timestep = timestep

        # Bucket the per-sample losses by diffusion timestep (bins of width 250).
        loss_breakdown = defaultdict(list)
        stats = {}

        for index, t in enumerate(timestep):
            loss_breakdown[str(int(t.item()) // 250 * 250)].append(
                unnormalized_loss[index].item())

        for key_t in loss_breakdown.keys():
            stats["loss_at_time_" + key_t] = sum(loss_breakdown[key_t]) / \
                len(loss_breakdown[key_t])

        # Step 4: Backpropagate, clip the gradient norm, and update the generator.
        self.generator_optimizer.zero_grad()
        generator_loss.backward()
        generator_grad_norm = self.distillation_model.generator.clip_grad_norm_(
            self.max_grad_norm)
        self.generator_optimizer.step()
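
        # Logging sketch: report the scalar stats on the main process. This
        # assumes a wandb run was initialized during setup (e.g. inside
        # init_logging_folder); adjust or remove if logging is handled elsewhere.
        if self.is_main_process:
            wandb.log(
                {
                    "generator_loss": generator_loss.item(),
                    "generator_grad_norm": generator_grad_norm.item(),
                    **stats,
                },
                step=self.step,
            )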
|
|
    def train(self):
        while True:
            self.train_one_step()
            # Save a checkpoint every log_iters steps unless saving is disabled.
            if (not self.config.no_save) and self.step % self.config.log_iters == 0:
                self.save()
                torch.cuda.empty_cache()

            barrier()
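
            # Timing sketch: track wall-clock seconds per iteration on the main
            # process (assumes the same wandb run as above).
            if self.is_main_process:
                current_time = time.time()
                if self.previous_time is not None:
                    wandb.log({"per_iteration_time": current_time - self.previous_time},
                              step=self.step)
                self.previous_time = current_time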

            self.step += 1
|
|
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--no_save", action="store_true")

    args = parser.parse_args()

    config = OmegaConf.load(args.config_path)
    config.no_save = args.no_save

    trainer = Trainer(config)
    trainer.train()
|
|
if __name__ == "__main__":
    main()
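
# Example launch (illustrative; the script name, GPU count, and config path are
# placeholders to adapt to your setup):
#   torchrun --nproc_per_node=8 train_ode.py --config_path configs/your_config.yaml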
|
|