| """Modified from VideoX-Fun/scripts/cogvideox_fun/train_lora.py |
| """ |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
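
# Illustrative launch (script name, model id and paths are placeholders; all
# flags are defined in parse_args below):
#   accelerate launch train_reward_lora.py \
#     --pretrained_model_name_or_path alibaba-pai/CogVideoX-Fun-V1.1-2b-InP \
#     --prompt_path prompts.txt \
#     --reward_fn HPSReward \
#     --backprop --backprop_strategy tail \
#     --mixed_precision bf16 --output_dir output_dir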
import argparse
import gc
import inspect
import json
import logging
import math
import os
import random
import shutil
import sys
from contextlib import contextmanager
from typing import List, Optional, Union

import accelerate
import datasets
import diffusers
import numpy as np
import torch
import torch.utils.checkpoint
import torchvision
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import ProjectConfiguration, set_seed
from decord import VideoReader
from diffusers import CogVideoXDPMScheduler, DDIMScheduler
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, deprecate, is_wandb_available
from diffusers.utils.torch_utils import is_compiled_module
from einops import rearrange
from packaging import version
from tqdm.auto import tqdm
from transformers.utils import ContextManagers

current_file_path = os.path.abspath(__file__)
project_roots = [
    os.path.dirname(current_file_path),
    os.path.dirname(os.path.dirname(current_file_path)),
    os.path.dirname(os.path.dirname(os.path.dirname(current_file_path))),
]
for project_root in project_roots:
    if project_root not in sys.path:
        sys.path.insert(0, project_root)

import videox_fun.reward.reward_fn as reward_fn
from videox_fun.models import (AutoencoderKLCogVideoX,
                               CogVideoXTransformer3DModel, T5EncoderModel,
                               T5Tokenizer)
from videox_fun.pipeline.pipeline_cogvideox_fun_inpaint import (
    CogVideoXFunInpaintPipeline, get_3d_rotary_pos_embed,
    get_resize_crop_region_for_grid)
from videox_fun.utils.lora_utils import create_network, merge_lora
from videox_fun.utils.utils import get_image_to_video_latent, save_videos_grid

if is_wandb_available():
    import wandb

# Will raise an error if the minimal version of diffusers is not installed.
check_min_version("0.18.0.dev0")

logger = get_logger(__name__, log_level="INFO")


@contextmanager
def video_reader(*args, **kwargs):
    """A context manager to solve the memory leak of decord."""
    vr = VideoReader(*args, **kwargs)
    try:
        yield vr
    finally:
        del vr
        gc.collect()
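
# Typical use (mirrors the frame sampling in log_validation below):
#   with video_reader(video_path) as vr:
#       frames = vr.get_batch([0, 8, 16]).asnumpy()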


def log_validation(vae, text_encoder, tokenizer, transformer3d, network,
                   loss_fn, args, accelerator, weight_dtype, global_step, validation_prompts_idx):
    try:
        logger.info("Running validation... ")

        transformer3d_val = CogVideoXTransformer3DModel.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="transformer",
        ).to(weight_dtype)
        transformer3d_val.load_state_dict(accelerator.unwrap_model(transformer3d).state_dict())
        scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")

        # Reload a clean VAE for sampling, since the training VAE may be in a
        # gradient-checkpointing or low-VRAM state.
        if args.vae_gradient_checkpointing or args.low_vram:
            vae = AutoencoderKLCogVideoX.from_pretrained(
                args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
            ).to(weight_dtype)
        pipeline = CogVideoXFunInpaintPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer3d_val,
            scheduler=scheduler,
            torch_dtype=weight_dtype,
        )
        if args.low_vram:
            pipeline.enable_model_cpu_offload()
        else:
            pipeline = pipeline.to(device=accelerator.device)
        lora_state_dict = accelerator.unwrap_model(network).state_dict()
        pipeline = merge_lora(pipeline, None, 1, accelerator.device, state_dict=lora_state_dict, transformer_only=True)

        to_tensor = torchvision.transforms.ToTensor()
        validation_loss, validation_reward = 0, 0
        for i in range(len(validation_prompts_idx)):
            validation_idx, validation_prompt = validation_prompts_idx[i]
            with torch.no_grad():
                with torch.autocast("cuda", dtype=weight_dtype):
                    # Round the requested frame count to a length compatible with
                    # the VAE temporal compression ratio.
                    temporal_compression_ratio = vae.config.temporal_compression_ratio
                    video_length = 1
                    if args.video_length != 1:
                        video_length += int((args.video_length - 1) // temporal_compression_ratio * temporal_compression_ratio)
                    sample_size = [args.validation_sample_height, args.validation_sample_width]
                    input_video, input_video_mask, _ = get_image_to_video_latent(
                        None, None, video_length=video_length, sample_size=sample_size
                    )

                    if args.seed is None:
                        generator = None
                    else:
                        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)

                    sample = pipeline(
                        validation_prompt,
                        num_frames=video_length,
                        negative_prompt="bad detailed",
                        height=args.validation_sample_height,
                        width=args.validation_sample_width,
                        guidance_scale=7,
                        generator=generator,
                        video=input_video,
                        mask_video=input_video_mask,
                    ).videos
                    sample_saved_name = f"validation_sample/sample-{global_step}-{validation_idx}.mp4"
                    sample_saved_path = os.path.join(args.output_dir, sample_saved_name)
                    save_videos_grid(sample, sample_saved_path, fps=8)

            # Uniformly sample frames from the saved video and score them with
            # the reward function.
            num_sampled_frames = 4
            sampled_frames_list = []
            with video_reader(sample_saved_path) as vr:
                sampled_frame_idx_list = np.linspace(0, len(vr), num_sampled_frames, endpoint=False, dtype=int)
                sampled_frame_list = vr.get_batch(sampled_frame_idx_list).asnumpy()
                sampled_frames = torch.stack([to_tensor(frame) for frame in sampled_frame_list], dim=0)
                sampled_frames_list.append(sampled_frames)

            sampled_frames = torch.stack(sampled_frames_list)
            sampled_frames = rearrange(sampled_frames, "b t c h w -> b c t h w")
            loss, reward = loss_fn(sampled_frames, [validation_prompt])
            validation_loss, validation_reward = validation_loss + loss, validation_reward + reward

        validation_loss = validation_loss / len(validation_prompts_idx)
        validation_reward = validation_reward / len(validation_prompts_idx)

        del pipeline
        del transformer3d_val
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

        return validation_loss, validation_reward
    except Exception as e:
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        logger.error(f"Validation failed with error: {e}")
        return None, None


def load_prompts(prompt_path, prompt_column="prompt", start_idx=None, end_idx=None):
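    # Prompts come either from a plain .txt file (one prompt per line) or a
    # .jsonl file. Illustrative .jsonl row ("prompt" is the default prompt_column):
    #   {"prompt": "A panda playing guitar in a bamboo forest."}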
    prompt_list = []
    if prompt_path.endswith(".txt"):
        with open(prompt_path, "r") as f:
            for line in f:
                prompt_list.append(line.strip())
    elif prompt_path.endswith(".jsonl"):
        with open(prompt_path, "r") as f:
            for line in f:
                item = json.loads(line)
                prompt_list.append(item[prompt_column])
    else:
        raise ValueError("The prompt_path must end with .txt or .jsonl.")
    prompt_list = prompt_list[start_idx:end_idx]

    return prompt_list


def get_t5_prompt_embeds(
    tokenizer: T5Tokenizer,
    text_encoder: T5EncoderModel,
    prompt: Union[str, List[str]] = None,
    num_videos_per_prompt: int = 1,
    max_sequence_length: int = 226,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
):
    prompt = [prompt] if isinstance(prompt, str) else prompt
    batch_size = len(prompt)

    text_inputs = tokenizer(
        prompt,
        padding="max_length",
        max_length=max_sequence_length,
        truncation=True,
        add_special_tokens=True,
        return_tensors="pt",
    )
    text_input_ids = text_inputs.input_ids
    untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

    if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
        removed_text = tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
        logger.warning(
            "The following part of your input was truncated because `max_sequence_length` is set to"
            f" {max_sequence_length} tokens: {removed_text}"
        )

    prompt_embeds = text_encoder(text_input_ids.to(device))[0]
    prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

    # Duplicate the text embeddings for each generation per prompt.
    _, seq_len, _ = prompt_embeds.shape
    prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
    prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

    return prompt_embeds


def encode_prompt(
    tokenizer: T5Tokenizer,
    text_encoder: T5EncoderModel,
    prompt: Union[str, List[str]],
    negative_prompt: Optional[Union[str, List[str]]] = None,
    do_classifier_free_guidance: bool = True,
    num_videos_per_prompt: int = 1,
    prompt_embeds: Optional[torch.Tensor] = None,
    negative_prompt_embeds: Optional[torch.Tensor] = None,
    max_sequence_length: int = 226,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
):
    r"""
    Encodes the prompt into text encoder hidden states.
    """
    prompt = [prompt] if isinstance(prompt, str) else prompt
    if prompt is not None:
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    if prompt_embeds is None:
        prompt_embeds = get_t5_prompt_embeds(
            tokenizer,
            text_encoder,
            prompt=prompt,
            num_videos_per_prompt=num_videos_per_prompt,
            max_sequence_length=max_sequence_length,
            device=device,
            dtype=dtype,
        )

    if do_classifier_free_guidance and negative_prompt_embeds is None:
        negative_prompt = negative_prompt or ""
        negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

        if prompt is not None and type(prompt) is not type(negative_prompt):
            raise TypeError(
                f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                f" {type(prompt)}."
            )
        elif batch_size != len(negative_prompt):
            raise ValueError(
                f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                " the batch size of `prompt`."
            )

        negative_prompt_embeds = get_t5_prompt_embeds(
            tokenizer,
            text_encoder,
            prompt=negative_prompt,
            num_videos_per_prompt=num_videos_per_prompt,
            max_sequence_length=max_sequence_length,
            device=device,
            dtype=dtype,
        )

    return prompt_embeds, negative_prompt_embeds


def prepare_extra_step_kwargs(scheduler, generator, eta):
    # Prepare extra kwargs for the scheduler step, since not all schedulers
    # share the same signature. `eta` corresponds to η in the DDIM paper
    # (https://arxiv.org/abs/2010.02502) and is only used by DDIMScheduler;
    # it should be in [0, 1].
    accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
    extra_step_kwargs = {}
    if accepts_eta:
        extra_step_kwargs["eta"] = eta

    # Check whether the scheduler accepts a generator.
    accepts_generator = "generator" in set(inspect.signature(scheduler.step).parameters.keys())
    if accepts_generator:
        extra_step_kwargs["generator"] = generator
    return extra_step_kwargs


def prepare_rotary_positional_embeddings(
    height: int,
    width: int,
    num_frames: int,
    vae_scale_factor_spatial: int = 8,
    patch_size: int = 2,
    patch_size_t: int = 2,
    attention_head_dim: int = 64,
    sample_height: int = 720,
    sample_width: int = 480,
    device: torch.device = "cpu",
):
    grid_height = height // (vae_scale_factor_spatial * patch_size)
    grid_width = width // (vae_scale_factor_spatial * patch_size)
    base_size_height = sample_height // patch_size
    base_size_width = sample_width // patch_size

    if patch_size_t is None:
        # Models without a temporal patch size (e.g. CogVideoX 1.0).
        grid_crops_coords = get_resize_crop_region_for_grid(
            (grid_height, grid_width), base_size_width, base_size_height
        )
        freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
            embed_dim=attention_head_dim,
            crops_coords=grid_crops_coords,
            grid_size=(grid_height, grid_width),
            temporal_size=num_frames,
            use_real=True,
        )
    else:
        # Models with temporal patching (e.g. CogVideoX 1.5).
        base_num_frames = (num_frames + patch_size_t - 1) // patch_size_t
        freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
            embed_dim=attention_head_dim,
            crops_coords=None,
            grid_size=(grid_height, grid_width),
            temporal_size=base_num_frames,
            grid_type="slice",
            max_size=(base_size_height, base_size_width),
        )
    freqs_cos = freqs_cos.to(device=device)
    freqs_sin = freqs_sin.to(device=device)
    return freqs_cos, freqs_sin


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16",
    )
    parser.add_argument(
        "--validation_prompt_path",
        type=str,
        default=None,
        help="A txt file of prompts that are sampled every `--validation_steps` and logged to `--report_to`.",
    )
    parser.add_argument(
        "--validation_prompts",
        type=str,
        default=None,
        nargs="+",
        help="A set of prompts that are sampled every `--validation_steps` and logged to `--report_to`.",
    )
    parser.add_argument(
        "--validation_batch_size",
        type=int,
        default=1,
        help="The total number of validation prompts sampled across all processes.",
    )
    parser.add_argument(
        "--validation_sample_height",
        type=int,
        default=512,
        help="The height of sampled videos in validation.",
    )
    parser.add_argument(
        "--validation_sample_width",
        type=int,
        default=512,
        help="The width of sampled videos in validation.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
| parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") |
| parser.add_argument( |
| "--use_came", |
| action="store_true", |
| help="whether to use came", |
| ) |
| parser.add_argument( |
| "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." |
| ) |
| parser.add_argument("--num_train_epochs", type=int, default=200) |
| parser.add_argument( |
| "--max_train_steps", |
| type=int, |
| default=None, |
| help="Total number of training steps to perform. If provided, overrides num_train_epochs.", |
| ) |
| parser.add_argument( |
| "--gradient_accumulation_steps", |
| type=int, |
| default=1, |
| help="Number of updates steps to accumulate before performing a backward/update pass.", |
| ) |
| parser.add_argument( |
| "--gradient_checkpointing", |
| action="store_true", |
| help="Whether or not to use gradient checkpointing (for DiT) to save memory at the expense of slower backward pass.", |
| ) |
| parser.add_argument( |
| "--vae_gradient_checkpointing", |
| action="store_true", |
| help="Whether or not to use gradient checkpointing (for VAE) to save memory at the expense of slower backward pass.", |
| ) |
| parser.add_argument( |
| "--learning_rate", |
| type=float, |
| default=1e-4, |
| help="Initial learning rate (after the potential warmup period) to use.", |
| ) |
| parser.add_argument( |
| "--scale_lr", |
| action="store_true", |
| default=False, |
| help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", |
| ) |
| parser.add_argument( |
| "--lr_scheduler", |
| type=str, |
| default="constant", |
| help=( |
| 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' |
| ' "constant", "constant_with_warmup"]' |
| ), |
| ) |
| parser.add_argument( |
| "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." |
| ) |
| parser.add_argument( |
| "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." |
| ) |
| parser.add_argument( |
| "--allow_tf32", |
| action="store_true", |
| help=( |
| "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" |
| " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" |
| ), |
| ) |
| parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") |
| parser.add_argument( |
| "--non_ema_revision", |
| type=str, |
| default=None, |
| required=False, |
| help=( |
| "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" |
| " remote repository specified with --pretrained_model_name_or_path." |
| ), |
| ) |
| parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") |
| parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") |
| parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") |
| parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") |
| parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") |
    parser.add_argument(
        "--prediction_type",
        type=str,
        default=None,
        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
| parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") |
| parser.add_argument( |
| "--checkpointing_steps", |
| type=int, |
| default=500, |
| help=( |
| "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" |
| " training using `--resume_from_checkpoint`." |
| ), |
| ) |
| parser.add_argument( |
| "--checkpoints_total_limit", |
| type=int, |
| default=None, |
| help=("Max number of checkpoints to store."), |
| ) |
| parser.add_argument( |
| "--resume_from_checkpoint", |
| type=str, |
| default=None, |
| help=( |
| "Whether training should be resumed from a previous checkpoint. Use a path saved by" |
| ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' |
| ), |
| ) |
| parser.add_argument( |
| "--validation_epochs", |
| type=int, |
| default=5, |
| help="Run validation every X epochs.", |
| ) |
| parser.add_argument( |
| "--validation_steps", |
| type=int, |
| default=2000, |
| help="Run validation every X steps.", |
| ) |
| parser.add_argument( |
| "--tracker_project_name", |
| type=str, |
| default="text2image-fine-tune", |
| help=( |
| "The `project_name` argument passed to Accelerator.init_trackers for" |
| " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" |
| ), |
| ) |

    parser.add_argument(
        "--rank",
        type=int,
        default=128,
        help="The dimension of the LoRA update matrices.",
    )
    parser.add_argument(
        "--network_alpha",
        type=int,
        default=64,
        help="The alpha value of the LoRA update matrices.",
    )
    parser.add_argument(
        "--train_text_encoder",
        action="store_true",
        help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
    )
    parser.add_argument(
        "--transformer_path",
        type=str,
        default=None,
        help="If you want to load the weight from other transformers, input its path.",
    )
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        help="If you want to load the weight from other vaes, input its path.",
    )
    parser.add_argument("--save_state", action="store_true", help="Whether or not to save state.")

    parser.add_argument(
        "--use_deepspeed", action="store_true", help="Whether or not to use deepspeed."
    )
    parser.add_argument(
        "--low_vram", action="store_true", help="Whether to enable low_vram mode."
    )

    parser.add_argument(
        "--prompt_path",
        type=str,
        default="normal",
        help="The path to the training prompt file.",
    )
    parser.add_argument(
        "--train_sample_height",
        type=int,
        default=384,
        help="The height of sampled videos in training.",
    )
    parser.add_argument(
        "--train_sample_width",
        type=int,
        default=672,
        help="The width of sampled videos in training.",
    )
    parser.add_argument(
        "--video_length",
        type=int,
        default=49,
        help="The number of frames to generate in training and validation.",
    )
    parser.add_argument(
        "--eta",
        type=float,
        default=0.0,
        help="Eta parameter for the DDIM sampler. This controls the amount of noise injected into the sampling process, "
        "with 0.0 being fully deterministic and 1.0 being equivalent to the DDPM sampler.",
    )
    parser.add_argument(
        "--guidance_scale",
        type=float,
        default=6.0,
        help="The scale of classifier-free diffusion guidance.",
    )
    parser.add_argument(
        "--num_inference_steps",
        type=int,
        default=50,
        help="The number of denoising steps in training and validation.",
    )
    parser.add_argument(
        "--num_decoded_latents",
        type=int,
        default=3,
        help="The number of latents to be decoded.",
    )
    parser.add_argument(
        "--num_sampled_frames",
        type=int,
        default=None,
        help="The number of sampled frames for the reward function.",
    )
    parser.add_argument(
        "--reward_fn",
        type=str,
        default="HPSReward",
        help="The reward function.",
    )
    parser.add_argument(
        "--reward_fn_kwargs",
        type=str,
        default=None,
        help="The keyword arguments of the reward function.",
    )
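    # The value is parsed with json.loads and splatted into the reward function
    # constructor, e.g. --reward_fn_kwargs '{"some_kwarg": 123}' (the key is
    # purely illustrative; valid keys depend on the chosen reward function).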
    parser.add_argument(
        "--backprop",
        action="store_true",
        default=False,
        help="Whether to use the reward backprop training mode.",
    )
    parser.add_argument(
        "--backprop_step_list",
        nargs="+",
        type=int,
        default=None,
        help="The preset step list for reward backprop. If provided, overrides `backprop_strategy`.",
    )
    parser.add_argument(
        "--backprop_strategy",
        choices=["last", "tail", "uniform", "random"],
        default="last",
        help="The strategy for reward backprop.",
    )
    parser.add_argument(
        "--stop_latent_model_input_gradient",
        action="store_true",
        default=False,
        help="Whether to stop the gradient of the latents during reward backprop.",
    )
    parser.add_argument(
        "--backprop_random_start_step",
        type=int,
        default=0,
        help="The random start step for reward backprop. Only used when `backprop_strategy` is random.",
    )
    parser.add_argument(
        "--backprop_random_end_step",
        type=int,
        default=50,
        help="The random end step for reward backprop. Only used when `backprop_strategy` is random.",
    )
    parser.add_argument(
        "--backprop_num_steps",
        type=int,
        default=5,
        help="The number of steps for backprop. Only used when `backprop_strategy` is tail/uniform/random.",
    )
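
    # Illustrative step selections for num_inference_steps=50 and
    # backprop_num_steps=5 (denoising steps are 0-indexed):
    #   last    -> [49]
    #   tail    -> [45, 46, 47, 48, 49]
    #   uniform -> [r, r+10, r+20, r+30, r+40] with a random offset r in [0, 10]
    #              (when r = 10 the last entry, 50, falls outside the 0-49
    #              step range and is simply never hit)
    #   random  -> 5 distinct steps drawn from
    #              [backprop_random_start_step, backprop_random_end_step]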

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # Default to using the same revision for the non-ema model if not specified.
    if args.non_ema_revision is None:
        args.non_ema_revision = args.revision

    return args


def main():
    args = parse_args()

    # parse_args() does not define --hub_token, so guard the access.
    if args.report_to == "wandb" and getattr(args, "hub_token", None) is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `huggingface-cli login` to authenticate with the Hub."
        )

    if args.non_ema_revision is not None:
        deprecate(
            "non_ema_revision!=None",
            "0.15.0",
            message=(
                "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
                " use `--variant=non_ema` instead."
            ),
        )
    logging_dir = os.path.join(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # Sanity-check the validation settings.
    do_validation = (args.validation_prompt_path is not None or args.validation_prompts is not None)
    if do_validation:
        if args.validation_prompt_path is not None and not (
            os.path.exists(args.validation_prompt_path) and args.validation_prompt_path.endswith(".txt")
        ):
            raise ValueError("The `--validation_prompt_path` must be an existing txt file containing prompts.")
        if args.validation_batch_size < accelerator.num_processes or args.validation_batch_size % accelerator.num_processes != 0:
            raise ValueError(
                "The `--validation_batch_size` must be no less than, and divisible by, the number of processes."
            )

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now (device-specific, so each
    # process draws different noise).
    if args.seed is not None:
        set_seed(args.seed, device_specific=True)

    # Handle the output folder creation.
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

    # For mixed precision training we cast the frozen models to half precision.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
        args.mixed_precision = accelerator.mixed_precision
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
        args.mixed_precision = accelerator.mixed_precision

    # Load the scheduler and tokenizer, and set the inference timesteps once.
    noise_scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
    noise_scheduler.set_timesteps(args.num_inference_steps, device=accelerator.device)

    tokenizer = T5Tokenizer.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
    )

    def deepspeed_zero_init_disabled_context_manager():
        """
        Returns either a context list that includes one that will disable zero.Init or an empty context list.
        """
        deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None
        if deepspeed_plugin is None:
            return []

        return [deepspeed_plugin.zero3_init_context_manager(enable=False)]

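    # The frozen text encoder, VAE and transformer are never passed through
    # `accelerator.prepare`, so they are instantiated with zero.Init disabled;
    # under DeepSpeed ZeRO-3, zero.Init would otherwise leave their weights
    # partitioned across ranks.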
    with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
        text_encoder = T5EncoderModel.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant,
            torch_dtype=weight_dtype
        )

        vae = AutoencoderKLCogVideoX.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
        )

        transformer3d = CogVideoXTransformer3DModel.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="transformer"
        )

    # Freeze vae, text_encoder and transformer3d; only the LoRA network is trained.
    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    transformer3d.requires_grad_(False)

    # Create the LoRA network and hook it into the text encoder and transformer.
    network = create_network(
        1.0,
        args.rank,
        args.network_alpha,
        text_encoder,
        transformer3d,
        neuron_dropout=None,
        add_lora_in_attn_temporal=True,
    )
    network.apply_to(text_encoder, transformer3d, args.train_text_encoder, True)

    if args.transformer_path is not None:
        print(f"From checkpoint: {args.transformer_path}")
        if args.transformer_path.endswith("safetensors"):
            from safetensors.torch import load_file
            state_dict = load_file(args.transformer_path)
        else:
            state_dict = torch.load(args.transformer_path, map_location="cpu")
        state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict

        m, u = transformer3d.load_state_dict(state_dict, strict=False)
        print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")
        assert len(u) == 0

    if args.vae_path is not None:
        print(f"From checkpoint: {args.vae_path}")
        if args.vae_path.endswith("safetensors"):
            from safetensors.torch import load_file
            state_dict = load_file(args.vae_path)
        else:
            state_dict = torch.load(args.vae_path, map_location="cpu")
        state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict

        m, u = vae.load_state_dict(state_dict, strict=False)
        print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")
        assert len(u) == 0

    vae_scale_factor_spatial = 2 ** (len(vae.config.block_out_channels) - 1)
    vae_scale_factor_temporal = vae.config.temporal_compression_ratio
    num_channels_latent = vae.config.latent_channels

    # `accelerate` 0.16.0 will have better support for customized saving.
    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
        # Save only the LoRA weights when a training state checkpoint is written.
        def save_model_hook(models, weights, output_dir):
            if accelerator.is_main_process:
                safetensor_save_path = os.path.join(output_dir, "lora_diffusion_pytorch_model.safetensors")
                save_model(safetensor_save_path, accelerator.unwrap_model(models[-1]))
                if not args.use_deepspeed:
                    for _ in range(len(weights)):
                        weights.pop()

        accelerator.register_save_state_pre_hook(save_model_hook)

    if args.gradient_checkpointing:
        transformer3d.enable_gradient_checkpointing()

    if args.vae_gradient_checkpointing:
        # VAE gradient checkpointing is only supported when decoding at most 3 latents.
        if args.num_decoded_latents > 3:
            raise ValueError("The vae_gradient_checkpointing is not supported for num_decoded_latents > 3.")
        vae.enable_gradient_checkpointing()

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Initialize the optimizer.
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )

        optimizer_cls = bnb.optim.AdamW8bit
    elif args.use_came:
        try:
            from came_pytorch import CAME
        except ImportError:
            raise ImportError(
                "Please install came_pytorch to use CAME. You can do so by running `pip install came_pytorch`"
            )

        optimizer_cls = CAME
    else:
        optimizer_cls = torch.optim.AdamW

    logging.info("Add network parameters")
    trainable_params = list(filter(lambda p: p.requires_grad, network.parameters()))
    trainable_params_optim = network.prepare_optimizer_params(args.learning_rate / 2, args.learning_rate, args.learning_rate)

    if args.use_came:
        optimizer = optimizer_cls(
            trainable_params_optim,
            lr=args.learning_rate,
            betas=(0.9, 0.999, 0.9999),
            eps=(1e-30, 1e-16),
        )
    else:
        optimizer = optimizer_cls(
            trainable_params_optim,
            lr=args.learning_rate,
            betas=(args.adam_beta1, args.adam_beta2),
            weight_decay=args.adam_weight_decay,
            eps=args.adam_epsilon,
        )

    # Instantiate the reward function. The main process builds it first so any
    # model download happens once; afterwards every process loads it onto its device.
    reward_fn_kwargs = {}
    if args.reward_fn_kwargs is not None:
        reward_fn_kwargs = json.loads(args.reward_fn_kwargs)
    if accelerator.is_main_process:
        loss_fn = getattr(reward_fn, args.reward_fn)(device="cpu", dtype=weight_dtype, **reward_fn_kwargs)
    accelerator.wait_for_everyone()
    loss_fn = getattr(reward_fn, args.reward_fn)(device=accelerator.device, dtype=weight_dtype, **reward_fn_kwargs)

    prompt_list = load_prompts(args.prompt_path)

    # Math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(prompt_list) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
    )

    # Prepare everything with our `accelerator`.
    network, optimizer, lr_scheduler = accelerator.prepare(
        network, optimizer, lr_scheduler
    )

    # Move the frozen models to gpu and cast to weight_dtype.
    vae.to(accelerator.device, dtype=weight_dtype)
    transformer3d.to(accelerator.device, dtype=weight_dtype)
    text_encoder.to(accelerator.device)

    vae.enable_auto_split_process()

    # Recalculate the number of update steps per epoch, since prompts are drawn
    # in batches of `train_batch_size`.
    num_update_steps_per_epoch = math.ceil(len(prompt_list) / args.train_batch_size / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs.
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # Initialize the trackers and store our configuration. List-valued arguments
    # are dropped because the trackers cannot log them as hyperparameters.
    if accelerator.is_main_process:
        tracker_config = dict(vars(args))
        keys_to_pop = [k for k, v in tracker_config.items() if isinstance(v, list)]
        for k in keys_to_pop:
            tracker_config.pop(k)
            print(f"Removed tracker_config['{k}']")
        accelerator.init_trackers(args.tracker_project_name, tracker_config)

    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
        return model

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(prompt_list)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save.
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint.
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            from safetensors.torch import load_file
            state_dict = load_file(os.path.join(args.output_dir, path, "lora_diffusion_pytorch_model.safetensors"))
            m, u = accelerator.unwrap_model(network).load_state_dict(state_dict, strict=False)
            print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")

            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    # Save the LoRA weights of the unwrapped network to `ckpt_file`.
    def save_model(ckpt_file, unwrapped_nw):
        os.makedirs(args.output_dir, exist_ok=True)
        accelerator.print(f"\nsaving checkpoint: {ckpt_file}")
        unwrapped_nw.save_weights(ckpt_file, weight_dtype, None)

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    for epoch in range(first_epoch, args.num_train_epochs):
        train_loss = 0.0
        train_reward = 0.0

        for _ in range(num_update_steps_per_epoch):
            # Instead of iterating over a dataset, randomly sample a batch of training prompts.
            train_prompt = random.choices(prompt_list, k=args.train_batch_size)
            logger.info(f"train_prompt: {train_prompt}")

            # Classifier-free guidance is used whenever guidance_scale > 1.
            do_classifier_free_guidance = args.guidance_scale > 1.0

            if args.low_vram:
                torch.cuda.empty_cache()
                text_encoder.to(accelerator.device)

            prompt_embeds, negative_prompt_embeds = encode_prompt(
                tokenizer,
                text_encoder,
                train_prompt,
                do_classifier_free_guidance=do_classifier_free_guidance,
                negative_prompt="",
                dtype=weight_dtype,
                device=accelerator.device,
            )
            if do_classifier_free_guidance:
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

            # In low-VRAM mode, offload the text encoder once the prompts are encoded.
            if args.low_vram:
                text_encoder.to("cpu")
                torch.cuda.empty_cache()

            timesteps = noise_scheduler.timesteps

            # Shape of the latents to denoise: [B, F, C, H, W].
            latent_shape = [
                len(train_prompt),
                (args.video_length - 1) // vae_scale_factor_temporal + 1,
                num_channels_latent,
                args.train_sample_height // vae_scale_factor_spatial,
                args.train_sample_width // vae_scale_factor_spatial,
            ]
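            # With the defaults (video_length=49, 384x672 samples) and the stock
            # CogVideoX VAE (temporal compression 4, spatial factor 8, 16 latent
            # channels), this works out to [B, 13, 16, 48, 84].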

            with accelerator.accumulate(transformer3d):
                with accelerator.autocast():
                    # Start from pure Gaussian noise, scaled by the scheduler's initial sigma.
                    latents = torch.randn(*latent_shape, device=accelerator.device, dtype=weight_dtype)
                    latents = latents * noise_scheduler.init_noise_sigma

                    # Text-to-video: the inpaint mask and masked video latents are all zeros.
                    mask_latents = torch.zeros_like(latents)[:, :, :1].to(latents.device, latents.dtype)
                    masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
                    mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
                    masked_video_latents_input = (
                        torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
                    )
                    inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)

                    # `manual_seed(None)` is invalid, so only build a generator when a seed is given.
                    generator = (
                        None if args.seed is None
                        else torch.Generator(device=accelerator.device).manual_seed(args.seed)
                    )
                    extra_step_kwargs = prepare_extra_step_kwargs(noise_scheduler, generator, args.eta)

                    # Prepare rotary embeds if the transformer uses them.
                    image_rotary_emb = (
                        prepare_rotary_positional_embeddings(
                            height=args.train_sample_height,
                            width=args.train_sample_width,
                            num_frames=latents.size(1),
                            vae_scale_factor_spatial=vae_scale_factor_spatial,
                            patch_size=unwrap_model(transformer3d).config.patch_size,
                            patch_size_t=unwrap_model(transformer3d).config.patch_size_t,
                            attention_head_dim=unwrap_model(transformer3d).config.attention_head_dim,
                            sample_height=unwrap_model(transformer3d).config.sample_height,
                            sample_width=unwrap_model(transformer3d).config.sample_width,
                            device=accelerator.device,
                        )
                        if unwrap_model(transformer3d).config.use_rotary_positional_embeddings
                        else None
                    )

                    # Decide at which denoising steps gradients will flow back from the reward.
                    if args.backprop:
                        if args.backprop_step_list is None:
                            if args.backprop_strategy == "last":
                                backprop_step_list = [args.num_inference_steps - 1]
                            elif args.backprop_strategy == "tail":
                                backprop_step_list = list(range(args.num_inference_steps))[-args.backprop_num_steps:]
                            elif args.backprop_strategy == "uniform":
                                interval = args.num_inference_steps // args.backprop_num_steps
                                random_start = random.randint(0, interval)
                                backprop_step_list = [random_start + i * interval for i in range(args.backprop_num_steps)]
                            elif args.backprop_strategy == "random":
                                backprop_step_list = random.sample(
                                    range(args.backprop_random_start_step, args.backprop_random_end_step + 1),
                                    args.backprop_num_steps,
                                )
                            else:
                                raise ValueError(f"Invalid backprop strategy: {args.backprop_strategy}.")
                        else:
                            backprop_step_list = args.backprop_step_list
                    else:
                        # Without reward backprop every denoising step is detached below.
                        backprop_step_list = []

                    # Denoising loop.
                    old_pred_original_sample = None  # Only used by CogVideoXDPMScheduler; must persist across steps.
                    for i, t in enumerate(tqdm(timesteps)):
                        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                        latent_model_input = noise_scheduler.scale_model_input(latent_model_input, t)

                        # Broadcast the timestep to the batch dimension.
                        timestep = t.expand(latent_model_input.shape[0])

                        # Optionally truncate the gradient flowing through earlier denoising steps.
                        if args.stop_latent_model_input_gradient:
                            latent_model_input = latent_model_input.detach()

                        noise_pred = transformer3d(
                            hidden_states=latent_model_input,
                            encoder_hidden_states=prompt_embeds,
                            timestep=timestep,
                            image_rotary_emb=image_rotary_emb,
                            return_dict=False,
                            inpaint_latents=inpaint_latents,
                        )[0]
                        noise_pred = noise_pred.float()

                        # Only the selected backprop steps keep their computation graph.
                        if i not in backprop_step_list:
                            noise_pred = noise_pred.detach()

                        # Perform classifier-free guidance.
                        guidance_scale = args.guidance_scale
                        if do_classifier_free_guidance:
                            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                        # Compute the previous noisy sample x_t -> x_t-1.
                        if not isinstance(noise_scheduler, CogVideoXDPMScheduler):
                            latents = noise_scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                        else:
                            latents, old_pred_original_sample = noise_scheduler.step(
                                noise_pred,
                                old_pred_original_sample,
                                t,
                                timesteps[i - 1] if i > 0 else None,
                                latents,
                                **extra_step_kwargs,
                                return_dict=False,
                            )
                        latents = latents.to(prompt_embeds.dtype)

                    # [B, F, C, H, W] -> [B, C, F, H, W] for the VAE decoder.
                    latents = latents.permute(0, 2, 1, 3, 4)

                    # Decode only the first `num_decoded_latents` latents to limit
                    # the memory cost of backpropagating through the VAE.
                    sampled_frame_indices = list(range(args.num_decoded_latents))
                    sampled_latents = latents[:, :, sampled_frame_indices, :, :]
                    sampled_latents = 1 / vae.config.scaling_factor * sampled_latents
                    sampled_frames = vae.decode(sampled_latents).sample
                    # Map the decoded frames from [-1, 1] to [0, 1] for the reward function.
                    sampled_frames = (sampled_frames / 2 + 0.5).clamp(0, 1)

                    if global_step % args.checkpointing_steps == 0:
                        saved_file = f"sample-{global_step}-{accelerator.process_index}.mp4"
                        save_videos_grid(
                            sampled_frames.to(torch.float32).detach().cpu(),
                            os.path.join(args.output_dir, "train_sample", saved_file),
                            fps=8,
                        )

                    # Optionally sub-sample frames evenly before computing the reward.
                    if args.num_sampled_frames is not None:
                        last_frame_index = sampled_frames.size(2) - 1
                        sampled_frames_indices = torch.linspace(0, last_frame_index, steps=args.num_sampled_frames).long()
                        sampled_frames = sampled_frames[:, :, sampled_frames_indices, :, :]

                    loss, reward = loss_fn(sampled_frames, train_prompt)

                    # Gather the losses across all processes for logging (if we use distributed training).
                    avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
                    avg_reward = accelerator.gather(reward.repeat(args.train_batch_size)).mean()
                    train_loss += avg_loss.item() / args.gradient_accumulation_steps
                    train_reward += avg_reward.item() / args.gradient_accumulation_steps

                # Backpropagate.
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    total_norm = accelerator.clip_grad_norm_(trainable_params, args.max_grad_norm)
                    if not args.use_deepspeed:
                        accelerator.log({"total_norm": total_norm}, step=global_step)
                    else:
                        # DeepSpeed clips internally; log its global grad norm when available.
                        if hasattr(optimizer, "optimizer") and hasattr(optimizer.optimizer, "_global_grad_norm"):
                            accelerator.log({"total_norm": optimizer.optimizer._global_grad_norm}, step=global_step)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Check if the accelerator has performed an optimization step behind the scenes.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                accelerator.log({"train_loss": train_loss, "train_reward": train_reward}, step=global_step)
                train_loss = 0.0
                train_reward = 0.0

                if global_step % args.checkpointing_steps == 0:
                    # Before saving, optionally delete the oldest checkpoints so at
                    # most `checkpoints_total_limit` remain.
                    if args.use_deepspeed or accelerator.is_main_process:
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # We need at most `checkpoints_total_limit - 1` checkpoints before saving the new one.
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        gc.collect()
                        torch.cuda.empty_cache()
                        torch.cuda.ipc_collect()
                        if not args.save_state:
                            safetensor_save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}.safetensors")
                            save_model(safetensor_save_path, accelerator.unwrap_model(network))
                            logger.info(f"Saved safetensor to {safetensor_save_path}")
                        else:
                            accelerator_save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                            accelerator.save_state(accelerator_save_path)
                            logger.info(f"Saved state to {accelerator_save_path}")

                if do_validation and (global_step % args.validation_steps) == 0:
                    # Lazily load the validation prompts from the txt file on first use.
                    if args.validation_prompts is None and args.validation_prompt_path.endswith(".txt"):
                        validation_prompts = []
                        with open(args.validation_prompt_path, "r") as f:
                            for line in f:
                                validation_prompts.append(line.strip())
                        args.validation_prompts = validation_prompts[:args.validation_batch_size]

                    validation_prompts_idx = [(i, p) for i, p in enumerate(args.validation_prompts)]

                    if hasattr(vae, "enable_cache_in_vae"):
                        vae.enable_cache_in_vae()
                    accelerator.wait_for_everyone()
                    # Shard the validation prompts across processes.
                    with accelerator.split_between_processes(validation_prompts_idx) as splitted_prompts_idx:
                        validation_loss, validation_reward = log_validation(
                            vae,
                            text_encoder,
                            tokenizer,
                            transformer3d,
                            network,
                            loss_fn,
                            args,
                            accelerator,
                            weight_dtype,
                            global_step,
                            splitted_prompts_idx,
                        )
                    # log_validation returns (None, None) when sampling failed.
                    if validation_loss is not None and validation_reward is not None:
                        avg_validation_loss = accelerator.gather(validation_loss).mean()
                        avg_validation_reward = accelerator.gather(validation_reward).mean()
                        if accelerator.is_main_process:
                            accelerator.log({"validation_loss": avg_validation_loss, "validation_reward": avg_validation_reward}, step=global_step)
                    accelerator.wait_for_everyone()

            logs = {"step_loss": loss.detach().item(), "step_reward": reward.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)

            if global_step >= args.max_train_steps:
                break

        # Propagate the early stop to the epoch loop.
        if global_step >= args.max_train_steps:
            break


if __name__ == "__main__":
    main()