import os
from tqdm import tqdm
from diffusers import AutoencoderKLHunyuanVideo
from transformers import (
    CLIPTextModel,
    CLIPTokenizer,
    LlamaModel,
    LlamaTokenizerFast,
    SiglipImageProcessor,
    SiglipVisionModel,
)
from diffusers.video_processor import VideoProcessor
from diffusers.utils import export_to_video, load_image

from dataset_tool import CollectionDataset, collate_fn_map
from omegaconf import OmegaConf
from torch.utils.data import DataLoader

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython.display import HTML, display, clear_output

from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed

from utils_framepack import encode_image, encode_prompt

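
# Precompute and cache conditioning tensors for each video sample: HunyuanVideo VAE
# latents, Llama/CLIP prompt embeddings, and SigLIP image embeddings of the first
# frame, saved as one .pt file per sample in `output_latent_folder`.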
def main(rank, world_size):
    weight_dtype = torch.bfloat16
    batch_size = 2
    dataloader_num_workers = 0
    output_latent_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
    pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
    siglip_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl"
    os.makedirs(output_latent_folder, exist_ok=True)

    # `rank` is the local CUDA device index set in setup_distributed_env().
    device = f"cuda:{rank}"
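
    # Tokenizers and the SigLIP image processor used by encode_prompt / encode_image below.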
    tokenizer_one = LlamaTokenizerFast.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer",
    )
    tokenizer_two = CLIPTokenizer.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer_2",
    )
    feature_extractor = SiglipImageProcessor.from_pretrained(
        siglip_model_name_or_path,
        subfolder="feature_extractor",
    )
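
    # Frozen encoders: the VAE is kept in float32 for encoding, while the text and
    # image encoders are loaded in the bf16 weight dtype.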
    vae = AutoencoderKLHunyuanVideo.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="vae",
        torch_dtype=torch.float32,
    )
    vae_scale_factor_spatial = vae.spatial_compression_ratio
    video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)

    text_encoder_one = LlamaModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder",
        torch_dtype=weight_dtype,
    )
    text_encoder_two = CLIPTextModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder_2",
        torch_dtype=weight_dtype,
    )
    image_encoder = SiglipVisionModel.from_pretrained(
        siglip_model_name_or_path,
        subfolder="image_encoder",
        torch_dtype=weight_dtype,
    )
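
    # Inference only: freeze everything, switch to eval mode, and move to the GPU.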
    vae.requires_grad_(False)
    text_encoder_one.requires_grad_(False)
    text_encoder_two.requires_grad_(False)
    image_encoder.requires_grad_(False)
    vae.eval()
    text_encoder_one.eval()
    text_encoder_two.eval()
    image_encoder.eval()

    vae = vae.to(device)
    text_encoder_one = text_encoder_one.to(device)
    text_encoder_two = text_encoder_two.to(device)
    image_encoder = image_encoder.to(device)
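
    # Dataset and dataloader built from the OmegaConf collection config.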
    configs = OmegaConf.load("512_collection_config_vae1011_aligned_full_dump.yaml")
    dataset = CollectionDataset.create_dataset_function(
        configs["train_data"],
        configs["train_data_weights"],
        **configs["data"]["params"],
    )
    dataloader = DataLoader(
        dataset,
        shuffle=False,
        batch_size=batch_size,
        num_workers=dataloader_num_workers,
        collate_fn=collate_fn_map,
        pin_memory=True,
        prefetch_factor=2 if dataloader_num_workers != 0 else None,
        persistent_workers=dataloader_num_workers != 0,
    )
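
    # Note (assumption): `world_size` is not used below, so every rank processes every
    # batch. A minimal way to shard work across ranks would be to skip batches with
    # `idx % world_size != rank` at the top of the loop.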
    for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader), desc="Processing batches"):
        # Skip the batch if every sample's output file already exists on disk.
        exist_flag = True
        num_frames = batch["video_metadata"]["num_frames"]
        for uttid, num_frame in zip(batch["uttid"], num_frames):
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}.pt")
            if not os.path.exists(output_path):
                exist_flag = False
                break
        if exist_flag:
            print(f"Skipping batch {idx}: outputs already exist")
            continue

        with torch.no_grad():
            # Videos arrive as [B, F, C, H, W]; the HunyuanVideo VAE expects [B, C, F, H, W].
            pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
            vae_latents = vae.encode(pixel_values).latent_dist.sample()
            vae_latents = vae_latents * vae.config.scaling_factor

            # Text conditioning: Llama and CLIP prompt embeddings.
            prompts = batch["prompts"]
            prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
                tokenizer=tokenizer_one,
                text_encoder=text_encoder_one,
                tokenizer_2=tokenizer_two,
                text_encoder_2=text_encoder_two,
                prompt=prompts,
                device=device,
            )

            # Image conditioning: SigLIP embeddings of each sample's first frame,
            # resized to the video resolution before encoding.
            image_tensor = batch["first_frames_images"]
            images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
            image = video_processor.preprocess(
                image=images,
                height=batch["videos"].shape[-2],
                width=batch["videos"].shape[-1],
            )
            image_embeds = encode_image(
                feature_extractor,
                image_encoder,
                image,
                device=device,
                dtype=weight_dtype,
            )

        # Save one .pt file per sample; pixel_values.shape[2] is the frame count after the permute above.
        for uttid, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(
            batch["uttid"],
            vae_latents,
            prompt_embeds,
            pooled_prompt_embeds,
            prompt_attention_mask,
            image_embeds,
        ):
            output_path = os.path.join(output_latent_folder, f"{uttid}_{pixel_values.shape[2]}.pt")
            torch.save(
                {
                    "vae_latent": cur_vae_latent.detach().cpu(),
                    "prompt_embed": cur_prompt_embed.detach().cpu(),
                    "pooled_prompt_embeds": cur_pooled_prompt_embed.detach().cpu(),
                    "prompt_attention_mask": cur_prompt_attention_mask.detach().cpu(),
                    "image_embeds": cur_image_embed.detach().cpu(),
                },
                output_path,
            )
            print(f"saved to: {output_path}")

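
# Example of loading a cached sample back (a sketch; assumes the same keys written
# above and a known `uttid` / frame count):
#   sample = torch.load(os.path.join(output_latent_folder, f"{uttid}_{num_frames}.pt"))
#   vae_latent = sample["vae_latent"]
#   prompt_embed = sample["prompt_embed"]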
def setup_distributed_env():
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


def cleanup_distributed_env():
    dist.destroy_process_group()

if __name__ == "__main__":
    setup_distributed_env()

    local_rank = int(os.environ["LOCAL_RANK"])
    world_size = dist.get_world_size()

    main(rank=local_rank, world_size=world_size)
    cleanup_distributed_env()
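
# Launch with torchrun so LOCAL_RANK and the process group are set up, e.g.
# (script name is a placeholder):
#   torchrun --nproc_per_node=<num_gpus> <this_script>.py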