import argparse
import os

import torch
import torch.distributed as dist
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from tqdm import tqdm

from accelerate import Accelerator
from diffusers import AutoencoderKLHunyuanVideo
from diffusers.training_utils import free_memory
from diffusers.video_processor import VideoProcessor
from transformers import (
    CLIPTextModel,
    CLIPTokenizer,
    LlamaModel,
    LlamaTokenizerFast,
    SiglipImageProcessor,
    SiglipVisionModel,
)

from dummy_dataloader_official import BucketedFeatureDataset, BucketedSampler, collate_fn
from utils_framepack import encode_image, encode_prompt

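# Helpers to initialize / tear down the per-process NCCL process group.
# LOCAL_RANK is provided by the launcher (e.g. torchrun) and pins each process to its own GPU.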
def setup_distributed_env():
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


def cleanup_distributed_env():
    dist.destroy_process_group()

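# Precompute VAE latents, prompt embeddings, and SigLIP image embeddings for every
# sample in the dataset and cache them to disk as one .pt file per sample.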
def main(rank, world_size, global_rank, stride, batch_size, dataloader_num_workers, csv_file,
         video_folder, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
    weight_dtype = torch.bfloat16
    device = rank
    seed = 42

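    # Load the Llama and CLIP tokenizers plus the SigLIP feature extractor.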
    tokenizer_one = LlamaTokenizerFast.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer",
    )
    tokenizer_two = CLIPTokenizer.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer_2",
    )
    feature_extractor = SiglipImageProcessor.from_pretrained(
        siglip_model_name_or_path,
        subfolder="feature_extractor",
    )

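    # The VAE is loaded in float32; the video processor matches its spatial compression ratio.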
    vae = AutoencoderKLHunyuanVideo.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="vae",
        torch_dtype=torch.float32,
    )
    vae_scale_factor_spatial = vae.spatial_compression_ratio
    video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)

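    # Text encoders (Llama + CLIP) and the SigLIP image encoder, loaded in bfloat16.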
    text_encoder_one = LlamaModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder",
        torch_dtype=weight_dtype,
    )
    text_encoder_two = CLIPTextModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder_2",
        torch_dtype=weight_dtype,
    )
    image_encoder = SiglipVisionModel.from_pretrained(
        siglip_model_name_or_path,
        subfolder="image_encoder",
        torch_dtype=weight_dtype,
    )

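    # All encoders are frozen, set to eval mode, and moved to this process's GPU.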
    vae.requires_grad_(False)
    text_encoder_one.requires_grad_(False)
    text_encoder_two.requires_grad_(False)
    image_encoder.requires_grad_(False)
    vae.eval()
    text_encoder_one.eval()
    text_encoder_two.eval()
    image_encoder.eval()

    vae = vae.to(device)
    text_encoder_one = text_encoder_one.to(device)
    text_encoder_two = text_encoder_two.to(device)
    image_encoder = image_encoder.to(device)

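    # Build the bucketed dataset/dataloader and let Accelerate shard the batches across processes.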
    dist.barrier()
    dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
    sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=False, shuffle=False, seed=seed)
    dataloader = DataLoader(
        dataset,
        batch_sampler=sampler,
        collate_fn=collate_fn,
        num_workers=dataloader_num_workers,
        prefetch_factor=2 if dataloader_num_workers != 0 else None,
    )

    accelerator = Accelerator()
    dataloader = accelerator.prepare(dataloader)
    print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
    print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")

    sampler.set_epoch(0)
    if rank == 0:
        pbar = tqdm(total=len(dataloader), desc="Processing")
    dist.barrier()
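    # For each batch: drop samples that already have a cached file, encode the rest,
    # and write one .pt file per sample.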
    for idx, batch in enumerate(dataloader):
        free_memory()

        valid_indices = []
        valid_uttids = []
        valid_num_frames = []
        valid_heights = []
        valid_widths = []
        valid_videos = []
        valid_prompts = []
        valid_first_frames_images = []

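        # Keep only the samples whose output .pt file does not exist yet.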
        for i, (uttid, num_frame, height, width) in enumerate(
            zip(
                batch["uttid"],
                batch["video_metadata"]["num_frames"],
                batch["video_metadata"]["height"],
                batch["video_metadata"]["width"],
            )
        ):
            os.makedirs(output_latent_folder, exist_ok=True)
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
            if not os.path.exists(output_path):
                valid_indices.append(i)
                valid_uttids.append(uttid)
                valid_num_frames.append(num_frame)
                valid_heights.append(height)
                valid_widths.append(width)
                valid_videos.append(batch["videos"][i])
                valid_prompts.append(batch["prompts"][i])
                valid_first_frames_images.append(batch["first_frames_images"][i])
            else:
                print(f"skipping {uttid}")

        if not valid_indices:
            print("skipping entire batch!")
            if rank == 0:
                pbar.update(1)
                pbar.set_postfix({"batch": idx})
            continue

        del batch
        free_memory()

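        # Rebuild the batch from the remaining (unprocessed) samples.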
        batch = {
            "uttid": valid_uttids,
            "video_metadata": {
                "num_frames": valid_num_frames,
                "height": valid_heights,
                "width": valid_widths,
            },
            "videos": torch.stack(valid_videos),
            "prompts": valid_prompts,
            "first_frames_images": torch.stack(valid_first_frames_images),
        }

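        # Encode everything under no_grad: VAE latents from the clips (permuted to [B, C, T, H, W]),
        # text embeddings from the prompts, and SigLIP embeddings from the first frames.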
        with torch.no_grad():
            pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
            vae_latents = vae.encode(pixel_values).latent_dist.sample()
            vae_latents = vae_latents * vae.config.scaling_factor

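            # Encode the text prompts with the Llama and CLIP text encoders.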
            prompts = batch["prompts"]
            prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
                tokenizer=tokenizer_one,
                text_encoder=text_encoder_one,
                tokenizer_2=tokenizer_two,
                text_encoder_2=text_encoder_two,
                prompt=prompts,
                device=device,
            )

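            # Encode the first frame of each clip with the SigLIP image encoder.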
            image_tensor = batch["first_frames_images"]
            images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
            image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
            image_embeds = encode_image(
                feature_extractor,
                image_encoder,
                image,
                device=device,
                dtype=weight_dtype,
            )

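        # Save the per-sample features to disk, keyed by uttid and video metadata.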
        for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(
            batch["uttid"],
            batch["video_metadata"]["num_frames"],
            batch["video_metadata"]["height"],
            batch["video_metadata"]["width"],
            vae_latents,
            prompt_embeds,
            pooled_prompt_embeds,
            prompt_attention_mask,
            image_embeds,
        ):
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
            temp_to_save = {
                "vae_latent": cur_vae_latent.cpu().detach(),
                "prompt_embed": cur_prompt_embed.cpu().detach(),
                "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
                "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
                "image_embeds": cur_image_embed.cpu().detach(),
            }
            torch.save(temp_to_save, output_path)
            print(f"save latent to: {output_path}")

        if rank == 0:
            pbar.update(1)
            pbar.set_postfix({"batch": idx})

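        # Drop references to the large tensors so memory can be reclaimed before the next batch.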
        del pixel_values, prompts, image_tensor, images, image
        del vae_latents, image_embeds, prompt_embeds, pooled_prompt_embeds, prompt_attention_mask
        del batch, temp_to_save
        del valid_indices, valid_uttids, valid_num_frames, valid_heights, valid_widths
        del valid_videos, valid_prompts, valid_first_frames_images

        free_memory()
        dist.barrier()

    if rank == 0:
        pbar.close()
    cleanup_distributed_env()

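# CLI entry point: parse arguments, set up the distributed environment, and run feature extraction.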
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Precompute VAE latents and text/image embeddings for video training data.")
    parser.add_argument("--stride", type=int, default=2, help="Frame stride used by the dataset")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
    parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
    parser.add_argument("--csv_file", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/train/sekai-game-drone_updated.csv", help="Path to the dataset CSV file")
    parser.add_argument("--video_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone", help="Path to the folder containing the source videos")
    parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone/latents", help="Folder to store output latents")
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Path to the pretrained HunyuanVideo model")
    parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Path to the SigLIP image encoder")
    args = parser.parse_args()

    setup_distributed_env()

    global_rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.cuda.current_device()
    world_size = dist.get_world_size()

    main(
        rank=device,
        world_size=world_size,
        global_rank=global_rank,
        stride=args.stride,
        batch_size=args.batch_size,
        dataloader_num_workers=args.dataloader_num_workers,
        csv_file=args.csv_file,
        video_folder=args.video_folder,
        output_latent_folder=args.output_latent_folder,
        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
        siglip_model_name_or_path=args.siglip_model_name_or_path,
    )