Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- dataset_code/spatialvid/offoload_features_hv_official.py +307 -0
- dataset_code/spatialvid/utils_framepack.py +1229 -0
- exp_code/1_benchmark/1.py +748 -0
- exp_code/1_benchmark/2.py +1059 -0
- exp_code/1_benchmark/ALG/.gitignore +2 -0
- exp_code/1_benchmark/ALG/__pycache__/lp_utils.cpython-311.pyc +0 -0
- exp_code/1_benchmark/ALG/__pycache__/pipeline_cogvideox_image2video_lowpass.cpython-311.pyc +0 -0
- exp_code/1_benchmark/ALG/__pycache__/pipeline_hunyuan_video_image2video_lowpass.cpython-311.pyc +0 -0
- exp_code/1_benchmark/ALG/__pycache__/pipeline_wan_image2video_lowpass.cpython-311.pyc +0 -0
- exp_code/1_benchmark/ALG/configs/cogvideox_alg.yaml +33 -0
- exp_code/1_benchmark/ALG/configs/cogvideox_default.yaml +16 -0
- exp_code/1_benchmark/ALG/configs/hunyuan_video_alg.yaml +36 -0
- exp_code/1_benchmark/ALG/configs/hunyuan_video_default.yaml +19 -0
- exp_code/1_benchmark/ALG/configs/wan_alg.yaml +33 -0
- exp_code/1_benchmark/ALG/configs/wan_default.yaml +16 -0
- exp_code/1_benchmark/ALG/lp_utils.py +189 -0
- exp_code/1_benchmark/ALG/pipeline_cogvideox_image2video_lowpass.py +1158 -0
- exp_code/1_benchmark/ALG/pipeline_hunyuan_video_image2video_lowpass.py +1308 -0
- exp_code/1_benchmark/ALG/pipeline_wan_image2video_lowpass.py +970 -0
- exp_code/1_benchmark/ALG/readme.md +170 -0
- exp_code/1_benchmark/ALG/requirements.txt +13 -0
- exp_code/1_benchmark/ALG/run.py +150 -0
- exp_code/1_benchmark/ALG/run.sh +5 -0
- exp_code/1_benchmark/AccVideo/LICENSE.txt +77 -0
- exp_code/1_benchmark/AccVideo/README.md +130 -0
- exp_code/1_benchmark/AccVideo/assets/prompt.txt +3 -0
- exp_code/1_benchmark/AccVideo/models/__init__.py +0 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/__init__.py +0 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/constants.py +87 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/__init__.py +2 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/pipelines/__init__.py +1 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/pipelines/pipeline_hunyuan_video.py +1114 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/schedulers/__init__.py +1 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/schedulers/scheduling_flow_match_discrete.py +257 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/idle_config.py +383 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/inference.py +687 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/__init__.py +26 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/activation_layers.py +23 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/attenion.py +212 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/embed_layers.py +157 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/fp8_optimization.py +102 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/mlp_layers.py +118 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/models.py +816 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/modulate_layers.py +76 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/norm_layers.py +77 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/posemb_layers.py +310 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/modules/token_refiner.py +236 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/parallel_states.py +63 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/prompt_rewrite.py +53 -0
- exp_code/1_benchmark/AccVideo/models/hunyuan/text_encoder/__init__.py +357 -0
dataset_code/spatialvid/offoload_features_hv_official.py
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
from diffusers import AutoencoderKLHunyuanVideo
|
| 5 |
+
from transformers import (
|
| 6 |
+
CLIPTextModel,
|
| 7 |
+
CLIPTokenizer,
|
| 8 |
+
LlamaModel,
|
| 9 |
+
LlamaTokenizerFast,
|
| 10 |
+
SiglipImageProcessor,
|
| 11 |
+
SiglipVisionModel,
|
| 12 |
+
)
|
| 13 |
+
from diffusers.video_processor import VideoProcessor
|
| 14 |
+
from diffusers.utils import export_to_video, load_image
|
| 15 |
+
|
| 16 |
+
from dummy_dataloader_official import BucketedFeatureDataset, BucketedSampler, collate_fn
|
| 17 |
+
from torch.utils.data import DataLoader
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
import torch.distributed as dist
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
from torch.nn.parallel import DistributedDataParallel as DDP
|
| 23 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 24 |
+
from torch.utils.data import Subset
|
| 25 |
+
import torchvision.transforms as transforms
|
| 26 |
+
import numpy as np
|
| 27 |
+
import matplotlib.pyplot as plt
|
| 28 |
+
from matplotlib.animation import FuncAnimation
|
| 29 |
+
from IPython.display import HTML, display
|
| 30 |
+
from IPython.display import clear_output
|
| 31 |
+
|
| 32 |
+
from accelerate import Accelerator, DistributedType
|
| 33 |
+
from accelerate.logging import get_logger
|
| 34 |
+
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
|
| 35 |
+
from diffusers.training_utils import free_memory
|
| 36 |
+
|
| 37 |
+
from accelerate import Accelerator
|
| 38 |
+
from utils_framepack import encode_image, encode_prompt
|
| 39 |
+
|
| 40 |
+
def setup_distributed_env():
    """Initialise the NCCL process group and pin this process to its local GPU.

    Expects to run under ``torchrun``; reads the ``LOCAL_RANK`` environment
    variable set by the launcher.
    """
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
|
| 43 |
+
|
| 44 |
+
def cleanup_distributed_env():
    """Tear down the process group created by ``setup_distributed_env``."""
    dist.destroy_process_group()
|
| 46 |
+
|
| 47 |
+
def _load_models(pretrained_model_name_or_path, siglip_model_name_or_path, weight_dtype, device):
    """Load tokenizers, VAE, text encoders and the SigLIP image encoder.

    All trainable modules are frozen, put in eval mode and moved to *device*.
    Returns the models in the order they are consumed by ``main``.
    """
    tokenizer_one = LlamaTokenizerFast.from_pretrained(
        pretrained_model_name_or_path, subfolder="tokenizer"
    )
    tokenizer_two = CLIPTokenizer.from_pretrained(
        pretrained_model_name_or_path, subfolder="tokenizer_2"
    )
    feature_extractor = SiglipImageProcessor.from_pretrained(
        siglip_model_name_or_path, subfolder="feature_extractor"
    )

    # VAE is kept in float32 for encoding precision; everything else in bf16.
    vae = AutoencoderKLHunyuanVideo.from_pretrained(
        pretrained_model_name_or_path, subfolder="vae", torch_dtype=torch.float32
    )
    video_processor = VideoProcessor(vae_scale_factor=vae.spatial_compression_ratio)

    text_encoder_one = LlamaModel.from_pretrained(
        pretrained_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype
    )
    text_encoder_two = CLIPTextModel.from_pretrained(
        pretrained_model_name_or_path, subfolder="text_encoder_2", torch_dtype=weight_dtype
    )
    image_encoder = SiglipVisionModel.from_pretrained(
        siglip_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
    )

    for model in (vae, text_encoder_one, text_encoder_two, image_encoder):
        model.requires_grad_(False)
        model.eval()

    vae = vae.to(device)
    text_encoder_one = text_encoder_one.to(device)
    text_encoder_two = text_encoder_two.to(device)
    image_encoder = image_encoder.to(device)

    return (
        tokenizer_one,
        tokenizer_two,
        feature_extractor,
        vae,
        video_processor,
        text_encoder_one,
        text_encoder_two,
        image_encoder,
    )


def _filter_unprocessed(batch, output_latent_folder):
    """Return *batch* restricted to samples whose feature file does not exist.

    Returns ``None`` when every sample in the batch is already cached, so the
    caller can skip the batch entirely.
    """
    meta = batch["video_metadata"]
    keep = []
    for i, (uttid, num_frame, height, width) in enumerate(
        zip(batch["uttid"], meta["num_frames"], meta["height"], meta["width"])
    ):
        output_path = os.path.join(
            output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt"
        )
        if os.path.exists(output_path):
            print(f"skipping {uttid}")
        else:
            keep.append(i)

    if not keep:
        return None

    return {
        "uttid": [batch["uttid"][i] for i in keep],
        "video_metadata": {
            "num_frames": [meta["num_frames"][i] for i in keep],
            "height": [meta["height"][i] for i in keep],
            "width": [meta["width"][i] for i in keep],
        },
        "videos": torch.stack([batch["videos"][i] for i in keep]),
        "prompts": [batch["prompts"][i] for i in keep],
        "first_frames_images": torch.stack([batch["first_frames_images"][i] for i in keep]),
    }


def _save_features(
    batch,
    output_latent_folder,
    vae_latents,
    prompt_embeds,
    pooled_prompt_embeds,
    prompt_attention_mask,
    image_embeds,
):
    """Write one ``.pt`` file per sample containing all cached features."""
    meta = batch["video_metadata"]
    for uttid, num_frame, height, width, lat, emb, pooled, mask, img in zip(
        batch["uttid"],
        meta["num_frames"],
        meta["height"],
        meta["width"],
        vae_latents,
        prompt_embeds,
        pooled_prompt_embeds,
        prompt_attention_mask,
        image_embeds,
    ):
        output_path = os.path.join(
            output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt"
        )
        torch.save(
            {
                "vae_latent": lat.cpu().detach(),
                "prompt_embed": emb.cpu().detach(),
                "pooled_prompt_embeds": pooled.cpu().detach(),
                "prompt_attention_mask": mask.cpu().detach(),
                "image_embeds": img.cpu().detach(),
            },
            output_path,
        )
        print(f"save latent to: {output_path}")


def main(rank, world_size, global_rank, stride, batch_size, dataloader_num_workers, csv_file, video_folder, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
    """Precompute VAE latents, text embeddings and SigLIP image embeddings for
    every clip in the dataset, caching one ``.pt`` file per clip.

    Already-cached clips (identified by output filename) are skipped, so the
    run can be resumed. Must be launched under ``torchrun``: the process group
    is assumed initialised and is destroyed before returning.
    """
    weight_dtype = torch.bfloat16
    device = rank
    seed = 42

    (
        tokenizer_one,
        tokenizer_two,
        feature_extractor,
        vae,
        video_processor,
        text_encoder_one,
        text_encoder_two,
        image_encoder,
    ) = _load_models(pretrained_model_name_or_path, siglip_model_name_or_path, weight_dtype, device)

    dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride, force_rebuild=True)
    sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=True, shuffle=True, seed=seed)
    dataloader = DataLoader(
        dataset,
        batch_sampler=sampler,
        collate_fn=collate_fn,
        num_workers=dataloader_num_workers,
        prefetch_factor=2 if dataloader_num_workers != 0 else None,
    )

    print(len(dataset), len(dataloader))
    accelerator = Accelerator()
    dataloader = accelerator.prepare(dataloader)
    print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
    print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")

    # Hoisted out of the loop: the output folder only needs creating once.
    os.makedirs(output_latent_folder, exist_ok=True)

    sampler.set_epoch(0)
    # NOTE(review): `rank` is the CUDA device index, so on multi-node runs one
    # progress bar appears per node (local device 0) — same as the original.
    pbar = tqdm(total=len(dataloader), desc="Processing") if rank == 0 else None

    for idx, batch in enumerate(dataloader):
        free_memory()

        batch = _filter_unprocessed(batch, output_latent_folder)
        if batch is None:
            print("skipping entire batch!")
            if pbar is not None:
                pbar.update(1)
                pbar.set_postfix({"batch": idx})
            continue

        with torch.no_grad():
            # VAE expects (B, C, T, H, W); the loader yields (B, T, C, H, W).
            pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
            vae_latents = vae.encode(pixel_values).latent_dist.sample()
            vae_latents = vae_latents * vae.config.scaling_factor

            prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
                tokenizer=tokenizer_one,
                text_encoder=text_encoder_one,
                tokenizer_2=tokenizer_two,
                text_encoder_2=text_encoder_two,
                prompt=batch["prompts"],
                device=device,
            )

            images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in batch["first_frames_images"]]
            image = video_processor.preprocess(
                image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1]
            )
            image_embeds = encode_image(
                feature_extractor,
                image_encoder,
                image,
                device=device,
                dtype=weight_dtype,
            )

        _save_features(
            batch,
            output_latent_folder,
            vae_latents,
            prompt_embeds,
            pooled_prompt_embeds,
            prompt_attention_mask,
            image_embeds,
        )

        if pbar is not None:
            pbar.update(1)
            pbar.set_postfix({"batch": idx})

        # Drop the large per-batch tensors before the next iteration so the
        # allocator can reuse the memory.
        del pixel_values, vae_latents, prompt_embeds, pooled_prompt_embeds
        del prompt_attention_mask, image_embeds, images, image, batch
        free_memory()

    dist.destroy_process_group()
|
| 274 |
+
|
| 275 |
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Script for running model training and data processing.")
    # Fixed copy-pasted help strings: --stride is a frame-sampling stride and
    # --csv_file/--video_folder are dataset paths, not config files.
    parser.add_argument("--stride", type=int, default=2, help="Frame sampling stride for the dataset")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
    parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
    parser.add_argument("--csv_file", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/train/sekai-game-drone_updated.csv", help="Path to the dataset CSV file")
    parser.add_argument("--video_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone", help="Folder containing the source videos")
    parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone/latents", help="Folder to store output latents")
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained model path")
    parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Siglip model path")
    args = parser.parse_args()

    setup_distributed_env()

    global_rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.cuda.current_device()
    world_size = dist.get_world_size()

    main(
        rank=device,
        world_size=world_size,
        global_rank=global_rank,
        stride=args.stride,
        batch_size=args.batch_size,
        dataloader_num_workers=args.dataloader_num_workers,
        csv_file=args.csv_file,
        video_folder=args.video_folder,
        output_latent_folder=args.output_latent_folder,
        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
        siglip_model_name_or_path=args.siglip_model_name_or_path,
    )
|
dataset_code/spatialvid/utils_framepack.py
ADDED
|
@@ -0,0 +1,1229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import random
|
| 3 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from einops import rearrange, repeat
|
| 8 |
+
|
| 9 |
+
from diffusers.training_utils import compute_density_for_timestep_sampling
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Prompt template wrapped around each caption before it is fed to the Llama
# text encoder (HunyuanVideo style).  "crop_start" is the number of leading
# template tokens stripped from the encoder output so that only the caption's
# embedding remains — 95 matches this exact template string; changing the
# template requires recomputing it (see _get_llama_prompt_embeds).
DEFAULT_PROMPT_TEMPLATE = {
    "template": (
        "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
        "1. The main content and theme of the video."
        "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
        "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
        "4. background environment, light, style and atmosphere."
        "5. camera angles, movements, and transitions used in the video:<|eot_id|>"
        "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
    ),
    "crop_start": 95,
}
|
| 24 |
+
|
| 25 |
+
def get_config_value(args, name):
    """Resolve *name* on ``args`` directly, falling back to ``args.training_config``.

    Raises:
        AttributeError: when the attribute exists on neither object.
    """
    if hasattr(args, name):
        return getattr(args, name)

    training_config = getattr(args, "training_config", None)
    if training_config is not None and hasattr(training_config, name):
        return getattr(training_config, name)

    raise AttributeError(f"Neither args nor args.training_config has attribute '{name}'")
|
| 32 |
+
|
| 33 |
+
# Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_llama_prompt_embeds
|
| 34 |
+
# Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_llama_prompt_embeds
def _get_llama_prompt_embeds(
    tokenizer,
    text_encoder,
    prompt: Union[str, List[str]],
    prompt_template: Dict[str, Any],
    num_videos_per_prompt: int = 1,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
    max_sequence_length: int = 256,
    num_hidden_layers_to_skip: int = 2,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Encode *prompt* with the Llama text encoder.

    The prompt is wrapped in ``prompt_template`` before tokenization; the
    template's leading tokens are cropped from the output so only the caption
    embedding remains.  Hidden states are taken ``num_hidden_layers_to_skip``
    layers before the last, per the HunyuanVideo recipe.

    Returns:
        Tuple of ``(prompt_embeds, prompt_attention_mask)`` with batch
        dimension ``len(prompt) * num_videos_per_prompt``.
    """
    # Removed the no-op self-assignments `device = device; dtype = dtype`
    # left over from the diffusers pipeline copy.
    prompt = [prompt] if isinstance(prompt, str) else prompt
    batch_size = len(prompt)

    prompt = [prompt_template["template"].format(p) for p in prompt]

    crop_start = prompt_template.get("crop_start", None)
    if crop_start is None:
        # Tokenize the bare template once to measure how many tokens to crop.
        prompt_template_input = tokenizer(
            prompt_template["template"],
            padding="max_length",
            return_tensors="pt",
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=False,
        )
        crop_start = prompt_template_input["input_ids"].shape[-1]
        # Remove <|eot_id|> token and placeholder {}
        crop_start -= 2

    max_sequence_length += crop_start
    text_inputs = tokenizer(
        prompt,
        max_length=max_sequence_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
        return_length=False,
        return_overflowing_tokens=False,
        return_attention_mask=True,
    )
    text_input_ids = text_inputs.input_ids.to(device=device)
    prompt_attention_mask = text_inputs.attention_mask.to(device=device)

    prompt_embeds = text_encoder(
        input_ids=text_input_ids,
        attention_mask=prompt_attention_mask,
        output_hidden_states=True,
    ).hidden_states[-(num_hidden_layers_to_skip + 1)]
    prompt_embeds = prompt_embeds.to(dtype=dtype)

    # Crop the template tokens so only the caption embedding remains.
    if crop_start is not None and crop_start > 0:
        prompt_embeds = prompt_embeds[:, crop_start:]
        prompt_attention_mask = prompt_attention_mask[:, crop_start:]

    # duplicate text embeddings for each generation per prompt, using mps friendly method
    _, seq_len, _ = prompt_embeds.shape
    prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
    prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
    prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt)
    prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len)

    return prompt_embeds, prompt_attention_mask
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_clip_prompt_embeds
|
| 103 |
+
# Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_clip_prompt_embeds
def _get_clip_prompt_embeds(
    tokenizer_2,
    text_encoder_2,
    prompt: Union[str, List[str]],
    num_videos_per_prompt: int = 1,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
    max_sequence_length: int = 77,
) -> torch.Tensor:
    """Encode *prompt* with the CLIP text encoder and return pooled embeddings.

    Returns a tensor of shape ``(len(prompt) * num_videos_per_prompt, dim)``.
    NOTE: the ``dtype`` parameter is accepted for interface symmetry but the
    output keeps the encoder's native dtype, matching the original behavior.
    """
    # Removed the no-op self-assignments `device = device; dtype = dtype` and
    # the dead `untruncated_ids` / `batch_decode` computation whose result was
    # discarded (in diffusers it fed a truncation warning that was dropped
    # here).  Prompts longer than max_sequence_length are silently truncated.
    prompt = [prompt] if isinstance(prompt, str) else prompt
    batch_size = len(prompt)

    text_inputs = tokenizer_2(
        prompt,
        padding="max_length",
        max_length=max_sequence_length,
        truncation=True,
        return_tensors="pt",
    )
    text_input_ids = text_inputs.input_ids

    prompt_embeds = text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output

    # duplicate text embeddings for each generation per prompt, using mps friendly method
    prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt)
    prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1)

    return prompt_embeds
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline.encode_prompt
def encode_prompt(
    tokenizer,
    text_encoder,
    tokenizer_2,
    text_encoder_2,
    prompt: Union[str, List[str]],
    prompt_2: Union[str, List[str]] = None,
    prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
    num_videos_per_prompt: int = 1,
    prompt_embeds: Optional[torch.Tensor] = None,
    pooled_prompt_embeds: Optional[torch.Tensor] = None,
    prompt_attention_mask: Optional[torch.Tensor] = None,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
    max_sequence_length: int = 256,
):
    """Encode prompts with both HunyuanVideo text encoders (Llama + CLIP).

    Args:
        tokenizer / text_encoder: Llama tokenizer and encoder; produce per-token
            embeddings plus an attention mask via ``_get_llama_prompt_embeds``.
        tokenizer_2 / text_encoder_2: CLIP tokenizer and encoder; produce pooled
            embeddings via ``_get_clip_prompt_embeds``.
        prompt: Primary prompt(s), fed to the Llama encoder.
        prompt_2: Optional secondary prompt(s) for the CLIP encoder; falls back
            to ``prompt`` when None.
        prompt_template: Template forwarded to ``_get_llama_prompt_embeds``.
        num_videos_per_prompt: Batch duplication factor for both encoders.
        prompt_embeds / pooled_prompt_embeds / prompt_attention_mask:
            Precomputed values; when provided, the corresponding encoder pass
            is skipped.
        device, dtype: Forwarded to the encoder helpers.
        max_sequence_length: Max token length for the Llama encoder (CLIP is
            fixed at 77).

    Returns:
        Tuple ``(prompt_embeds, pooled_prompt_embeds, prompt_attention_mask)``.
    """
    if prompt_embeds is None:
        prompt_embeds, prompt_attention_mask = _get_llama_prompt_embeds(
            tokenizer,
            text_encoder,
            prompt,
            prompt_template,
            num_videos_per_prompt,
            device=device,
            dtype=dtype,
            max_sequence_length=max_sequence_length,
        )

    if pooled_prompt_embeds is None:
        if prompt_2 is None:
            prompt_2 = prompt
        # Bug fix: pass `prompt_2` to the CLIP encoder. The previous code passed
        # `prompt`, so a caller-supplied secondary prompt was silently ignored;
        # the diffusers original encodes `prompt_2` here.
        pooled_prompt_embeds = _get_clip_prompt_embeds(
            tokenizer_2,
            text_encoder_2,
            prompt_2,
            num_videos_per_prompt,
            device=device,
            dtype=dtype,
            max_sequence_length=77,
        )

    return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def encode_image(
    feature_extractor,
    image_encoder,
    image: torch.Tensor,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
):
    """Encode an image with the vision encoder and return its hidden states.

    Args:
        feature_extractor: Preprocessor; called with ``images=...`` and expected
            to return a batch whose tensors support ``.to(device=..., dtype=...)``
            and can be splatted into ``image_encoder``.
        image_encoder: Vision encoder; its ``last_hidden_state`` is returned.
        image: Image tensor in ``[-1, 1]``.
        device: Device the preprocessed batch is moved to.
        dtype: Output dtype of the returned embeddings.

    Returns:
        ``last_hidden_state`` of the encoder, cast to ``dtype``.
    """
    image = (image + 1) / 2.0  # [-1, 1] -> [0, 1]
    # do_rescale=False: the input is already normalized above.
    image = feature_extractor(images=image, return_tensors="pt", do_rescale=False).to(
        device=device, dtype=image_encoder.dtype
    )
    image_embeds = image_encoder(**image).last_hidden_state
    return image_embeds.to(dtype=dtype)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def get_framepack_input_t2v(
    vae,
    pixel_values,  # [-1, 1], (B, C, F, H, W)
    latent_window_size: int = 9,
    vanilla_sampling: bool = False,
    dtype: Optional[torch.dtype] = None,
    is_keep_x0=False,
):
    """Build per-section FramePack training inputs for text-to-video.

    The video is VAE-encoded, trimmed so its latent frame count is a multiple of
    ``latent_window_size``, and split into sections of ``latent_window_size``
    latent frames. For each section the function emits the target latents plus
    "clean" context latents at 1x/2x/4x granularity and the FramePack-F1 index
    layout, batched over all sections of all videos.

    Args:
        vae: VAE with ``encode(...).latent_dist.sample()``, ``config.scaling_factor``,
            ``device`` and ``dtype``.
        pixel_values: Video tensor in ``[-1, 1]`` of shape (B, C, F, H, W);
            F is expected to be of the form 4n+1.
        latent_window_size: Latent frames per section.
        vanilla_sampling: Must be True; only the FramePack-F1 (forward-in-time)
            layout is implemented for T2V.
        dtype: dtype of the returned latents.
        is_keep_x0: When True, the first latent frame is always injected as an
            extra clean-context frame (1+1x layout from section 1 onward).

    Returns:
        9-tuple of
        (target_latents, target_latent_indices,
         clean_latents, clean_latent_indices,
         clean_latents_2x, clean_latent_2x_indices,
         clean_latents_4x, clean_latent_4x_indices,
         section_to_video_idx) — tensors batched along dim 0 with one entry per
        section, and a list mapping each section to its source video index.

    Raises:
        ValueError: If the clip is too short for a single section.
        NotImplementedError: If ``vanilla_sampling`` is False.
    """
    # calculate latent frame count from original frame count (4n+1)
    latent_f = (pixel_values.shape[2] - 1) // 4 + 1
    # assert latent_f % latent_window_size == 0

    # calculate the total number of sections (divided by window size)
    total_latent_sections = math.floor(latent_f / latent_window_size)  # 2.0
    if total_latent_sections < 1:
        min_frames_needed = latent_window_size * 4 + 1
        # NOTE(review): the "{latent_window_size + 1} latent frames" wording is
        # inherited from the I2V variant; message kept as-is.
        raise ValueError(
            f"Not enough frames for FramePack: {pixel_values.shape[2]} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size + 1} latent frames)"
        )

    # actual latent frame count (aligned to section boundaries)
    latent_f_aligned = total_latent_sections * latent_window_size

    # actual video frame count
    frame_count_aligned = (latent_f_aligned - 1) * 4 + 1  # 73
    if frame_count_aligned != pixel_values.shape[2]:  # 73 != 89
        print(
            f"Frame count mismatch: required={frame_count_aligned} != actual={pixel_values.shape[2]}, trimming to {frame_count_aligned}"
        )
        pixel_values = pixel_values[
            :, :, :frame_count_aligned, :, :
        ]  # torch.Size([1, 3, 89, 480, 832]) -> torch.Size([1, 3, 73, 480, 832])

    latent_f = latent_f_aligned  # Update to the aligned value

    # VAE encode
    pixel_values = pixel_values.to(device=vae.device, dtype=vae.dtype)
    latents = vae.encode(pixel_values).latent_dist.sample()
    latents = latents * vae.config.scaling_factor
    latents = latents.to(dtype=dtype)

    # One entry per section, batched at the end.
    all_target_latents = []
    all_target_latent_indices = []
    all_clean_latents = []
    all_clean_latent_indices = []
    all_clean_latents_2x = []
    all_clean_latent_2x_indices = []
    all_clean_latents_4x = []
    all_clean_latent_4x_indices = []
    section_to_video_idx = []

    if vanilla_sampling:
        # Vanilla Sampling Logic (FramePack-F1: sections processed forward in time)
        if is_keep_x0:
            for b in range(latents.shape[0]):
                video_lat = latents[b : b + 1]  # Keep batch dim: 1, C, F_aligned, H, W

                for section_index in range(total_latent_sections):
                    target_start_f = section_index * latent_window_size
                    target_end_f = target_start_f + latent_window_size
                    start_latent = video_lat[:, :, 0:1, :, :]
                    target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]

                    # Clean latents preparation (Vanilla)
                    # Section 0 has no x0 to prepend, so it uses the 2+2+16 layout;
                    # later sections reserve one slot for the injected x0 (1+2+16).
                    if section_index == 0:
                        clean_latents_total_count = 2 + 2 + 16
                    else:
                        clean_latents_total_count = 1 + 2 + 16
                    history_latents = torch.zeros(
                        size=(
                            1,
                            16,
                            clean_latents_total_count,
                            video_lat.shape[-2],
                            video_lat.shape[-1],
                        ),
                        device=video_lat.device,
                        dtype=video_lat.dtype,
                    )

                    # Copy the most recent real frames into the tail of the
                    # zero-initialized history buffer; earlier slots stay zero
                    # when the video does not reach that far back.
                    history_start_f = 0
                    video_start_f = target_start_f - clean_latents_total_count
                    copy_count = clean_latents_total_count

                    if video_start_f < 0:
                        history_start_f = -video_start_f
                        copy_count = clean_latents_total_count - history_start_f
                        video_start_f = 0
                    if copy_count > 0:
                        history_latents[:, :, history_start_f:] = video_lat[
                            :, :, video_start_f : video_start_f + copy_count, :, :
                        ]

                    # indices generation (Vanilla): copy from FramePack-F1
                    if section_index == 0:
                        indices = torch.arange(0, sum([16, 2, 2, latent_window_size])).unsqueeze(0)
                        (
                            clean_latent_4x_indices,
                            clean_latent_2x_indices,
                            clean_latent_indices,
                            latent_indices,
                        ) = indices.split([16, 2, 2, latent_window_size], dim=1)
                        clean_latents_4x, clean_latents_2x, clean_latents = history_latents.split([16, 2, 2], dim=2)
                    else:
                        indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
                        (
                            clean_latent_indices_start,
                            clean_latent_4x_indices,
                            clean_latent_2x_indices,
                            clean_latent_1x_indices,
                            latent_indices,
                        ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
                        clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)

                        clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2)
                        # Inject the true first latent frame (x0) as clean context.
                        clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)

                    all_target_latents.append(target_latents)
                    all_target_latent_indices.append(latent_indices)
                    all_clean_latents.append(clean_latents)
                    all_clean_latent_indices.append(clean_latent_indices)
                    all_clean_latents_2x.append(clean_latents_2x)
                    all_clean_latent_2x_indices.append(clean_latent_2x_indices)
                    all_clean_latents_4x.append(clean_latents_4x)
                    all_clean_latent_4x_indices.append(clean_latent_4x_indices)
                    section_to_video_idx.append(b)
        else:
            for b in range(latents.shape[0]):
                video_lat = latents[b : b + 1]  # Keep batch dim: 1, C, F_aligned, H, W

                for section_index in range(total_latent_sections):
                    target_start_f = section_index * latent_window_size
                    target_end_f = target_start_f + latent_window_size
                    target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]

                    # Clean latents preparation (Vanilla): fixed 2+2+16 history layout.
                    clean_latents_total_count = 2 + 2 + 16
                    history_latents = torch.zeros(
                        size=(
                            1,
                            16,
                            clean_latents_total_count,
                            video_lat.shape[-2],
                            video_lat.shape[-1],
                        ),
                        device=video_lat.device,
                        dtype=video_lat.dtype,
                    )

                    # Copy the most recent real frames into the tail of the buffer.
                    history_start_f = 0
                    video_start_f = target_start_f - clean_latents_total_count
                    copy_count = clean_latents_total_count

                    if video_start_f < 0:
                        history_start_f = -video_start_f
                        copy_count = clean_latents_total_count - history_start_f
                        video_start_f = 0
                    if copy_count > 0:
                        history_latents[:, :, history_start_f:] = video_lat[
                            :, :, video_start_f : video_start_f + copy_count, :, :
                        ]

                    # indices generation (Vanilla): copy from FramePack-F1
                    indices = torch.arange(0, sum([16, 2, 2, latent_window_size])).unsqueeze(0)
                    (
                        clean_latent_4x_indices,
                        clean_latent_2x_indices,
                        clean_latent_indices,
                        latent_indices,
                    ) = indices.split([16, 2, 2, latent_window_size], dim=1)
                    clean_latents_4x, clean_latents_2x, clean_latents = history_latents.split([16, 2, 2], dim=2)

                    all_target_latents.append(target_latents)
                    all_target_latent_indices.append(latent_indices)
                    all_clean_latents.append(clean_latents)
                    all_clean_latent_indices.append(clean_latent_indices)
                    all_clean_latents_2x.append(clean_latents_2x)
                    all_clean_latent_2x_indices.append(clean_latent_2x_indices)
                    all_clean_latents_4x.append(clean_latents_4x)
                    all_clean_latent_4x_indices.append(clean_latent_4x_indices)
                    section_to_video_idx.append(b)
    else:
        # Previously `pass`, which fell through to torch.cat([]) and crashed with
        # a cryptic RuntimeError. Fail fast with an explicit error instead.
        raise NotImplementedError("get_framepack_input_t2v only supports vanilla_sampling=True")

    # Stack all sections into batches
    batched_target_latents = torch.cat(all_target_latents, dim=0)
    batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0)
    batched_clean_latents = torch.cat(all_clean_latents, dim=0)
    batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0)
    batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0)
    batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0)
    batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0)
    batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0)

    return (
        batched_target_latents,
        batched_target_latent_indices,
        batched_clean_latents,
        batched_clean_latent_indices,
        batched_clean_latents_2x,
        batched_clean_latent_2x_indices,
        batched_clean_latents_4x,
        batched_clean_latent_4x_indices,
        section_to_video_idx,
    )
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def get_framepack_input_i2v(
    vae,
    pixel_values,  # [-1, 1], (B, C, F, H, W)
    latent_window_size: int = 9,
    vanilla_sampling: bool = False,
    dtype: Optional[torch.dtype] = None,
):
    """Build per-section FramePack training inputs for image-to-video.

    The video is VAE-encoded and trimmed so that, beyond the first (start)
    latent frame, the latent frame count is a multiple of ``latent_window_size``.
    Each section yields its target latents plus clean context latents at
    1x/2x/4x granularity and the matching index tensors.

    Two layouts:
      * ``vanilla_sampling=True`` — FramePack-F1 forward-in-time layout; clean
        context is taken directly from the real video history.
      * ``vanilla_sampling=False`` — emulates the original FramePack inference
        order (future to past) with the padding trick; ground-truth latents
        stand in for latents that would be generated at inference time.

    Returns a 9-tuple:
        (target_latents, target_latent_indices,
         clean_latents, clean_latent_indices,
         clean_latents_2x, clean_latent_2x_indices,
         clean_latents_4x, clean_latent_4x_indices,
         section_to_video_idx)
    where tensors are batched along dim 0 with one entry per section and
    ``section_to_video_idx`` maps each section to its source video index.

    Raises:
        ValueError: If the clip is too short for a single section.
    """
    # calculate latent frame count from original frame count (4n+1)
    latent_f = (pixel_values.shape[2] - 1) // 4 + 1

    # calculate the total number of sections (excluding the first frame, divided by window size)
    total_latent_sections = math.floor((latent_f - 1) / latent_window_size)  # 2.0
    if total_latent_sections < 1:
        min_frames_needed = latent_window_size * 4 + 1
        raise ValueError(
            f"Not enough frames for FramePack: {pixel_values.shape[2]} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size + 1} latent frames)"
        )

    # actual latent frame count (aligned to section boundaries)
    latent_f_aligned = total_latent_sections * latent_window_size + 1

    # actual video frame count
    frame_count_aligned = (latent_f_aligned - 1) * 4 + 1  # 73
    if frame_count_aligned != pixel_values.shape[2]:  # 73 != 89
        print(
            f"Frame count mismatch: required={frame_count_aligned} != actual={pixel_values.shape[2]}, trimming to {frame_count_aligned}"
        )
        pixel_values = pixel_values[
            :, :, :frame_count_aligned, :, :
        ]  # torch.Size([1, 3, 89, 480, 832]) -> torch.Size([1, 3, 73, 480, 832])

    latent_f = latent_f_aligned  # Update to the aligned value

    # VAE encode
    pixel_values = pixel_values.to(device=vae.device, dtype=vae.dtype)
    latents = vae.encode(pixel_values).latent_dist.sample()
    latents = latents * vae.config.scaling_factor
    latents = latents.to(dtype=dtype)

    # One entry per section; batched with torch.cat at the end.
    all_target_latents = []
    all_target_latent_indices = []
    all_clean_latents = []
    all_clean_latent_indices = []
    all_clean_latents_2x = []
    all_clean_latent_2x_indices = []
    all_clean_latents_4x = []
    all_clean_latent_4x_indices = []
    section_to_video_idx = []

    if vanilla_sampling:
        # Vanilla Sampling Logic
        for b in range(latents.shape[0]):
            video_lat = latents[b : b + 1]  # Keep batch dim: 1, C, F_aligned, H, W

            for section_index in range(total_latent_sections):
                # Targets start at 1: latent frame 0 is the start image.
                target_start_f = section_index * latent_window_size + 1
                target_end_f = target_start_f + latent_window_size
                target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]
                start_latent = video_lat[:, :, 0:1, :, :]

                # Clean latents preparation (Vanilla)
                # NOTE: history channel count is hard-coded to 16 — presumably the
                # VAE latent channel count; confirm against the VAE config.
                clean_latents_total_count = 1 + 2 + 16
                history_latents = torch.zeros(
                    size=(
                        1,
                        16,
                        clean_latents_total_count,
                        video_lat.shape[-2],
                        video_lat.shape[-1],
                    ),
                    device=video_lat.device,
                    dtype=video_lat.dtype,
                )

                # Copy the most recent real frames into the tail of the zero buffer;
                # slots the video does not reach stay zero.
                history_start_f = 0
                video_start_f = target_start_f - clean_latents_total_count
                copy_count = clean_latents_total_count

                if video_start_f < 0:
                    history_start_f = -video_start_f
                    copy_count = clean_latents_total_count - history_start_f
                    video_start_f = 0
                if copy_count > 0:
                    history_latents[:, :, history_start_f:] = video_lat[
                        :, :, video_start_f : video_start_f + copy_count, :, :
                    ]

                # indices generation (Vanilla): copy from FramePack-F1
                indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
                (
                    clean_latent_indices_start,
                    clean_latent_4x_indices,
                    clean_latent_2x_indices,
                    clean_latent_1x_indices,
                    latent_indices,
                ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
                clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)

                clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2)
                # Clean context = start image latent + most recent history frame.
                clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)

                all_target_latents.append(target_latents)
                all_target_latent_indices.append(latent_indices)
                all_clean_latents.append(clean_latents)
                all_clean_latent_indices.append(clean_latent_indices)
                all_clean_latents_2x.append(clean_latents_2x)
                all_clean_latent_2x_indices.append(clean_latent_2x_indices)
                all_clean_latents_4x.append(clean_latents_4x)
                all_clean_latent_4x_indices.append(clean_latent_4x_indices)
                section_to_video_idx.append(b)
    else:
        # padding is reversed for inference (future to past)
        latent_paddings = list(reversed(range(total_latent_sections)))  # [1, 0]
        # Note: The padding trick for inference. See the paper for details.
        if total_latent_sections > 4:
            latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]

        for b in range(latents.shape[0]):
            video_lat = latents[
                b : b + 1
            ]  # keep batch dim, (1, C, F, H, W) # torch.Size([1, 16, 19, 60, 104])

            # emulate inference step (history latents)
            # Note: In inference, history_latents stores *generated* future latents.
            # Here, for caching, we just need its shape and type for clean_* tensors.
            # The actual content doesn't matter much as clean_* will be overwritten.
            history_latents = torch.zeros(
                (
                    1,
                    video_lat.shape[1],
                    1 + 2 + 16,
                    video_lat.shape[3],
                    video_lat.shape[4],
                ),
                dtype=video_lat.dtype,
            ).to(video_lat.device)  # torch.Size([1, 16, 19, 60, 104])

            latent_f_index = latent_f - latent_window_size  # Start from the last section # 19 - 9 = 10
            section_index = total_latent_sections - 1  # 2 - 1 = 1

            for latent_padding in latent_paddings:
                is_last_section = (
                    section_index == 0
                )  # the last section in inference order == the first section in time
                latent_padding_size = latent_padding * latent_window_size
                if is_last_section:
                    assert latent_f_index == 1, "Last section should be starting from frame 1"

                # indices generation (same as inference)
                indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
                (
                    clean_latent_indices_pre,  # Index for start_latent
                    blank_indices,  # Indices for padding (future context in inference)
                    latent_indices,  # Indices for the target latents to predict
                    clean_latent_indices_post,  # Index for the most recent history frame
                    clean_latent_2x_indices,  # Indices for the next 2 history frames
                    clean_latent_4x_indices,  # Indices for the next 16 history frames
                ) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)

                # Indices for clean_latents (start + recent history)
                clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)

                # clean latents preparation (emulating inference)
                clean_latents_pre = video_lat[:, :, 0:1, :, :]  # Always the first frame (start_latent)
                clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[
                    :, :, : 1 + 2 + 16, :, :
                ].split([1, 2, 16], dim=2)
                clean_latents = torch.cat(
                    [clean_latents_pre, clean_latents_post], dim=2
                )  # Combine start frame + placeholder

                # Target latents for this section (ground truth)
                target_latents = video_lat[:, :, latent_f_index : latent_f_index + latent_window_size, :, :]

                all_target_latents.append(target_latents)
                all_target_latent_indices.append(latent_indices)
                all_clean_latents.append(clean_latents)
                all_clean_latent_indices.append(clean_latent_indices)
                all_clean_latents_2x.append(clean_latents_2x)
                all_clean_latent_2x_indices.append(clean_latent_2x_indices)
                all_clean_latents_4x.append(clean_latents_4x)
                all_clean_latent_4x_indices.append(clean_latent_4x_indices)
                section_to_video_idx.append(b)

                if is_last_section:  # If this was the first section generated in inference (time=0)
                    # History gets the start frame + the generated first section
                    generated_latents_for_history = video_lat[:, :, : latent_window_size + 1, :, :]
                else:
                    # History gets the generated current section
                    generated_latents_for_history = target_latents  # Use true latents as stand-in for generated

                # Prepend: history grows toward the past as inference walks backward.
                history_latents = torch.cat([generated_latents_for_history, history_latents], dim=2)

                section_index -= 1
                latent_f_index -= latent_window_size

    # Stack all sections into batches
    batched_target_latents = torch.cat(all_target_latents, dim=0)
    batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0)
    batched_clean_latents = torch.cat(all_clean_latents, dim=0)
    batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0)
    batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0)
    batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0)
    batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0)
    batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0)

    return (
        batched_target_latents,
        batched_target_latent_indices,
        batched_clean_latents,
        batched_clean_latent_indices,
        batched_clean_latents_2x,
        batched_clean_latent_2x_indices,
        batched_clean_latents_4x,
        batched_clean_latent_4x_indices,
        section_to_video_idx,
    )
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def get_pyramid_input(
    args,
    scheduler,
    latents,  # [b c t h w]
    pyramid_stage_num=3,
    pyramid_sample_ratios=(1, 2, 1),
    pyramid_sample_mode="efficient",  # ["efficient", "full", "diffusion_forcing", "stream_sample"]
    pyramid_stream_inference_steps=(10, 10, 10),
    stream_chunk_size=5,
):
    """Build (noisy latents, sigmas, timesteps, targets) for pyramid flow-matching training.

    Constructs a multi-resolution pyramid from ``latents``, matched per-stage noise,
    and interpolation start/end points for each pyramid stage, then samples noise
    levels according to ``pyramid_sample_mode``:

    - ``"efficient"``: splits the batch into ``sum(pyramid_sample_ratios)`` columns and
      assigns each column to one stage (one timestep draw per column).
    - ``"full"``: every video goes through every stage, ``pyramid_sample_ratios[i]``
      draws per stage.
    - ``"diffusion_forcing"``: like ``"full"`` but with an independent noise level per
      temporal chunk of ``stream_chunk_size`` latent frames.
    - ``"stream_sample"``: samples one row of a progressive streaming timestep schedule
      (see ``get_stream_sample``) so different chunks sit at different stages.

    Args:
        args: config/namespace read via ``get_config_value`` (weighting_scheme, logit_mean,
            logit_std, mode_scale).
        scheduler: pyramid flow-matching scheduler exposing ``config.num_train_timesteps``,
            ``start_sigmas``/``end_sigmas`` and ``timesteps_per_stage``/``sigmas_per_stage``.
        latents: clean VAE latents, shape [b, c, t, h, w].
        pyramid_stage_num: number of pyramid resolution stages.
        pyramid_sample_ratios: per-stage sampling multiplicity; length must equal
            ``pyramid_stage_num``.
        pyramid_sample_mode: one of the four modes above.
        pyramid_stream_inference_steps: per-stage inference step counts (stream mode only).
        stream_chunk_size: latent frames per chunk (forcing/stream modes only).

    Returns:
        (noisy_latents_list, sigmas_list, timesteps_list, targets_list). Element structure
        depends on the mode; targets are always ``start_point - end_point`` (rectified flow).

    Raises:
        ValueError: if ``pyramid_sample_mode`` is not a supported mode.
    """
    # Single source of truth for the accepted modes, reused in the error message so the
    # two can never drift apart (the previous message advertised a nonexistent mode).
    valid_modes = ["efficient", "full", "diffusion_forcing", "stream_sample"]
    assert pyramid_stage_num == len(pyramid_sample_ratios)
    if pyramid_sample_mode not in valid_modes:
        raise ValueError(
            f"Invalid pyramid_sample_mode: {pyramid_sample_mode}. Must be one of {valid_modes}."
        )

    # Get clean pyramid latent list (index 0 = lowest resolution after the reversal below).
    pyramid_latent_list = []
    pyramid_latent_list.append(latents)
    num_frames, height, width = latents.shape[-3], latents.shape[-2], latents.shape[-1]
    for _ in range(pyramid_stage_num - 1):
        height //= 2
        width //= 2
        # Downsample spatially only: fold time into the batch dim, interpolate, unfold.
        latents = rearrange(latents, "b c t h w -> (b t) c h w")
        latents = torch.nn.functional.interpolate(latents, size=(height, width), mode="bilinear")
        latents = rearrange(latents, "(b t) c h w -> b c t h w", t=num_frames)
        pyramid_latent_list.append(latents)
    pyramid_latent_list = list(reversed(pyramid_latent_list))

    # Get pyramid noise list (one noise tensor per stage, shared correlation across stages
    # via downsampling; the *2 compensates the variance reduction of bilinear averaging).
    noise = torch.randn_like(pyramid_latent_list[-1])
    device = noise.device
    dtype = pyramid_latent_list[-1].dtype
    latent_frame_num = noise.shape[2]
    input_video_num = noise.shape[0]

    height, width = noise.shape[-2], noise.shape[-1]
    noise_list = [noise]
    cur_noise = noise
    for i_s in range(pyramid_stage_num - 1):
        height //= 2
        width //= 2
        cur_noise = rearrange(cur_noise, "b c t h w -> (b t) c h w")
        cur_noise = F.interpolate(cur_noise, size=(height, width), mode="bilinear") * 2
        cur_noise = rearrange(cur_noise, "(b t) c h w -> b c t h w", t=latent_frame_num)
        noise_list.append(cur_noise)
    noise_list = list(reversed(noise_list))  # make sure from low res to high res

    # Get pyramid target list
    if pyramid_sample_mode == "efficient":
        assert input_video_num % (int(sum(pyramid_sample_ratios))) == 0
        # To calculate the padding batchsize and column size
        bsz = input_video_num // int(sum(pyramid_sample_ratios))
        column_size = int(sum(pyramid_sample_ratios))
        # Map each batch "column" to the pyramid stage it trains on.
        column_to_stage = {}
        i_sum = 0
        for i_s, column_num in enumerate(pyramid_sample_ratios):
            for index in range(i_sum, i_sum + column_num):
                column_to_stage[index] = i_s
            i_sum += column_num

        # from low resolution to high resolution
        noisy_latents_list = []
        sigmas_list = []
        targets_list = []
        timesteps_list = []
        training_steps = scheduler.config.num_train_timesteps
        for index in range(column_size):
            i_s = column_to_stage[index]
            clean_latent = pyramid_latent_list[i_s][index::column_size]  # [bs, c, t, h, w]
            last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1][index::column_size]
            start_sigma = scheduler.start_sigmas[i_s]
            end_sigma = scheduler.end_sigmas[i_s]

            if i_s == 0:
                # Lowest stage starts from pure noise.
                start_point = noise_list[i_s][index::column_size]
            else:
                # Get the upsampled latent from the previous (coarser) stage.
                last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
                last_clean_latent = F.interpolate(
                    last_clean_latent,
                    size=(
                        last_clean_latent.shape[-2] * 2,
                        last_clean_latent.shape[-1] * 2,
                    ),
                    mode="nearest",
                )
                last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
                start_point = start_sigma * noise_list[i_s][index::column_size] + (1 - start_sigma) * last_clean_latent

            if i_s == pyramid_stage_num - 1:
                # Final stage ends on the clean latent itself.
                end_point = clean_latent
            else:
                end_point = end_sigma * noise_list[i_s][index::column_size] + (1 - end_sigma) * clean_latent

            # Sample a random timestep for each image
            # for weighting schemes where we sample timesteps non-uniformly
            u = compute_density_for_timestep_sampling(
                weighting_scheme=get_config_value(args, 'weighting_scheme'),
                batch_size=bsz,
                logit_mean=get_config_value(args, 'logit_mean'),
                logit_std=get_config_value(args, 'logit_std'),
                mode_scale=get_config_value(args, 'mode_scale'),
            )
            indices = (u * training_steps).long()  # Totally 1000 training steps per stage
            indices = indices.clamp(0, training_steps - 1)
            timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device)

            # Add noise according to flow matching.
            # zt = (1 - texp) * x + texp * z1
            sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device)
            while len(sigmas.shape) < start_point.ndim:
                sigmas = sigmas.unsqueeze(-1)

            noisy_latents = sigmas * start_point + (1 - sigmas) * end_point

            # [stage1_latent, stage2_latent, ..., stagen_latent], which will be concat after patching
            noisy_latents_list.append([noisy_latents.to(dtype)])
            sigmas_list.append(sigmas.to(dtype))
            timesteps_list.append(timesteps.to(dtype))
            targets_list.append(start_point - end_point)  # The standard rectified flow matching objective
    elif pyramid_sample_mode == "full":
        # To calculate the batchsize
        bsz = input_video_num

        # from low resolution to high resolution
        noisy_latents_list = []
        sigmas_list = []
        targets_list = []
        timesteps_list = []
        training_steps = scheduler.config.num_train_timesteps
        for i_s, cur_sample_ratio in zip(range(pyramid_stage_num), pyramid_sample_ratios):
            clean_latent = pyramid_latent_list[i_s]  # [bs, c, t, h, w]
            last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1]
            start_sigma = scheduler.start_sigmas[i_s]
            end_sigma = scheduler.end_sigmas[i_s]

            if i_s == 0:
                start_point = noise_list[i_s]
            else:
                # Get the upsampled latent
                last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
                last_clean_latent = F.interpolate(
                    last_clean_latent,
                    size=(
                        last_clean_latent.shape[-2] * 2,
                        last_clean_latent.shape[-1] * 2,
                    ),
                    mode="nearest",
                )
                last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
                start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent

            if i_s == pyramid_stage_num - 1:
                end_point = clean_latent
            else:
                end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent

            # Draw cur_sample_ratio independent noise levels for this stage.
            for _ in range(cur_sample_ratio):
                # Sample a random timestep for each image
                # for weighting schemes where we sample timesteps non-uniformly
                u = compute_density_for_timestep_sampling(
                    weighting_scheme=get_config_value(args, 'weighting_scheme'),
                    batch_size=bsz,
                    logit_mean=get_config_value(args, 'logit_mean'),
                    logit_std=get_config_value(args, 'logit_std'),
                    mode_scale=get_config_value(args, 'mode_scale'),
                )
                indices = (u * training_steps).long()  # Totally 1000 training steps per stage
                indices = indices.clamp(0, training_steps - 1)
                timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device)

                # Add noise according to flow matching.
                # zt = (1 - texp) * x + texp * z1
                sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device)
                while len(sigmas.shape) < start_point.ndim:
                    sigmas = sigmas.unsqueeze(-1)

                noisy_latents = sigmas * start_point + (1 - sigmas) * end_point

                # [stage1_latent, stage2_latent, ..., stagen_latent]
                noisy_latents_list.append(noisy_latents.to(dtype))
                sigmas_list.append(sigmas.to(dtype))
                timesteps_list.append(timesteps.to(dtype))
                targets_list.append(start_point - end_point)  # The standard rectified flow matching objective
    elif pyramid_sample_mode == "diffusion_forcing":
        # To calculate the batchsize
        bsz = input_video_num
        latent_chunk_num = latent_frame_num // stream_chunk_size
        assert latent_frame_num % stream_chunk_size == 0

        # from low resolution to high resolution
        noisy_latents_list = []
        sigmas_list = []
        targets_list = []
        timesteps_list = []
        training_steps = scheduler.config.num_train_timesteps
        for i_s, cur_sample_ratio in zip(range(pyramid_stage_num), pyramid_sample_ratios):
            clean_latent = pyramid_latent_list[i_s]  # [bs, c, t, h, w]
            last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1]
            start_sigma = scheduler.start_sigmas[i_s]
            end_sigma = scheduler.end_sigmas[i_s]

            if i_s == 0:
                start_point = noise_list[i_s]
            else:
                # Get the upsampled latent
                last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
                last_clean_latent = F.interpolate(
                    last_clean_latent,
                    size=(
                        last_clean_latent.shape[-2] * 2,
                        last_clean_latent.shape[-1] * 2,
                    ),
                    mode="nearest",
                )
                last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
                start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent

            if i_s == pyramid_stage_num - 1:
                end_point = clean_latent
            else:
                end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent

            for _ in range(cur_sample_ratio):
                # Sample a random timestep for each image
                # for weighting schemes where we sample timesteps non-uniformly
                # One draw per (video, chunk): diffusion forcing gives every temporal chunk
                # its own noise level.
                u = compute_density_for_timestep_sampling(
                    weighting_scheme=get_config_value(args, 'weighting_scheme'),
                    batch_size=bsz * latent_chunk_num,
                    logit_mean=get_config_value(args, 'logit_mean'),
                    logit_std=get_config_value(args, 'logit_std'),
                    mode_scale=get_config_value(args, 'mode_scale'),
                )
                indices = (u * training_steps).long()  # Totally 1000 training steps per stage
                indices = indices.clamp(0, training_steps - 1)

                timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device)
                timesteps = timesteps.view(bsz, latent_chunk_num)  # [bsz, latent_chunk_num]
                sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device)
                sigmas = sigmas.view(bsz, latent_chunk_num)  # [bsz, latent_chunk_num]

                # Broadcast the per-chunk level to every latent frame in the chunk.
                chunk_index = (
                    torch.arange(latent_frame_num, device=device).unsqueeze(0).expand(bsz, -1) // stream_chunk_size
                )
                chunk_index = chunk_index.clamp(max=latent_chunk_num - 1)
                sigmas = torch.gather(sigmas, 1, chunk_index)  # [bsz, t]
                timesteps = torch.gather(timesteps, 1, chunk_index)

                # Add noise according to flow matching.
                # zt = (1 - texp) * x + texp * z1
                sigmas = (
                    sigmas.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # reshape to [bsz, 1, t, 1, 1] for broadcasting
                noisy_latents = sigmas * start_point + (1 - sigmas) * end_point

                # [stage1_latent, stage2_latent, ..., stagen_latent]
                noisy_latents_list.append(noisy_latents.to(dtype))  # torch.Size([2, 16, 10, 12, 20])
                sigmas_list.append(sigmas.to(dtype))  # torch.Size([2, 1, 10, 1, 1])
                timesteps_list.append(timesteps.to(dtype))  # torch.Size([2, 10])
                targets_list.append(start_point - end_point)  # The standard rectified flow matching objective
    elif pyramid_sample_mode == "stream_sample":
        # Streaming schedule: get_stream_sample builds a progressive timestep ladder where,
        # within one training example, later chunks sit at higher noise (earlier stages).
        # E.g. with stream_chunk_size=3 one row looks like
        #   [386., 386., 386., 744., 744., 744., 1000., 1000., 1000.]  (high, mid, low noise)
        # and successive rows walk the whole ladder down toward clean latents.

        # To calculate the batchsize
        bsz = input_video_num

        # Get multi stage timesteps for streamgen
        (
            training_num_steps_to_be_saved,
            training_all_timesteps_stage_ids,
            training_all_progressive_timesteps,
            progressive_timesteps_stages,
        ) = get_stream_sample(
            scheduler=scheduler,
            max_latent_frame_num=latent_frame_num,
            stream_chunk_size=stream_chunk_size,
            pyramid_stage_num=pyramid_stage_num,
            pyramid_stream_inference_steps=pyramid_stream_inference_steps,
        )
        # Reverse lookup: timestep value -> pyramid stage id.
        timestep_to_stage = {
            float(t.item()): int(stage.item())
            for t, stage in zip(training_all_progressive_timesteps[0], training_all_timesteps_stage_ids[0])
        }

        while True:
            initialization = random.choice([True, False])
            termination = random.choice([True, False])
            if not (initialization and termination):  # Make sure not both are True
                break

        stage_i = random.randint(0, training_num_steps_to_be_saved - 1)
        timesteps = progressive_timesteps_stages[stage_i].clone().repeat(bsz, 1)  # (b, f)
        if initialization:  # get the ending timesteps, [999]x5 from [91, 192, ..., 999]x5
            timesteps = timesteps[:, -latent_frame_num:]
        elif termination:  # get the starting timesteps, [91]x5 from [91, ..., 999]x5
            timesteps = timesteps[:, :latent_frame_num]

        # For stage mapping / Get sigmas
        sigmas, stage_latent_mapping = get_sigmas_from_pyramid_timesteps(scheduler, timesteps, timestep_to_stage)

        # To device
        timesteps = timesteps.to(device)
        sigmas = sigmas.to(device)

        # Get pyramid stage points (start/end of the flow segment for every stage).
        stage_point_list = []
        for i_s in range(pyramid_stage_num):
            clean_latent = pyramid_latent_list[i_s]  # [bs, c, t, h, w]
            last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1]
            start_sigma = scheduler.start_sigmas[i_s]
            end_sigma = scheduler.end_sigmas[i_s]

            if i_s == 0:
                start_point = noise_list[i_s]
            else:
                # Get the upsampled latent
                last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
                last_clean_latent = F.interpolate(
                    last_clean_latent,
                    size=(
                        last_clean_latent.shape[-2] * 2,
                        last_clean_latent.shape[-1] * 2,
                    ),
                    mode="nearest",
                )
                last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
                start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent

            if i_s == pyramid_stage_num - 1:
                end_point = clean_latent
            else:
                end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent

            stage_point_list.append((start_point, end_point))

        noisy_latents_list = []  # torch.Size([2, 16, 10, 12, 20])
        targets_list = []  # torch.Size([2, 16, 10, 12, 20])
        sigmas_list = []  # torch.Size([2, 1, 10, 1, 1])
        timesteps_list = []  # torch.Size([2, 10])
        temp_noisy_latents_list = []
        temp_targets_list = []

        # Walk stages from high-res to low-res and noise each chunk at its own stage.
        unique_elements = list(map(int, torch.unique(stage_latent_mapping)))
        for cur_stage in reversed(unique_elements):
            stage_indices = torch.nonzero(stage_latent_mapping == cur_stage, as_tuple=True)
            start_index = stage_indices[1][0].item()
            end_index = start_index + stream_chunk_size

            start_point, end_point = stage_point_list[cur_stage]
            start_point_slice = start_point[:, :, start_index:end_index, :, :]
            end_point_slice = end_point[:, :, start_index:end_index, :, :]

            sigmas_slice = sigmas[:, :, start_index:end_index, :, :]
            noisy_latents = sigmas_slice * start_point_slice + (1 - sigmas_slice) * end_point_slice
            target = start_point_slice - end_point_slice

            temp_noisy_latents_list.append(noisy_latents.to(dtype))
            temp_targets_list.append(target)

        noisy_latents_list.append(temp_noisy_latents_list)
        targets_list.append(temp_targets_list)
        sigmas_list.append(sigmas.to(dtype))
        timesteps_list.append(timesteps.to(dtype=dtype))

    return noisy_latents_list, sigmas_list, timesteps_list, targets_list
|
| 1029 |
+
|
| 1030 |
+
|
| 1031 |
+
def get_sigmas_from_pyramid_timesteps(scheduler, timesteps, timestep_to_stage):
    """Resolve each timestep to its pyramid stage and the matching per-stage sigma.

    Args:
        scheduler: object exposing ``timesteps_per_stage`` and ``sigmas_per_stage``
            (one 1-D tensor per stage, aligned index-for-index).
        timesteps: [b, t] tensor of timestep values.
        timestep_to_stage: dict mapping float timestep value -> stage id.

    Returns:
        Tuple of ``(sigmas, stage_latent_mapping)`` where ``sigmas`` has shape
        [b, 1, t, 1, 1] (ready to broadcast against latents) and
        ``stage_latent_mapping`` has shape [b, t] with -1 for unmapped timesteps.
    """
    # Stage id per entry; timesteps absent from the mapping get a -1 sentinel.
    stage_ids = [timestep_to_stage.get(float(ts.item()), -1) for ts in timesteps.flatten()]
    stage_latent_mapping = torch.tensor(stage_ids, device=timesteps.device).view(timesteps.shape)

    n_rows, n_cols = timesteps.shape
    sigmas = torch.full_like(timesteps, -1.0)
    for row in range(n_rows):
        for col in range(n_cols):
            stage = int(stage_latent_mapping[row, col])
            stage_timesteps = scheduler.timesteps_per_stage[stage]
            # Find the (single) position of this timestep within its stage schedule;
            # isclose tolerates float round-trips through the mapping dict.
            query = timesteps[row, col].clone().detach().to(stage_timesteps.dtype)
            match_pos = torch.isclose(stage_timesteps, query).nonzero(as_tuple=True)[0].item()
            sigmas[row, col] = scheduler.sigmas_per_stage[stage][match_pos]

    # [b, t] -> [b, 1, t, 1, 1] for broadcasting over [b, c, t, h, w] latents.
    sigmas = sigmas.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)

    return sigmas, stage_latent_mapping
|
| 1059 |
+
|
| 1060 |
+
|
| 1061 |
+
def get_stream_sample(
    scheduler,
    max_latent_frame_num,
    stream_chunk_size,
    pyramid_stage_num=3,
    pyramid_stream_inference_steps=(10, 10, 10),
):
    """Build the progressive streaming timestep schedule used by "stream_sample" mode.

    Concatenates every stage's inference timesteps into one ascending ladder, then
    slices it into ``num_steps_to_be_saved`` interleaved rows where each chunk of
    ``stream_chunk_size`` latent frames shares a single timestep.

    Args:
        scheduler: pyramid scheduler; ``set_timesteps(n, stage)`` must populate
            ``scheduler.timesteps``.
        max_latent_frame_num: total latent frames; must be a multiple of
            ``stream_chunk_size``.
        stream_chunk_size: latent frames that share one noise level.
        pyramid_stage_num: number of pyramid stages.
        pyramid_stream_inference_steps: inference step count per stage. Defaults to an
            immutable tuple (a mutable list default is the classic shared-state pitfall).

    Returns:
        Tuple ``(num_steps_to_be_saved, all_timesteps_stage_ids,
        all_progressive_timesteps, progressive_timesteps_stages)`` where the last
        element is a list of [1, max_latent_frame_num] tensors, one per saved step.

    Raises:
        AssertionError: if the divisibility constraints between frames, chunk size and
            total inference steps are violated.
    """
    max_inference_steps = sum(pyramid_stream_inference_steps)

    # Set training all progressive timesteps and stage mapping
    all_progressive_timesteps_list = []
    timestep_stage_list = []
    for stage_idx in range(pyramid_stage_num):
        scheduler.set_timesteps(pyramid_stream_inference_steps[stage_idx], stage_idx)
        temp_timesteps = scheduler.timesteps  # shape: (n_i,)
        all_progressive_timesteps_list.append(temp_timesteps)
        timestep_stage_list.append(
            torch.full_like(temp_timesteps, fill_value=stage_idx)
        )  # same shape, filled with stage_idx
    # flip(1) reverses per-stage descending schedules into one ascending ladder.
    all_progressive_timesteps = torch.cat(all_progressive_timesteps_list).unsqueeze(0).flip(1)  # (1, T)
    all_timesteps_stage_ids = torch.cat(timestep_stage_list).unsqueeze(0).flip(1)

    # Set training progressive timesteps stages
    # every stream_chunk_size frames is treated as one, using the same noise level. f' = f / c
    assert max_latent_frame_num % stream_chunk_size == 0, (
        f"num_frames should be multiple of stream_chunk_size, {max_latent_frame_num} % {stream_chunk_size} != 0"
    )
    assert max_inference_steps % (max_latent_frame_num // stream_chunk_size) == 0, (
        f"max_inference_steps should be multiple of max_latent_frame_num // stream_chunk_size, {max_inference_steps} % {max_latent_frame_num // stream_chunk_size} != 0"
    )
    num_steps_to_be_saved = max_inference_steps // (
        max_latent_frame_num // stream_chunk_size
    )  # every m steps, save stream_chunk_size frames. m = t / f' = t / (f / c) = c * (t / f)

    # (b, t) -> [(b, t / m) in reverse range(m)] -> [(b, f) in reverse range(m)]
    progressive_timesteps_stages = [
        repeat(
            all_progressive_timesteps[:, (num_steps_to_be_saved - 1) - s :: num_steps_to_be_saved],
            "b f -> b f c",
            c=stream_chunk_size,
        ).flatten(1, 2)
        for s in range(num_steps_to_be_saved)
    ]

    return num_steps_to_be_saved, all_timesteps_stage_ids, all_progressive_timesteps, progressive_timesteps_stages
|
| 1106 |
+
|
| 1107 |
+
|
| 1108 |
+
if __name__ == "__main__":
    # Ad-hoc harness: exercises get_framepack_input_i2v with a random video batch.
    # Requires a CUDA device and a local HunyuanVideo VAE checkpoint (path below).
    import argparse

    # Timestep-sampling knobs consumed by get_pyramid_input via get_config_value.
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--weighting_scheme",
        type=str,
        default="logit_normal",
        choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
        help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'),
    )
    parser.add_argument(
        "--logit_mean",
        type=float,
        default=0.0,
        help="mean to use when using the `'logit_normal'` weighting scheme.",
    )
    parser.add_argument(
        "--logit_std",
        type=float,
        default=1.0,
        help="std to use when using the `'logit_normal'` weighting scheme.",
    )
    parser.add_argument(
        "--mode_scale",
        type=float,
        default=1.29,
        help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
    )
    args = parser.parse_args()

    device = "cuda"

    import sys

    # Make the sibling `scheduler` package importable when run from this directory.
    sys.path.append("../")
    from scheduler.scheduling_flow_matching_pyramid import PyramidFlowMatchEulerDiscreteScheduler

    # Three pyramid stages; NOTE: only len(stages) is passed to the scheduler, not the values.
    stages = [1, 2, 4]
    timestep_shift = 1.0
    stage_range = [0, 1 / 3, 2 / 3, 1]
    scheduler_gamma = 1 / 3
    scheduler = PyramidFlowMatchEulerDiscreteScheduler(
        shift=timestep_shift,
        stages=len(stages),
        stage_range=stage_range,
        gamma=scheduler_gamma,
    )
    print(
        f"The start sigmas and end sigmas of each stage is Start: {scheduler.start_sigmas}, End: {scheduler.end_sigmas}, Ori_start: {scheduler.ori_start_sigmas}"
    )

    # Test get_framepack_input
    from diffusers import AutoencoderKLHunyuanVideo

    # Valid pixel-frame counts per latent_window_size (window: supported frame totals):
    # 5: (21, 41, 61, 81, 101)
    # 6: (25, 49, 73, 97, 121)
    # 7: (29, 57, 85, 113, 141)
    # 8: (33, 65, 97, 129, 161)
    # 9: (37, 73, 109, 145, 181)
    # 10: (41, 81, 121, 161, 201)
    # 11: (45, 89, 133, 177, 221)
    # 12: (49, 97, 145, 193, 241)

    # Synthetic stand-in for a video batch in [-1, 1]: [b=2, c=3, frames=241, h=384, w=640].
    pixel_values = torch.randn([2, 3, 241, 384, 640], device=device).clamp(-1, 1)
    pixel_values = pixel_values.to(torch.bfloat16)
    vae = AutoencoderKLHunyuanVideo.from_pretrained(
        "/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/",
        subfolder="vae",
        weight_dtype=torch.bfloat16,
    ).to(device)
    vae.requires_grad_(False)
    vae.eval()

    # Shapes in the trailing comments are examples from a previous run — TODO confirm
    # they match the current input sizes.
    (
        model_input,  # torch.Size([2, 16, 9, 60, 104])
        indices_latents,  # torch.Size([2, 9])
        latents_clean,  # torch.Size([2, 16, 2, 60, 104])
        indices_clean_latents,  # torch.Size([2, 2])
        latents_history_2x,  # torch.Size([2, 16, 2, 60, 104])
        indices_latents_history_2x,  # torch.Size([2, 2])
        latents_history_4x,  # torch.Size([2, 16, 16, 60, 104])
        indices_latents_history_4x,  # torch.Size([2, 16])
        section_to_video_idx,
    ) = get_framepack_input_i2v(
        vae=vae,
        pixel_values=pixel_values,  # torch.Size([1, 3, 73, 480, 832])
        latent_window_size=12,
        vanilla_sampling=False,
        dtype=torch.bfloat16,
    )

    print(indices_latents, "\n", indices_clean_latents, "\n", indices_latents_history_2x, "\n", indices_latents_history_4x)

    # print(
    #     indices_latents,
    #     "\n",
    #     indices_clean_latents,
    #     "\n",
    #     indices_latents_history_2x,
    #     "\n",
    #     indices_latents_history_4x,
    # )

    # Test get_pyramid_input
    # model_input = torch.randn([2, 16, 10, 48, 80], device=device)
    # noisy_model_input_list, sigmas_list, timesteps_list, targets_list = get_pyramid_input(
    #     args=args,
    #     scheduler=scheduler,
    #     latents=model_input,
    #     pyramid_stage_num=3,
    #     pyramid_sample_ratios=[1, 2, 1],
    #     pyramid_sample_mode="stream_sample",
    #     stream_chunk_size=3,
    #     pyramid_stream_inference_steps=[10, 10, 10],
    # )

    # if isinstance(noisy_model_input_list[0], list):
    #     total_sample_count = sum(y.shape[0] for x in noisy_model_input_list for y in x)
    # else:
    #     total_sample_count = sum(x.shape[0] for x in noisy_model_input_list)
    # batch_size = model_input.shape[0]
|
exp_code/1_benchmark/1.py
ADDED
|
@@ -0,0 +1,748 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from causvid.models.wan.wan_base.modules.attention import attention
|
| 2 |
+
from causvid.models.wan.wan_base.modules.model import (
|
| 3 |
+
WanRMSNorm,
|
| 4 |
+
rope_apply,
|
| 5 |
+
WanLayerNorm,
|
| 6 |
+
WAN_CROSSATTENTION_CLASSES,
|
| 7 |
+
Head,
|
| 8 |
+
rope_params,
|
| 9 |
+
MLPProj,
|
| 10 |
+
sinusoidal_embedding_1d
|
| 11 |
+
)
|
| 12 |
+
from torch.nn.attention.flex_attention import create_block_mask, flex_attention
|
| 13 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| 14 |
+
from torch.nn.attention.flex_attention import BlockMask
|
| 15 |
+
from diffusers.models.modeling_utils import ModelMixin
|
| 16 |
+
import torch.nn as nn
|
| 17 |
+
import torch
|
| 18 |
+
import math
|
| 19 |
+
|
| 20 |
+
# wan 1.3B model has a weird channel / head configurations and require max-autotune to work with flexattention
|
| 21 |
+
# see https://github.com/pytorch/pytorch/issues/133254
|
| 22 |
+
# change to default for other models
|
| 23 |
+
flex_attention = torch.compile(
|
| 24 |
+
flex_attention, dynamic=False, mode="max-autotune")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def causal_rope_apply(x, grid_sizes, freqs, start_frame=0):
    """Apply rotary position embedding with a temporal offset.

    Identical to ``rope_apply`` except that the temporal part of the
    frequency table is indexed starting at ``start_frame`` instead of 0,
    which is what a KV-cached (causal) forward pass needs.

    Args:
        x: Tensor of shape [B, L, num_heads, head_dim].
        grid_sizes: Long tensor of shape [B, 3] holding (F, H, W) per sample.
        freqs: Complex rope frequency table, shape [1024, head_dim / 2].
        start_frame: Temporal index of the first frame contained in ``x``.

    Returns:
        Tensor with the same shape and dtype as ``x``.
    """
    num_heads, half_dim = x.size(2), x.size(3) // 2

    # Split the frequency table into temporal / height / width parts.
    freqs_t, freqs_h, freqs_w = freqs.split(
        [half_dim - 2 * (half_dim // 3), half_dim // 3, half_dim // 3], dim=1)

    results = []
    for i, (f, h, w) in enumerate(grid_sizes.tolist()):
        seq_len = f * h * w

        # View the valid tokens as complex pairs for rotary multiplication.
        tokens = torch.view_as_complex(
            x[i, :seq_len].to(torch.float64).reshape(seq_len, num_heads, -1, 2))
        # Temporal freqs are offset by start_frame; spatial freqs start at 0.
        sample_freqs = torch.cat([
            freqs_t[start_frame:start_frame + f].view(f, 1, 1, -1).expand(f, h, w, -1),
            freqs_h[:h].view(1, h, 1, -1).expand(f, h, w, -1),
            freqs_w[:w].view(1, 1, w, -1).expand(f, h, w, -1),
        ], dim=-1).reshape(seq_len, 1, -1)

        # Rotate and go back to interleaved real representation.
        rotated = torch.view_as_real(tokens * sample_freqs).flatten(2)
        # Tokens beyond seq_len (padding) pass through untouched.
        rotated = torch.cat([rotated, x[i, seq_len:]])

        results.append(rotated)
    return torch.stack(results).type_as(x)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class CausalWanSelfAttention(nn.Module):
    """Block-wise causal self-attention for video latents.

    Two execution modes:
      * training (``kv_cache is None``): the full token sequence is processed
        at once with ``flex_attention`` under a block-causal ``block_mask``;
      * inference (``kv_cache`` given): only the current chunk is processed;
        its roped keys / values are written into the rolling cache and
        attention is computed against every cached position up to
        ``current_end``.
    """

    def __init__(self,
                 dim,
                 num_heads,
                 window_size=(-1, -1),
                 qk_norm=True,
                 eps=1e-6):
        assert dim % num_heads == 0
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.eps = eps

        # projection layers
        self.q = nn.Linear(dim, dim)
        self.k = nn.Linear(dim, dim)
        self.v = nn.Linear(dim, dim)
        self.o = nn.Linear(dim, dim)
        self.norm_q = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()
        self.norm_k = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()

    def forward(self, x, seq_lens, grid_sizes, freqs, block_mask, kv_cache=None, current_start=0, current_end=0):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
            seq_lens(Tensor): Shape [B]
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
            block_mask (BlockMask): block-wise causal mask (training path only)
            kv_cache (dict, optional): {"k": ..., "v": ...} rolling cache; when
                given, the KV-cached inference path is used.
            current_start (int): first token index (in the full sequence)
                covered by this call; used to place entries in the cache and to
                offset the temporal rope.
            current_end (int): one past the last token index of this call.
        """
        b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim

        # query, key, value projections (with optional RMS norm on q/k)
        def qkv_fn(x):
            q = self.norm_q(self.q(x)).view(b, s, n, d)
            k = self.norm_k(self.k(x)).view(b, s, n, d)
            v = self.v(x).view(b, s, n, d)
            return q, k, v

        q, k, v = qkv_fn(x)

        if kv_cache is None:
            # --- training path: full-sequence flexattention ---
            roped_query = rope_apply(q, grid_sizes, freqs).type_as(v)
            roped_key = rope_apply(k, grid_sizes, freqs).type_as(v)

            # flexattention wants sequence lengths that are multiples of 128,
            # so right-pad q/k/v with zeros (may be 0 padding).
            padded_length = math.ceil(q.shape[1] / 128) * 128 - q.shape[1]
            padded_roped_query = torch.cat(
                [roped_query,
                 torch.zeros([q.shape[0], padded_length, q.shape[2], q.shape[3]],
                             device=q.device, dtype=v.dtype)],
                dim=1
            )

            padded_roped_key = torch.cat(
                [roped_key, torch.zeros([k.shape[0], padded_length, k.shape[2], k.shape[3]],
                                        device=k.device, dtype=v.dtype)],
                dim=1
            )

            padded_v = torch.cat(
                [v, torch.zeros([v.shape[0], padded_length, v.shape[2], v.shape[3]],
                                device=v.device, dtype=v.dtype)],
                dim=1
            )

            out = flex_attention(
                query=padded_roped_query.transpose(2, 1),
                key=padded_roped_key.transpose(2, 1),
                value=padded_v.transpose(2, 1),
                block_mask=block_mask
            )
            # BUGFIX: the original sliced with [:, :, :-padded_length], which
            # returns an EMPTY tensor when padded_length == 0 (i.e. the
            # sequence length is already a multiple of 128). Slice to the true
            # sequence length instead.
            x = out[:, :, :s].transpose(2, 1)
        else:
            # --- inference path: KV-cached attention for the current chunk ---
            # Temporal rope is offset by the number of frames already cached.
            roped_query = causal_rope_apply(
                q, grid_sizes, freqs, start_frame=current_start // math.prod(grid_sizes[0][1:]).item()).type_as(v)
            roped_key = causal_rope_apply(
                k, grid_sizes, freqs, start_frame=current_start // math.prod(grid_sizes[0][1:]).item()).type_as(v)

            # Write this chunk into the rolling cache, then attend over
            # everything cached so far (block-causal by construction).
            kv_cache["k"][:, current_start:current_end] = roped_key
            kv_cache["v"][:, current_start:current_end] = v

            x = attention(roped_query, kv_cache["k"][:, :current_end], kv_cache["v"][:, :current_end])

        # output projection
        x = x.flatten(2)
        x = self.o(x)
        return x
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class CausalWanAttentionBlock(nn.Module):
    # Transformer block: block-causal self-attention, cross-attention to the
    # text (and optionally CLIP image) context, and a gated FFN. Per-frame
    # adaptive modulation (six chunks of `e`) follows the DiT recipe.

    def __init__(self,
                 cross_attn_type,
                 dim,
                 ffn_dim,
                 num_heads,
                 window_size=(-1, -1),
                 qk_norm=True,
                 cross_attn_norm=False,
                 eps=1e-6):
        super().__init__()
        self.dim = dim
        self.ffn_dim = ffn_dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.cross_attn_norm = cross_attn_norm
        self.eps = eps

        # layers
        self.norm1 = WanLayerNorm(dim, eps)
        self.self_attn = CausalWanSelfAttention(dim, num_heads, window_size, qk_norm,
                                                eps)
        self.norm3 = WanLayerNorm(
            dim, eps,
            elementwise_affine=True) if cross_attn_norm else nn.Identity()
        self.cross_attn = WAN_CROSSATTENTION_CLASSES[cross_attn_type](dim,
                                                                      num_heads,
                                                                      (-1, -1),
                                                                      qk_norm,
                                                                      eps)
        self.norm2 = WanLayerNorm(dim, eps)
        self.ffn = nn.Sequential(
            nn.Linear(dim, ffn_dim), nn.GELU(approximate='tanh'),
            nn.Linear(ffn_dim, dim))

        # learned modulation added to the per-frame conditioning signal
        self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)

    def forward(
        self,
        x,
        e,
        seq_lens,
        grid_sizes,
        freqs,
        context,
        context_lens,
        block_mask,
        kv_cache=None,
        crossattn_cache=None,
        current_start=0,
        current_end=0
    ):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
            e(Tensor): Shape [B, F, 6, C] — per-frame modulation signal
            seq_lens(Tensor): Shape [B], length of each sequence in batch
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
            block_mask: block-wise causal mask (training path)
            kv_cache / crossattn_cache: per-block caches (inference path)
            current_start / current_end: token range covered by this call
        """
        # Tokens per latent frame; x is laid out as [B, F * frame_seqlen, C].
        num_frames, frame_seqlen = e.shape[1], x.shape[1] // e.shape[1]
        # Add learned modulation, then split into 6 per-frame gates:
        # e[0..2] = shift/scale/gate for self-attn, e[3..5] = same for FFN.
        e = (self.modulation.unsqueeze(1) + e).chunk(6, dim=2)

        # self-attention on the (per-frame) modulated input
        y = self.self_attn(
            (self.norm1(x).unflatten(dim=1, sizes=(num_frames, frame_seqlen))
             * (1 + e[1]) + e[0]).flatten(1, 2),
            seq_lens, grid_sizes,
            freqs, block_mask, kv_cache, current_start, current_end)

        # gated residual
        x = x + (y.unflatten(dim=1, sizes=(num_frames, frame_seqlen))
                 * e[2]).flatten(1, 2)

        # cross-attention & ffn function
        def cross_attn_ffn(x, context, context_lens, e, crossattn_cache=None):
            x = x + self.cross_attn(self.norm3(x), context,
                                    context_lens, crossattn_cache=crossattn_cache)
            y = self.ffn(
                (self.norm2(x).unflatten(dim=1, sizes=(num_frames,
                                                       frame_seqlen)) * (1 + e[4]) + e[3]).flatten(1, 2)
            )
            # gated residual for the FFN branch
            x = x + (y.unflatten(dim=1, sizes=(num_frames,
                                               frame_seqlen)) * e[5]).flatten(1, 2)
            return x

        x = cross_attn_ffn(x, context, context_lens, e, crossattn_cache)
        return x
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class CausalHead(nn.Module):
    """Final projection head.

    Applies a per-frame modulated LayerNorm, then a linear map from the
    hidden dimension to ``out_dim * prod(patch_size)`` channels per token.
    """

    def __init__(self, dim, out_dim, patch_size, eps=1e-6):
        super().__init__()
        self.dim = dim
        self.out_dim = out_dim
        self.patch_size = patch_size
        self.eps = eps

        # output layers: each token predicts one full patch worth of channels
        self.norm = WanLayerNorm(dim, eps)
        self.head = nn.Linear(dim, math.prod(patch_size) * out_dim)

        # learned modulation (shift, scale) added to the conditioning signal
        self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)

    def forward(self, x, e):
        r"""
        Args:
            x(Tensor): Shape [B, L1, C]
            e(Tensor): Shape [B, F, 1, C]
        """
        num_frames = e.shape[1]
        frame_seqlen = x.shape[1] // num_frames
        # Split the (biased) conditioning into shift and scale gates.
        shift, scale = (self.modulation.unsqueeze(1) + e).chunk(2, dim=2)
        normed = self.norm(x).unflatten(dim=1, sizes=(num_frames, frame_seqlen))
        return self.head(normed * (1 + scale) + shift)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class CausalWanModel(ModelMixin, ConfigMixin):
|
| 284 |
+
r"""
|
| 285 |
+
Wan diffusion backbone supporting both text-to-video and image-to-video.
|
| 286 |
+
"""
|
| 287 |
+
|
| 288 |
+
ignore_for_config = [
|
| 289 |
+
'patch_size', 'cross_attn_norm', 'qk_norm', 'text_dim', 'window_size'
|
| 290 |
+
]
|
| 291 |
+
_no_split_modules = ['WanAttentionBlock']
|
| 292 |
+
_supports_gradient_checkpointing = True
|
| 293 |
+
|
| 294 |
+
@register_to_config
|
| 295 |
+
def __init__(self,
|
| 296 |
+
model_type='t2v',
|
| 297 |
+
patch_size=(1, 2, 2),
|
| 298 |
+
text_len=512,
|
| 299 |
+
in_dim=16,
|
| 300 |
+
dim=2048,
|
| 301 |
+
ffn_dim=8192,
|
| 302 |
+
freq_dim=256,
|
| 303 |
+
text_dim=4096,
|
| 304 |
+
out_dim=16,
|
| 305 |
+
num_heads=16,
|
| 306 |
+
num_layers=32,
|
| 307 |
+
window_size=(-1, -1),
|
| 308 |
+
qk_norm=True,
|
| 309 |
+
cross_attn_norm=True,
|
| 310 |
+
eps=1e-6):
|
| 311 |
+
r"""
|
| 312 |
+
Initialize the diffusion model backbone.
|
| 313 |
+
|
| 314 |
+
Args:
|
| 315 |
+
model_type (`str`, *optional*, defaults to 't2v'):
|
| 316 |
+
Model variant - 't2v' (text-to-video) or 'i2v' (image-to-video)
|
| 317 |
+
patch_size (`tuple`, *optional*, defaults to (1, 2, 2)):
|
| 318 |
+
3D patch dimensions for video embedding (t_patch, h_patch, w_patch)
|
| 319 |
+
text_len (`int`, *optional*, defaults to 512):
|
| 320 |
+
Fixed length for text embeddings
|
| 321 |
+
in_dim (`int`, *optional*, defaults to 16):
|
| 322 |
+
Input video channels (C_in)
|
| 323 |
+
dim (`int`, *optional*, defaults to 2048):
|
| 324 |
+
Hidden dimension of the transformer
|
| 325 |
+
ffn_dim (`int`, *optional*, defaults to 8192):
|
| 326 |
+
Intermediate dimension in feed-forward network
|
| 327 |
+
freq_dim (`int`, *optional*, defaults to 256):
|
| 328 |
+
Dimension for sinusoidal time embeddings
|
| 329 |
+
text_dim (`int`, *optional*, defaults to 4096):
|
| 330 |
+
Input dimension for text embeddings
|
| 331 |
+
out_dim (`int`, *optional*, defaults to 16):
|
| 332 |
+
Output video channels (C_out)
|
| 333 |
+
num_heads (`int`, *optional*, defaults to 16):
|
| 334 |
+
Number of attention heads
|
| 335 |
+
num_layers (`int`, *optional*, defaults to 32):
|
| 336 |
+
Number of transformer blocks
|
| 337 |
+
window_size (`tuple`, *optional*, defaults to (-1, -1)):
|
| 338 |
+
Window size for local attention (-1 indicates global attention)
|
| 339 |
+
qk_norm (`bool`, *optional*, defaults to True):
|
| 340 |
+
Enable query/key normalization
|
| 341 |
+
cross_attn_norm (`bool`, *optional*, defaults to False):
|
| 342 |
+
Enable cross-attention normalization
|
| 343 |
+
eps (`float`, *optional*, defaults to 1e-6):
|
| 344 |
+
Epsilon value for normalization layers
|
| 345 |
+
"""
|
| 346 |
+
|
| 347 |
+
super().__init__()
|
| 348 |
+
|
| 349 |
+
assert model_type in ['t2v', 'i2v']
|
| 350 |
+
self.model_type = model_type
|
| 351 |
+
|
| 352 |
+
self.patch_size = patch_size
|
| 353 |
+
self.text_len = text_len
|
| 354 |
+
self.in_dim = in_dim
|
| 355 |
+
self.dim = dim
|
| 356 |
+
self.ffn_dim = ffn_dim
|
| 357 |
+
self.freq_dim = freq_dim
|
| 358 |
+
self.text_dim = text_dim
|
| 359 |
+
self.out_dim = out_dim
|
| 360 |
+
self.num_heads = num_heads
|
| 361 |
+
self.num_layers = num_layers
|
| 362 |
+
self.window_size = window_size
|
| 363 |
+
self.qk_norm = qk_norm
|
| 364 |
+
self.cross_attn_norm = cross_attn_norm
|
| 365 |
+
self.eps = eps
|
| 366 |
+
|
| 367 |
+
# embeddings
|
| 368 |
+
self.patch_embedding = nn.Conv3d(
|
| 369 |
+
in_dim, dim, kernel_size=patch_size, stride=patch_size)
|
| 370 |
+
self.text_embedding = nn.Sequential(
|
| 371 |
+
nn.Linear(text_dim, dim), nn.GELU(approximate='tanh'),
|
| 372 |
+
nn.Linear(dim, dim))
|
| 373 |
+
|
| 374 |
+
self.time_embedding = nn.Sequential(
|
| 375 |
+
nn.Linear(freq_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
|
| 376 |
+
self.time_projection = nn.Sequential(
|
| 377 |
+
nn.SiLU(), nn.Linear(dim, dim * 6))
|
| 378 |
+
|
| 379 |
+
# blocks
|
| 380 |
+
cross_attn_type = 't2v_cross_attn' if model_type == 't2v' else 'i2v_cross_attn'
|
| 381 |
+
self.blocks = nn.ModuleList([
|
| 382 |
+
CausalWanAttentionBlock(cross_attn_type, dim, ffn_dim, num_heads,
|
| 383 |
+
window_size, qk_norm, cross_attn_norm, eps)
|
| 384 |
+
for _ in range(num_layers)
|
| 385 |
+
])
|
| 386 |
+
|
| 387 |
+
# head
|
| 388 |
+
self.head = CausalHead(dim, out_dim, patch_size, eps)
|
| 389 |
+
|
| 390 |
+
# buffers (don't use register_buffer otherwise dtype will be changed in to())
|
| 391 |
+
assert (dim % num_heads) == 0 and (dim // num_heads) % 2 == 0
|
| 392 |
+
d = dim // num_heads
|
| 393 |
+
self.freqs = torch.cat([
|
| 394 |
+
rope_params(1024, d - 4 * (d // 6)),
|
| 395 |
+
rope_params(1024, 2 * (d // 6)),
|
| 396 |
+
rope_params(1024, 2 * (d // 6))
|
| 397 |
+
],
|
| 398 |
+
dim=1)
|
| 399 |
+
|
| 400 |
+
if model_type == 'i2v':
|
| 401 |
+
self.img_emb = MLPProj(1280, dim)
|
| 402 |
+
|
| 403 |
+
# initialize weights
|
| 404 |
+
self.init_weights()
|
| 405 |
+
|
| 406 |
+
self.gradient_checkpointing = False
|
| 407 |
+
|
| 408 |
+
self.block_mask = None
|
| 409 |
+
|
| 410 |
+
self.num_frame_per_block = 1
|
| 411 |
+
|
| 412 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
| 413 |
+
self.gradient_checkpointing = value
|
| 414 |
+
|
| 415 |
+
@staticmethod
|
| 416 |
+
def _prepare_blockwise_causal_attn_mask(
|
| 417 |
+
device: torch.device | str, num_frames: int = 21,
|
| 418 |
+
frame_seqlen: int = 1560, num_frame_per_block=1
|
| 419 |
+
) -> BlockMask:
|
| 420 |
+
"""
|
| 421 |
+
we will divide the token sequence into the following format
|
| 422 |
+
[1 latent frame] [1 latent frame] ... [1 latent frame]
|
| 423 |
+
We use flexattention to construct the attention mask
|
| 424 |
+
"""
|
| 425 |
+
total_length = num_frames * frame_seqlen
|
| 426 |
+
|
| 427 |
+
# we do right padding to get to a multiple of 128
|
| 428 |
+
padded_length = math.ceil(total_length / 128) * 128 - total_length
|
| 429 |
+
|
| 430 |
+
ends = torch.zeros(total_length + padded_length,
|
| 431 |
+
device=device, dtype=torch.long)
|
| 432 |
+
|
| 433 |
+
# Block-wise causal mask will attend to all elements that are before the end of the current chunk
|
| 434 |
+
frame_indices = torch.arange(
|
| 435 |
+
start=0,
|
| 436 |
+
end=total_length,
|
| 437 |
+
step=frame_seqlen * num_frame_per_block,
|
| 438 |
+
device=device
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
for tmp in frame_indices:
|
| 442 |
+
ends[tmp:tmp + frame_seqlen * num_frame_per_block] = tmp + \
|
| 443 |
+
frame_seqlen * num_frame_per_block
|
| 444 |
+
|
| 445 |
+
def attention_mask(b, h, q_idx, kv_idx):
|
| 446 |
+
return (kv_idx < ends[q_idx]) | (q_idx == kv_idx)
|
| 447 |
+
# return ((kv_idx < total_length) & (q_idx < total_length)) | (q_idx == kv_idx) # bidirectional mask
|
| 448 |
+
|
| 449 |
+
block_mask = create_block_mask(attention_mask, B=None, H=None, Q_LEN=total_length + padded_length,
|
| 450 |
+
KV_LEN=total_length + padded_length, _compile=False, device=device)
|
| 451 |
+
|
| 452 |
+
import torch.distributed as dist
|
| 453 |
+
if not dist.is_initialized() or dist.get_rank() == 0:
|
| 454 |
+
print(
|
| 455 |
+
f" cache a block wise causal mask with block size of {num_frame_per_block} frames")
|
| 456 |
+
print(block_mask)
|
| 457 |
+
|
| 458 |
+
return block_mask
|
| 459 |
+
|
| 460 |
+
def _forward_inference(
|
| 461 |
+
self,
|
| 462 |
+
x,
|
| 463 |
+
t,
|
| 464 |
+
context,
|
| 465 |
+
seq_len,
|
| 466 |
+
clip_fea=None,
|
| 467 |
+
y=None,
|
| 468 |
+
kv_cache: dict = None,
|
| 469 |
+
crossattn_cache: dict = None,
|
| 470 |
+
current_start: int = 0,
|
| 471 |
+
current_end: int = 0
|
| 472 |
+
):
|
| 473 |
+
r"""
|
| 474 |
+
Run the diffusion model with kv caching.
|
| 475 |
+
See Algorithm 2 of CausVid paper https://arxiv.org/abs/2412.07772 for details.
|
| 476 |
+
This function will be run for num_frame times.
|
| 477 |
+
Process the latent frames one by one (1560 tokens each)
|
| 478 |
+
|
| 479 |
+
Args:
|
| 480 |
+
x (List[Tensor]):
|
| 481 |
+
List of input video tensors, each with shape [C_in, F, H, W]
|
| 482 |
+
t (Tensor):
|
| 483 |
+
Diffusion timesteps tensor of shape [B]
|
| 484 |
+
context (List[Tensor]):
|
| 485 |
+
List of text embeddings each with shape [L, C]
|
| 486 |
+
seq_len (`int`):
|
| 487 |
+
Maximum sequence length for positional encoding
|
| 488 |
+
clip_fea (Tensor, *optional*):
|
| 489 |
+
CLIP image features for image-to-video mode
|
| 490 |
+
y (List[Tensor], *optional*):
|
| 491 |
+
Conditional video inputs for image-to-video mode, same shape as x
|
| 492 |
+
|
| 493 |
+
Returns:
|
| 494 |
+
List[Tensor]:
|
| 495 |
+
List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
|
| 496 |
+
"""
|
| 497 |
+
if self.model_type == 'i2v':
|
| 498 |
+
assert clip_fea is not None and y is not None
|
| 499 |
+
# params
|
| 500 |
+
device = self.patch_embedding.weight.device
|
| 501 |
+
if self.freqs.device != device:
|
| 502 |
+
self.freqs = self.freqs.to(device)
|
| 503 |
+
|
| 504 |
+
if y is not None:
|
| 505 |
+
x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
|
| 506 |
+
|
| 507 |
+
# embeddings
|
| 508 |
+
x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
|
| 509 |
+
grid_sizes = torch.stack(
|
| 510 |
+
[torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
|
| 511 |
+
x = [u.flatten(2).transpose(1, 2) for u in x]
|
| 512 |
+
seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
|
| 513 |
+
assert seq_lens.max() <= seq_len
|
| 514 |
+
x = torch.cat(x)
|
| 515 |
+
"""
|
| 516 |
+
torch.cat([
|
| 517 |
+
torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))],
|
| 518 |
+
dim=1) for u in x
|
| 519 |
+
])
|
| 520 |
+
"""
|
| 521 |
+
|
| 522 |
+
# time embeddings
|
| 523 |
+
# with amp.autocast(dtype=torch.float32):
|
| 524 |
+
e = self.time_embedding(
|
| 525 |
+
sinusoidal_embedding_1d(self.freq_dim, t.flatten()).type_as(x))
|
| 526 |
+
e0 = self.time_projection(e).unflatten(
|
| 527 |
+
1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)
|
| 528 |
+
# assert e.dtype == torch.float32 and e0.dtype == torch.float32
|
| 529 |
+
|
| 530 |
+
# context
|
| 531 |
+
context_lens = None
|
| 532 |
+
context = self.text_embedding(
|
| 533 |
+
torch.stack([
|
| 534 |
+
torch.cat(
|
| 535 |
+
[u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
|
| 536 |
+
for u in context
|
| 537 |
+
]))
|
| 538 |
+
|
| 539 |
+
if clip_fea is not None:
|
| 540 |
+
context_clip = self.img_emb(clip_fea) # bs x 257 x dim
|
| 541 |
+
context = torch.concat([context_clip, context], dim=1)
|
| 542 |
+
|
| 543 |
+
# arguments
|
| 544 |
+
kwargs = dict(
|
| 545 |
+
e=e0,
|
| 546 |
+
seq_lens=seq_lens,
|
| 547 |
+
grid_sizes=grid_sizes,
|
| 548 |
+
freqs=self.freqs,
|
| 549 |
+
context=context,
|
| 550 |
+
context_lens=context_lens,
|
| 551 |
+
block_mask=self.block_mask
|
| 552 |
+
)
|
| 553 |
+
|
| 554 |
+
def create_custom_forward(module):
|
| 555 |
+
def custom_forward(*inputs, **kwargs):
|
| 556 |
+
return module(*inputs, **kwargs)
|
| 557 |
+
return custom_forward
|
| 558 |
+
|
| 559 |
+
for block_index, block in enumerate(self.blocks):
|
| 560 |
+
if torch.is_grad_enabled() and self.gradient_checkpointing:
|
| 561 |
+
assert False
|
| 562 |
+
else:
|
| 563 |
+
kwargs.update(
|
| 564 |
+
{
|
| 565 |
+
"kv_cache": kv_cache[block_index],
|
| 566 |
+
"crossattn_cache": crossattn_cache[block_index],
|
| 567 |
+
"current_start": current_start,
|
| 568 |
+
"current_end": current_end
|
| 569 |
+
}
|
| 570 |
+
)
|
| 571 |
+
x = block(x, **kwargs)
|
| 572 |
+
|
| 573 |
+
# head
|
| 574 |
+
x = self.head(x, e.unflatten(dim=0, sizes=t.shape).unsqueeze(2))
|
| 575 |
+
|
| 576 |
+
# unpatchify
|
| 577 |
+
x = self.unpatchify(x, grid_sizes)
|
| 578 |
+
return torch.stack(x)
|
| 579 |
+
|
| 580 |
+
def _forward_train(
|
| 581 |
+
self,
|
| 582 |
+
x,
|
| 583 |
+
t,
|
| 584 |
+
context,
|
| 585 |
+
seq_len,
|
| 586 |
+
clip_fea=None,
|
| 587 |
+
y=None,
|
| 588 |
+
):
|
| 589 |
+
r"""
|
| 590 |
+
Forward pass through the diffusion model
|
| 591 |
+
|
| 592 |
+
Args:
|
| 593 |
+
x (List[Tensor]):
|
| 594 |
+
List of input video tensors, each with shape [C_in, F, H, W]
|
| 595 |
+
t (Tensor):
|
| 596 |
+
Diffusion timesteps tensor of shape [B]
|
| 597 |
+
context (List[Tensor]):
|
| 598 |
+
List of text embeddings each with shape [L, C]
|
| 599 |
+
seq_len (`int`):
|
| 600 |
+
Maximum sequence length for positional encoding
|
| 601 |
+
clip_fea (Tensor, *optional*):
|
| 602 |
+
CLIP image features for image-to-video mode
|
| 603 |
+
y (List[Tensor], *optional*):
|
| 604 |
+
Conditional video inputs for image-to-video mode, same shape as x
|
| 605 |
+
|
| 606 |
+
Returns:
|
| 607 |
+
List[Tensor]:
|
| 608 |
+
List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
|
| 609 |
+
"""
|
| 610 |
+
if self.model_type == 'i2v':
|
| 611 |
+
assert clip_fea is not None and y is not None
|
| 612 |
+
# params
|
| 613 |
+
device = self.patch_embedding.weight.device
|
| 614 |
+
if self.freqs.device != device:
|
| 615 |
+
self.freqs = self.freqs.to(device)
|
| 616 |
+
|
| 617 |
+
# Construct blockwise causal attn mask
|
| 618 |
+
if self.block_mask is None:
|
| 619 |
+
self.block_mask = self._prepare_blockwise_causal_attn_mask(
|
| 620 |
+
device, num_frames=x.shape[2],
|
| 621 |
+
frame_seqlen=x.shape[-2] *
|
| 622 |
+
x.shape[-1] // (self.patch_size[1] * self.patch_size[2]),
|
| 623 |
+
num_frame_per_block=self.num_frame_per_block
|
| 624 |
+
)
|
| 625 |
+
|
| 626 |
+
if y is not None:
|
| 627 |
+
x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
|
| 628 |
+
|
| 629 |
+
# embeddings
|
| 630 |
+
x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
|
| 631 |
+
grid_sizes = torch.stack(
|
| 632 |
+
[torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
|
| 633 |
+
x = [u.flatten(2).transpose(1, 2) for u in x]
|
| 634 |
+
seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
|
| 635 |
+
assert seq_lens.max() <= seq_len
|
| 636 |
+
x = torch.cat([torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1) for u in x])
|
| 637 |
+
|
| 638 |
+
# time embeddings
|
| 639 |
+
# with amp.autocast(dtype=torch.float32):
|
| 640 |
+
e = self.time_embedding(
|
| 641 |
+
sinusoidal_embedding_1d(self.freq_dim, t.flatten()).type_as(x))
|
| 642 |
+
e0 = self.time_projection(e).unflatten(
|
| 643 |
+
1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)
|
| 644 |
+
# assert e.dtype == torch.float32 and e0.dtype == torch.float32
|
| 645 |
+
|
| 646 |
+
# context
|
| 647 |
+
context_lens = None
|
| 648 |
+
context = self.text_embedding(
|
| 649 |
+
torch.stack([
|
| 650 |
+
torch.cat(
|
| 651 |
+
[u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
|
| 652 |
+
for u in context
|
| 653 |
+
]))
|
| 654 |
+
|
| 655 |
+
if clip_fea is not None:
|
| 656 |
+
context_clip = self.img_emb(clip_fea) # bs x 257 x dim
|
| 657 |
+
context = torch.concat([context_clip, context], dim=1)
|
| 658 |
+
|
| 659 |
+
# arguments
|
| 660 |
+
kwargs = dict(
|
| 661 |
+
e=e0,
|
| 662 |
+
seq_lens=seq_lens,
|
| 663 |
+
grid_sizes=grid_sizes,
|
| 664 |
+
freqs=self.freqs,
|
| 665 |
+
context=context,
|
| 666 |
+
context_lens=context_lens,
|
| 667 |
+
block_mask=self.block_mask)
|
| 668 |
+
|
| 669 |
+
def create_custom_forward(module):
|
| 670 |
+
def custom_forward(*inputs, **kwargs):
|
| 671 |
+
return module(*inputs, **kwargs)
|
| 672 |
+
return custom_forward
|
| 673 |
+
|
| 674 |
+
for block in self.blocks:
|
| 675 |
+
if torch.is_grad_enabled() and self.gradient_checkpointing:
|
| 676 |
+
x = torch.utils.checkpoint.checkpoint(
|
| 677 |
+
create_custom_forward(block),
|
| 678 |
+
x, **kwargs,
|
| 679 |
+
use_reentrant=False,
|
| 680 |
+
)
|
| 681 |
+
else:
|
| 682 |
+
x = block(x, **kwargs)
|
| 683 |
+
|
| 684 |
+
# head
|
| 685 |
+
x = self.head(x, e.unflatten(dim=0, sizes=t.shape).unsqueeze(2))
|
| 686 |
+
|
| 687 |
+
# unpatchify
|
| 688 |
+
x = self.unpatchify(x, grid_sizes)
|
| 689 |
+
return torch.stack(x)
|
| 690 |
+
|
| 691 |
+
def forward(
|
| 692 |
+
self,
|
| 693 |
+
*args,
|
| 694 |
+
**kwargs
|
| 695 |
+
):
|
| 696 |
+
if kwargs.get('kv_cache', None) is not None:
|
| 697 |
+
return self._forward_inference(*args, **kwargs)
|
| 698 |
+
else:
|
| 699 |
+
return self._forward_train(*args, **kwargs)
|
| 700 |
+
|
| 701 |
+
def unpatchify(self, x, grid_sizes):
    r"""
    Reconstruct video tensors from patch embeddings.

    Args:
        x (List[Tensor]):
            List of patchified features, each with shape [L, C_out * prod(patch_size)]
        grid_sizes (Tensor):
            Original spatial-temporal grid dimensions before patching,
            shape [B, 3] (3 dimensions correspond to F_patches, H_patches, W_patches)

    Returns:
        List[Tensor]:
            Reconstructed video tensors with shape [C_out, F, H / 8, W / 8]
    """
    channels = self.out_dim
    videos = []
    for tokens, grid in zip(x, grid_sizes.tolist()):
        num_tokens = math.prod(grid)
        # drop any padding tokens, then expose the per-patch structure:
        # (F, H, W, pt, ph, pw, C)
        patches = tokens[:num_tokens].view(*grid, *self.patch_size, channels)
        # interleave grid and patch axes so each spatial/temporal dim is contiguous
        patches = torch.einsum('fhwpqrc->cfphqwr', patches)
        full_shape = [g * p for g, p in zip(grid, self.patch_size)]
        videos.append(patches.reshape(channels, *full_shape))
    return videos
|
| 725 |
+
|
| 726 |
+
def init_weights(self):
    r"""
    Initialize model parameters.

    Xavier-uniform for all linear layers (zero bias), Xavier-uniform for the
    patch embedding, small-normal (std=0.02) for the text/time embedding
    MLPs, and zeros for the final head so the model starts as an identity
    in the residual stream.
    """
    # base init: every linear layer gets Xavier weights and zero bias
    for module in self.modules():
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    # embedding-specific overrides
    nn.init.xavier_uniform_(self.patch_embedding.weight.flatten(1))
    for embedding in (self.text_embedding, self.time_embedding):
        for module in embedding.modules():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, std=.02)

    # output layer starts at zero
    nn.init.zeros_(self.head.head.weight)
|
exp_code/1_benchmark/2.py
ADDED
|
@@ -0,0 +1,1059 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from wan.modules.attention import attention
|
| 2 |
+
from wan.modules.model import (
|
| 3 |
+
WanRMSNorm,
|
| 4 |
+
rope_apply,
|
| 5 |
+
WanLayerNorm,
|
| 6 |
+
WAN_CROSSATTENTION_CLASSES,
|
| 7 |
+
rope_params,
|
| 8 |
+
MLPProj,
|
| 9 |
+
sinusoidal_embedding_1d
|
| 10 |
+
)
|
| 11 |
+
from torch.nn.attention.flex_attention import create_block_mask, flex_attention
|
| 12 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| 13 |
+
from torch.nn.attention.flex_attention import BlockMask
|
| 14 |
+
from diffusers.models.modeling_utils import ModelMixin
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
import torch
|
| 17 |
+
import math
|
| 18 |
+
import torch.distributed as dist
|
| 19 |
+
|
| 20 |
+
# wan 1.3B model has a weird channel / head configurations and require max-autotune to work with flexattention
# see https://github.com/pytorch/pytorch/issues/133254
# change to default for other models
# NOTE: this deliberately shadows the imported `flex_attention` with its
# compiled wrapper; all call sites below use the compiled version.
flex_attention = torch.compile(
    flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def causal_rope_apply(x, grid_sizes, freqs, start_frame=0):
    """Apply 3D rotary position embedding with a temporal offset.

    Same math as ``rope_apply`` except the temporal frequency table is read
    starting at ``start_frame``, so tokens generated mid-stream (with a KV
    cache) get rope positions consistent with their absolute frame index.

    Args:
        x: Shape [B, L, num_heads, head_dim].
        grid_sizes: Shape [B, 3]; each row is (F, H, W) in patches.
        freqs: Precomputed rope frequencies, shape [1024, head_dim / 2].
        start_frame: Absolute index of the first frame represented in ``x``.

    Returns:
        Tensor with the same shape and dtype as ``x``.
    """
    n, c = x.size(2), x.size(3) // 2

    # split freqs into temporal / height / width groups
    freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)

    # loop over samples
    output = []

    for i, (f, h, w) in enumerate(grid_sizes.tolist()):
        seq_len = f * h * w

        # precompute multipliers — rotation is done in complex arithmetic,
        # upcast to float64 for accuracy
        x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float64).reshape(
            seq_len, n, -1, 2))
        # temporal freqs are offset by start_frame; spatial freqs start at 0
        freqs_i = torch.cat([
            freqs[0][start_frame:start_frame + f].view(f, 1, 1, -1).expand(f, h, w, -1),
            freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
            freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1)
        ],
            dim=-1).reshape(seq_len, 1, -1)

        # apply rotary embedding
        x_i = torch.view_as_real(x_i * freqs_i).flatten(2)
        # tokens beyond seq_len (padding, if any) are passed through unrotated
        x_i = torch.cat([x_i, x[i, seq_len:]])

        # append to collection
        output.append(x_i)
    return torch.stack(output).type_as(x)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class CausalWanSelfAttention(nn.Module):
    r"""Self-attention with block-wise causal masking and optional KV caching.

    Training runs flex_attention under a precomputed BlockMask; inference
    maintains a (possibly rolling) KV cache so frames are generated
    autoregressively. ``local_attn_size`` limits attention to the last N
    frames (-1 = global); the first ``sink_size`` frames of the cache are
    never evicted when the cache rolls.
    """

    def __init__(self,
                 dim,
                 num_heads,
                 local_attn_size=-1,
                 sink_size=0,
                 qk_norm=True,
                 eps=1e-6):
        assert dim % num_heads == 0
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.local_attn_size = local_attn_size
        self.sink_size = sink_size
        self.qk_norm = qk_norm
        self.eps = eps
        # Max tokens visible at inference. NOTE(review): assumes 1560 tokens
        # per latent frame (32760 = 21 frames when attention is global) —
        # confirm for other resolutions.
        self.max_attention_size = 32760 if local_attn_size == -1 else local_attn_size * 1560

        # layers
        self.q = nn.Linear(dim, dim)
        self.k = nn.Linear(dim, dim)
        self.v = nn.Linear(dim, dim)
        self.o = nn.Linear(dim, dim)
        self.norm_q = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()
        self.norm_k = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()

    def _flex_attend(self, q, k, v, block_mask):
        """Right-pad q/k/v to a multiple of 128 tokens, run flex_attention,
        and strip the padding from the output.

        The compiled flex_attention kernel expects sequence lengths that are
        multiples of 128, so inputs are zero-padded along the token dim.
        """
        seq_len = q.shape[1]
        pad_len = math.ceil(seq_len / 128) * 128 - seq_len

        def _pad(t):
            if pad_len == 0:
                return t
            zeros = torch.zeros([t.shape[0], pad_len, t.shape[2], t.shape[3]],
                                device=t.device, dtype=t.dtype)
            return torch.cat([t, zeros], dim=1)

        out = flex_attention(
            query=_pad(q).transpose(2, 1),
            key=_pad(k).transpose(2, 1),
            value=_pad(v).transpose(2, 1),
            block_mask=block_mask
        )
        # BUGFIX: slice with an explicit end index. The previous
        # `[:, :, :-pad_len]` returns an EMPTY tensor when pad_len == 0
        # (i.e. the sequence length is already a multiple of 128).
        return out[:, :, :seq_len].transpose(2, 1)

    def forward(
        self,
        x,
        seq_lens,
        grid_sizes,
        freqs,
        block_mask,
        kv_cache=None,
        current_start=0,
        cache_start=None
    ):
        r"""
        Args:
            x(Tensor): Shape [B, L, num_heads, C / num_heads]
            seq_lens(Tensor): Shape [B]
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
            block_mask (BlockMask): causal mask for the training path
                (ignored when kv_cache is given)
            kv_cache (dict | None): inference cache with keys "k", "v",
                "global_end_index", "local_end_index"
            current_start (int): absolute token index of x[:, 0] in the stream
            cache_start (int | None): defaults to current_start
        """
        b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim
        if cache_start is None:
            cache_start = current_start

        # query, key, value projections (optional RMS norm on q/k)
        q = self.norm_q(self.q(x)).view(b, s, n, d)
        k = self.norm_k(self.k(x)).view(b, s, n, d)
        v = self.v(x).view(b, s, n, d)

        if kv_cache is None:
            # ---- training path (flex attention) ----
            # Teacher forcing packs [clean frames | noisy frames]; both halves
            # get identical rope positions, so rope is applied per half.
            is_tf = (s == seq_lens[0].item() * 2)
            if is_tf:
                roped_q_halves, roped_k_halves = [], []
                for q_half, k_half in zip(torch.chunk(q, 2, dim=1),
                                          torch.chunk(k, 2, dim=1)):
                    roped_q_halves.append(
                        rope_apply(q_half, grid_sizes, freqs).type_as(v))
                    roped_k_halves.append(
                        rope_apply(k_half, grid_sizes, freqs).type_as(v))
                roped_query = torch.cat(roped_q_halves, dim=1)
                roped_key = torch.cat(roped_k_halves, dim=1)
            else:
                roped_query = rope_apply(q, grid_sizes, freqs).type_as(v)
                roped_key = rope_apply(k, grid_sizes, freqs).type_as(v)

            x = self._flex_attend(roped_query, roped_key, v, block_mask)
        else:
            # ---- inference path (KV cache) ----
            frame_seqlen = math.prod(grid_sizes[0][1:]).item()
            current_start_frame = current_start // frame_seqlen
            roped_query = causal_rope_apply(
                q, grid_sizes, freqs, start_frame=current_start_frame).type_as(v)
            roped_key = causal_rope_apply(
                k, grid_sizes, freqs, start_frame=current_start_frame).type_as(v)

            current_end = current_start + roped_query.shape[1]
            sink_tokens = self.sink_size * frame_seqlen
            # If we are using local attention and the current KV cache size is
            # larger than the local attention size, we need to truncate the KV cache
            kv_cache_size = kv_cache["k"].shape[1]
            num_new_tokens = roped_query.shape[1]
            if self.local_attn_size != -1 and (current_end > kv_cache["global_end_index"].item()) and (
                    num_new_tokens + kv_cache["local_end_index"].item() > kv_cache_size):
                # Roll the cache: keep the first `sink_tokens`, evict the
                # oldest non-sink tokens, shift the remainder left.
                num_evicted_tokens = num_new_tokens + kv_cache["local_end_index"].item() - kv_cache_size
                num_rolled_tokens = kv_cache["local_end_index"].item() - num_evicted_tokens - sink_tokens
                # Clone the source slice to avoid overlapping-memory errors
                kv_cache["k"][:, sink_tokens:sink_tokens + num_rolled_tokens] = \
                    kv_cache["k"][:, sink_tokens + num_evicted_tokens:sink_tokens + num_evicted_tokens + num_rolled_tokens].clone()
                kv_cache["v"][:, sink_tokens:sink_tokens + num_rolled_tokens] = \
                    kv_cache["v"][:, sink_tokens + num_evicted_tokens:sink_tokens + num_evicted_tokens + num_rolled_tokens].clone()
                # New tokens land at the end, after accounting for eviction
                local_end_index = kv_cache["local_end_index"].item() + current_end - \
                    kv_cache["global_end_index"].item() - num_evicted_tokens
            else:
                # Enough room: append directly up to current_end
                local_end_index = kv_cache["local_end_index"].item() + current_end - kv_cache["global_end_index"].item()
            local_start_index = local_end_index - num_new_tokens
            kv_cache["k"][:, local_start_index:local_end_index] = roped_key
            kv_cache["v"][:, local_start_index:local_end_index] = v

            # Attend over at most the last `max_attention_size` cached tokens
            x = attention(
                roped_query,
                kv_cache["k"][:, max(0, local_end_index - self.max_attention_size):local_end_index],
                kv_cache["v"][:, max(0, local_end_index - self.max_attention_size):local_end_index]
            )
            kv_cache["global_end_index"].fill_(current_end)
            kv_cache["local_end_index"].fill_(local_end_index)

        # output projection
        x = x.flatten(2)
        x = self.o(x)
        return x
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class CausalWanAttentionBlock(nn.Module):
    r"""Transformer block: causal self-attention, cross-attention, and FFN,
    each gated by per-frame adaLN modulation (6 shift/scale/gate tensors).
    """

    def __init__(self,
                 cross_attn_type,
                 dim,
                 ffn_dim,
                 num_heads,
                 local_attn_size=-1,
                 sink_size=0,
                 qk_norm=True,
                 cross_attn_norm=False,
                 eps=1e-6):
        super().__init__()
        self.dim = dim
        self.ffn_dim = ffn_dim
        self.num_heads = num_heads
        self.local_attn_size = local_attn_size
        self.qk_norm = qk_norm
        self.cross_attn_norm = cross_attn_norm
        self.eps = eps

        # layers
        self.norm1 = WanLayerNorm(dim, eps)
        self.self_attn = CausalWanSelfAttention(dim, num_heads, local_attn_size, sink_size, qk_norm, eps)
        self.norm3 = WanLayerNorm(
            dim, eps,
            elementwise_affine=True) if cross_attn_norm else nn.Identity()
        self.cross_attn = WAN_CROSSATTENTION_CLASSES[cross_attn_type](dim,
                                                                      num_heads,
                                                                      (-1, -1),
                                                                      qk_norm,
                                                                      eps)
        self.norm2 = WanLayerNorm(dim, eps)
        self.ffn = nn.Sequential(
            nn.Linear(dim, ffn_dim), nn.GELU(approximate='tanh'),
            nn.Linear(ffn_dim, dim))

        # modulation: learned base added to the per-frame conditioning signal
        self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)

    def forward(
        self,
        x,
        e,
        seq_lens,
        grid_sizes,
        freqs,
        context,
        context_lens,
        block_mask,
        kv_cache=None,
        crossattn_cache=None,
        current_start=0,
        cache_start=None
    ):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
            e(Tensor): Shape [B, F, 6, C], per-frame modulation signal
            seq_lens(Tensor): Shape [B], length of each sequence in batch
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
            context: text (and optionally image) embeddings for cross-attention
            context_lens: per-sample context lengths
            block_mask: BlockMask for the self-attention training path
            kv_cache / crossattn_cache: optional inference caches
            current_start(int): absolute token offset of x in the stream
            cache_start: cache start index (defaults to current_start)
        """
        # L is assumed divisible by F so tokens group evenly per frame
        num_frames, frame_seqlen = e.shape[1], x.shape[1] // e.shape[1]
        # assert e.dtype == torch.float32
        # with amp.autocast(dtype=torch.float32):
        # e becomes a 6-tuple: (shift_attn, scale_attn, gate_attn,
        # shift_ffn, scale_ffn, gate_ffn), applied per frame below
        e = (self.modulation.unsqueeze(1) + e).chunk(6, dim=2)
        # assert e[0].dtype == torch.float32

        # self-attention on the pre-norm, per-frame modulated input:
        # norm(x) * (1 + scale) + shift
        y = self.self_attn(
            (self.norm1(x).unflatten(dim=1, sizes=(num_frames, frame_seqlen)) * (1 + e[1]) + e[0]).flatten(1, 2),
            seq_lens, grid_sizes,
            freqs, block_mask, kv_cache, current_start, cache_start)

        # with amp.autocast(dtype=torch.float32):
        # gated residual connection
        x = x + (y.unflatten(dim=1, sizes=(num_frames, frame_seqlen)) * e[2]).flatten(1, 2)

        # cross-attention & ffn function
        def cross_attn_ffn(x, context, context_lens, e, crossattn_cache=None):
            # plain (ungated) residual for cross-attention
            x = x + self.cross_attn(self.norm3(x), context,
                                    context_lens, crossattn_cache=crossattn_cache)
            # FFN with the second set of per-frame shift/scale
            y = self.ffn(
                (self.norm2(x).unflatten(dim=1, sizes=(num_frames,
                                                       frame_seqlen)) * (1 + e[4]) + e[3]).flatten(1, 2)
            )
            # with amp.autocast(dtype=torch.float32):
            x = x + (y.unflatten(dim=1, sizes=(num_frames,
                                               frame_seqlen)) * e[5]).flatten(1, 2)
            return x

        x = cross_attn_ffn(x, context, context_lens, e, crossattn_cache)
        return x
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
class CausalHead(nn.Module):
    """Final output head: layer norm + linear projection to per-patch pixels,
    modulated per frame by a learned (shift, scale) pair plus conditioning.
    """

    def __init__(self, dim, out_dim, patch_size, eps=1e-6):
        super().__init__()
        self.dim = dim
        self.out_dim = out_dim
        self.patch_size = patch_size
        self.eps = eps

        # projection produces all pixels of one patch per token
        self.norm = WanLayerNorm(dim, eps)
        self.head = nn.Linear(dim, math.prod(patch_size) * out_dim)

        # learned base for the 2-way (shift, scale) modulation
        self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)

    def forward(self, x, e):
        r"""
        Args:
            x(Tensor): Shape [B, L1, C]
            e(Tensor): Shape [B, F, 1, C]
        """
        num_frames = e.shape[1]
        tokens_per_frame = x.shape[1] // num_frames
        # combine learned modulation with the per-frame signal
        shift, scale = (self.modulation.unsqueeze(1) + e).chunk(2, dim=2)
        framed = self.norm(x).unflatten(dim=1, sizes=(num_frames, tokens_per_frame))
        # returns [B, F, tokens_per_frame, prod(patch_size) * out_dim]
        return self.head(framed * (1 + scale) + shift)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
class CausalWanModel(ModelMixin, ConfigMixin):
|
| 371 |
+
r"""
|
| 372 |
+
Wan diffusion backbone supporting both text-to-video and image-to-video.
|
| 373 |
+
"""
|
| 374 |
+
|
| 375 |
+
ignore_for_config = [
|
| 376 |
+
'patch_size', 'cross_attn_norm', 'qk_norm', 'text_dim'
|
| 377 |
+
]
|
| 378 |
+
_no_split_modules = ['WanAttentionBlock']
|
| 379 |
+
_supports_gradient_checkpointing = True
|
| 380 |
+
|
| 381 |
+
@register_to_config
def __init__(self,
             model_type='t2v',
             patch_size=(1, 2, 2),
             text_len=512,
             in_dim=16,
             dim=2048,
             ffn_dim=8192,
             freq_dim=256,
             text_dim=4096,
             out_dim=16,
             num_heads=16,
             num_layers=32,
             local_attn_size=-1,
             sink_size=0,
             qk_norm=True,
             cross_attn_norm=True,
             eps=1e-6):
    r"""
    Initialize the diffusion model backbone.

    Args:
        model_type (`str`, *optional*, defaults to 't2v'):
            Model variant - 't2v' (text-to-video) or 'i2v' (image-to-video)
        patch_size (`tuple`, *optional*, defaults to (1, 2, 2)):
            3D patch dimensions for video embedding (t_patch, h_patch, w_patch)
        text_len (`int`, *optional*, defaults to 512):
            Fixed length for text embeddings
        in_dim (`int`, *optional*, defaults to 16):
            Input video channels (C_in)
        dim (`int`, *optional*, defaults to 2048):
            Hidden dimension of the transformer
        ffn_dim (`int`, *optional*, defaults to 8192):
            Intermediate dimension in feed-forward network
        freq_dim (`int`, *optional*, defaults to 256):
            Dimension for sinusoidal time embeddings
        text_dim (`int`, *optional*, defaults to 4096):
            Input dimension for text embeddings
        out_dim (`int`, *optional*, defaults to 16):
            Output video channels (C_out)
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads
        num_layers (`int`, *optional*, defaults to 32):
            Number of transformer blocks
        local_attn_size (`int`, *optional*, defaults to -1):
            Window size for temporal local attention (-1 indicates global attention)
        sink_size (`int`, *optional*, defaults to 0):
            Size of the attention sink, we keep the first `sink_size` frames unchanged when rolling the KV cache
        qk_norm (`bool`, *optional*, defaults to True):
            Enable query/key normalization
        cross_attn_norm (`bool`, *optional*, defaults to True):
            Enable cross-attention normalization
        eps (`float`, *optional*, defaults to 1e-6):
            Epsilon value for normalization layers
    """

    super().__init__()

    assert model_type in ['t2v', 'i2v']
    self.model_type = model_type

    self.patch_size = patch_size
    self.text_len = text_len
    self.in_dim = in_dim
    self.dim = dim
    self.ffn_dim = ffn_dim
    self.freq_dim = freq_dim
    self.text_dim = text_dim
    self.out_dim = out_dim
    self.num_heads = num_heads
    self.num_layers = num_layers
    self.local_attn_size = local_attn_size
    self.qk_norm = qk_norm
    self.cross_attn_norm = cross_attn_norm
    self.eps = eps

    # embeddings
    self.patch_embedding = nn.Conv3d(
        in_dim, dim, kernel_size=patch_size, stride=patch_size)
    self.text_embedding = nn.Sequential(
        nn.Linear(text_dim, dim), nn.GELU(approximate='tanh'),
        nn.Linear(dim, dim))

    self.time_embedding = nn.Sequential(
        nn.Linear(freq_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
    # projects the time embedding into the 6 adaLN modulation signals
    self.time_projection = nn.Sequential(
        nn.SiLU(), nn.Linear(dim, dim * 6))

    # blocks
    cross_attn_type = 't2v_cross_attn' if model_type == 't2v' else 'i2v_cross_attn'
    self.blocks = nn.ModuleList([
        CausalWanAttentionBlock(cross_attn_type, dim, ffn_dim, num_heads,
                                local_attn_size, sink_size, qk_norm, cross_attn_norm, eps)
        for _ in range(num_layers)
    ])

    # head
    self.head = CausalHead(dim, out_dim, patch_size, eps)

    # buffers (don't use register_buffer otherwise dtype will be changed in to())
    assert (dim % num_heads) == 0 and (dim // num_heads) % 2 == 0
    d = dim // num_heads
    # rope frequency tables: temporal gets the remainder after splitting
    # two equal thirds for height and width
    self.freqs = torch.cat([
        rope_params(1024, d - 4 * (d // 6)),
        rope_params(1024, 2 * (d // 6)),
        rope_params(1024, 2 * (d // 6))
    ],
        dim=1)

    if model_type == 'i2v':
        # CLIP image-feature projector (1280 = CLIP feature dim)
        self.img_emb = MLPProj(1280, dim)

    # initialize weights
    self.init_weights()

    self.gradient_checkpointing = False

    # lazily-built flex-attention mask; None until prepared
    self.block_mask = None

    # autoregressive generation granularity (frames per causal block)
    self.num_frame_per_block = 1
    self.independent_first_frame = False
|
| 502 |
+
|
| 503 |
+
def _set_gradient_checkpointing(self, module, value=False):
    # `module` is unused: checkpointing is toggled globally for all blocks.
    # NOTE(review): the (module, value) signature presumably matches the
    # diffusers gradient-checkpointing hook — confirm against the installed
    # diffusers version before changing it.
    self.gradient_checkpointing = value
|
| 505 |
+
|
| 506 |
+
@staticmethod
def _prepare_blockwise_causal_attn_mask(
    device: torch.device | str, num_frames: int = 21,
    frame_seqlen: int = 1560, num_frame_per_block=1, local_attn_size=-1
) -> BlockMask:
    """
    we will divide the token sequence into the following format
    [1 latent frame] [1 latent frame] ... [1 latent frame]
    We use flexattention to construct the attention mask

    Args:
        device: device to build the mask on.
        num_frames: number of latent frames in the sequence.
        frame_seqlen: tokens per latent frame.
        num_frame_per_block: frames grouped into one causal block.
        local_attn_size: sliding window in frames (-1 = fully causal).
    """
    total_length = num_frames * frame_seqlen

    # we do right padding to get to a multiple of 128
    padded_length = math.ceil(total_length / 128) * 128 - total_length

    # ends[i] = exclusive end of the causal block containing token i
    ends = torch.zeros(total_length + padded_length,
                       device=device, dtype=torch.long)

    # Block-wise causal mask will attend to all elements that are before the end of the current chunk
    frame_indices = torch.arange(
        start=0,
        end=total_length,
        step=frame_seqlen * num_frame_per_block,
        device=device
    )

    for tmp in frame_indices:
        ends[tmp:tmp + frame_seqlen * num_frame_per_block] = tmp + \
            frame_seqlen * num_frame_per_block

    def attention_mask(b, h, q_idx, kv_idx):
        # `q_idx == kv_idx` always allows the diagonal, so padding rows
        # (whose ends[] is 0) still see at least one key
        if local_attn_size == -1:
            return (kv_idx < ends[q_idx]) | (q_idx == kv_idx)
        else:
            # restrict further to a sliding window of local_attn_size frames
            return ((kv_idx < ends[q_idx]) & (kv_idx >= (ends[q_idx] - local_attn_size * frame_seqlen))) | (q_idx == kv_idx)

    block_mask = create_block_mask(attention_mask, B=None, H=None, Q_LEN=total_length + padded_length,
                                   KV_LEN=total_length + padded_length, _compile=False, device=device)

    # `dist` is imported at module level; log only on rank 0
    if not dist.is_initialized() or dist.get_rank() == 0:
        print(
            f" cache a block wise causal mask with block size of {num_frame_per_block} frames")
        print(block_mask)

    return block_mask
|
| 563 |
+
|
| 564 |
+
@staticmethod
def _prepare_teacher_forcing_mask(
    device: torch.device | str, num_frames: int = 21,
    frame_seqlen: int = 1560, num_frame_per_block=1
) -> BlockMask:
    """
    Build the flex-attention BlockMask used for teacher forcing.

    The token sequence is laid out as two halves (hence
    ``total_length = num_frames * frame_seqlen * 2``):
        [clean frame 0] ... [clean frame N-1] [noisy frame 0] ... [noisy frame N-1]
    Clean frames get a block-wise causal mask within the clean half; each
    noisy block attends to its own noisy tokens plus the clean blocks
    strictly before its frame index.

    Args:
        device: Device on which the interval tensors and mask are created.
        num_frames: Number of latent frames in each half of the sequence.
        frame_seqlen: Number of tokens per latent frame.
        num_frame_per_block: Number of frames grouped into one attention block.

    Returns:
        BlockMask over the 128-padded sequence length.
    """
    # debug
    DEBUG = False
    if DEBUG:
        num_frames = 9
        frame_seqlen = 256

    # Two halves: clean context tokens followed by noisy tokens.
    total_length = num_frames * frame_seqlen * 2

    # we do right padding to get to a multiple of 128
    padded_length = math.ceil(total_length / 128) * 128 - total_length

    # Boundary between the clean half and the noisy half.
    clean_ends = num_frames * frame_seqlen
    # for clean context frames, we can construct their flex attention mask based on a [start, end] interval
    context_ends = torch.zeros(total_length + padded_length, device=device, dtype=torch.long)
    # for noisy frames, we need two intervals to construct the flex attention mask [context_start, context_end] [noisy_start, noisy_end]
    noise_context_starts = torch.zeros(total_length + padded_length, device=device, dtype=torch.long)
    noise_context_ends = torch.zeros(total_length + padded_length, device=device, dtype=torch.long)
    noise_noise_starts = torch.zeros(total_length + padded_length, device=device, dtype=torch.long)
    noise_noise_ends = torch.zeros(total_length + padded_length, device=device, dtype=torch.long)

    # Block-wise causal mask will attend to all elements that are before the end of the current chunk
    attention_block_size = frame_seqlen * num_frame_per_block
    frame_indices = torch.arange(
        start=0,
        end=num_frames * frame_seqlen,
        step=attention_block_size,
        device=device, dtype=torch.long
    )

    # attention for clean context frames
    for start in frame_indices:
        context_ends[start:start + attention_block_size] = start + attention_block_size

    noisy_image_start_list = torch.arange(
        num_frames * frame_seqlen, total_length,
        step=attention_block_size,
        device=device, dtype=torch.long
    )
    noisy_image_end_list = noisy_image_start_list + attention_block_size

    # attention for noisy frames
    for block_index, (start, end) in enumerate(zip(noisy_image_start_list, noisy_image_end_list)):
        # attend to noisy tokens within the same block
        noise_noise_starts[start:end] = start
        noise_noise_ends[start:end] = end
        # attend to context tokens in previous blocks
        # (block_index 0 therefore attends to no clean tokens at all)
        # noise_context_starts[start:end] = 0
        noise_context_ends[start:end] = block_index * attention_block_size

    def attention_mask(b, h, q_idx, kv_idx):
        # first design the mask for clean frames
        clean_mask = (q_idx < clean_ends) & (kv_idx < context_ends[q_idx])
        # then design the mask for noisy frames
        # noisy frames will attend to all clean preceeding clean frames + itself
        C1 = (kv_idx < noise_noise_ends[q_idx]) & (kv_idx >= noise_noise_starts[q_idx])
        C2 = (kv_idx < noise_context_ends[q_idx]) & (kv_idx >= noise_context_starts[q_idx])
        noise_mask = (q_idx >= clean_ends) & (C1 | C2)

        # Diagonal is always allowed (keeps padded tokens numerically safe).
        eye_mask = q_idx == kv_idx
        return eye_mask | clean_mask | noise_mask

    block_mask = create_block_mask(attention_mask, B=None, H=None, Q_LEN=total_length + padded_length,
                                   KV_LEN=total_length + padded_length, _compile=False, device=device)

    if DEBUG:
        print(block_mask)
        import imageio
        import numpy as np
        from torch.nn.attention.flex_attention import create_mask

        mask = create_mask(attention_mask, B=None, H=None, Q_LEN=total_length +
                           padded_length, KV_LEN=total_length + padded_length, device=device)
        import cv2
        mask = cv2.resize(mask[0, 0].cpu().float().numpy(), (1024, 1024))
        imageio.imwrite("mask_%d.jpg" % (0), np.uint8(255. * mask))

    return block_mask
|
| 651 |
+
|
| 652 |
+
@staticmethod
def _prepare_blockwise_causal_attn_mask_i2v(
    device: torch.device | str, num_frames: int = 21,
    frame_seqlen: int = 1560, num_frame_per_block=4, local_attn_size=-1
) -> BlockMask:
    """
    we will divide the token sequence into the following format
    [1 latent frame] [N latent frame] ... [N latent frame]
    The first frame is separated out to support I2V generation
    We use flexattention to construct the attention mask

    Args:
        device: Device on which the mask is constructed.
        num_frames: Total number of latent frames.
        frame_seqlen: Number of tokens per latent frame.
        num_frame_per_block: Frames grouped into one causal attention block
            (after the standalone first frame).
        local_attn_size: -1 for full causal attention; otherwise each query
            only attends within a trailing window of this many frames.

    Returns:
        BlockMask over the 128-padded sequence length.
    """
    total_length = num_frames * frame_seqlen

    # we do right padding to get to a multiple of 128
    padded_length = math.ceil(total_length / 128) * 128 - total_length

    # ends[q] = exclusive end of the kv range that query token q may attend to.
    ends = torch.zeros(total_length + padded_length,
                       device=device, dtype=torch.long)

    # special handling for the first frame
    ends[:frame_seqlen] = frame_seqlen

    # Block-wise causal mask will attend to all elements that are before the end of the current chunk
    frame_indices = torch.arange(
        start=frame_seqlen,
        end=total_length,
        step=frame_seqlen * num_frame_per_block,
        device=device
    )

    for idx, tmp in enumerate(frame_indices):
        ends[tmp:tmp + frame_seqlen * num_frame_per_block] = tmp + \
            frame_seqlen * num_frame_per_block

    def attention_mask(b, h, q_idx, kv_idx):
        if local_attn_size == -1:
            # Full block-wise causal attention; diagonal always allowed.
            return (kv_idx < ends[q_idx]) | (q_idx == kv_idx)
        else:
            # Sliding-window causal attention over the last local_attn_size frames.
            return ((kv_idx < ends[q_idx]) & (kv_idx >= (ends[q_idx] - local_attn_size * frame_seqlen))) | \
                (q_idx == kv_idx)

    block_mask = create_block_mask(attention_mask, B=None, H=None, Q_LEN=total_length + padded_length,
                                   KV_LEN=total_length + padded_length, _compile=False, device=device)

    # NOTE(review): `dist` is used here without a local import (the non-i2v
    # variant imports torch.distributed locally) — presumably imported at
    # module level; verify.
    if not dist.is_initialized() or dist.get_rank() == 0:
        print(
            f" cache a block wise causal mask with block size of {num_frame_per_block} frames")
        print(block_mask)

    # import imageio
    # import numpy as np
    # from torch.nn.attention.flex_attention import create_mask

    # mask = create_mask(attention_mask, B=None, H=None, Q_LEN=total_length +
    # padded_length, KV_LEN=total_length + padded_length, device=device)
    # import cv2
    # mask = cv2.resize(mask[0, 0].cpu().float().numpy(), (1024, 1024))
    # imageio.imwrite("mask_%d.jpg" % (0), np.uint8(255. * mask))

    return block_mask
|
| 712 |
+
|
| 713 |
+
def _forward_inference(
    self,
    x,
    t,
    context,
    seq_len,
    clip_fea=None,
    y=None,
    kv_cache: dict = None,
    crossattn_cache: dict = None,
    current_start: int = 0,
    cache_start: int = 0
):
    r"""
    Run the diffusion model with kv caching.
    See Algorithm 2 of CausVid paper https://arxiv.org/abs/2412.07772 for details.
    This function will be run for num_frame times.
    Process the latent frames one by one (1560 tokens each)

    Args:
        x (List[Tensor]):
            List of input video tensors, each with shape [C_in, F, H, W]
        t (Tensor):
            Diffusion timesteps tensor of shape [B]
        context (List[Tensor]):
            List of text embeddings each with shape [L, C]
        seq_len (`int`):
            Maximum sequence length for positional encoding
        clip_fea (Tensor, *optional*):
            CLIP image features for image-to-video mode
        y (List[Tensor], *optional*):
            Conditional video inputs for image-to-video mode, same shape as x
        kv_cache (dict, *optional*):
            Per-transformer-block self-attention KV cache, indexed by block index.
        crossattn_cache (dict, *optional*):
            Per-transformer-block cross-attention cache, indexed by block index.
        current_start (int):
            Global token offset of the current chunk within the full sequence.
        cache_start (int):
            Token offset at which the cache window begins.

    Returns:
        List[Tensor]:
            List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
    """

    if self.model_type == 'i2v':
        assert clip_fea is not None and y is not None
    # params
    device = self.patch_embedding.weight.device
    if self.freqs.device != device:
        self.freqs = self.freqs.to(device)

    if y is not None:
        # i2v: concatenate conditional video along the channel axis.
        x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]

    # embeddings
    x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
    grid_sizes = torch.stack(
        [torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
    x = [u.flatten(2).transpose(1, 2) for u in x]
    seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
    assert seq_lens.max() <= seq_len
    # No right-padding in the cached-inference path (unlike training below).
    x = torch.cat(x)
    """
    torch.cat([
        torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))],
                  dim=1) for u in x
    ])
    """

    # time embeddings
    # with amp.autocast(dtype=torch.float32):
    e = self.time_embedding(
        sinusoidal_embedding_1d(self.freq_dim, t.flatten()).type_as(x))
    e0 = self.time_projection(e).unflatten(
        1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)
    # assert e.dtype == torch.float32 and e0.dtype == torch.float32

    # context
    context_lens = None
    # Right-pad each text embedding to self.text_len tokens before projection.
    context = self.text_embedding(
        torch.stack([
            torch.cat(
                [u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
            for u in context
        ]))

    if clip_fea is not None:
        context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
        context = torch.concat([context_clip, context], dim=1)

    # arguments
    kwargs = dict(
        e=e0,
        seq_lens=seq_lens,
        grid_sizes=grid_sizes,
        freqs=self.freqs,
        context=context,
        context_lens=context_lens,
        block_mask=self.block_mask
    )

    def create_custom_forward(module):
        # Wrapper so torch.utils.checkpoint can forward kwargs to the block.
        def custom_forward(*inputs, **kwargs):
            return module(*inputs, **kwargs)
        return custom_forward

    for block_index, block in enumerate(self.blocks):
        if torch.is_grad_enabled() and self.gradient_checkpointing:
            # NOTE(review): this checkpointed branch does not pass
            # `crossattn_cache`, unlike the branch below — confirm the
            # asymmetry is intentional.
            kwargs.update(
                {
                    "kv_cache": kv_cache[block_index],
                    "current_start": current_start,
                    "cache_start": cache_start
                }
            )
            x = torch.utils.checkpoint.checkpoint(
                create_custom_forward(block),
                x, **kwargs,
                use_reentrant=False,
            )
        else:
            kwargs.update(
                {
                    "kv_cache": kv_cache[block_index],
                    "crossattn_cache": crossattn_cache[block_index],
                    "current_start": current_start,
                    "cache_start": cache_start
                }
            )
            x = block(x, **kwargs)

    # head
    x = self.head(x, e.unflatten(dim=0, sizes=t.shape).unsqueeze(2))
    # unpatchify
    x = self.unpatchify(x, grid_sizes)
    return torch.stack(x)
|
| 843 |
+
|
| 844 |
+
def _forward_train(
    self,
    x,
    t,
    context,
    seq_len,
    clean_x=None,
    aug_t=None,
    clip_fea=None,
    y=None,
):
    r"""
    Forward pass through the diffusion model

    Args:
        x (List[Tensor]):
            List of input video tensors, each with shape [C_in, F, H, W]
        t (Tensor):
            Diffusion timesteps tensor of shape [B]
        context (List[Tensor]):
            List of text embeddings each with shape [L, C]
        seq_len (`int`):
            Maximum sequence length for positional encoding
        clean_x (List[Tensor], *optional*):
            Clean context frames for teacher forcing; when given, their token
            sequence is prepended to x and dropped again before the head.
        aug_t (Tensor, *optional*):
            Timesteps used to embed the clean frames (defaults to zeros).
        clip_fea (Tensor, *optional*):
            CLIP image features for image-to-video mode
        y (List[Tensor], *optional*):
            Conditional video inputs for image-to-video mode, same shape as x

    Returns:
        List[Tensor]:
            List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
    """
    if self.model_type == 'i2v':
        assert clip_fea is not None and y is not None
    # params
    device = self.patch_embedding.weight.device
    if self.freqs.device != device:
        self.freqs = self.freqs.to(device)

    # Construct blockwise causal attn mask (built once, then cached on self).
    if self.block_mask is None:
        if clean_x is not None:
            if self.independent_first_frame:
                raise NotImplementedError()
            else:
                self.block_mask = self._prepare_teacher_forcing_mask(
                    device, num_frames=x.shape[2],
                    frame_seqlen=x.shape[-2] * x.shape[-1] // (self.patch_size[1] * self.patch_size[2]),
                    num_frame_per_block=self.num_frame_per_block
                )
        else:
            if self.independent_first_frame:
                self.block_mask = self._prepare_blockwise_causal_attn_mask_i2v(
                    device, num_frames=x.shape[2],
                    frame_seqlen=x.shape[-2] * x.shape[-1] // (self.patch_size[1] * self.patch_size[2]),
                    num_frame_per_block=self.num_frame_per_block,
                    local_attn_size=self.local_attn_size
                )
            else:
                self.block_mask = self._prepare_blockwise_causal_attn_mask(
                    device, num_frames=x.shape[2],
                    frame_seqlen=x.shape[-2] * x.shape[-1] // (self.patch_size[1] * self.patch_size[2]),
                    num_frame_per_block=self.num_frame_per_block,
                    local_attn_size=self.local_attn_size
                )

    if y is not None:
        # i2v: concatenate conditional video along the channel axis.
        x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]

    # embeddings
    x = [self.patch_embedding(u.unsqueeze(0)) for u in x]

    grid_sizes = torch.stack(
        [torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
    x = [u.flatten(2).transpose(1, 2) for u in x]

    seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
    assert seq_lens.max() <= seq_len
    # Right-pad every sample to the first sample's length before batching.
    x = torch.cat([
        torch.cat([u, u.new_zeros(1, seq_lens[0] - u.size(1), u.size(2))],
                  dim=1) for u in x
    ])

    # time embeddings
    # with amp.autocast(dtype=torch.float32):
    e = self.time_embedding(
        sinusoidal_embedding_1d(self.freq_dim, t.flatten()).type_as(x))
    e0 = self.time_projection(e).unflatten(
        1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)
    # assert e.dtype == torch.float32 and e0.dtype == torch.float32

    # context
    context_lens = None
    context = self.text_embedding(
        torch.stack([
            torch.cat(
                [u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
            for u in context
        ]))

    if clip_fea is not None:
        context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
        context = torch.concat([context_clip, context], dim=1)

    if clean_x is not None:
        # Teacher forcing: embed clean frames the same way and prepend their
        # tokens to the noisy sequence (matches _prepare_teacher_forcing_mask).
        clean_x = [self.patch_embedding(u.unsqueeze(0)) for u in clean_x]
        clean_x = [u.flatten(2).transpose(1, 2) for u in clean_x]

        seq_lens_clean = torch.tensor([u.size(1) for u in clean_x], dtype=torch.long)
        assert seq_lens_clean.max() <= seq_len
        clean_x = torch.cat([
            torch.cat([u, u.new_zeros(1, seq_lens_clean[0] - u.size(1), u.size(2))], dim=1) for u in clean_x
        ])

        x = torch.cat([clean_x, x], dim=1)
        if aug_t is None:
            aug_t = torch.zeros_like(t)
        # Separate time embedding for the clean half, concatenated in front.
        e_clean = self.time_embedding(
            sinusoidal_embedding_1d(self.freq_dim, aug_t.flatten()).type_as(x))
        e0_clean = self.time_projection(e_clean).unflatten(
            1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)
        e0 = torch.cat([e0_clean, e0], dim=1)

    # arguments
    kwargs = dict(
        e=e0,
        seq_lens=seq_lens,
        grid_sizes=grid_sizes,
        freqs=self.freqs,
        context=context,
        context_lens=context_lens,
        block_mask=self.block_mask)

    def create_custom_forward(module):
        # Wrapper so torch.utils.checkpoint can forward kwargs to the block.
        def custom_forward(*inputs, **kwargs):
            return module(*inputs, **kwargs)
        return custom_forward

    for block in self.blocks:
        if torch.is_grad_enabled() and self.gradient_checkpointing:
            x = torch.utils.checkpoint.checkpoint(
                create_custom_forward(block),
                x, **kwargs,
                use_reentrant=False,
            )
        else:
            x = block(x, **kwargs)

    if clean_x is not None:
        # Drop the clean (first) half; only noisy tokens go through the head.
        x = x[:, x.shape[1] // 2:]

    # head
    x = self.head(x, e.unflatten(dim=0, sizes=t.shape).unsqueeze(2))

    # unpatchify
    x = self.unpatchify(x, grid_sizes)
    return torch.stack(x)
|
| 1001 |
+
|
| 1002 |
+
def forward(
    self,
    *args,
    **kwargs
):
    """Dispatch to the appropriate forward pass.

    A non-None ``kv_cache`` keyword argument selects the KV-cached
    inference path; otherwise the standard training path is used.
    """
    use_cached_path = kwargs.get('kv_cache') is not None
    if use_cached_path:
        return self._forward_inference(*args, **kwargs)
    return self._forward_train(*args, **kwargs)
|
| 1011 |
+
|
| 1012 |
+
def unpatchify(self, x, grid_sizes):
    r"""
    Reconstruct video tensors from patch embeddings.

    Args:
        x (List[Tensor]):
            List of patchified features, each with shape [L, C_out * prod(patch_size)]
        grid_sizes (Tensor):
            Original spatial-temporal grid dimensions before patching,
            shape [B, 3] (3 dimensions correspond to F_patches, H_patches, W_patches)

    Returns:
        List[Tensor]:
            Reconstructed video tensors with shape [C_out, F, H / 8, W / 8]
    """
    channels = self.out_dim
    videos = []
    for tokens, grid in zip(x, grid_sizes.tolist()):
        # Keep only the valid (non-padded) tokens, then expand each token
        # back into its (pt, ph, pw, C) patch.
        patches = tokens[:math.prod(grid)].view(*grid, *self.patch_size, channels)
        # Interleave grid axes with their patch axes and move channels first.
        patches = torch.einsum('fhwpqrc->cfphqwr', patches)
        full_size = [g * p for g, p in zip(grid, self.patch_size)]
        videos.append(patches.reshape(channels, *full_size))
    return videos
|
| 1036 |
+
|
| 1037 |
+
def init_weights(self):
    r"""
    Initialize model parameters using Xavier initialization.

    Linear layers get Xavier weights with zero bias; the text/time embedding
    MLPs are then re-initialized with a small normal distribution, and the
    final head projection weight is zeroed.
    """
    # Xavier init for every linear layer in the module tree; zero the biases.
    for module in self.modules():
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    # Patch embedding: Xavier over the flattened convolution kernel.
    nn.init.xavier_uniform_(self.patch_embedding.weight.flatten(1))
    # Text/time embedding linears: small-std normal overrides the Xavier above.
    for module in self.text_embedding.modules():
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=.02)
    for module in self.time_embedding.modules():
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=.02)

    # Zero-initialize the final output projection weight.
    nn.init.zeros_(self.head.head.weight)
|
exp_code/1_benchmark/ALG/.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.DS_Store
|
| 2 |
+
.vscode/
|
exp_code/1_benchmark/ALG/__pycache__/lp_utils.cpython-311.pyc
ADDED
|
Binary file (8.49 kB). View file
|
|
|
exp_code/1_benchmark/ALG/__pycache__/pipeline_cogvideox_image2video_lowpass.cpython-311.pyc
ADDED
|
Binary file (55.2 kB). View file
|
|
|
exp_code/1_benchmark/ALG/__pycache__/pipeline_hunyuan_video_image2video_lowpass.cpython-311.pyc
ADDED
|
Binary file (65 kB). View file
|
|
|
exp_code/1_benchmark/ALG/__pycache__/pipeline_wan_image2video_lowpass.cpython-311.pyc
ADDED
|
Binary file (51.2 kB). View file
|
|
|
exp_code/1_benchmark/ALG/configs/cogvideox_alg.yaml
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
|
| 2 |
+
path: "THUDM/CogVideoX-5b-I2V"
|
| 3 |
+
dtype: "bfloat16"
|
| 4 |
+
|
| 5 |
+
generation:
|
| 6 |
+
height: null
|
| 7 |
+
width: null
|
| 8 |
+
num_frames: 49
|
| 9 |
+
num_inference_steps: 50
|
| 10 |
+
guidance_scale: 6.0
|
| 11 |
+
|
| 12 |
+
alg:
|
| 13 |
+
use_low_pass_guidance: True
|
| 14 |
+
|
| 15 |
+
lp_filter_type: "down_up"
|
| 16 |
+
lp_filter_in_latent: True
|
| 17 |
+
|
| 18 |
+
lp_blur_sigma: null
|
| 19 |
+
lp_blur_kernel_size: null
|
| 20 |
+
lp_resize_factor: 0.25
|
| 21 |
+
|
| 22 |
+
lp_strength_schedule_type: "interval"
|
| 23 |
+
schedule_blur_kernel_size: False
|
| 24 |
+
|
| 25 |
+
schedule_interval_start_time: 0.0
|
| 26 |
+
schedule_interval_end_time: 0.04
|
| 27 |
+
|
| 28 |
+
schedule_linear_start_weight: null
|
| 29 |
+
schedule_linear_end_weight: null
|
| 30 |
+
schedule_linear_end_time: null
|
| 31 |
+
|
| 32 |
+
video:
|
| 33 |
+
fps: 12
|
exp_code/1_benchmark/ALG/configs/cogvideox_default.yaml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
|
| 2 |
+
path: "THUDM/CogVideoX-5b-I2V"
|
| 3 |
+
dtype: "bfloat16"
|
| 4 |
+
|
| 5 |
+
generation:
|
| 6 |
+
height: null
|
| 7 |
+
width: null
|
| 8 |
+
num_frames: 49
|
| 9 |
+
num_inference_steps: 50
|
| 10 |
+
guidance_scale: 6.0
|
| 11 |
+
|
| 12 |
+
alg:
|
| 13 |
+
use_low_pass_guidance: False
|
| 14 |
+
|
| 15 |
+
video:
|
| 16 |
+
fps: 12
|
exp_code/1_benchmark/ALG/configs/hunyuan_video_alg.yaml
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
|
| 2 |
+
path: "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo-I2V"
|
| 3 |
+
dtype: "bfloat16"
|
| 4 |
+
flow_shift: 7.0 #7.0 if i2v_stable else 17.0
|
| 5 |
+
flow_reverse: false
|
| 6 |
+
|
| 7 |
+
generation:
|
| 8 |
+
num_frames: 129
|
| 9 |
+
num_inference_steps: 20
|
| 10 |
+
guidance_scale: 6.0
|
| 11 |
+
i2v_stable: true
|
| 12 |
+
true_cfg_scale: 1.0
|
| 13 |
+
|
| 14 |
+
alg:
|
| 15 |
+
use_low_pass_guidance: True
|
| 16 |
+
|
| 17 |
+
lp_filter_type: "down_up"
|
| 18 |
+
lp_filter_in_latent: True
|
| 19 |
+
|
| 20 |
+
lp_blur_sigma: null
|
| 21 |
+
lp_blur_kernel_size: null
|
| 22 |
+
lp_resize_factor: 0.625
|
| 23 |
+
|
| 24 |
+
lp_strength_schedule_type: "interval"
|
| 25 |
+
schedule_blur_kernel_size: False
|
| 26 |
+
|
| 27 |
+
schedule_interval_start_time: 0.0
|
| 28 |
+
schedule_interval_end_time: 0.04
|
| 29 |
+
|
| 30 |
+
schedule_linear_start_weight: null
|
| 31 |
+
schedule_linear_end_weight: null
|
| 32 |
+
schedule_linear_end_time: null
|
| 33 |
+
|
| 34 |
+
video:
|
| 35 |
+
resolution: 360p
|
| 36 |
+
fps: 30
|
exp_code/1_benchmark/ALG/configs/hunyuan_video_default.yaml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
|
| 2 |
+
path: "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo-I2V"
|
| 3 |
+
dtype: "bfloat16"
|
| 4 |
+
flow_shift: 7.0 #7.0 if i2v_stable else 17.0
|
| 5 |
+
flow_reverse: false
|
| 6 |
+
|
| 7 |
+
generation:
|
| 8 |
+
num_frames: 129
|
| 9 |
+
num_inference_steps: 50
|
| 10 |
+
guidance_scale: 6.0
|
| 11 |
+
i2v_stable: true
|
| 12 |
+
true_cfg_scale: 1.0
|
| 13 |
+
|
| 14 |
+
alg:
|
| 15 |
+
use_low_pass_guidance: True
|
| 16 |
+
|
| 17 |
+
video:
|
| 18 |
+
resolution: 360p
|
| 19 |
+
fps: 30
|
exp_code/1_benchmark/ALG/configs/wan_alg.yaml
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
|
| 2 |
+
path: "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
|
| 3 |
+
dtype: "bfloat16"
|
| 4 |
+
|
| 5 |
+
generation:
|
| 6 |
+
num_frames: 81
|
| 7 |
+
num_inference_steps: 50
|
| 8 |
+
guidance_scale: 5.0
|
| 9 |
+
height: 480
|
| 10 |
+
width: 832
|
| 11 |
+
|
| 12 |
+
alg:
|
| 13 |
+
use_low_pass_guidance: True
|
| 14 |
+
|
| 15 |
+
lp_filter_type: "down_up"
|
| 16 |
+
lp_filter_in_latent: True
|
| 17 |
+
|
| 18 |
+
lp_blur_sigma: null
|
| 19 |
+
lp_blur_kernel_size: null
|
| 20 |
+
lp_resize_factor: 0.4
|
| 21 |
+
|
| 22 |
+
lp_strength_schedule_type: "interval"
|
| 23 |
+
schedule_blur_kernel_size: False
|
| 24 |
+
|
| 25 |
+
schedule_interval_start_time: 0.0
|
| 26 |
+
schedule_interval_end_time: 0.20
|
| 27 |
+
|
| 28 |
+
schedule_linear_start_weight: null
|
| 29 |
+
schedule_linear_end_weight: null
|
| 30 |
+
schedule_linear_end_time: null
|
| 31 |
+
|
| 32 |
+
video:
|
| 33 |
+
fps: 16
|
exp_code/1_benchmark/ALG/configs/wan_default.yaml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model:
|
| 2 |
+
path: "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
|
| 3 |
+
dtype: "bfloat16"
|
| 4 |
+
|
| 5 |
+
generation:
|
| 6 |
+
num_frames: 81
|
| 7 |
+
num_inference_steps: 50
|
| 8 |
+
guidance_scale: 5.0
|
| 9 |
+
height: 480
|
| 10 |
+
width: 832
|
| 11 |
+
|
| 12 |
+
alg:
|
| 13 |
+
use_low_pass_guidance: False
|
| 14 |
+
|
| 15 |
+
video:
|
| 16 |
+
fps: 16
|
exp_code/1_benchmark/ALG/lp_utils.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
import torchvision.transforms.functional as tvF
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def apply_low_pass_filter(
    tensor: torch.Tensor,
    filter_type: str,
    # Gaussian Blur Params
    blur_sigma: float,
    blur_kernel_size: float,  # Can be float (relative) or int (absolute)
    # Down/Up Sampling Params
    resize_factor: float,
):
    """
    Applies the specified low-pass filtering operation to the input tensor.

    Handles 4D ([B, C, H, W]) and 5D ([B, C, F, H, W]) tensors by temporarily
    reshaping 5D tensors for spatial filtering.

    Args:
        tensor: Input tensor, 4D ([B, C, H, W]) or 5D ([B, C, F, H, W]).
        filter_type: "none", "gaussian_blur" or "down_up". Any other value
            falls through and returns the input unchanged.
        blur_sigma: Gaussian sigma ("gaussian_blur" only).
        blur_kernel_size: Kernel size; a float is interpreted relative to H,
            an int as an absolute size (forced odd below).
        resize_factor: Spatial down-scale factor ("down_up" only).

    Returns:
        The filtered tensor with the same shape as the input.
    """
    # --- Early Exits for No-Op Cases ---
    if filter_type == "none":
        return tensor
    if filter_type == "down_up" and resize_factor == 1.0:
        return tensor
    if filter_type == "gaussian_blur" and blur_sigma == 0:
        return tensor

    # --- Reshape 5D tensor for spatial filtering ---
    is_5d = tensor.ndim == 5
    if is_5d:
        B, C, K, H, W = tensor.shape
        # Fold the frame axis into the batch axis. Bug fix: use reshape, not
        # view — view raises RuntimeError on non-contiguous inputs (e.g. the
        # result of a permute/transpose). No transpose is needed before
        # folding because both filters below act independently on each
        # trailing [H, W] slice, and the inverse reshape at the end restores
        # the original layout exactly.
        tensor = tensor.reshape(B * K, C, H, W)
    else:
        B, C, H, W = tensor.shape

    # --- Apply Selected Filter ---
    if filter_type == "gaussian_blur":
        if isinstance(blur_kernel_size, float):
            # Relative kernel size: fraction of the spatial height.
            kernel_val = max(int(blur_kernel_size * H), 1)
        else:
            kernel_val = int(blur_kernel_size)
        # gaussian_blur requires an odd kernel size.
        if kernel_val % 2 == 0:
            kernel_val += 1
        tensor = tvF.gaussian_blur(tensor, kernel_size=[kernel_val, kernel_val], sigma=[blur_sigma, blur_sigma])

    elif filter_type == "down_up":
        # Low-pass via antialiased downsample then upsample back to the
        # original spatial size.
        h0, w0 = tensor.shape[-2:]
        h1 = max(1, int(round(h0 * resize_factor)))
        w1 = max(1, int(round(w0 * resize_factor)))
        tensor = F.interpolate(tensor, size=(h1, w1), mode="bilinear", align_corners=False, antialias=True)
        tensor = F.interpolate(tensor, size=(h0, w0), mode="bilinear", align_corners=False, antialias=True)

    # --- Restore original 5D shape if necessary ---
    if is_5d:
        tensor = tensor.reshape(B, C, K, H, W)

    return tensor
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def get_lp_strength(
    step_index: int,
    total_steps: int,
    lp_strength_schedule_type: str,
    # Interval params
    schedule_interval_start_time: float,
    schedule_interval_end_time: float,
    # Linear params
    schedule_linear_start_weight: float,
    schedule_linear_end_weight: float,
    schedule_linear_end_time: float,
    # Exponential params
    schedule_exp_decay_rate: float,
) -> float:
    """
    Calculates the low-pass guidance strength multiplier for the current
    timestep based on the specified schedule.

    The step index is first normalized to [0, 1] over the run; each schedule
    then maps that progress value to a strength multiplier.
    """
    # Normalized progress through sampling; guard against total_steps <= 1.
    step_norm = step_index / max(total_steps - 1, 1)

    if lp_strength_schedule_type == "linear":
        # Ramp from start_weight to end_weight over the first
        # `schedule_linear_end_time` fraction of the run, then hold.
        ramp_span = schedule_linear_end_time
        if ramp_span <= 0:
            return schedule_linear_start_weight
        if step_norm >= ramp_span:
            return schedule_linear_end_weight
        progress = step_norm / ramp_span
        return schedule_linear_start_weight * (1 - progress) + schedule_linear_end_weight * progress

    if lp_strength_schedule_type == "interval":
        # Full strength inside the window, zero outside.
        inside = schedule_interval_start_time <= step_norm <= schedule_interval_end_time
        return 1.0 if inside else 0.0

    if lp_strength_schedule_type == "exponential":
        decay_rate = schedule_exp_decay_rate
        if decay_rate < 0:
            print(f"Warning: Negative exponential_decay_rate ({decay_rate}) is unusual. Using abs value.")
            decay_rate = abs(decay_rate)
        return math.exp(-decay_rate * step_norm)

    if lp_strength_schedule_type == "none":
        return 1.0

    print(f"Warning: Unknown lp_strength_schedule_type '{lp_strength_schedule_type}'. Using constant strength 1.0.")
    return 1.0
|
| 112 |
+
|
| 113 |
+
def _generate_crop_size_list(base_size=256, patch_size=32, max_ratio=4.0):
|
| 114 |
+
"""generate crop size list (HunyuanVideo)
|
| 115 |
+
|
| 116 |
+
Args:
|
| 117 |
+
base_size (int, optional): the base size for generate bucket. Defaults to 256.
|
| 118 |
+
patch_size (int, optional): the stride to generate bucket. Defaults to 32.
|
| 119 |
+
max_ratio (float, optional): th max ratio for h or w based on base_size . Defaults to 4.0.
|
| 120 |
+
|
| 121 |
+
Returns:
|
| 122 |
+
list: generate crop size list
|
| 123 |
+
"""
|
| 124 |
+
num_patches = round((base_size / patch_size) ** 2)
|
| 125 |
+
assert max_ratio >= 1.0
|
| 126 |
+
crop_size_list = []
|
| 127 |
+
wp, hp = num_patches, 1
|
| 128 |
+
while wp > 0:
|
| 129 |
+
if max(wp, hp) / min(wp, hp) <= max_ratio:
|
| 130 |
+
crop_size_list.append((wp * patch_size, hp * patch_size))
|
| 131 |
+
if (hp + 1) * wp <= num_patches:
|
| 132 |
+
hp += 1
|
| 133 |
+
else:
|
| 134 |
+
wp -= 1
|
| 135 |
+
return crop_size_list
|
| 136 |
+
|
| 137 |
+
def _get_closest_ratio(height: float, width: float, ratios: list, buckets: list):
|
| 138 |
+
"""get the closest ratio in the buckets (HunyuanVideo)
|
| 139 |
+
|
| 140 |
+
Args:
|
| 141 |
+
height (float): video height
|
| 142 |
+
width (float): video width
|
| 143 |
+
ratios (list): video aspect ratio
|
| 144 |
+
buckets (list): buckets generate by `generate_crop_size_list`
|
| 145 |
+
|
| 146 |
+
Returns:
|
| 147 |
+
the closest ratio in the buckets and the corresponding ratio
|
| 148 |
+
"""
|
| 149 |
+
aspect_ratio = float(height) / float(width)
|
| 150 |
+
diff_ratios = ratios - aspect_ratio
|
| 151 |
+
|
| 152 |
+
if aspect_ratio >= 1:
|
| 153 |
+
indices = [(index, x) for index, x in enumerate(diff_ratios) if x <= 0]
|
| 154 |
+
else:
|
| 155 |
+
indices = [(index, x) for index, x in enumerate(diff_ratios) if x > 0]
|
| 156 |
+
|
| 157 |
+
closest_ratio_id = min(indices, key=lambda pair: abs(pair[1]))[0]
|
| 158 |
+
closest_size = buckets[closest_ratio_id]
|
| 159 |
+
closest_ratio = ratios[closest_ratio_id]
|
| 160 |
+
|
| 161 |
+
return closest_size, closest_ratio
|
| 162 |
+
|
| 163 |
+
def get_hunyuan_video_size(i2v_resolution, input_image):
|
| 164 |
+
"""
|
| 165 |
+
Map to target height and width based on resolution for HunyuanVideo
|
| 166 |
+
|
| 167 |
+
Args:
|
| 168 |
+
height (float): video height
|
| 169 |
+
width (float): video width
|
| 170 |
+
ratios (list): video aspect ratio
|
| 171 |
+
buckets (list): buckets generate by `generate_crop_size_list`
|
| 172 |
+
|
| 173 |
+
Returns:
|
| 174 |
+
the closest ratio in the buckets and the corresponding ratio
|
| 175 |
+
"""
|
| 176 |
+
if i2v_resolution == "720p":
|
| 177 |
+
bucket_hw_base_size = 960
|
| 178 |
+
elif i2v_resolution == "540p":
|
| 179 |
+
bucket_hw_base_size = 720
|
| 180 |
+
elif i2v_resolution == "360p":
|
| 181 |
+
bucket_hw_base_size = 480
|
| 182 |
+
|
| 183 |
+
origin_size = input_image.size
|
| 184 |
+
|
| 185 |
+
crop_size_list = _generate_crop_size_list(bucket_hw_base_size, 32)
|
| 186 |
+
aspect_ratios = np.array([round(float(h)/float(w), 5) for h, w in crop_size_list])
|
| 187 |
+
closest_size, _ = _get_closest_ratio(origin_size[1], origin_size[0], aspect_ratios, crop_size_list)
|
| 188 |
+
target_height, target_width = closest_size
|
| 189 |
+
return target_height, target_width
|
exp_code/1_benchmark/ALG/pipeline_cogvideox_image2video_lowpass.py
ADDED
|
@@ -0,0 +1,1158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import inspect
|
| 17 |
+
import math
|
| 18 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Set
|
| 19 |
+
|
| 20 |
+
import PIL
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
import torchvision.transforms.functional as tvF
|
| 24 |
+
from transformers import T5EncoderModel, T5Tokenizer
|
| 25 |
+
|
| 26 |
+
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
|
| 27 |
+
from diffusers.image_processor import PipelineImageInput
|
| 28 |
+
from diffusers.loaders import CogVideoXLoraLoaderMixin
|
| 29 |
+
from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
|
| 30 |
+
from diffusers.models.embeddings import get_3d_rotary_pos_embed
|
| 31 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 32 |
+
from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
|
| 33 |
+
from diffusers.utils import (
|
| 34 |
+
is_torch_xla_available,
|
| 35 |
+
logging,
|
| 36 |
+
replace_example_docstring,
|
| 37 |
+
)
|
| 38 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 39 |
+
from diffusers.video_processor import VideoProcessor
|
| 40 |
+
|
| 41 |
+
from diffusers.pipelines.cogvideo.pipeline_output import CogVideoXPipelineOutput
|
| 42 |
+
|
| 43 |
+
import lp_utils
|
| 44 |
+
|
| 45 |
+
if is_torch_xla_available():
|
| 46 |
+
import torch_xla.core.xla_model as xm
|
| 47 |
+
|
| 48 |
+
XLA_AVAILABLE = True
|
| 49 |
+
else:
|
| 50 |
+
XLA_AVAILABLE = False
|
| 51 |
+
|
| 52 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
EXAMPLE_DOC_STRING = """
|
| 56 |
+
Examples:
|
| 57 |
+
```py
|
| 58 |
+
>>> import torch
|
| 59 |
+
>>> from diffusers import CogVideoXImageToVideoPipeline
|
| 60 |
+
>>> from diffusers.utils import export_to_video, load_image
|
| 61 |
+
|
| 62 |
+
>>> pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
|
| 63 |
+
>>> pipe.to("cuda")
|
| 64 |
+
|
| 65 |
+
>>> prompt = "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
|
| 66 |
+
>>> image = load_image(
|
| 67 |
+
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
|
| 68 |
+
... )
|
| 69 |
+
>>> video = pipe(image, prompt, use_dynamic_cfg=True)
|
| 70 |
+
>>> export_to_video(video.frames[0], "output.mp4", fps=8)
|
| 71 |
+
```
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
|
| 76 |
+
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
|
| 77 |
+
tw = tgt_width
|
| 78 |
+
th = tgt_height
|
| 79 |
+
h, w = src
|
| 80 |
+
r = h / w
|
| 81 |
+
if r > (th / tw):
|
| 82 |
+
resize_height = th
|
| 83 |
+
resize_width = int(round(th / h * w))
|
| 84 |
+
else:
|
| 85 |
+
resize_width = tw
|
| 86 |
+
resize_height = int(round(tw / w * h))
|
| 87 |
+
|
| 88 |
+
crop_top = int(round((th - resize_height) / 2.0))
|
| 89 |
+
crop_left = int(round((tw - resize_width) / 2.0))
|
| 90 |
+
|
| 91 |
+
return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
| 95 |
+
def retrieve_timesteps(
|
| 96 |
+
scheduler,
|
| 97 |
+
num_inference_steps: Optional[int] = None,
|
| 98 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 99 |
+
timesteps: Optional[List[int]] = None,
|
| 100 |
+
sigmas: Optional[List[float]] = None,
|
| 101 |
+
**kwargs,
|
| 102 |
+
):
|
| 103 |
+
r"""
|
| 104 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 105 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 106 |
+
|
| 107 |
+
Args:
|
| 108 |
+
scheduler (`SchedulerMixin`):
|
| 109 |
+
The scheduler to get timesteps from.
|
| 110 |
+
num_inference_steps (`int`):
|
| 111 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
| 112 |
+
must be `None`.
|
| 113 |
+
device (`str` or `torch.device`, *optional*):
|
| 114 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 115 |
+
timesteps (`List[int]`, *optional*):
|
| 116 |
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
| 117 |
+
`num_inference_steps` and `sigmas` must be `None`.
|
| 118 |
+
sigmas (`List[float]`, *optional*):
|
| 119 |
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
| 120 |
+
`num_inference_steps` and `timesteps` must be `None`.
|
| 121 |
+
|
| 122 |
+
Returns:
|
| 123 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 124 |
+
second element is the number of inference steps.
|
| 125 |
+
"""
|
| 126 |
+
if timesteps is not None and sigmas is not None:
|
| 127 |
+
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
|
| 128 |
+
if timesteps is not None:
|
| 129 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 130 |
+
if not accepts_timesteps:
|
| 131 |
+
raise ValueError(
|
| 132 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 133 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 134 |
+
)
|
| 135 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 136 |
+
timesteps = scheduler.timesteps
|
| 137 |
+
num_inference_steps = len(timesteps)
|
| 138 |
+
elif sigmas is not None:
|
| 139 |
+
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 140 |
+
if not accept_sigmas:
|
| 141 |
+
raise ValueError(
|
| 142 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 143 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
| 144 |
+
)
|
| 145 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
| 146 |
+
timesteps = scheduler.timesteps
|
| 147 |
+
num_inference_steps = len(timesteps)
|
| 148 |
+
else:
|
| 149 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 150 |
+
timesteps = scheduler.timesteps
|
| 151 |
+
return timesteps, num_inference_steps
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
| 155 |
+
def retrieve_latents(
|
| 156 |
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
| 157 |
+
):
|
| 158 |
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
| 159 |
+
return encoder_output.latent_dist.sample(generator)
|
| 160 |
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
| 161 |
+
return encoder_output.latent_dist.mode()
|
| 162 |
+
elif hasattr(encoder_output, "latents"):
|
| 163 |
+
return encoder_output.latents
|
| 164 |
+
else:
|
| 165 |
+
raise AttributeError("Could not access latents of provided encoder_output")
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class CogVideoXImageToVideoPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
|
| 169 |
+
r"""
|
| 170 |
+
Pipeline for image-to-video generation using CogVideoX.
|
| 171 |
+
|
| 172 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 173 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 174 |
+
|
| 175 |
+
Args:
|
| 176 |
+
vae ([`AutoencoderKL`]):
|
| 177 |
+
Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
|
| 178 |
+
text_encoder ([`T5EncoderModel`]):
|
| 179 |
+
Frozen text-encoder. CogVideoX uses
|
| 180 |
+
[T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
|
| 181 |
+
[t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
|
| 182 |
+
tokenizer (`T5Tokenizer`):
|
| 183 |
+
Tokenizer of class
|
| 184 |
+
[T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
|
| 185 |
+
transformer ([`CogVideoXTransformer3DModel`]):
|
| 186 |
+
A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
|
| 187 |
+
scheduler ([`SchedulerMixin`]):
|
| 188 |
+
A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
|
| 189 |
+
"""
|
| 190 |
+
|
| 191 |
+
_optional_components = []
|
| 192 |
+
model_cpu_offload_seq = "text_encoder->transformer->vae"
|
| 193 |
+
|
| 194 |
+
_callback_tensor_inputs = [
|
| 195 |
+
"latents",
|
| 196 |
+
"prompt_embeds",
|
| 197 |
+
"negative_prompt_embeds",
|
| 198 |
+
]
|
| 199 |
+
|
| 200 |
+
def __init__(
|
| 201 |
+
self,
|
| 202 |
+
tokenizer: T5Tokenizer,
|
| 203 |
+
text_encoder: T5EncoderModel,
|
| 204 |
+
vae: AutoencoderKLCogVideoX,
|
| 205 |
+
transformer: CogVideoXTransformer3DModel,
|
| 206 |
+
scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
|
| 207 |
+
):
|
| 208 |
+
super().__init__()
|
| 209 |
+
|
| 210 |
+
self.register_modules(
|
| 211 |
+
tokenizer=tokenizer,
|
| 212 |
+
text_encoder=text_encoder,
|
| 213 |
+
vae=vae,
|
| 214 |
+
transformer=transformer,
|
| 215 |
+
scheduler=scheduler,
|
| 216 |
+
)
|
| 217 |
+
self.vae_scale_factor_spatial = (
|
| 218 |
+
2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 219 |
+
)
|
| 220 |
+
self.vae_scale_factor_temporal = (
|
| 221 |
+
self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4
|
| 222 |
+
)
|
| 223 |
+
self.vae_scaling_factor_image = self.vae.config.scaling_factor if getattr(self, "vae", None) else 0.7
|
| 224 |
+
|
| 225 |
+
self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
|
| 226 |
+
|
| 227 |
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds
|
| 228 |
+
def _get_t5_prompt_embeds(
|
| 229 |
+
self,
|
| 230 |
+
prompt: Union[str, List[str]] = None,
|
| 231 |
+
num_videos_per_prompt: int = 1,
|
| 232 |
+
max_sequence_length: int = 226,
|
| 233 |
+
device: Optional[torch.device] = None,
|
| 234 |
+
dtype: Optional[torch.dtype] = None,
|
| 235 |
+
):
|
| 236 |
+
device = device or self._execution_device
|
| 237 |
+
dtype = dtype or self.text_encoder.dtype
|
| 238 |
+
|
| 239 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 240 |
+
batch_size = len(prompt)
|
| 241 |
+
|
| 242 |
+
text_inputs = self.tokenizer(
|
| 243 |
+
prompt,
|
| 244 |
+
padding="max_length",
|
| 245 |
+
max_length=max_sequence_length,
|
| 246 |
+
truncation=True,
|
| 247 |
+
add_special_tokens=True,
|
| 248 |
+
return_tensors="pt",
|
| 249 |
+
)
|
| 250 |
+
text_input_ids = text_inputs.input_ids
|
| 251 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 252 |
+
|
| 253 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
|
| 254 |
+
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
|
| 255 |
+
logger.warning(
|
| 256 |
+
"The following part of your input was truncated because `max_sequence_length` is set to "
|
| 257 |
+
f" {max_sequence_length} tokens: {removed_text}"
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
|
| 261 |
+
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
|
| 262 |
+
|
| 263 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 264 |
+
_, seq_len, _ = prompt_embeds.shape
|
| 265 |
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
|
| 266 |
+
prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
|
| 267 |
+
|
| 268 |
+
return prompt_embeds
|
| 269 |
+
|
| 270 |
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt
|
| 271 |
+
def encode_prompt(
|
| 272 |
+
self,
|
| 273 |
+
prompt: Union[str, List[str]],
|
| 274 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 275 |
+
do_classifier_free_guidance: bool = True,
|
| 276 |
+
num_videos_per_prompt: int = 1,
|
| 277 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 278 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 279 |
+
max_sequence_length: int = 226,
|
| 280 |
+
device: Optional[torch.device] = None,
|
| 281 |
+
dtype: Optional[torch.dtype] = None,
|
| 282 |
+
):
|
| 283 |
+
r"""
|
| 284 |
+
Encodes the prompt into text encoder hidden states.
|
| 285 |
+
|
| 286 |
+
Args:
|
| 287 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 288 |
+
prompt to be encoded
|
| 289 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 290 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 291 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 292 |
+
less than `1`).
|
| 293 |
+
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
|
| 294 |
+
Whether to use classifier free guidance or not.
|
| 295 |
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
| 296 |
+
Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
|
| 297 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 298 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 299 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 300 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 301 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 302 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 303 |
+
argument.
|
| 304 |
+
device: (`torch.device`, *optional*):
|
| 305 |
+
torch device
|
| 306 |
+
dtype: (`torch.dtype`, *optional*):
|
| 307 |
+
torch dtype
|
| 308 |
+
"""
|
| 309 |
+
device = device or self._execution_device
|
| 310 |
+
|
| 311 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 312 |
+
if prompt is not None:
|
| 313 |
+
batch_size = len(prompt)
|
| 314 |
+
else:
|
| 315 |
+
batch_size = prompt_embeds.shape[0]
|
| 316 |
+
|
| 317 |
+
if prompt_embeds is None:
|
| 318 |
+
prompt_embeds = self._get_t5_prompt_embeds(
|
| 319 |
+
prompt=prompt,
|
| 320 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 321 |
+
max_sequence_length=max_sequence_length,
|
| 322 |
+
device=device,
|
| 323 |
+
dtype=dtype,
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 327 |
+
negative_prompt = negative_prompt or ""
|
| 328 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 329 |
+
|
| 330 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 331 |
+
raise TypeError(
|
| 332 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 333 |
+
f" {type(prompt)}."
|
| 334 |
+
)
|
| 335 |
+
elif batch_size != len(negative_prompt):
|
| 336 |
+
raise ValueError(
|
| 337 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 338 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 339 |
+
" the batch size of `prompt`."
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
negative_prompt_embeds = self._get_t5_prompt_embeds(
|
| 343 |
+
prompt=negative_prompt,
|
| 344 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 345 |
+
max_sequence_length=max_sequence_length,
|
| 346 |
+
device=device,
|
| 347 |
+
dtype=dtype,
|
| 348 |
+
)
|
| 349 |
+
|
| 350 |
+
return prompt_embeds, negative_prompt_embeds
|
| 351 |
+
|
| 352 |
+
def prepare_latents(
|
| 353 |
+
self,
|
| 354 |
+
image: torch.Tensor,
|
| 355 |
+
batch_size: int = 1,
|
| 356 |
+
num_channels_latents: int = 16,
|
| 357 |
+
num_frames: int = 13,
|
| 358 |
+
height: int = 60,
|
| 359 |
+
width: int = 90,
|
| 360 |
+
dtype: Optional[torch.dtype] = None,
|
| 361 |
+
device: Optional[torch.device] = None,
|
| 362 |
+
generator: Optional[torch.Generator] = None,
|
| 363 |
+
latents: Optional[torch.Tensor] = None,
|
| 364 |
+
):
|
| 365 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 366 |
+
raise ValueError(
|
| 367 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 368 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 369 |
+
)
|
| 370 |
+
|
| 371 |
+
num_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
|
| 372 |
+
shape = (
|
| 373 |
+
batch_size,
|
| 374 |
+
num_frames,
|
| 375 |
+
num_channels_latents,
|
| 376 |
+
height // self.vae_scale_factor_spatial,
|
| 377 |
+
width // self.vae_scale_factor_spatial,
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
# For CogVideoX1.5, the latent should add 1 for padding (Not use)
|
| 381 |
+
if self.transformer.config.patch_size_t is not None:
|
| 382 |
+
shape = shape[:1] + (shape[1] + shape[1] % self.transformer.config.patch_size_t,) + shape[2:]
|
| 383 |
+
|
| 384 |
+
image = image.unsqueeze(2) # [B, C, F, H, W]
|
| 385 |
+
|
| 386 |
+
if isinstance(generator, list):
|
| 387 |
+
image_latents = [
|
| 388 |
+
retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
|
| 389 |
+
]
|
| 390 |
+
else:
|
| 391 |
+
image_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator) for img in image]
|
| 392 |
+
|
| 393 |
+
image_latents = torch.cat(image_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4) # [B, F, C, H, W]
|
| 394 |
+
|
| 395 |
+
if not self.vae.config.invert_scale_latents:
|
| 396 |
+
image_latents = self.vae_scaling_factor_image * image_latents
|
| 397 |
+
else:
|
| 398 |
+
# This is awkward but required because the CogVideoX team forgot to multiply the
|
| 399 |
+
# scaling factor during training :)
|
| 400 |
+
image_latents = 1 / self.vae_scaling_factor_image * image_latents
|
| 401 |
+
|
| 402 |
+
padding_shape = (
|
| 403 |
+
batch_size,
|
| 404 |
+
num_frames - 1,
|
| 405 |
+
num_channels_latents,
|
| 406 |
+
height // self.vae_scale_factor_spatial,
|
| 407 |
+
width // self.vae_scale_factor_spatial,
|
| 408 |
+
)
|
| 409 |
+
|
| 410 |
+
latent_padding = torch.zeros(padding_shape, device=device, dtype=dtype)
|
| 411 |
+
image_latents = torch.cat([image_latents, latent_padding], dim=1)
|
| 412 |
+
|
| 413 |
+
# Select the first frame along the second dimension
|
| 414 |
+
if self.transformer.config.patch_size_t is not None:
|
| 415 |
+
first_frame = image_latents[:, : image_latents.size(1) % self.transformer.config.patch_size_t, ...]
|
| 416 |
+
image_latents = torch.cat([first_frame, image_latents], dim=1)
|
| 417 |
+
|
| 418 |
+
if latents is None:
|
| 419 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 420 |
+
else:
|
| 421 |
+
latents = latents.to(device)
|
| 422 |
+
|
| 423 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 424 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 425 |
+
return latents, image_latents
|
| 426 |
+
|
| 427 |
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.decode_latents
|
| 428 |
+
def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
|
| 429 |
+
latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
|
| 430 |
+
latents = 1 / self.vae_scaling_factor_image * latents
|
| 431 |
+
|
| 432 |
+
frames = self.vae.decode(latents).sample
|
| 433 |
+
return frames
|
| 434 |
+
|
| 435 |
+
# Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps
|
| 436 |
+
def get_timesteps(self, num_inference_steps, timesteps, strength, device):
|
| 437 |
+
# get the original timestep using init_timestep
|
| 438 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 439 |
+
|
| 440 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 441 |
+
timesteps = timesteps[t_start * self.scheduler.order :]
|
| 442 |
+
|
| 443 |
+
return timesteps, num_inference_steps - t_start
|
| 444 |
+
|
| 445 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 446 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 447 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 448 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 449 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 450 |
+
# and should be between [0, 1]
|
| 451 |
+
|
| 452 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 453 |
+
extra_step_kwargs = {}
|
| 454 |
+
if accepts_eta:
|
| 455 |
+
extra_step_kwargs["eta"] = eta
|
| 456 |
+
|
| 457 |
+
# check if the scheduler accepts generator
|
| 458 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 459 |
+
if accepts_generator:
|
| 460 |
+
extra_step_kwargs["generator"] = generator
|
| 461 |
+
return extra_step_kwargs
|
| 462 |
+
|
| 463 |
+
def check_inputs(
|
| 464 |
+
self,
|
| 465 |
+
image,
|
| 466 |
+
prompt,
|
| 467 |
+
height,
|
| 468 |
+
width,
|
| 469 |
+
negative_prompt,
|
| 470 |
+
callback_on_step_end_tensor_inputs,
|
| 471 |
+
latents=None,
|
| 472 |
+
prompt_embeds=None,
|
| 473 |
+
negative_prompt_embeds=None,
|
| 474 |
+
):
|
| 475 |
+
if (
|
| 476 |
+
not isinstance(image, torch.Tensor)
|
| 477 |
+
and not isinstance(image, PIL.Image.Image)
|
| 478 |
+
and not isinstance(image, list)
|
| 479 |
+
):
|
| 480 |
+
raise ValueError(
|
| 481 |
+
"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
|
| 482 |
+
f" {type(image)}"
|
| 483 |
+
)
|
| 484 |
+
|
| 485 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 486 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 487 |
+
|
| 488 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 489 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 490 |
+
):
|
| 491 |
+
raise ValueError(
|
| 492 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 493 |
+
)
|
| 494 |
+
if prompt is not None and prompt_embeds is not None:
|
| 495 |
+
raise ValueError(
|
| 496 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 497 |
+
" only forward one of the two."
|
| 498 |
+
)
|
| 499 |
+
elif prompt is None and prompt_embeds is None:
|
| 500 |
+
raise ValueError(
|
| 501 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 502 |
+
)
|
| 503 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 504 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 505 |
+
|
| 506 |
+
if prompt is not None and negative_prompt_embeds is not None:
|
| 507 |
+
raise ValueError(
|
| 508 |
+
f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
|
| 509 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 513 |
+
raise ValueError(
|
| 514 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 515 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 516 |
+
)
|
| 517 |
+
|
| 518 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 519 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 520 |
+
raise ValueError(
|
| 521 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 522 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 523 |
+
f" {negative_prompt_embeds.shape}."
|
| 524 |
+
)
|
| 525 |
+
|
| 526 |
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.fuse_qkv_projections
|
| 527 |
+
def fuse_qkv_projections(self) -> None:
|
| 528 |
+
r"""Enables fused QKV projections."""
|
| 529 |
+
self.fusing_transformer = True
|
| 530 |
+
self.transformer.fuse_qkv_projections()
|
| 531 |
+
|
| 532 |
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.unfuse_qkv_projections
|
| 533 |
+
def unfuse_qkv_projections(self) -> None:
|
| 534 |
+
r"""Disable QKV projection fusion if enabled."""
|
| 535 |
+
if not self.fusing_transformer:
|
| 536 |
+
logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
|
| 537 |
+
else:
|
| 538 |
+
self.transformer.unfuse_qkv_projections()
|
| 539 |
+
self.fusing_transformer = False
|
| 540 |
+
|
| 541 |
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._prepare_rotary_positional_embeddings
|
| 542 |
+
def _prepare_rotary_positional_embeddings(
|
| 543 |
+
self,
|
| 544 |
+
height: int,
|
| 545 |
+
width: int,
|
| 546 |
+
num_frames: int,
|
| 547 |
+
device: torch.device,
|
| 548 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 549 |
+
grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
|
| 550 |
+
grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
|
| 551 |
+
|
| 552 |
+
p = self.transformer.config.patch_size
|
| 553 |
+
p_t = self.transformer.config.patch_size_t
|
| 554 |
+
|
| 555 |
+
base_size_width = self.transformer.config.sample_width // p
|
| 556 |
+
base_size_height = self.transformer.config.sample_height // p
|
| 557 |
+
|
| 558 |
+
if p_t is None:
|
| 559 |
+
# CogVideoX 1.0
|
| 560 |
+
grid_crops_coords = get_resize_crop_region_for_grid(
|
| 561 |
+
(grid_height, grid_width), base_size_width, base_size_height
|
| 562 |
+
)
|
| 563 |
+
freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
|
| 564 |
+
embed_dim=self.transformer.config.attention_head_dim,
|
| 565 |
+
crops_coords=grid_crops_coords,
|
| 566 |
+
grid_size=(grid_height, grid_width),
|
| 567 |
+
temporal_size=num_frames,
|
| 568 |
+
device=device,
|
| 569 |
+
)
|
| 570 |
+
else:
|
| 571 |
+
# CogVideoX 1.5
|
| 572 |
+
base_num_frames = (num_frames + p_t - 1) // p_t
|
| 573 |
+
|
| 574 |
+
freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
|
| 575 |
+
embed_dim=self.transformer.config.attention_head_dim,
|
| 576 |
+
crops_coords=None,
|
| 577 |
+
grid_size=(grid_height, grid_width),
|
| 578 |
+
temporal_size=base_num_frames,
|
| 579 |
+
grid_type="slice",
|
| 580 |
+
max_size=(base_size_height, base_size_width),
|
| 581 |
+
device=device,
|
| 582 |
+
)
|
| 583 |
+
|
| 584 |
+
return freqs_cos, freqs_sin
|
| 585 |
+
|
| 586 |
+
def prepare_lp(
|
| 587 |
+
self,
|
| 588 |
+
# --- Filter Selection & Strength ---
|
| 589 |
+
lp_filter_type: str,
|
| 590 |
+
lp_blur_sigma: float,
|
| 591 |
+
lp_blur_kernel_size: float,
|
| 592 |
+
lp_resize_factor: float,
|
| 593 |
+
# --- Contextual Info ---
|
| 594 |
+
generator: torch.Generator,
|
| 595 |
+
num_frames: int,
|
| 596 |
+
use_low_pass_guidance: bool,
|
| 597 |
+
lp_filter_in_latent: bool,
|
| 598 |
+
# --- Inputs to filter ---
|
| 599 |
+
orig_image_latents: torch.Tensor, # Shape [B, F_padded, C, H, W]
|
| 600 |
+
orig_image_tensor: torch.Tensor # Shape [B, C, H_orig, W_orig] (preprocessed RGB)
|
| 601 |
+
) -> torch.Tensor | None:
|
| 602 |
+
"""
|
| 603 |
+
Prepares a low-pass filtered version of the initial image condition for guidance. (CogVideoX)
|
| 604 |
+
The resulting low-pass filtered latents are padded to match the required number of frames and temporal
|
| 605 |
+
patch size for the transformer model.
|
| 606 |
+
|
| 607 |
+
Args:
|
| 608 |
+
lp_filter_type (`str`): The type of low-pass filter to apply, e.g., 'gaussian_blur', 'down_up'.
|
| 609 |
+
lp_blur_sigma (`float`): The sigma value for the Gaussian blur filter.
|
| 610 |
+
lp_blur_kernel_size (`float`): The kernel size for the Gaussian blur filter.
|
| 611 |
+
lp_resize_factor (`float`): The resizing factor for the 'down_up' filter.
|
| 612 |
+
generator (`torch.Generator`): A random generator, used for VAE sampling when filtering in image space.
|
| 613 |
+
num_frames (`int`): The target number of frames for the final video, used to determine padding.
|
| 614 |
+
use_low_pass_guidance (`bool`): If `False`, the function returns `None` immediately.
|
| 615 |
+
lp_filter_in_latent (`bool`): If `True`, filtering is applied in latent space. Otherwise, in image space.
|
| 616 |
+
orig_image_latents (`torch.Tensor`): The VAE-encoded latents of the original image. Used when
|
| 617 |
+
`lp_filter_in_latent` is `True`. Shape: `(batch_size, num_frames_padded, channels, height, width)`.
|
| 618 |
+
orig_image_tensor (`torch.Tensor`): The preprocessed original image tensor (RGB). Used when
|
| 619 |
+
`lp_filter_in_latent` is `False`. Shape: `(batch_size, channels, height, width)`.
|
| 620 |
+
|
| 621 |
+
Returns:
|
| 622 |
+
`Optional[torch.Tensor]`: A tensor containing the low-pass filtered image latents, correctly shaped and
|
| 623 |
+
padded for the transformer, or `None` if `use_low_pass_guidance` is `False`.
|
| 624 |
+
"""
|
| 625 |
+
if not use_low_pass_guidance:
|
| 626 |
+
return None
|
| 627 |
+
|
| 628 |
+
if not lp_filter_in_latent:
|
| 629 |
+
# --- Filter in Image (RGB) Space ---
|
| 630 |
+
|
| 631 |
+
# 1. Apply the filter to the original 4D RGB tensor.
|
| 632 |
+
image_lp = lp_utils.apply_low_pass_filter(
|
| 633 |
+
orig_image_tensor, # Should be [B, C, H, W]
|
| 634 |
+
filter_type=lp_filter_type,
|
| 635 |
+
blur_sigma=lp_blur_sigma,
|
| 636 |
+
blur_kernel_size=lp_blur_kernel_size,
|
| 637 |
+
resize_factor=lp_resize_factor,
|
| 638 |
+
)
|
| 639 |
+
# image_lp: [B, C, H, W]
|
| 640 |
+
|
| 641 |
+
# 2. Add the frame dimension BEFORE encoding
|
| 642 |
+
image_lp_vae_input = image_lp.unsqueeze(2) # Shape: [B, C, 1, H, W]
|
| 643 |
+
|
| 644 |
+
# 3. Encode the 5D tensor
|
| 645 |
+
encoded_lp = self.vae.encode(image_lp_vae_input).latent_dist.sample(generator=generator)
|
| 646 |
+
|
| 647 |
+
if not self.vae.config.invert_scale_latents:
|
| 648 |
+
encoded_lp = self.vae_scaling_factor_image * encoded_lp
|
| 649 |
+
else:
|
| 650 |
+
encoded_lp = 1 / self.vae_scaling_factor_image * encoded_lp
|
| 651 |
+
|
| 652 |
+
encoded_lp = encoded_lp.permute(0, 2, 1, 3, 4)
|
| 653 |
+
|
| 654 |
+
# Calculate required latent frames based on output num_frames
|
| 655 |
+
padded_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
|
| 656 |
+
|
| 657 |
+
# Pad with zeros if needed
|
| 658 |
+
current_frames = encoded_lp.shape[1] # Should be 1 here
|
| 659 |
+
if padded_frames > current_frames:
|
| 660 |
+
batch_size, _, latent_channels, latent_height, latent_width = encoded_lp.shape
|
| 661 |
+
padding_shape = (
|
| 662 |
+
batch_size,
|
| 663 |
+
padded_frames - current_frames,
|
| 664 |
+
latent_channels,
|
| 665 |
+
latent_height,
|
| 666 |
+
latent_width,
|
| 667 |
+
)
|
| 668 |
+
lp_padding = torch.zeros(padding_shape, device=encoded_lp.device, dtype=encoded_lp.dtype)
|
| 669 |
+
lp_image_latents = torch.cat([encoded_lp, lp_padding], dim=1)
|
| 670 |
+
else:
|
| 671 |
+
lp_image_latents = encoded_lp[:, :padded_frames, ...]
|
| 672 |
+
|
| 673 |
+
if self.transformer.config.patch_size_t is not None:
|
| 674 |
+
remainder = lp_image_latents.size(1) % self.transformer.config.patch_size_t
|
| 675 |
+
if remainder != 0:
|
| 676 |
+
num_to_prepend = self.transformer.config.patch_size_t - remainder
|
| 677 |
+
# Ensure num_to_prepend doesn't exceed available frames if F=1 initially
|
| 678 |
+
num_to_prepend = min(num_to_prepend, lp_image_latents.shape[1])
|
| 679 |
+
first_frames_to_prepend = lp_image_latents[:, :num_to_prepend, ...]
|
| 680 |
+
lp_image_latents = torch.cat([first_frames_to_prepend, lp_image_latents], dim=1)
|
| 681 |
+
|
| 682 |
+
else:
|
| 683 |
+
# --- Filter in Latent Space ---
|
| 684 |
+
orig_image_latents_perm = orig_image_latents.permute(0, 2, 1, 3, 4).contiguous()
|
| 685 |
+
lp_image_latents = lp_utils.apply_low_pass_filter(
|
| 686 |
+
orig_image_latents_perm, # Input has shape [B, C, F_padded, H, W]
|
| 687 |
+
filter_type=lp_filter_type,
|
| 688 |
+
blur_sigma=lp_blur_sigma,
|
| 689 |
+
blur_kernel_size=lp_blur_kernel_size,
|
| 690 |
+
resize_factor=lp_resize_factor,
|
| 691 |
+
)
|
| 692 |
+
lp_image_latents = lp_image_latents.permute(0, 2, 1, 3, 4).contiguous()
|
| 693 |
+
if self.transformer.config.patch_size_t is not None:
|
| 694 |
+
remainder = lp_image_latents.size(1) % self.transformer.config.patch_size_t
|
| 695 |
+
if remainder != 0:
|
| 696 |
+
num_to_prepend = self.transformer.config.patch_size_t - remainder
|
| 697 |
+
num_to_prepend = min(num_to_prepend, lp_image_latents.shape[1])
|
| 698 |
+
first_frames_to_prepend = lp_image_latents[:, :num_to_prepend, ...]
|
| 699 |
+
lp_image_latents = torch.cat([first_frames_to_prepend, lp_image_latents], dim=1)
|
| 700 |
+
|
| 701 |
+
lp_image_latents = lp_image_latents.to(dtype=orig_image_latents.dtype)
|
| 702 |
+
|
| 703 |
+
return lp_image_latents
|
| 704 |
+
|
| 705 |
+
    @property
    def guidance_scale(self):
        # Classifier-free guidance scale stored by `__call__` at the start of inference.
        return self._guidance_scale
|
| 708 |
+
|
| 709 |
+
    @property
    def num_timesteps(self):
        # Length of the timestep schedule, set in `__call__` after `retrieve_timesteps`.
        return self._num_timesteps
|
| 712 |
+
|
| 713 |
+
    @property
    def attention_kwargs(self):
        # Extra kwargs forwarded to the attention processors, stored by `__call__`.
        return self._attention_kwargs
|
| 716 |
+
|
| 717 |
+
    @property
    def current_timestep(self):
        # Timestep currently being denoised; None outside the denoising loop.
        return self._current_timestep
|
| 720 |
+
|
| 721 |
+
    @property
    def interrupt(self):
        # Cooperative-cancellation flag checked each denoising step in `__call__`.
        return self._interrupt
|
| 724 |
+
|
| 725 |
+
@torch.no_grad()
|
| 726 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 727 |
+
def __call__(
|
| 728 |
+
self,
|
| 729 |
+
image: PipelineImageInput,
|
| 730 |
+
prompt: Optional[Union[str, List[str]]] = None,
|
| 731 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 732 |
+
height: Optional[int] = None,
|
| 733 |
+
width: Optional[int] = None,
|
| 734 |
+
num_frames: int = 49,
|
| 735 |
+
num_inference_steps: int = 50,
|
| 736 |
+
timesteps: Optional[List[int]] = None,
|
| 737 |
+
guidance_scale: float = 6.0,
|
| 738 |
+
use_dynamic_cfg: bool = False,
|
| 739 |
+
num_videos_per_prompt: int = 1,
|
| 740 |
+
eta: float = 0.0,
|
| 741 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 742 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 743 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 744 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 745 |
+
output_type: str = "pil",
|
| 746 |
+
return_dict: bool = True,
|
| 747 |
+
attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 748 |
+
callback_on_step_end: Optional[
|
| 749 |
+
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
|
| 750 |
+
] = None,
|
| 751 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 752 |
+
max_sequence_length: int = 226,
|
| 753 |
+
use_low_pass_guidance: bool = False,
|
| 754 |
+
lp_filter_type: str = "none", # {'gaussian_blur', 'down_up'}
|
| 755 |
+
lp_filter_in_latent: bool = False, # When set to True, low-pass filter is done after encoder. If False, low-pass filter is applied to image directly before encoder.
|
| 756 |
+
lp_blur_sigma: float = 15.0, # Used with 'gaussian_blur'. Gaussian filter sigma value.
|
| 757 |
+
lp_blur_kernel_size: float = 0.02734375, # Used with 'gaussian_blur'. Gaussian filter size. When set to int, used directly as kernel size. When set to float, H * `lp_blur_kernel_size` is used as kernel size.
|
| 758 |
+
lp_resize_factor: float = 0.25, # Used with 'down_up'. Image is bilinearly downsized to (`lp_resize_factor` * WIDTH, `lp_resize_factor` * HEIGHT) and then back to original.
|
| 759 |
+
|
| 760 |
+
lp_strength_schedule_type: str = "none", # Scheduling type for low-pass filtering strength. Options: {"none", "linear", "interval", "exponential"}
|
| 761 |
+
schedule_blur_kernel_size: bool = False, # If True, schedule blur kernel size as well. Otherwise, fix to initial value.
|
| 762 |
+
|
| 763 |
+
# --- Constant Interval Scheduling Params for LP Strength ---
|
| 764 |
+
schedule_interval_start_time: float = 0.0, # Starting timestep for interval scheduling
|
| 765 |
+
schedule_interval_end_time: float = 0.05, # Ending timestep for interval scheduling
|
| 766 |
+
|
| 767 |
+
# --- Linear Scheduling Params for LP Strength ---
|
| 768 |
+
schedule_linear_start_weight: float = 1.0, # Starting LP weight for linear scheduling at t=T (step 0)
|
| 769 |
+
schedule_linear_end_weight: float = 0.0, # Ending LP weight for linear scheduling at t=T * schedule_linear_end_time
|
| 770 |
+
schedule_linear_end_time: float = 0.5, # Timestep fraction at which schedule_linear_end is reached
|
| 771 |
+
|
| 772 |
+
# --- Exponential Scheduling Params for LP Strength ---
|
| 773 |
+
schedule_exp_decay_rate: float = 10.0, # Decay rate for 'exponential' schedule. Higher values decay faster. Strength = exp(-rate * time_fraction).
|
| 774 |
+
) -> Union[CogVideoXPipelineOutput, Tuple]:
|
| 775 |
+
"""
|
| 776 |
+
Function invoked when calling the pipeline for generation.
|
| 777 |
+
|
| 778 |
+
Args:
|
| 779 |
+
image (`PipelineImageInput`):
|
| 780 |
+
The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
|
| 781 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 782 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 783 |
+
instead.
|
| 784 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 785 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 786 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 787 |
+
less than `1`).
|
| 788 |
+
height (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
|
| 789 |
+
The height in pixels of the generated image. This is set to 480 by default for the best results.
|
| 790 |
+
width (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
|
| 791 |
+
The width in pixels of the generated image. This is set to 720 by default for the best results.
|
| 792 |
+
num_frames (`int`, defaults to `48`):
|
| 793 |
+
Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
|
| 794 |
+
contain 1 extra frame because CogVideoX is conditioned with (num_seconds * fps + 1) frames where
|
| 795 |
+
num_seconds is 6 and fps is 8. However, since videos can be saved at any fps, the only condition that
|
| 796 |
+
needs to be satisfied is that of divisibility mentioned above.
|
| 797 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 798 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 799 |
+
expense of slower inference.
|
| 800 |
+
timesteps (`List[int]`, *optional*):
|
| 801 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 802 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 803 |
+
passed will be used. Must be in descending order.
|
| 804 |
+
guidance_scale (`float`, *optional*, defaults to 7.0):
|
| 805 |
+
Guidance scale as defined in [Classifier-Free Diffusion
|
| 806 |
+
Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
|
| 807 |
+
of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
|
| 808 |
+
`guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
|
| 809 |
+
the text `prompt`, usually at the expense of lower image quality.
|
| 810 |
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
| 811 |
+
The number of videos to generate per prompt.
|
| 812 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 813 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 814 |
+
to make generation deterministic.
|
| 815 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 816 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 817 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 818 |
+
tensor will ge generated by sampling using the supplied random `generator`.
|
| 819 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 820 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 821 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 822 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 823 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 824 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 825 |
+
argument.
|
| 826 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 827 |
+
The output format of the generate image. Choose between
|
| 828 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 829 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 830 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 831 |
+
of a plain tuple.
|
| 832 |
+
attention_kwargs (`dict`, *optional*):
|
| 833 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 834 |
+
`self.processor` in
|
| 835 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 836 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 837 |
+
A function that calls at the end of each denoising steps during the inference. The function is called
|
| 838 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 839 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 840 |
+
`callback_on_step_end_tensor_inputs`.
|
| 841 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 842 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 843 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 844 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 845 |
+
max_sequence_length (`int`, defaults to `226`):
|
| 846 |
+
Maximum sequence length in encoded prompt. Must be consistent with
|
| 847 |
+
`self.transformer.config.max_text_seq_length` otherwise may lead to poor results.
|
| 848 |
+
use_low_pass_guidance (`bool`, *optional*, defaults to `False`):
|
| 849 |
+
Whether to use low-pass guidance. This can help to improve the temporal consistency of the generated
|
| 850 |
+
video.
|
| 851 |
+
lp_filter_type (`str`, *optional*, defaults to `"none"`):
|
| 852 |
+
The type of low-pass filter to apply. Can be one of `gaussian_blur` or `down_up`.
|
| 853 |
+
lp_filter_in_latent (`bool`, *optional*, defaults to `False`):
|
| 854 |
+
If `True`, the low-pass filter is applied to the latent representation of the image. If `False`, it is
|
| 855 |
+
applied to the image in pixel space before encoding.
|
| 856 |
+
lp_blur_sigma (`float`, *optional*, defaults to `15.0`):
|
| 857 |
+
The sigma value for the Gaussian blur filter. Only used if `lp_filter_type` is `gaussian_blur`.
|
| 858 |
+
lp_blur_kernel_size (`float`, *optional*, defaults to `0.02734375`):
|
| 859 |
+
The kernel size for the Gaussian blur filter. If an `int`, it's used directly. If a `float`, the kernel
|
| 860 |
+
size is calculated as `height * lp_blur_kernel_size`. Only used if `lp_filter_type` is `gaussian_blur`.
|
| 861 |
+
lp_resize_factor (`float`, *optional*, defaults to `0.25`):
|
| 862 |
+
The resize factor for the down-sampling and up-sampling filter. Only used if `lp_filter_type` is
|
| 863 |
+
`down_up`.
|
| 864 |
+
lp_strength_schedule_type (`str`, *optional*, defaults to `"none"`):
|
| 865 |
+
The scheduling type for the low-pass filter strength. Can be one of `none`, `linear`, `interval`, or
|
| 866 |
+
`exponential`.
|
| 867 |
+
schedule_blur_kernel_size (`bool`, *optional*, defaults to `False`):
|
| 868 |
+
If `True`, the blur kernel size is also scheduled along with the strength. Otherwise, it remains fixed.
|
| 869 |
+
schedule_interval_start_time (`float`, *optional*, defaults to `0.0`):
|
| 870 |
+
The starting timestep fraction for interval scheduling. Only used if `lp_strength_schedule_type` is
|
| 871 |
+
`interval`.
|
| 872 |
+
schedule_interval_end_time (`float`, *optional*, defaults to `0.05`):
|
| 873 |
+
The ending timestep fraction for interval scheduling. Only used if `lp_strength_schedule_type` is
|
| 874 |
+
`interval`.
|
| 875 |
+
schedule_linear_start_weight (`float`, *optional*, defaults to `1.0`):
|
| 876 |
+
The starting weight for the low-pass filter strength in a linear schedule. Corresponds to the first
|
| 877 |
+
timestep. Only used if `lp_strength_schedule_type` is `linear`.
|
| 878 |
+
schedule_linear_end_weight (`float`, *optional*, defaults to `0.0`):
|
| 879 |
+
The ending weight for the low-pass filter strength in a linear schedule. Only used if
|
| 880 |
+
`lp_strength_schedule_type` is `linear`.
|
| 881 |
+
schedule_linear_end_time (`float`, *optional*, defaults to `0.5`):
|
| 882 |
+
The timestep fraction at which `schedule_linear_end_weight` is reached in a linear schedule. Only used
|
| 883 |
+
if `lp_strength_schedule_type` is `linear`.
|
| 884 |
+
schedule_exp_decay_rate (`float`, *optional*, defaults to `10.0`):
|
| 885 |
+
The decay rate for the exponential schedule. Higher values lead to faster decay. Only used if
|
| 886 |
+
`lp_strength_schedule_type` is `exponential`.
|
| 887 |
+
|
| 888 |
+
Examples:
|
| 889 |
+
|
| 890 |
+
Returns:
|
| 891 |
+
[`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] or `tuple`:
|
| 892 |
+
[`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a
|
| 893 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 894 |
+
"""
|
| 895 |
+
|
| 896 |
+
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
|
| 897 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 898 |
+
|
| 899 |
+
height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
|
| 900 |
+
width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
|
| 901 |
+
num_frames = num_frames or self.transformer.config.sample_frames
|
| 902 |
+
|
| 903 |
+
num_videos_per_prompt = 1
|
| 904 |
+
|
| 905 |
+
# 1. Check inputs. Raise error if not correct
|
| 906 |
+
self.check_inputs(
|
| 907 |
+
image=image,
|
| 908 |
+
prompt=prompt,
|
| 909 |
+
height=height,
|
| 910 |
+
width=width,
|
| 911 |
+
negative_prompt=negative_prompt,
|
| 912 |
+
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
|
| 913 |
+
latents=latents,
|
| 914 |
+
prompt_embeds=prompt_embeds,
|
| 915 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 916 |
+
)
|
| 917 |
+
self._guidance_scale = guidance_scale
|
| 918 |
+
self._current_timestep = None
|
| 919 |
+
self._attention_kwargs = attention_kwargs
|
| 920 |
+
self._interrupt = False
|
| 921 |
+
|
| 922 |
+
# 2. Default call parameters
|
| 923 |
+
if prompt is not None and isinstance(prompt, str):
|
| 924 |
+
batch_size = 1
|
| 925 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 926 |
+
batch_size = len(prompt)
|
| 927 |
+
else:
|
| 928 |
+
batch_size = prompt_embeds.shape[0]
|
| 929 |
+
|
| 930 |
+
device = self._execution_device
|
| 931 |
+
|
| 932 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 933 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 934 |
+
# corresponds to doing no classifier free guidance.
|
| 935 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 936 |
+
|
| 937 |
+
# 3. Encode input prompt
|
| 938 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 939 |
+
prompt=prompt,
|
| 940 |
+
negative_prompt=negative_prompt,
|
| 941 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 942 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 943 |
+
prompt_embeds=prompt_embeds,
|
| 944 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 945 |
+
max_sequence_length=max_sequence_length,
|
| 946 |
+
device=device,
|
| 947 |
+
)
|
| 948 |
+
if do_classifier_free_guidance and use_low_pass_guidance:
|
| 949 |
+
prompt_embeds_orig = prompt_embeds
|
| 950 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, negative_prompt_embeds, prompt_embeds_orig], dim=0)
|
| 951 |
+
prompt_embeds_init = torch.cat([negative_prompt_embeds, prompt_embeds_orig], dim=0)
|
| 952 |
+
elif do_classifier_free_guidance:
|
| 953 |
+
prompt_embeds_orig = prompt_embeds
|
| 954 |
+
prompt_embeds_init = torch.cat([negative_prompt_embeds, prompt_embeds_orig], dim=0)
|
| 955 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds_orig], dim=0)
|
| 956 |
+
|
| 957 |
+
# 4. Prepare timesteps
|
| 958 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 959 |
+
self._num_timesteps = len(timesteps)
|
| 960 |
+
|
| 961 |
+
# 5. Prepare latents
|
| 962 |
+
latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
|
| 963 |
+
# For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t
|
| 964 |
+
patch_size_t = self.transformer.config.patch_size_t
|
| 965 |
+
additional_frames = 0
|
| 966 |
+
if patch_size_t is not None and latent_frames % patch_size_t != 0:
|
| 967 |
+
additional_frames = patch_size_t - latent_frames % patch_size_t
|
| 968 |
+
num_frames += additional_frames * self.vae_scale_factor_temporal
|
| 969 |
+
image_tensor = self.video_processor.preprocess(image, height=height, width=width).to(
|
| 970 |
+
device, dtype=prompt_embeds.dtype
|
| 971 |
+
)
|
| 972 |
+
|
| 973 |
+
latent_channels = self.transformer.config.in_channels // 2
|
| 974 |
+
latents, image_latents = self.prepare_latents(
|
| 975 |
+
image_tensor,
|
| 976 |
+
batch_size * num_videos_per_prompt,
|
| 977 |
+
latent_channels,
|
| 978 |
+
num_frames,
|
| 979 |
+
height,
|
| 980 |
+
width,
|
| 981 |
+
prompt_embeds.dtype,
|
| 982 |
+
device,
|
| 983 |
+
generator,
|
| 984 |
+
latents,
|
| 985 |
+
)
|
| 986 |
+
|
| 987 |
+
# 6. Prepare extra step kwargs
|
| 988 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 989 |
+
|
| 990 |
+
# 7. Create rotary embeds if required
|
| 991 |
+
image_rotary_emb = (
|
| 992 |
+
self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
|
| 993 |
+
if self.transformer.config.use_rotary_positional_embeddings
|
| 994 |
+
else None
|
| 995 |
+
)
|
| 996 |
+
|
| 997 |
+
# 8. Create ofs embeds if required
|
| 998 |
+
ofs_emb = None if self.transformer.config.ofs_embed_dim is None else latents.new_full((1,), fill_value=2.0)
|
| 999 |
+
|
| 1000 |
+
# 9. Denoising loop
|
| 1001 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1002 |
+
|
| 1003 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1004 |
+
old_pred_original_sample = None
|
| 1005 |
+
for i, t in enumerate(timesteps):
|
| 1006 |
+
if self.interrupt:
|
| 1007 |
+
continue
|
| 1008 |
+
|
| 1009 |
+
self._current_timestep = t
|
| 1010 |
+
|
| 1011 |
+
if not use_low_pass_guidance:
|
| 1012 |
+
two_pass = True
|
| 1013 |
+
|
| 1014 |
+
# Low-pass version input
|
| 1015 |
+
if do_classifier_free_guidance and use_low_pass_guidance:
|
| 1016 |
+
# Timestep scheduled low-pass filter strength ([0, 1] range)
|
| 1017 |
+
lp_strength = lp_utils.get_lp_strength(
|
| 1018 |
+
step_index=i,
|
| 1019 |
+
total_steps=num_inference_steps,
|
| 1020 |
+
lp_strength_schedule_type=lp_strength_schedule_type,
|
| 1021 |
+
schedule_interval_start_time=schedule_interval_start_time,
|
| 1022 |
+
schedule_interval_end_time=schedule_interval_end_time,
|
| 1023 |
+
schedule_linear_start_weight=schedule_linear_start_weight,
|
| 1024 |
+
schedule_linear_end_weight=schedule_linear_end_weight,
|
| 1025 |
+
schedule_linear_end_time=schedule_linear_end_time,
|
| 1026 |
+
schedule_exp_decay_rate=schedule_exp_decay_rate,
|
| 1027 |
+
)
|
| 1028 |
+
|
| 1029 |
+
two_pass = (lp_strength == 0 or not use_low_pass_guidance)
|
| 1030 |
+
|
| 1031 |
+
if lp_strength_schedule_type == 'exponential' and lp_strength < 0.1: # Rounding for exponential (for performance)
|
| 1032 |
+
two_pass = True
|
| 1033 |
+
|
| 1034 |
+
modulated_lp_blur_sigma = lp_blur_sigma * lp_strength
|
| 1035 |
+
if schedule_blur_kernel_size:
|
| 1036 |
+
modulated_lp_blur_kernel_size = lp_blur_kernel_size * lp_strength # Kernel size also scales down
|
| 1037 |
+
else:
|
| 1038 |
+
modulated_lp_blur_kernel_size = lp_blur_kernel_size
|
| 1039 |
+
|
| 1040 |
+
modulated_lp_resize_factor = 1.0 - (1.0 - lp_resize_factor) * lp_strength
|
| 1041 |
+
|
| 1042 |
+
# low-pass filter
|
| 1043 |
+
lp_image_latents = self.prepare_lp(
|
| 1044 |
+
# --- Filter Selection & Strength (Modulated) ---
|
| 1045 |
+
lp_filter_type=lp_filter_type,
|
| 1046 |
+
lp_blur_sigma=modulated_lp_blur_sigma,
|
| 1047 |
+
lp_blur_kernel_size=modulated_lp_blur_kernel_size,
|
| 1048 |
+
lp_resize_factor=modulated_lp_resize_factor,
|
| 1049 |
+
# --- Contextual Info ---
|
| 1050 |
+
generator=generator,
|
| 1051 |
+
num_frames=num_frames,
|
| 1052 |
+
use_low_pass_guidance=use_low_pass_guidance,
|
| 1053 |
+
lp_filter_in_latent=lp_filter_in_latent,
|
| 1054 |
+
# --- Inputs to filter ---
|
| 1055 |
+
orig_image_latents=image_latents,
|
| 1056 |
+
orig_image_tensor=image_tensor
|
| 1057 |
+
)
|
| 1058 |
+
|
| 1059 |
+
# latent_model_input = torch.cat([latents] * 2)
|
| 1060 |
+
if two_pass:
|
| 1061 |
+
latent_model_input = torch.cat([latents] * 2)
|
| 1062 |
+
else:
|
| 1063 |
+
latent_model_input = torch.cat([latents] * 3)
|
| 1064 |
+
|
| 1065 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1066 |
+
# latent_model_input = torch.cat([latent_model_input, torch.cat([lp_image_latents] * 2, dim=0)], dim=2)
|
| 1067 |
+
if two_pass:
|
| 1068 |
+
latent_model_input = torch.cat([latent_model_input, torch.cat([lp_image_latents] * 2, dim=0)], dim=2)
|
| 1069 |
+
else:
|
| 1070 |
+
latent_model_input = torch.cat([latent_model_input, torch.cat([image_latents,lp_image_latents,lp_image_latents], dim=0)], dim=2)
|
| 1071 |
+
|
| 1072 |
+
elif do_classifier_free_guidance:
|
| 1073 |
+
latent_model_input = torch.cat([latents] * 2)
|
| 1074 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1075 |
+
latent_model_input = torch.cat([latent_model_input, torch.cat([image_latents] * 2, dim=0)], dim=2)
|
| 1076 |
+
else:
|
| 1077 |
+
latent_model_input = latents
|
| 1078 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1079 |
+
latent_model_input = torch.cat([latent_model_input, image_latents], dim=2)
|
| 1080 |
+
|
| 1081 |
+
timestep = t.expand(latent_model_input.shape[0])
|
| 1082 |
+
noise_pred = self.transformer(
|
| 1083 |
+
hidden_states=latent_model_input,
|
| 1084 |
+
encoder_hidden_states=prompt_embeds_init if two_pass else prompt_embeds,
|
| 1085 |
+
timestep=timestep,
|
| 1086 |
+
ofs=ofs_emb,
|
| 1087 |
+
image_rotary_emb=image_rotary_emb,
|
| 1088 |
+
attention_kwargs=attention_kwargs,
|
| 1089 |
+
return_dict=False,
|
| 1090 |
+
)[0]
|
| 1091 |
+
noise_pred = noise_pred.float()
|
| 1092 |
+
|
| 1093 |
+
# 12. Combine noise predictions with scheduled weights (triple pass)
|
| 1094 |
+
if use_low_pass_guidance and do_classifier_free_guidance:
|
| 1095 |
+
if two_pass:
|
| 1096 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1097 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1098 |
+
else:
|
| 1099 |
+
noise_pred_uncond_init, noise_pred_uncond, noise_pred_text = noise_pred.chunk(3)
|
| 1100 |
+
noise_pred = (
|
| 1101 |
+
noise_pred_uncond_init + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1102 |
+
)
|
| 1103 |
+
elif do_classifier_free_guidance:
|
| 1104 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1105 |
+
if use_dynamic_cfg:
|
| 1106 |
+
self._guidance_scale = 1 + guidance_scale * (
|
| 1107 |
+
(1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
|
| 1108 |
+
)
|
| 1109 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1110 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1111 |
+
if not isinstance(self.scheduler, CogVideoXDPMScheduler):
|
| 1112 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1113 |
+
else:
|
| 1114 |
+
latents, old_pred_original_sample = self.scheduler.step(
|
| 1115 |
+
noise_pred,
|
| 1116 |
+
old_pred_original_sample,
|
| 1117 |
+
t,
|
| 1118 |
+
timesteps[i - 1] if i > 0 else None,
|
| 1119 |
+
latents,
|
| 1120 |
+
**extra_step_kwargs,
|
| 1121 |
+
return_dict=False,
|
| 1122 |
+
)
|
| 1123 |
+
latents = latents.to(prompt_embeds.dtype)
|
| 1124 |
+
|
| 1125 |
+
# call the callback, if provided
|
| 1126 |
+
if callback_on_step_end is not None:
|
| 1127 |
+
callback_kwargs = {}
|
| 1128 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1129 |
+
callback_kwargs[k] = locals()[k]
|
| 1130 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1131 |
+
|
| 1132 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1133 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1134 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1135 |
+
|
| 1136 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1137 |
+
progress_bar.update()
|
| 1138 |
+
|
| 1139 |
+
if XLA_AVAILABLE:
|
| 1140 |
+
xm.mark_step()
|
| 1141 |
+
|
| 1142 |
+
self._current_timestep = None
|
| 1143 |
+
|
| 1144 |
+
if not output_type == "latent":
|
| 1145 |
+
# Discard any padding frames that were added for CogVideoX 1.5
|
| 1146 |
+
latents = latents[:, additional_frames:]
|
| 1147 |
+
video = self.decode_latents(latents)
|
| 1148 |
+
video = self.video_processor.postprocess_video(video=video, output_type=output_type)
|
| 1149 |
+
else:
|
| 1150 |
+
video = latents
|
| 1151 |
+
|
| 1152 |
+
# Offload all models
|
| 1153 |
+
self.maybe_free_model_hooks()
|
| 1154 |
+
|
| 1155 |
+
if not return_dict:
|
| 1156 |
+
return (video,)
|
| 1157 |
+
|
| 1158 |
+
return CogVideoXPipelineOutput(frames=video)
|
exp_code/1_benchmark/ALG/pipeline_hunyuan_video_image2video_lowpass.py
ADDED
|
@@ -0,0 +1,1308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HunyuanVideo Team and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import PIL.Image
|
| 20 |
+
import torch
|
| 21 |
+
from transformers import (
|
| 22 |
+
CLIPImageProcessor,
|
| 23 |
+
CLIPTextModel,
|
| 24 |
+
CLIPTokenizer,
|
| 25 |
+
LlamaTokenizerFast,
|
| 26 |
+
LlavaForConditionalGeneration,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
|
| 30 |
+
from diffusers.loaders import HunyuanVideoLoraLoaderMixin
|
| 31 |
+
from diffusers.models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel
|
| 32 |
+
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
|
| 33 |
+
from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
|
| 34 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 35 |
+
from diffusers.video_processor import VideoProcessor
|
| 36 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 37 |
+
from diffusers.pipelines.hunyuan_video.pipeline_output import HunyuanVideoPipelineOutput
|
| 38 |
+
import math
|
| 39 |
+
import torchvision.transforms.functional as tvF
|
| 40 |
+
import torch.nn.functional as F
|
| 41 |
+
|
| 42 |
+
import lp_utils
|
| 43 |
+
|
| 44 |
+
if is_torch_xla_available():
|
| 45 |
+
import torch_xla.core.xla_model as xm
|
| 46 |
+
|
| 47 |
+
XLA_AVAILABLE = True
|
| 48 |
+
else:
|
| 49 |
+
XLA_AVAILABLE = False
|
| 50 |
+
|
| 51 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
EXAMPLE_DOC_STRING = """
|
| 55 |
+
Examples:
|
| 56 |
+
```python
|
| 57 |
+
>>> import torch
|
| 58 |
+
>>> from diffusers import HunyuanVideoImageToVideoPipeline, HunyuanVideoTransformer3DModel
|
| 59 |
+
>>> from diffusers.utils import load_image, export_to_video
|
| 60 |
+
|
| 61 |
+
>>> # Available checkpoints: hunyuanvideo-community/HunyuanVideo-I2V, hunyuanvideo-community/HunyuanVideo-I2V-33ch
|
| 62 |
+
>>> model_id = "hunyuanvideo-community/HunyuanVideo-I2V"
|
| 63 |
+
>>> transformer = HunyuanVideoTransformer3DModel.from_pretrained(
|
| 64 |
+
... model_id, subfolder="transformer", torch_dtype=torch.bfloat16
|
| 65 |
+
... )
|
| 66 |
+
>>> pipe = HunyuanVideoImageToVideoPipeline.from_pretrained(
|
| 67 |
+
... model_id, transformer=transformer, torch_dtype=torch.float16
|
| 68 |
+
... )
|
| 69 |
+
>>> pipe.vae.enable_tiling()
|
| 70 |
+
>>> pipe.to("cuda")
|
| 71 |
+
|
| 72 |
+
>>> prompt = "A man with short gray hair plays a red electric guitar."
|
| 73 |
+
>>> image = load_image(
|
| 74 |
+
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
|
| 75 |
+
... )
|
| 76 |
+
|
| 77 |
+
>>> # If using hunyuanvideo-community/HunyuanVideo-I2V
|
| 78 |
+
>>> output = pipe(image=image, prompt=prompt, guidance_scale=6.0).frames[0]
|
| 79 |
+
|
| 80 |
+
>>> # If using hunyuanvideo-community/HunyuanVideo-I2V-33ch
|
| 81 |
+
>>> output = pipe(image=image, prompt=prompt, guidance_scale=1.0, true_cfg_scale=1.0).frames[0]
|
| 82 |
+
|
| 83 |
+
>>> export_to_video(output, "output.mp4", fps=15)
|
| 84 |
+
```
|
| 85 |
+
"""
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# Default Llava chat template used to caption/condition the video generation,
# plus index metadata describing where the image-placeholder tokens sit in the
# tokenized sequence (consumed by `_expand_input_ids_with_image_tokens` and the
# prompt-embedding code).
DEFAULT_PROMPT_TEMPLATE = {
    "template": (
        "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: "
        "1. The main content and theme of the video."
        "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
        "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
        "4. background environment, light, style and atmosphere."
        "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
        "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    ),
    # Number of leading template tokens to crop from the encoder output so only
    # the user-prompt portion remains (presumably tuned to this exact template
    # under the Llama tokenizer — verify if the template text changes).
    "crop_start": 103,
    # [image_emb_start, image_emb_end) is the token span occupied by the
    # expanded image embeddings; image_emb_len == image_emb_end - image_emb_start.
    "image_emb_start": 5,
    "image_emb_end": 581,
    "image_emb_len": 576,
    # Token id of the double-newline ("return return") token in the tokenizer.
    "double_return_token_id": 271,
}
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _expand_input_ids_with_image_tokens(
    text_input_ids,
    prompt_attention_mask,
    max_sequence_length,
    image_token_index,
    image_emb_len,
    image_emb_start,
    image_emb_end,
    pad_token_id,
):
    """Expand each `<image>` placeholder token into `image_emb_len` slots.

    Each occurrence of `image_token_index` in `text_input_ids` is widened into
    `image_emb_len` consecutive positions (so the image embeddings produced by
    the vision tower can be scattered into the text sequence later), while all
    non-image tokens are shifted to their new positions. Returns a dict with
    the expanded `input_ids`, a freshly built `attention_mask`, and
    `position_ids` derived from the cumulative attention mask.

    Args:
        text_input_ids: (batch, seq_len) token ids containing image placeholders.
        prompt_attention_mask: original attention mask; only its dtype/device
            are used to build the expanded mask.
        max_sequence_length: pre-expansion padded sequence length.
        image_token_index: token id marking an image placeholder.
        image_emb_len: number of slots each image placeholder expands to.
        image_emb_start / image_emb_end: fixed span that is overwritten with
            `image_token_index` after expansion (template-dependent offsets).
        pad_token_id: id used to fill positions not covered by real tokens.
    """
    # Boolean mask of the image placeholder positions and their per-sample count.
    special_image_token_mask = text_input_ids == image_token_index
    num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
    # Coordinates of every non-image token (these keep their identity, shifted).
    batch_indices, non_image_indices = torch.where(text_input_ids != image_token_index)

    # Each image token contributes (image_emb_len - 1) extra positions.
    max_expanded_length = max_sequence_length + (num_special_image_tokens.max() * (image_emb_len - 1))
    # Running offset: ordinary tokens advance by 1, image tokens by image_emb_len.
    new_token_positions = torch.cumsum((special_image_token_mask * (image_emb_len - 1) + 1), -1) - 1
    text_to_overwrite = new_token_positions[batch_indices, non_image_indices]

    # Start from an all-pad canvas and scatter the surviving text tokens into it.
    expanded_input_ids = torch.full(
        (text_input_ids.shape[0], max_expanded_length),
        pad_token_id,
        dtype=text_input_ids.dtype,
        device=text_input_ids.device,
    )
    expanded_input_ids[batch_indices, text_to_overwrite] = text_input_ids[batch_indices, non_image_indices]
    # Mark the fixed image span with the placeholder id.
    # NOTE(review): combining the advanced index `batch_indices` with a slice
    # assigns the same span once per non-image token — functionally a no-op
    # beyond the first write, but worth confirming it is intentional.
    expanded_input_ids[batch_indices, image_emb_start:image_emb_end] = image_token_index

    # Rebuild the attention mask from scratch: attend everywhere that is not pad.
    # NOTE(review): if pad_token_id can legitimately occur inside the prompt,
    # those positions would be masked out here — confirm the tokenizer never
    # emits pad_token_id as a content token.
    expanded_attention_mask = torch.zeros(
        (text_input_ids.shape[0], max_expanded_length),
        dtype=prompt_attention_mask.dtype,
        device=prompt_attention_mask.device,
    )
    attn_batch_indices, attention_indices = torch.where(expanded_input_ids != pad_token_id)
    expanded_attention_mask[attn_batch_indices, attention_indices] = 1.0
    expanded_attention_mask = expanded_attention_mask.to(prompt_attention_mask.dtype)
    # Position ids count attended tokens; masked positions are pinned to 1.
    position_ids = (expanded_attention_mask.cumsum(-1) - 1).masked_fill_((expanded_attention_mask == 0), 1)

    return {
        "input_ids": expanded_input_ids,
        "attention_mask": expanded_attention_mask,
        "position_ids": position_ids,
    }
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Configure `scheduler` and return its timestep schedule.

    Exactly one of `num_inference_steps`, `timesteps`, or `sigmas` drives the
    schedule. Custom `timesteps`/`sigmas` are forwarded to
    `scheduler.set_timesteps` (after checking the scheduler accepts them), and
    in that case the returned step count is recomputed from the schedule length.
    Extra `kwargs` are passed through to `scheduler.set_timesteps`.

    Returns:
        `Tuple[torch.Tensor, int]`: the timestep schedule and the number of
        inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    if timesteps is not None:
        # Custom timestep schedule: the scheduler must expose a `timesteps` kwarg.
        accepted = set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if "timesteps" not in accepted:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        # Custom sigma schedule: the scheduler must expose a `sigmas` kwarg.
        accepted = set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if "sigmas" not in accepted:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        # Plain step-count path; the caller-supplied count is returned as-is.
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    """Pull a latent tensor out of a VAE encoder output.

    Supports outputs carrying a `latent_dist` (sampled stochastically with
    `generator`, or deterministically via the distribution mode when
    `sample_mode == "argmax"`) as well as outputs exposing `latents` directly.

    Raises:
        AttributeError: if neither `latent_dist` nor `latents` is available.
    """
    has_distribution = hasattr(encoder_output, "latent_dist")
    if has_distribution and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    if has_distribution and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
class HunyuanVideoImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
|
| 225 |
+
r"""
|
| 226 |
+
Pipeline for image-to-video generation using HunyuanVideo.
|
| 227 |
+
|
| 228 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 229 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 230 |
+
|
| 231 |
+
Args:
|
| 232 |
+
text_encoder ([`LlavaForConditionalGeneration`]):
|
| 233 |
+
[Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
|
| 234 |
+
tokenizer (`LlamaTokenizer`):
|
| 235 |
+
Tokenizer from [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
|
| 236 |
+
transformer ([`HunyuanVideoTransformer3DModel`]):
|
| 237 |
+
Conditional Transformer to denoise the encoded image latents.
|
| 238 |
+
scheduler ([`FlowMatchEulerDiscreteScheduler`]):
|
| 239 |
+
A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
|
| 240 |
+
vae ([`AutoencoderKLHunyuanVideo`]):
|
| 241 |
+
Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
|
| 242 |
+
text_encoder_2 ([`CLIPTextModel`]):
|
| 243 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 244 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 245 |
+
tokenizer_2 (`CLIPTokenizer`):
|
| 246 |
+
Tokenizer of class
|
| 247 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 248 |
+
"""
|
| 249 |
+
|
| 250 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
|
| 251 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds"]
|
| 252 |
+
|
| 253 |
+
    def __init__(
        self,
        text_encoder: LlavaForConditionalGeneration,
        tokenizer: LlamaTokenizerFast,
        transformer: HunyuanVideoTransformer3DModel,
        vae: AutoencoderKLHunyuanVideo,
        scheduler: FlowMatchEulerDiscreteScheduler,
        text_encoder_2: CLIPTextModel,
        tokenizer_2: CLIPTokenizer,
        image_processor: CLIPImageProcessor,
    ):
        """Register the pipeline's sub-models and derive VAE scale factors.

        All components are registered through `DiffusionPipeline.register_modules`
        so they participate in saving/loading and device offloading.
        """
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            image_processor=image_processor,
        )

        # Fall back to hard-coded values when no VAE is registered; the
        # fallbacks presumably match the public HunyuanVideo checkpoint
        # configuration — confirm against the model card if they drift.
        self.vae_scaling_factor = self.vae.config.scaling_factor if getattr(self, "vae", None) else 0.476986
        self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4
        self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 8
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
|
| 281 |
+
|
| 282 |
+
    def _get_llama_prompt_embeds(
        self,
        image: torch.Tensor,
        prompt: Union[str, List[str]],
        prompt_template: Dict[str, Any],
        num_videos_per_prompt: int = 1,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
        max_sequence_length: int = 256,
        num_hidden_layers_to_skip: int = 2,
        image_embed_interleave: int = 2,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Encode `image` + templated `prompt` with the LLaVA/LLaMA encoder.

        The prompt is wrapped in `prompt_template["template"]`, tokenized, expanded with
        image placeholder tokens, and run through `self.text_encoder`; a hidden state
        `num_hidden_layers_to_skip` layers from the top is used as the embedding. The
        template scaffold and the assistant-turn tokens are then cropped out, and the
        image-token slice is (optionally subsampled by `image_embed_interleave`) and
        prepended to the text embeddings.

        Returns:
            `(prompt_embeds, prompt_attention_mask)`.

        NOTE(review): `num_videos_per_prompt` is accepted but never used in this body —
        any per-video duplication presumably happens in the caller; confirm.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        prompt = [prompt_template["template"].format(p) for p in prompt]

        crop_start = prompt_template.get("crop_start", None)

        # Template-specific token offsets; defaults match a 576-token image embedding
        # placed at positions [5, 581).
        image_emb_len = prompt_template.get("image_emb_len", 576)
        image_emb_start = prompt_template.get("image_emb_start", 5)
        image_emb_end = prompt_template.get("image_emb_end", 581)
        double_return_token_id = prompt_template.get("double_return_token_id", 271)

        if crop_start is None:
            # Derive the scaffold length by tokenizing the bare template once.
            prompt_template_input = self.tokenizer(
                prompt_template["template"],
                padding="max_length",
                return_tensors="pt",
                return_length=False,
                return_overflowing_tokens=False,
                return_attention_mask=False,
            )
            crop_start = prompt_template_input["input_ids"].shape[-1]
            # Remove <|start_header_id|>, <|end_header_id|>, assistant, <|eot_id|>, and placeholder {}
            crop_start -= 5

        # Extend the budget so the scaffold does not eat into the user prompt.
        max_sequence_length += crop_start
        text_inputs = self.tokenizer(
            prompt,
            max_length=max_sequence_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=True,
        )
        text_input_ids = text_inputs.input_ids.to(device=device)
        prompt_attention_mask = text_inputs.attention_mask.to(device=device)

        image_embeds = self.image_processor(image, return_tensors="pt").pixel_values.to(device)

        # Expand the single image placeholder token into `image_emb_len` slots so the
        # multimodal encoder can splice in the vision features.
        image_token_index = self.text_encoder.config.image_token_index
        pad_token_id = self.text_encoder.config.pad_token_id
        expanded_inputs = _expand_input_ids_with_image_tokens(
            text_input_ids,
            prompt_attention_mask,
            max_sequence_length,
            image_token_index,
            image_emb_len,
            image_emb_start,
            image_emb_end,
            pad_token_id,
        )
        prompt_embeds = self.text_encoder(
            **expanded_inputs,
            pixel_values=image_embeds,
            output_hidden_states=True,
        ).hidden_states[-(num_hidden_layers_to_skip + 1)]
        prompt_embeds = prompt_embeds.to(dtype=dtype)

        # `crop_start` is always set by this point; the guard is defensive only.
        if crop_start is not None and crop_start > 0:
            # Positions shift by `image_emb_len - 1`: one placeholder token became
            # `image_emb_len` embedding slots.
            text_crop_start = crop_start - 1 + image_emb_len
            batch_indices, last_double_return_token_indices = torch.where(text_input_ids == double_return_token_id)

            if last_double_return_token_indices.shape[0] == 3:
                # in case the prompt is too long
                # NOTE(review): these `torch.tensor(...)` values are created on CPU while
                # `last_double_return_token_indices` lives on `device`; on CUDA this
                # `torch.cat` would raise a device-mismatch error — confirm/pass device=.
                last_double_return_token_indices = torch.cat(
                    (last_double_return_token_indices, torch.tensor([text_input_ids.shape[-1]]))
                )
                batch_indices = torch.cat((batch_indices, torch.tensor([0])))

            # Keep only the last double-newline token per batch row; it marks the
            # start of the assistant turn that gets cropped out below.
            last_double_return_token_indices = last_double_return_token_indices.reshape(text_input_ids.shape[0], -1)[
                :, -1
            ]
            batch_indices = batch_indices.reshape(text_input_ids.shape[0], -1)[:, -1]
            assistant_crop_start = last_double_return_token_indices - 1 + image_emb_len - 4
            assistant_crop_end = last_double_return_token_indices - 1 + image_emb_len
            attention_mask_assistant_crop_start = last_double_return_token_indices - 4
            attention_mask_assistant_crop_end = last_double_return_token_indices

            prompt_embed_list = []
            prompt_attention_mask_list = []
            image_embed_list = []
            image_attention_mask_list = []

            for i in range(text_input_ids.shape[0]):
                # Drop the template scaffold and the 4-token assistant turn.
                prompt_embed_list.append(
                    torch.cat(
                        [
                            prompt_embeds[i, text_crop_start : assistant_crop_start[i].item()],
                            prompt_embeds[i, assistant_crop_end[i].item() :],
                        ]
                    )
                )
                prompt_attention_mask_list.append(
                    torch.cat(
                        [
                            prompt_attention_mask[i, crop_start : attention_mask_assistant_crop_start[i].item()],
                            prompt_attention_mask[i, attention_mask_assistant_crop_end[i].item() :],
                        ]
                    )
                )
                # Image slots live at a fixed range of the expanded sequence.
                image_embed_list.append(prompt_embeds[i, image_emb_start:image_emb_end])
                image_attention_mask_list.append(
                    torch.ones(image_embed_list[-1].shape[0]).to(prompt_embeds.device).to(prompt_attention_mask.dtype)
                )

            prompt_embed_list = torch.stack(prompt_embed_list)
            prompt_attention_mask_list = torch.stack(prompt_attention_mask_list)
            image_embed_list = torch.stack(image_embed_list)
            image_attention_mask_list = torch.stack(image_attention_mask_list)

            # Optionally keep every k-th image embedding to shorten the sequence.
            if 0 < image_embed_interleave < 6:
                image_embed_list = image_embed_list[:, ::image_embed_interleave, :]
                image_attention_mask_list = image_attention_mask_list[:, ::image_embed_interleave]

            assert (
                prompt_embed_list.shape[0] == prompt_attention_mask_list.shape[0]
                and image_embed_list.shape[0] == image_attention_mask_list.shape[0]
            )

            # Final layout: [image embeddings | cropped text embeddings].
            prompt_embeds = torch.cat([image_embed_list, prompt_embed_list], dim=1)
            prompt_attention_mask = torch.cat([image_attention_mask_list, prompt_attention_mask_list], dim=1)

        return prompt_embeds, prompt_attention_mask
|
| 420 |
+
|
| 421 |
+
def _get_clip_prompt_embeds(
|
| 422 |
+
self,
|
| 423 |
+
prompt: Union[str, List[str]],
|
| 424 |
+
num_videos_per_prompt: int = 1,
|
| 425 |
+
device: Optional[torch.device] = None,
|
| 426 |
+
dtype: Optional[torch.dtype] = None,
|
| 427 |
+
max_sequence_length: int = 77,
|
| 428 |
+
) -> torch.Tensor:
|
| 429 |
+
device = device or self._execution_device
|
| 430 |
+
dtype = dtype or self.text_encoder_2.dtype
|
| 431 |
+
|
| 432 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 433 |
+
|
| 434 |
+
text_inputs = self.tokenizer_2(
|
| 435 |
+
prompt,
|
| 436 |
+
padding="max_length",
|
| 437 |
+
max_length=max_sequence_length,
|
| 438 |
+
truncation=True,
|
| 439 |
+
return_tensors="pt",
|
| 440 |
+
)
|
| 441 |
+
|
| 442 |
+
text_input_ids = text_inputs.input_ids
|
| 443 |
+
untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
|
| 444 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
|
| 445 |
+
removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
|
| 446 |
+
logger.warning(
|
| 447 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 448 |
+
f" {max_sequence_length} tokens: {removed_text}"
|
| 449 |
+
)
|
| 450 |
+
|
| 451 |
+
prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output
|
| 452 |
+
return prompt_embeds
|
| 453 |
+
|
| 454 |
+
def encode_prompt(
|
| 455 |
+
self,
|
| 456 |
+
image: torch.Tensor,
|
| 457 |
+
prompt: Union[str, List[str]],
|
| 458 |
+
prompt_2: Union[str, List[str]] = None,
|
| 459 |
+
prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
|
| 460 |
+
num_videos_per_prompt: int = 1,
|
| 461 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 462 |
+
pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 463 |
+
prompt_attention_mask: Optional[torch.Tensor] = None,
|
| 464 |
+
device: Optional[torch.device] = None,
|
| 465 |
+
dtype: Optional[torch.dtype] = None,
|
| 466 |
+
max_sequence_length: int = 256,
|
| 467 |
+
image_embed_interleave: int = 2,
|
| 468 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 469 |
+
if prompt_embeds is None:
|
| 470 |
+
prompt_embeds, prompt_attention_mask = self._get_llama_prompt_embeds(
|
| 471 |
+
image,
|
| 472 |
+
prompt,
|
| 473 |
+
prompt_template,
|
| 474 |
+
num_videos_per_prompt,
|
| 475 |
+
device=device,
|
| 476 |
+
dtype=dtype,
|
| 477 |
+
max_sequence_length=max_sequence_length,
|
| 478 |
+
image_embed_interleave=image_embed_interleave,
|
| 479 |
+
)
|
| 480 |
+
|
| 481 |
+
if pooled_prompt_embeds is None:
|
| 482 |
+
if prompt_2 is None:
|
| 483 |
+
prompt_2 = prompt
|
| 484 |
+
pooled_prompt_embeds = self._get_clip_prompt_embeds(
|
| 485 |
+
prompt,
|
| 486 |
+
num_videos_per_prompt,
|
| 487 |
+
device=device,
|
| 488 |
+
dtype=dtype,
|
| 489 |
+
max_sequence_length=77,
|
| 490 |
+
)
|
| 491 |
+
|
| 492 |
+
return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask
|
| 493 |
+
|
| 494 |
+
def check_inputs(
|
| 495 |
+
self,
|
| 496 |
+
prompt,
|
| 497 |
+
prompt_2,
|
| 498 |
+
height,
|
| 499 |
+
width,
|
| 500 |
+
prompt_embeds=None,
|
| 501 |
+
callback_on_step_end_tensor_inputs=None,
|
| 502 |
+
prompt_template=None,
|
| 503 |
+
true_cfg_scale=1.0,
|
| 504 |
+
guidance_scale=1.0,
|
| 505 |
+
):
|
| 506 |
+
if height % 16 != 0 or width % 16 != 0:
|
| 507 |
+
raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")
|
| 508 |
+
|
| 509 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 510 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 511 |
+
):
|
| 512 |
+
raise ValueError(
|
| 513 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 514 |
+
)
|
| 515 |
+
|
| 516 |
+
if prompt is not None and prompt_embeds is not None:
|
| 517 |
+
raise ValueError(
|
| 518 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 519 |
+
" only forward one of the two."
|
| 520 |
+
)
|
| 521 |
+
elif prompt_2 is not None and prompt_embeds is not None:
|
| 522 |
+
raise ValueError(
|
| 523 |
+
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 524 |
+
" only forward one of the two."
|
| 525 |
+
)
|
| 526 |
+
elif prompt is None and prompt_embeds is None:
|
| 527 |
+
raise ValueError(
|
| 528 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 529 |
+
)
|
| 530 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 531 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 532 |
+
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
|
| 533 |
+
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
|
| 534 |
+
|
| 535 |
+
if prompt_template is not None:
|
| 536 |
+
if not isinstance(prompt_template, dict):
|
| 537 |
+
raise ValueError(f"`prompt_template` has to be of type `dict` but is {type(prompt_template)}")
|
| 538 |
+
if "template" not in prompt_template:
|
| 539 |
+
raise ValueError(
|
| 540 |
+
f"`prompt_template` has to contain a key `template` but only found {prompt_template.keys()}"
|
| 541 |
+
)
|
| 542 |
+
|
| 543 |
+
if true_cfg_scale > 1.0 and guidance_scale > 1.0:
|
| 544 |
+
logger.warning(
|
| 545 |
+
"Both `true_cfg_scale` and `guidance_scale` are greater than 1.0. This will result in both "
|
| 546 |
+
"classifier-free guidance and embedded-guidance to be applied. This is not recommended "
|
| 547 |
+
"as it may lead to higher memory usage, slower inference and potentially worse results."
|
| 548 |
+
)
|
| 549 |
+
|
| 550 |
+
def prepare_latents(
|
| 551 |
+
self,
|
| 552 |
+
image: torch.Tensor,
|
| 553 |
+
batch_size: int,
|
| 554 |
+
num_channels_latents: int = 32,
|
| 555 |
+
height: int = 720,
|
| 556 |
+
width: int = 1280,
|
| 557 |
+
num_frames: int = 129,
|
| 558 |
+
dtype: Optional[torch.dtype] = None,
|
| 559 |
+
device: Optional[torch.device] = None,
|
| 560 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 561 |
+
latents: Optional[torch.Tensor] = None,
|
| 562 |
+
image_condition_type: str = "latent_concat",
|
| 563 |
+
i2v_stable: bool = False,
|
| 564 |
+
) -> torch.Tensor:
|
| 565 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 566 |
+
raise ValueError(
|
| 567 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 568 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 569 |
+
)
|
| 570 |
+
|
| 571 |
+
num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
|
| 572 |
+
latent_height, latent_width = height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial
|
| 573 |
+
shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)
|
| 574 |
+
|
| 575 |
+
image = image.unsqueeze(2) # [B, C, 1, H, W]
|
| 576 |
+
if isinstance(generator, list):
|
| 577 |
+
image_latents = [
|
| 578 |
+
retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i], "argmax")
|
| 579 |
+
for i in range(batch_size)
|
| 580 |
+
]
|
| 581 |
+
else:
|
| 582 |
+
image_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator, "argmax") for img in image]
|
| 583 |
+
|
| 584 |
+
image_latents = torch.cat(image_latents, dim=0).to(dtype) * self.vae_scaling_factor
|
| 585 |
+
|
| 586 |
+
if latents is None:
|
| 587 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 588 |
+
else:
|
| 589 |
+
latents = latents.to(device=device, dtype=dtype)
|
| 590 |
+
|
| 591 |
+
if i2v_stable:
|
| 592 |
+
image_latents = image_latents.repeat(1, 1, num_latent_frames, 1, 1)
|
| 593 |
+
t = torch.tensor([0.999]).to(device=device)
|
| 594 |
+
latents = latents * t + image_latents * (1 - t)
|
| 595 |
+
|
| 596 |
+
if image_condition_type == "token_replace":
|
| 597 |
+
image_latents = image_latents[:, :, :1]
|
| 598 |
+
|
| 599 |
+
return latents, image_latents
|
| 600 |
+
|
| 601 |
+
def enable_vae_slicing(self):
|
| 602 |
+
r"""
|
| 603 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
| 604 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
| 605 |
+
"""
|
| 606 |
+
self.vae.enable_slicing()
|
| 607 |
+
|
| 608 |
+
def disable_vae_slicing(self):
|
| 609 |
+
r"""
|
| 610 |
+
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
|
| 611 |
+
computing decoding in one step.
|
| 612 |
+
"""
|
| 613 |
+
self.vae.disable_slicing()
|
| 614 |
+
|
| 615 |
+
def enable_vae_tiling(self):
|
| 616 |
+
r"""
|
| 617 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
| 618 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
| 619 |
+
processing larger images.
|
| 620 |
+
"""
|
| 621 |
+
self.vae.enable_tiling()
|
| 622 |
+
|
| 623 |
+
def disable_vae_tiling(self):
|
| 624 |
+
r"""
|
| 625 |
+
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
|
| 626 |
+
computing decoding in one step.
|
| 627 |
+
"""
|
| 628 |
+
self.vae.disable_tiling()
|
| 629 |
+
|
| 630 |
+
    @property
    def guidance_scale(self):
        """Read-only view of the backing `_guidance_scale` attribute."""
        return self._guidance_scale
|
| 633 |
+
|
| 634 |
+
    @property
    def num_timesteps(self):
        """Read-only view of the backing `_num_timesteps` attribute."""
        return self._num_timesteps
|
| 637 |
+
|
| 638 |
+
    @property
    def attention_kwargs(self):
        """Read-only view of the backing `_attention_kwargs` attribute."""
        return self._attention_kwargs
|
| 641 |
+
|
| 642 |
+
    @property
    def current_timestep(self):
        """Read-only view of the backing `_current_timestep` attribute."""
        return self._current_timestep
|
| 645 |
+
|
| 646 |
+
    @property
    def interrupt(self):
        """Read-only view of the backing `_interrupt` flag."""
        return self._interrupt
|
| 649 |
+
|
| 650 |
+
    def prepare_lp(
        self,
        # --- Filter Selection & Strength ---
        lp_filter_type: str,
        lp_blur_sigma: float,
        lp_blur_kernel_size: float,
        lp_resize_factor: float,
        # --- Contextual Info ---
        generator: torch.Generator,
        num_frames: int,
        use_low_pass_guidance: bool,
        lp_filter_in_latent: bool,
        # --- Inputs to filter ---
        orig_image_latents: torch.Tensor,
        orig_image_tensor: torch.Tensor,
        last_image: Optional[torch.Tensor] = None,
    ) -> Optional[torch.Tensor]:
        """
        Prepares a low-pass filtered version of the initial image condition for guidance. (HunyuanVideo)

        This function works in two modes:
        1. **Filtering in Image (RGB) Space (`lp_filter_in_latent=False`)**:
           It applies a low-pass filter to the source image, constructs a video tensor (e.g., first frame is
           the filtered image, last frame is an optionally provided filtered `last_image`, and the rest are zeros),
           encodes this video tensor with the VAE, normalizes the result, and finally prepends a temporal mask
           to create a condition tensor in the format expected by the transformer (`[mask, latents]`).
        2. **Filtering in Latent Space (`lp_filter_in_latent=True`)**:
           Directly applies the low-pass filter to the already-encoded `orig_image_latents`.

        Args:
            lp_filter_type (`str`): The type of low-pass filter to apply, e.g., 'gaussian_blur', 'down_up'.
            lp_blur_sigma (`float`): The sigma value for the Gaussian blur filter.
            lp_blur_kernel_size (`float`): The kernel size for the Gaussian blur filter.
            lp_resize_factor (`float`): The resizing factor for the 'down_up' filter.
            generator (`torch.Generator`): A random generator, used for VAE sampling when filtering in image space.
            num_frames (`int`): The target number of frames for the video condition tensor.
            use_low_pass_guidance (`bool`): If `False`, the function returns `None` immediately.
            lp_filter_in_latent (`bool`): If `True`, filtering is applied in latent space. Otherwise, in image space.
            orig_image_latents (`torch.Tensor`): The VAE-encoded latents of the original image. Used when
                `lp_filter_in_latent` is `True`.
            orig_image_tensor (`torch.Tensor`): The preprocessed original image tensor (RGB). Used when
                `lp_filter_in_latent` is `False`.
            last_image (`Optional[torch.Tensor]`, defaults to `None`):
                An optional image tensor for the last frame. If provided (and when filtering in image space), it will
                also be low-pass filtered and used as the last frame of the VAE input.

        Returns:
            `Optional[torch.Tensor]`: A tensor containing the low-pass filtered image condition ready for the
            transformer, or `None` if `use_low_pass_guidance` is `False`.
        """
        if not use_low_pass_guidance:
            return None

        if not lp_filter_in_latent:
            # --- Filter in Image (RGB) Space ---
            # 1. Apply the low-pass filter to the source image(s).
            image_lp = lp_utils.apply_low_pass_filter(
                orig_image_tensor,
                filter_type=lp_filter_type,
                blur_sigma=lp_blur_sigma,
                blur_kernel_size=lp_blur_kernel_size,
                resize_factor=lp_resize_factor,
            )
            image_lp_vae_input = image_lp.unsqueeze(2)

            batch_size, _, height, width = orig_image_tensor.shape
            latent_height = height // self.vae_scale_factor_spatial
            latent_width = width // self.vae_scale_factor_spatial

            # 2. Construct a video tensor to be encoded. This tensor has the filtered image as the first frame.
            # If a `last_image` is given, it's also filtered and placed at the end. Intermediate frames are black.
            if last_image is None:
                video_condition = torch.cat(
                    [image_lp_vae_input, image_lp_vae_input.new_zeros(image_lp_vae_input.shape[0], image_lp_vae_input.shape[1], num_frames - 1, height, width)], dim=2
                )
            else:

                last_image_lp = lp_utils.apply_low_pass_filter(
                    last_image,
                    filter_type=lp_filter_type,
                    blur_sigma=lp_blur_sigma,
                    blur_kernel_size=lp_blur_kernel_size,
                    resize_factor=lp_resize_factor,
                )

                last_image_lp = last_image_lp.unsqueeze(2)
                video_condition = torch.cat(
                    [image_lp_vae_input, image_lp_vae_input.new_zeros(image_lp_vae_input.shape[0], image_lp_vae_input.shape[1], num_frames - 2, height, width), last_image_lp],
                    dim=2,
                )
            # 3. Encode the constructed video tensor and normalize the resulting latents.
            # NOTE(review): `latents_mean`, `latents_std`, and `z_dim` are Wan-style VAE
            # config fields; confirm the registered AutoencoderKLHunyuanVideo config
            # actually exposes them — otherwise this branch raises AttributeError.
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, self.vae.config.z_dim, 1, 1, 1)
                .to(image_lp.device, image_lp.dtype)
            )
            latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
                image_lp.device, image_lp.dtype
            )
            encoded_lp = self.vae.encode(video_condition).latent_dist.sample(generator=generator)
            latent_condition = (encoded_lp - latents_mean) * latents_std

            # 4. Create a temporal mask. The transformer condition is `[mask, latents]`.
            # The mask is 1 for conditioned frames (first, and optionally last) and 0 for unconditioned frames.
            mask_lat_size = torch.ones(batch_size, 1, num_frames, latent_height, latent_width)

            if last_image is None:
                mask_lat_size[:, :, list(range(1, num_frames))] = 0
            else:
                mask_lat_size[:, :, list(range(1, num_frames - 1))] = 0
            first_frame_mask = mask_lat_size[:, :, 0:1]
            # Repeat the first-frame mask so the total frame count becomes divisible by
            # the temporal compression ratio for the `view` below.
            # NOTE(review): the reshape assumes num_frames + (ratio - 1) is divisible by
            # `vae_scale_factor_temporal` — confirm for the frame counts used by callers.
            first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal)
            mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2)
            mask_lat_size = mask_lat_size.view(batch_size, -1, self.vae_scale_factor_temporal, latent_height, latent_width)
            mask_lat_size = mask_lat_size.transpose(1, 2)
            mask_lat_size = mask_lat_size.to(latent_condition.device)

            # 5. Concatenate the mask and the normalized latents along the channel dimension.
            lp_image_latents = torch.concat([mask_lat_size, latent_condition], dim=1)

        else:
            # --- Filter Directly in Latent Space ---
            # This path assumes `orig_image_latents` is already prepared and just needs filtering.
            lp_image_latents = lp_utils.apply_low_pass_filter(
                orig_image_latents,
                filter_type=lp_filter_type,
                blur_sigma=lp_blur_sigma,
                blur_kernel_size=lp_blur_kernel_size,
                resize_factor=lp_resize_factor,
            )

        # Pad so that dim 1 is divisible by the transformer patch size.
        # NOTE(review): for [B, C, T, H, W] latents, `size(1)` is the *channel* axis,
        # yet the local names talk about "frames"; confirm whether the frame axis
        # (dim 2) was intended here.
        if self.transformer.config.patch_size is not None:
            remainder = lp_image_latents.size(1) % self.transformer.config.patch_size
            if remainder != 0:
                num_to_prepend = self.transformer.config.patch_size - remainder
                num_to_prepend = min(num_to_prepend, lp_image_latents.shape[1])
                first_frames_to_prepend = lp_image_latents[:, :num_to_prepend, ...]
                lp_image_latents = torch.cat([first_frames_to_prepend, lp_image_latents], dim=1)


        lp_image_latents = lp_image_latents.to(dtype=orig_image_latents.dtype)

        return lp_image_latents
|
| 793 |
+
|
| 794 |
+
@torch.no_grad()
|
| 795 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 796 |
+
def __call__(
|
| 797 |
+
self,
|
| 798 |
+
image: PIL.Image.Image,
|
| 799 |
+
prompt: Union[str, List[str]] = None,
|
| 800 |
+
prompt_2: Union[str, List[str]] = None,
|
| 801 |
+
negative_prompt: Union[str, List[str]] = "bad quality",
|
| 802 |
+
negative_prompt_2: Union[str, List[str]] = None,
|
| 803 |
+
height: int = 720,
|
| 804 |
+
width: int = 1280,
|
| 805 |
+
num_frames: int = 129,
|
| 806 |
+
num_inference_steps: int = 50,
|
| 807 |
+
sigmas: List[float] = None,
|
| 808 |
+
true_cfg_scale: float = 1.0,
|
| 809 |
+
guidance_scale: float = 1.0,
|
| 810 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 811 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 812 |
+
latents: Optional[torch.Tensor] = None,
|
| 813 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 814 |
+
pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 815 |
+
prompt_attention_mask: Optional[torch.Tensor] = None,
|
| 816 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 817 |
+
negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 818 |
+
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
|
| 819 |
+
output_type: Optional[str] = "pil",
|
| 820 |
+
return_dict: bool = True,
|
| 821 |
+
attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 822 |
+
callback_on_step_end: Optional[
|
| 823 |
+
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
|
| 824 |
+
] = None,
|
| 825 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 826 |
+
prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
|
| 827 |
+
max_sequence_length: int = 256,
|
| 828 |
+
image_embed_interleave: Optional[int] = None,
|
| 829 |
+
|
| 830 |
+
use_low_pass_guidance: bool = False,
|
| 831 |
+
lp_filter_type: str = "none", # {'gaussian_blur', 'down_up'}
|
| 832 |
+
lp_filter_in_latent: bool = False, # When set to True, low-pass filter is done after encoder. If False, low-pass filter is applied to image directly before encoder.
|
| 833 |
+
lp_blur_sigma: float = 15.0, # Used with 'gaussian_blur'. Gaussian filter sigma value.
|
| 834 |
+
lp_blur_kernel_size: float = 0.02734375, # Used with 'gaussian_blur'. Gaussian filter size. When set to int, used directly as kernel size. When set to float, H * `lp_blur_kernel_size` is used as kernel size.
|
| 835 |
+
lp_resize_factor: float = 0.25, # Used with 'down_up'. Image is bilinearly downsized to (`lp_resize_factor` * WIDTH, `lp_resize_factor` * HEIGHT) and then back to original.
|
| 836 |
+
|
| 837 |
+
lp_strength_schedule_type: str = "none", # Scheduling type for low-pass filtering strength. Options: {"none", "linear", "interval", "exponential"}
|
| 838 |
+
schedule_blur_kernel_size: bool = False, # If True, schedule blur kernel size as well. Otherwise, fix to initial value.
|
| 839 |
+
|
| 840 |
+
# --- Constant Interval Scheduling Params for LP Strength ---
|
| 841 |
+
schedule_interval_start_time: float = 0.0, # Starting timestep for interval scheduling
|
| 842 |
+
schedule_interval_end_time: float = 0.05, # Ending timestep for interval scheduling
|
| 843 |
+
|
| 844 |
+
# --- Linear Scheduling Params for LP Strength ---
|
| 845 |
+
schedule_linear_start_weight: float = 1.0, # Starting LP weight for linear scheduling at t=T (step 0)
|
| 846 |
+
schedule_linear_end_weight: float = 0.0, # Ending LP weight for linear scheduling at t=T * schedule_linear_end_time
|
| 847 |
+
schedule_linear_end_time: float = 0.5, # Timestep fraction at which schedule_linear_end is reached
|
| 848 |
+
|
| 849 |
+
# --- Exponential Scheduling Params for LP Strength ---
|
| 850 |
+
schedule_exp_decay_rate: float = 10.0, # Decay rate for 'exponential' schedule. Higher values decay faster. Strength = exp(-rate * time_fraction).
|
| 851 |
+
|
| 852 |
+
lp_on_noisy_latent = False,
|
| 853 |
+
enable_lp_img_embeds = False,
|
| 854 |
+
i2v_stable= False,
|
| 855 |
+
):
|
| 856 |
+
r"""
|
| 857 |
+
The call function to the pipeline for generation.
|
| 858 |
+
|
| 859 |
+
Args:
|
| 860 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 861 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 862 |
+
instead.
|
| 863 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 864 |
+
The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 865 |
+
will be used instead.
|
| 866 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 867 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 868 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
|
| 869 |
+
not greater than `1`).
|
| 870 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 871 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 872 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
|
| 873 |
+
height (`int`, defaults to `720`):
|
| 874 |
+
The height in pixels of the generated image.
|
| 875 |
+
width (`int`, defaults to `1280`):
|
| 876 |
+
The width in pixels of the generated image.
|
| 877 |
+
num_frames (`int`, defaults to `129`):
|
| 878 |
+
The number of frames in the generated video.
|
| 879 |
+
num_inference_steps (`int`, defaults to `50`):
|
| 880 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 881 |
+
expense of slower inference.
|
| 882 |
+
sigmas (`List[float]`, *optional*):
|
| 883 |
+
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
|
| 884 |
+
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
|
| 885 |
+
will be used.
|
| 886 |
+
true_cfg_scale (`float`, *optional*, defaults to 1.0):
|
| 887 |
+
When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance.
|
| 888 |
+
guidance_scale (`float`, defaults to `1.0`):
|
| 889 |
+
Guidance scale as defined in [Classifier-Free Diffusion
|
| 890 |
+
Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
|
| 891 |
+
of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
|
| 892 |
+
`guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
|
| 893 |
+
the text `prompt`, usually at the expense of lower image quality. Note that the only available
|
| 894 |
+
HunyuanVideo model is CFG-distilled, which means that traditional guidance between unconditional and
|
| 895 |
+
conditional latent is not applied.
|
| 896 |
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
| 897 |
+
The number of images to generate per prompt.
|
| 898 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 899 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 900 |
+
generation deterministic.
|
| 901 |
+
latents (`torch.Tensor`, *optional*):
|
| 902 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 903 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 904 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 905 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 906 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 907 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 908 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 909 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 910 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 911 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 912 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 913 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 914 |
+
argument.
|
| 915 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 916 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 917 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 918 |
+
input argument.
|
| 919 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 920 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 921 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 922 |
+
Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a plain tuple.
|
| 923 |
+
attention_kwargs (`dict`, *optional*):
|
| 924 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 925 |
+
`self.processor` in
|
| 926 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 927 |
+
clip_skip (`int`, *optional*):
|
| 928 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 929 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 930 |
+
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
|
| 931 |
+
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
|
| 932 |
+
each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
|
| 933 |
+
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
|
| 934 |
+
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
|
| 935 |
+
prompt_template (`Dict[str, Any]`, *optional*, defaults to `DEFAULT_PROMPT_TEMPLATE`):
|
| 936 |
+
A dictionary defining the template for constructing the LLaVA prompt. It should include keys like
|
| 937 |
+
`"template"`, `"crop_start"`, `"image_emb_start"`, `"image_emb_end"`, `"image_emb_len"`, and
|
| 938 |
+
`"double_return_token_id"`.
|
| 939 |
+
max_sequence_length (`int`, *optional*, defaults to 256):
|
| 940 |
+
The maximum sequence length for the LLaVA text encoder.
|
| 941 |
+
image_embed_interleave (`int`, *optional*):
|
| 942 |
+
The interleave factor for image embeddings. Defaults to 2 if `image_condition_type` is
|
| 943 |
+
`"latent_concat"`, 4 if `"token_replace"`, otherwise 1.
|
| 944 |
+
use_low_pass_guidance (`bool`, *optional*, defaults to `False`):
|
| 945 |
+
Whether to use low-pass guidance. This can help to improve the temporal consistency of the generated
|
| 946 |
+
video.
|
| 947 |
+
lp_filter_type (`str`, *optional*, defaults to `"none"`):
|
| 948 |
+
The type of low-pass filter to apply. Can be one of `gaussian_blur` or `down_up`.
|
| 949 |
+
lp_filter_in_latent (`bool`, *optional*, defaults to `False`):
|
| 950 |
+
If `True`, the low-pass filter is applied to the latent representation of the image. If `False`, it is
|
| 951 |
+
applied to the image in pixel space before encoding.
|
| 952 |
+
lp_blur_sigma (`float`, *optional*, defaults to `15.0`):
|
| 953 |
+
The sigma value for the Gaussian blur filter. Only used if `lp_filter_type` is `gaussian_blur`.
|
| 954 |
+
lp_blur_kernel_size (`float`, *optional*, defaults to `0.02734375`):
|
| 955 |
+
The kernel size for the Gaussian blur filter. If an `int`, it's used directly. If a `float`, the kernel
|
| 956 |
+
size is calculated as `height * lp_blur_kernel_size`. Only used if `lp_filter_type` is `gaussian_blur`.
|
| 957 |
+
lp_resize_factor (`float`, *optional*, defaults to `0.25`):
|
| 958 |
+
The resize factor for the down-sampling and up-sampling filter. Only used if `lp_filter_type` is
|
| 959 |
+
`down_up`.
|
| 960 |
+
lp_strength_schedule_type (`str`, *optional*, defaults to `"none"`):
|
| 961 |
+
The scheduling type for the low-pass filter strength. Can be one of `none`, `linear`, `interval`, or
|
| 962 |
+
`exponential`.
|
| 963 |
+
schedule_blur_kernel_size (`bool`, *optional*, defaults to `False`):
|
| 964 |
+
If `True`, the blur kernel size is also scheduled along with the strength. Otherwise, it remains fixed.
|
| 965 |
+
schedule_interval_start_time (`float`, *optional*, defaults to `0.0`):
|
| 966 |
+
The starting timestep fraction for interval scheduling. Only used if `lp_strength_schedule_type` is
|
| 967 |
+
`interval`.
|
| 968 |
+
schedule_interval_end_time (`float`, *optional*, defaults to `0.05`):
|
| 969 |
+
The ending timestep fraction for interval scheduling. Only used if `lp_strength_schedule_type` is
|
| 970 |
+
`interval`.
|
| 971 |
+
schedule_linear_start_weight (`float`, *optional*, defaults to `1.0`):
|
| 972 |
+
The starting weight for the low-pass filter strength in a linear schedule. Corresponds to the first
|
| 973 |
+
timestep. Only used if `lp_strength_schedule_type` is `linear`.
|
| 974 |
+
schedule_linear_end_weight (`float`, *optional*, defaults to `0.0`):
|
| 975 |
+
The ending weight for the low-pass filter strength in a linear schedule. Only used if
|
| 976 |
+
`lp_strength_schedule_type` is `linear`.
|
| 977 |
+
schedule_linear_end_time (`float`, *optional*, defaults to `0.5`):
|
| 978 |
+
The timestep fraction at which `schedule_linear_end_weight` is reached in a linear schedule. Only used
|
| 979 |
+
if `lp_strength_schedule_type` is `linear`.
|
| 980 |
+
schedule_exp_decay_rate (`float`, *optional*, defaults to `10.0`):
|
| 981 |
+
The decay rate for the exponential schedule. Higher values lead to faster decay. Only used if
|
| 982 |
+
`lp_strength_schedule_type` is `exponential`.
|
| 983 |
+
lp_on_noisy_latent (`bool`, *optional*, defaults to `False`):
|
| 984 |
+
If `True` and using low-pass guidance with true CFG, applies the low-pass condition to the noisy latent input
|
| 985 |
+
when the low-pass strength is zero, instead of using the original image condition.
|
| 986 |
+
enable_lp_img_embeds (`bool`, *optional*, defaults to `False`):
|
| 987 |
+
Whether to apply low-pass filtering to image embeddings.
|
| 988 |
+
i2v_stable (`bool`, *optional*, defaults to `False`):
|
| 989 |
+
If `True`, initializes the video latents with initial image latents.
|
| 990 |
+
|
| 991 |
+
Examples:
|
| 992 |
+
|
| 993 |
+
Returns:
|
| 994 |
+
[`~HunyuanVideoPipelineOutput`] or `tuple`:
|
| 995 |
+
If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned, otherwise a `tuple` is returned
|
| 996 |
+
where the first element is a list with the generated images and the second element is a list of `bool`s
|
| 997 |
+
indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content.
|
| 998 |
+
"""
|
| 999 |
+
|
| 1000 |
+
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
|
| 1001 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 1002 |
+
|
| 1003 |
+
# 1. Check inputs. Raise error if not correct
|
| 1004 |
+
self.check_inputs(
|
| 1005 |
+
prompt,
|
| 1006 |
+
prompt_2,
|
| 1007 |
+
height,
|
| 1008 |
+
width,
|
| 1009 |
+
prompt_embeds,
|
| 1010 |
+
callback_on_step_end_tensor_inputs,
|
| 1011 |
+
prompt_template,
|
| 1012 |
+
true_cfg_scale,
|
| 1013 |
+
guidance_scale,
|
| 1014 |
+
)
|
| 1015 |
+
|
| 1016 |
+
image_condition_type = self.transformer.config.image_condition_type
|
| 1017 |
+
has_neg_prompt = negative_prompt is not None or (
|
| 1018 |
+
negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None
|
| 1019 |
+
)
|
| 1020 |
+
do_true_cfg = true_cfg_scale > 1 and has_neg_prompt
|
| 1021 |
+
image_embed_interleave = (
|
| 1022 |
+
image_embed_interleave
|
| 1023 |
+
if image_embed_interleave is not None
|
| 1024 |
+
else (
|
| 1025 |
+
2 if image_condition_type == "latent_concat" else 4 if image_condition_type == "token_replace" else 1
|
| 1026 |
+
)
|
| 1027 |
+
)
|
| 1028 |
+
|
| 1029 |
+
self._guidance_scale = guidance_scale
|
| 1030 |
+
self._attention_kwargs = attention_kwargs
|
| 1031 |
+
self._current_timestep = None
|
| 1032 |
+
self._interrupt = False
|
| 1033 |
+
|
| 1034 |
+
device = self._execution_device
|
| 1035 |
+
|
| 1036 |
+
# 2. Define call parameters
|
| 1037 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1038 |
+
batch_size = 1
|
| 1039 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1040 |
+
batch_size = len(prompt)
|
| 1041 |
+
else:
|
| 1042 |
+
batch_size = prompt_embeds.shape[0]
|
| 1043 |
+
|
| 1044 |
+
# 3. Prepare latent variables
|
| 1045 |
+
vae_dtype = self.vae.dtype
|
| 1046 |
+
image_tensor = self.video_processor.preprocess(image, height, width).to(device, vae_dtype)
|
| 1047 |
+
|
| 1048 |
+
if image_condition_type == "latent_concat":
|
| 1049 |
+
num_channels_latents = (self.transformer.config.in_channels - 1) // 2
|
| 1050 |
+
elif image_condition_type == "token_replace":
|
| 1051 |
+
num_channels_latents = self.transformer.config.in_channels
|
| 1052 |
+
|
| 1053 |
+
latents, image_latents = self.prepare_latents(
|
| 1054 |
+
image_tensor,
|
| 1055 |
+
batch_size * num_videos_per_prompt,
|
| 1056 |
+
num_channels_latents,
|
| 1057 |
+
height,
|
| 1058 |
+
width,
|
| 1059 |
+
num_frames,
|
| 1060 |
+
torch.float32,
|
| 1061 |
+
device,
|
| 1062 |
+
generator,
|
| 1063 |
+
latents,
|
| 1064 |
+
image_condition_type,
|
| 1065 |
+
i2v_stable
|
| 1066 |
+
)
|
| 1067 |
+
if image_condition_type == "latent_concat":
|
| 1068 |
+
image_latents[:, :, 1:] = 0
|
| 1069 |
+
mask = image_latents.new_ones(image_latents.shape[0], 1, *image_latents.shape[2:])
|
| 1070 |
+
mask[:, :, 1:] = 0
|
| 1071 |
+
|
| 1072 |
+
# 4. Encode input prompt
|
| 1073 |
+
transformer_dtype = self.transformer.dtype
|
| 1074 |
+
prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = self.encode_prompt(
|
| 1075 |
+
image=image,
|
| 1076 |
+
prompt=prompt,
|
| 1077 |
+
prompt_2=prompt_2,
|
| 1078 |
+
prompt_template=prompt_template,
|
| 1079 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 1080 |
+
prompt_embeds=prompt_embeds,
|
| 1081 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1082 |
+
prompt_attention_mask=prompt_attention_mask,
|
| 1083 |
+
device=device,
|
| 1084 |
+
max_sequence_length=max_sequence_length,
|
| 1085 |
+
image_embed_interleave=image_embed_interleave,
|
| 1086 |
+
)
|
| 1087 |
+
prompt_embeds = prompt_embeds.to(transformer_dtype)
|
| 1088 |
+
prompt_attention_mask = prompt_attention_mask.to(transformer_dtype)
|
| 1089 |
+
pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype)
|
| 1090 |
+
|
| 1091 |
+
if do_true_cfg:
|
| 1092 |
+
black_image = PIL.Image.new("RGB", (width, height), 0)
|
| 1093 |
+
negative_prompt_embeds, negative_pooled_prompt_embeds, negative_prompt_attention_mask = self.encode_prompt(
|
| 1094 |
+
image=black_image,
|
| 1095 |
+
prompt=negative_prompt,
|
| 1096 |
+
prompt_2=negative_prompt_2,
|
| 1097 |
+
prompt_template=prompt_template,
|
| 1098 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 1099 |
+
prompt_embeds=negative_prompt_embeds,
|
| 1100 |
+
pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1101 |
+
prompt_attention_mask=negative_prompt_attention_mask,
|
| 1102 |
+
device=device,
|
| 1103 |
+
max_sequence_length=max_sequence_length,
|
| 1104 |
+
image_embed_interleave=image_embed_interleave,
|
| 1105 |
+
)
|
| 1106 |
+
negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)
|
| 1107 |
+
negative_prompt_attention_mask = negative_prompt_attention_mask.to(transformer_dtype)
|
| 1108 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.to(transformer_dtype)
|
| 1109 |
+
|
| 1110 |
+
# 5. Prepare timesteps
|
| 1111 |
+
sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas
|
| 1112 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)
|
| 1113 |
+
|
| 1114 |
+
# 6. Prepare guidance condition
|
| 1115 |
+
guidance = None
|
| 1116 |
+
if self.transformer.config.guidance_embeds:
|
| 1117 |
+
guidance = (
|
| 1118 |
+
torch.tensor([guidance_scale] * latents.shape[0], dtype=transformer_dtype, device=device) * 1000.0
|
| 1119 |
+
)
|
| 1120 |
+
|
| 1121 |
+
# 7. Denoising loop
|
| 1122 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1123 |
+
self._num_timesteps = len(timesteps)
|
| 1124 |
+
|
| 1125 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1126 |
+
for i, t in enumerate(timesteps):
|
| 1127 |
+
if self.interrupt:
|
| 1128 |
+
continue
|
| 1129 |
+
|
| 1130 |
+
self._current_timestep = t
|
| 1131 |
+
if do_true_cfg and use_low_pass_guidance:
|
| 1132 |
+
lp_strength = lp_utils.get_lp_strength(
|
| 1133 |
+
step_index=i,
|
| 1134 |
+
total_steps=num_inference_steps,
|
| 1135 |
+
lp_strength_schedule_type=lp_strength_schedule_type,
|
| 1136 |
+
schedule_interval_start_time=schedule_interval_start_time,
|
| 1137 |
+
schedule_interval_end_time=schedule_interval_end_time,
|
| 1138 |
+
schedule_linear_start_weight=schedule_linear_start_weight,
|
| 1139 |
+
schedule_linear_end_weight=schedule_linear_end_weight,
|
| 1140 |
+
schedule_linear_end_time=schedule_linear_end_time,
|
| 1141 |
+
schedule_exp_decay_rate=schedule_exp_decay_rate,
|
| 1142 |
+
)
|
| 1143 |
+
|
| 1144 |
+
modulated_lp_blur_sigma = lp_blur_sigma * lp_strength
|
| 1145 |
+
if schedule_blur_kernel_size:
|
| 1146 |
+
modulated_lp_blur_kernel_size = lp_blur_kernel_size * lp_strength
|
| 1147 |
+
else:
|
| 1148 |
+
modulated_lp_blur_kernel_size = lp_blur_kernel_size
|
| 1149 |
+
|
| 1150 |
+
# No-effect resize_factor is 1.0
|
| 1151 |
+
modulated_lp_resize_factor = 1.0 - (1.0 - lp_resize_factor) * lp_strength
|
| 1152 |
+
|
| 1153 |
+
if enable_lp_img_embeds:
|
| 1154 |
+
assert False, "Low-pass filter on image embeds is not supported in HunyuanVideo pipeline. Please set enable_lp_img_embeds = False"
|
| 1155 |
+
|
| 1156 |
+
lp_image_latents = self.prepare_lp(
|
| 1157 |
+
lp_filter_type=lp_filter_type,
|
| 1158 |
+
lp_blur_sigma=modulated_lp_blur_sigma,
|
| 1159 |
+
lp_blur_kernel_size=modulated_lp_blur_kernel_size,
|
| 1160 |
+
lp_resize_factor=modulated_lp_resize_factor,
|
| 1161 |
+
generator=generator,
|
| 1162 |
+
num_frames=num_frames,
|
| 1163 |
+
use_low_pass_guidance=use_low_pass_guidance,
|
| 1164 |
+
lp_filter_in_latent=lp_filter_in_latent,
|
| 1165 |
+
orig_image_latents=image_latents,
|
| 1166 |
+
orig_image_tensor=image
|
| 1167 |
+
)
|
| 1168 |
+
if lp_strength == 0.0 or lp_on_noisy_latent:
|
| 1169 |
+
latent_model_input = torch.cat([latents] * 2)
|
| 1170 |
+
img_cond = torch.cat([image_latents,image_latents], dim=0).to(transformer_dtype)
|
| 1171 |
+
latent_model_input = torch.cat([img_cond, latent_model_input[:, :, 1:]], dim=2).to(transformer_dtype)
|
| 1172 |
+
|
| 1173 |
+
concat_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1174 |
+
concat_pooled_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
|
| 1175 |
+
concat_prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
|
| 1176 |
+
else:
|
| 1177 |
+
latent_model_input = torch.cat([latents] * 3)
|
| 1178 |
+
img_cond = torch.cat([image_latents,lp_image_latents,lp_image_latents], dim=0)
|
| 1179 |
+
latent_model_input = torch.cat([img_cond, latent_model_input[:, :, 1:]], dim=2).to(transformer_dtype)
|
| 1180 |
+
concat_prompt_embeds = torch.cat([negative_prompt_embeds,negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1181 |
+
concat_pooled_embeds = torch.cat([negative_pooled_prompt_embeds,negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
|
| 1182 |
+
concat_prompt_attention_mask = torch.cat([negative_prompt_attention_mask,negative_prompt_attention_mask, prompt_attention_mask], dim=0)
|
| 1183 |
+
elif do_true_cfg:
|
| 1184 |
+
latent_model_input = torch.cat([latents] * 2)
|
| 1185 |
+
img_cond = torch.cat([image_latents,image_latents], dim=0).to(transformer_dtype)
|
| 1186 |
+
latent_model_input = torch.cat([img_cond, latent_model_input[:, :, 1:]], dim=2).to(transformer_dtype)
|
| 1187 |
+
|
| 1188 |
+
concat_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1189 |
+
concat_pooled_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
|
| 1190 |
+
concat_prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
|
| 1191 |
+
elif not use_low_pass_guidance:
|
| 1192 |
+
latent_model_input = torch.cat([image_latents, latents[:, :, 1:]], dim=2).to(transformer_dtype)
|
| 1193 |
+
concat_prompt_embeds = prompt_embeds
|
| 1194 |
+
concat_pooled_embeds = pooled_prompt_embeds
|
| 1195 |
+
concat_prompt_attention_mask = prompt_attention_mask
|
| 1196 |
+
else:
|
| 1197 |
+
lp_strength = lp_utils.get_lp_strength(
|
| 1198 |
+
step_index=i,
|
| 1199 |
+
total_steps=num_inference_steps,
|
| 1200 |
+
lp_strength_schedule_type=lp_strength_schedule_type,
|
| 1201 |
+
schedule_interval_start_time=schedule_interval_start_time,
|
| 1202 |
+
schedule_interval_end_time=schedule_interval_end_time,
|
| 1203 |
+
schedule_linear_start_weight=schedule_linear_start_weight,
|
| 1204 |
+
schedule_linear_end_weight=schedule_linear_end_weight,
|
| 1205 |
+
schedule_linear_end_time=schedule_linear_end_time,
|
| 1206 |
+
schedule_exp_decay_rate=schedule_exp_decay_rate,
|
| 1207 |
+
)
|
| 1208 |
+
|
| 1209 |
+
modulated_lp_blur_sigma = lp_blur_sigma * lp_strength
|
| 1210 |
+
if schedule_blur_kernel_size:
|
| 1211 |
+
modulated_lp_blur_kernel_size = lp_blur_kernel_size * lp_strength
|
| 1212 |
+
else:
|
| 1213 |
+
modulated_lp_blur_kernel_size = lp_blur_kernel_size
|
| 1214 |
+
|
| 1215 |
+
modulated_lp_resize_factor = 1.0 - (1.0 - lp_resize_factor) * lp_strength
|
| 1216 |
+
|
| 1217 |
+
if enable_lp_img_embeds:
|
| 1218 |
+
assert False, "Low-pass filter on image embeds is not supported in HunyuanVideo pipeline. Please set enable_lp_img_embeds = False"
|
| 1219 |
+
|
| 1220 |
+
lp_image_latents = self.prepare_lp(
|
| 1221 |
+
lp_filter_type=lp_filter_type,
|
| 1222 |
+
lp_blur_sigma=modulated_lp_blur_sigma,
|
| 1223 |
+
lp_blur_kernel_size=modulated_lp_blur_kernel_size,
|
| 1224 |
+
lp_resize_factor=modulated_lp_resize_factor,
|
| 1225 |
+
generator=generator,
|
| 1226 |
+
num_frames=num_frames,
|
| 1227 |
+
use_low_pass_guidance=use_low_pass_guidance,
|
| 1228 |
+
lp_filter_in_latent=lp_filter_in_latent,
|
| 1229 |
+
orig_image_latents=image_latents,
|
| 1230 |
+
orig_image_tensor=image
|
| 1231 |
+
)
|
| 1232 |
+
latent_model_input = torch.cat([lp_image_latents, latents[:, :, 1:]], dim=2).to(transformer_dtype)
|
| 1233 |
+
concat_prompt_embeds = prompt_embeds
|
| 1234 |
+
concat_pooled_embeds = pooled_prompt_embeds
|
| 1235 |
+
concat_prompt_attention_mask = prompt_attention_mask
|
| 1236 |
+
|
| 1237 |
+
timestep = t.expand(latent_model_input.shape[0]).to(transformer_dtype)
|
| 1238 |
+
latent_model_input = latent_model_input.to(transformer_dtype)
|
| 1239 |
+
prompt_embeds = prompt_embeds.to(transformer_dtype)
|
| 1240 |
+
prompt_attention_mask = prompt_attention_mask.to(transformer_dtype)
|
| 1241 |
+
pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype)
|
| 1242 |
+
|
| 1243 |
+
noise_pred = self.transformer(
|
| 1244 |
+
hidden_states=latent_model_input,
|
| 1245 |
+
timestep=timestep,
|
| 1246 |
+
encoder_hidden_states=concat_prompt_embeds,
|
| 1247 |
+
encoder_attention_mask=concat_prompt_attention_mask,
|
| 1248 |
+
pooled_projections=concat_pooled_embeds,
|
| 1249 |
+
guidance=guidance,
|
| 1250 |
+
attention_kwargs=attention_kwargs,
|
| 1251 |
+
return_dict=False,
|
| 1252 |
+
)[0]
|
| 1253 |
+
|
| 1254 |
+
if noise_pred.shape[0] == 3:
|
| 1255 |
+
noise_pred_uncond_init, noise_pred_uncond, noise_pred_text = noise_pred.chunk(3)
|
| 1256 |
+
noise_pred = (
|
| 1257 |
+
noise_pred_uncond_init + true_cfg_scale * (noise_pred_text - noise_pred_uncond)
|
| 1258 |
+
)
|
| 1259 |
+
elif noise_pred.shape[0] == 2:
|
| 1260 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1261 |
+
noise_pred = noise_pred_uncond + true_cfg_scale * (noise_pred_text - noise_pred_uncond)
|
| 1262 |
+
|
| 1263 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1264 |
+
if image_condition_type == "latent_concat":
|
| 1265 |
+
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
|
| 1266 |
+
elif image_condition_type == "token_replace":
|
| 1267 |
+
latents = latents = self.scheduler.step(
|
| 1268 |
+
noise_pred[:, :, 1:], t, latents[:, :, 1:], return_dict=False
|
| 1269 |
+
)[0]
|
| 1270 |
+
latents = torch.cat([image_latents, latents], dim=2)
|
| 1271 |
+
|
| 1272 |
+
if callback_on_step_end is not None:
|
| 1273 |
+
callback_kwargs = {}
|
| 1274 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1275 |
+
callback_kwargs[k] = locals()[k]
|
| 1276 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1277 |
+
|
| 1278 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1279 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1280 |
+
|
| 1281 |
+
# call the callback, if provided
|
| 1282 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1283 |
+
progress_bar.update()
|
| 1284 |
+
|
| 1285 |
+
if XLA_AVAILABLE:
|
| 1286 |
+
xm.mark_step()
|
| 1287 |
+
|
| 1288 |
+
self._current_timestep = None
|
| 1289 |
+
|
| 1290 |
+
if not output_type == "latent":
|
| 1291 |
+
latents = latents.to(self.vae.dtype) / self.vae_scaling_factor
|
| 1292 |
+
video = self.vae.decode(latents, return_dict=False)[0]
|
| 1293 |
+
if image_condition_type == "latent_concat":
|
| 1294 |
+
video = video[:, :, 4:, :, :]
|
| 1295 |
+
video = self.video_processor.postprocess_video(video, output_type=output_type)
|
| 1296 |
+
else:
|
| 1297 |
+
if image_condition_type == "latent_concat":
|
| 1298 |
+
video = latents[:, :, 1:, :, :]
|
| 1299 |
+
else:
|
| 1300 |
+
video = latents
|
| 1301 |
+
|
| 1302 |
+
# Offload all models
|
| 1303 |
+
self.maybe_free_model_hooks()
|
| 1304 |
+
|
| 1305 |
+
if not return_dict:
|
| 1306 |
+
return (video,)
|
| 1307 |
+
|
| 1308 |
+
return HunyuanVideoPipelineOutput(frames=video)
|
exp_code/1_benchmark/ALG/pipeline_wan_image2video_lowpass.py
ADDED
|
@@ -0,0 +1,970 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import html
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import PIL
|
| 19 |
+
import regex as re
|
| 20 |
+
import torch
|
| 21 |
+
from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel
|
| 22 |
+
|
| 23 |
+
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
|
| 24 |
+
from diffusers.image_processor import PipelineImageInput
|
| 25 |
+
from diffusers.loaders import WanLoraLoaderMixin
|
| 26 |
+
from diffusers.models import AutoencoderKLWan, WanTransformer3DModel
|
| 27 |
+
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
|
| 28 |
+
from diffusers.utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring
|
| 29 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 30 |
+
from diffusers.video_processor import VideoProcessor
|
| 31 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 32 |
+
from diffusers.pipelines.wan.pipeline_output import WanPipelineOutput
|
| 33 |
+
|
| 34 |
+
import lp_utils
|
| 35 |
+
|
| 36 |
+
if is_torch_xla_available():
|
| 37 |
+
import torch_xla.core.xla_model as xm
|
| 38 |
+
|
| 39 |
+
XLA_AVAILABLE = True
|
| 40 |
+
else:
|
| 41 |
+
XLA_AVAILABLE = False
|
| 42 |
+
|
| 43 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 44 |
+
|
| 45 |
+
if is_ftfy_available():
|
| 46 |
+
import ftfy
|
| 47 |
+
|
| 48 |
+
EXAMPLE_DOC_STRING = """
|
| 49 |
+
Examples:
|
| 50 |
+
```python
|
| 51 |
+
>>> import torch
|
| 52 |
+
>>> import numpy as np
|
| 53 |
+
>>> from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
|
| 54 |
+
>>> from diffusers.utils import export_to_video, load_image
|
| 55 |
+
>>> from transformers import CLIPVisionModel
|
| 56 |
+
|
| 57 |
+
>>> # Available models: Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-14B-720P-Diffusers
|
| 58 |
+
>>> model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
|
| 59 |
+
>>> image_encoder = CLIPVisionModel.from_pretrained(
|
| 60 |
+
... model_id, subfolder="image_encoder", torch_dtype=torch.float32
|
| 61 |
+
... )
|
| 62 |
+
>>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
|
| 63 |
+
>>> pipe = WanImageToVideoPipeline.from_pretrained(
|
| 64 |
+
... model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
|
| 65 |
+
... )
|
| 66 |
+
>>> pipe.to("cuda")
|
| 67 |
+
|
| 68 |
+
>>> image = load_image(
|
| 69 |
+
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
|
| 70 |
+
... )
|
| 71 |
+
>>> max_area = 480 * 832
|
| 72 |
+
>>> aspect_ratio = image.height / image.width
|
| 73 |
+
>>> mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
|
| 74 |
+
>>> height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
|
| 75 |
+
>>> width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
|
| 76 |
+
>>> image = image.resize((width, height))
|
| 77 |
+
>>> prompt = (
|
| 78 |
+
... "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in "
|
| 79 |
+
... "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
|
| 80 |
+
... )
|
| 81 |
+
>>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
|
| 82 |
+
|
| 83 |
+
>>> output = pipe(
|
| 84 |
+
... image=image,
|
| 85 |
+
... prompt=prompt,
|
| 86 |
+
... negative_prompt=negative_prompt,
|
| 87 |
+
... height=height,
|
| 88 |
+
... width=width,
|
| 89 |
+
... num_frames=81,
|
| 90 |
+
... guidance_scale=5.0,
|
| 91 |
+
... ).frames[0]
|
| 92 |
+
>>> export_to_video(output, "output.mp4", fps=16)
|
| 93 |
+
```
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def basic_clean(text):
|
| 98 |
+
text = ftfy.fix_text(text)
|
| 99 |
+
text = html.unescape(html.unescape(text))
|
| 100 |
+
return text.strip()
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def whitespace_clean(text):
|
| 104 |
+
text = re.sub(r"\s+", " ", text)
|
| 105 |
+
text = text.strip()
|
| 106 |
+
return text
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def prompt_clean(text):
|
| 110 |
+
text = whitespace_clean(basic_clean(text))
|
| 111 |
+
return text
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
| 115 |
+
def retrieve_latents(
|
| 116 |
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
| 117 |
+
):
|
| 118 |
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
| 119 |
+
return encoder_output.latent_dist.sample(generator)
|
| 120 |
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
| 121 |
+
return encoder_output.latent_dist.mode()
|
| 122 |
+
elif hasattr(encoder_output, "latents"):
|
| 123 |
+
return encoder_output.latents
|
| 124 |
+
else:
|
| 125 |
+
raise AttributeError("Could not access latents of provided encoder_output")
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class WanImageToVideoPipeline(DiffusionPipeline, WanLoraLoaderMixin):
|
| 129 |
+
r"""
|
| 130 |
+
Pipeline for image-to-video generation using Wan.
|
| 131 |
+
|
| 132 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 133 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
tokenizer ([`T5Tokenizer`]):
|
| 137 |
+
Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer),
|
| 138 |
+
specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant.
|
| 139 |
+
text_encoder ([`T5EncoderModel`]):
|
| 140 |
+
[T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
|
| 141 |
+
the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant.
|
| 142 |
+
image_encoder ([`CLIPVisionModel`]):
|
| 143 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModel), specifically
|
| 144 |
+
the
|
| 145 |
+
[clip-vit-huge-patch14](https://github.com/mlfoundations/open_clip/blob/main/docs/PRETRAINED.md#vit-h14-xlm-roberta-large)
|
| 146 |
+
variant.
|
| 147 |
+
transformer ([`WanTransformer3DModel`]):
|
| 148 |
+
Conditional Transformer to denoise the input latents.
|
| 149 |
+
scheduler ([`UniPCMultistepScheduler`]):
|
| 150 |
+
A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
|
| 151 |
+
vae ([`AutoencoderKLWan`]):
|
| 152 |
+
Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
|
| 153 |
+
"""
|
| 154 |
+
|
| 155 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->transformer->vae"
|
| 156 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 157 |
+
|
| 158 |
+
def __init__(
|
| 159 |
+
self,
|
| 160 |
+
tokenizer: AutoTokenizer,
|
| 161 |
+
text_encoder: UMT5EncoderModel,
|
| 162 |
+
image_encoder: CLIPVisionModel,
|
| 163 |
+
image_processor: CLIPImageProcessor,
|
| 164 |
+
transformer: WanTransformer3DModel,
|
| 165 |
+
vae: AutoencoderKLWan,
|
| 166 |
+
scheduler: FlowMatchEulerDiscreteScheduler,
|
| 167 |
+
):
|
| 168 |
+
super().__init__()
|
| 169 |
+
|
| 170 |
+
self.register_modules(
|
| 171 |
+
vae=vae,
|
| 172 |
+
text_encoder=text_encoder,
|
| 173 |
+
tokenizer=tokenizer,
|
| 174 |
+
image_encoder=image_encoder,
|
| 175 |
+
transformer=transformer,
|
| 176 |
+
scheduler=scheduler,
|
| 177 |
+
image_processor=image_processor,
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4
|
| 181 |
+
self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8
|
| 182 |
+
self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
|
| 183 |
+
self.image_processor = image_processor
|
| 184 |
+
|
| 185 |
+
def _get_t5_prompt_embeds(
|
| 186 |
+
self,
|
| 187 |
+
prompt: Union[str, List[str]] = None,
|
| 188 |
+
num_videos_per_prompt: int = 1,
|
| 189 |
+
max_sequence_length: int = 512,
|
| 190 |
+
device: Optional[torch.device] = None,
|
| 191 |
+
dtype: Optional[torch.dtype] = None,
|
| 192 |
+
):
|
| 193 |
+
device = device or self._execution_device
|
| 194 |
+
dtype = dtype or self.text_encoder.dtype
|
| 195 |
+
|
| 196 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 197 |
+
prompt = [prompt_clean(u) for u in prompt]
|
| 198 |
+
batch_size = len(prompt)
|
| 199 |
+
|
| 200 |
+
text_inputs = self.tokenizer(
|
| 201 |
+
prompt,
|
| 202 |
+
padding="max_length",
|
| 203 |
+
max_length=max_sequence_length,
|
| 204 |
+
truncation=True,
|
| 205 |
+
add_special_tokens=True,
|
| 206 |
+
return_attention_mask=True,
|
| 207 |
+
return_tensors="pt",
|
| 208 |
+
)
|
| 209 |
+
text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask
|
| 210 |
+
seq_lens = mask.gt(0).sum(dim=1).long()
|
| 211 |
+
|
| 212 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state
|
| 213 |
+
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
|
| 214 |
+
prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
|
| 215 |
+
prompt_embeds = torch.stack(
|
| 216 |
+
[torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 220 |
+
_, seq_len, _ = prompt_embeds.shape
|
| 221 |
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
|
| 222 |
+
prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
|
| 223 |
+
|
| 224 |
+
return prompt_embeds
|
| 225 |
+
|
| 226 |
+
def encode_image(
|
| 227 |
+
self,
|
| 228 |
+
image: PipelineImageInput,
|
| 229 |
+
device: Optional[torch.device] = None,
|
| 230 |
+
):
|
| 231 |
+
device = device or self._execution_device
|
| 232 |
+
image = self.image_processor(images=image, return_tensors="pt").to(device)
|
| 233 |
+
image_embeds = self.image_encoder(**image, output_hidden_states=True)
|
| 234 |
+
return image_embeds.hidden_states[-2]
|
| 235 |
+
|
| 236 |
+
# Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt
|
| 237 |
+
def encode_prompt(
|
| 238 |
+
self,
|
| 239 |
+
prompt: Union[str, List[str]],
|
| 240 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 241 |
+
do_classifier_free_guidance: bool = True,
|
| 242 |
+
num_videos_per_prompt: int = 1,
|
| 243 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 244 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 245 |
+
max_sequence_length: int = 226,
|
| 246 |
+
device: Optional[torch.device] = None,
|
| 247 |
+
dtype: Optional[torch.dtype] = None,
|
| 248 |
+
):
|
| 249 |
+
r"""
|
| 250 |
+
Encodes the prompt into text encoder hidden states.
|
| 251 |
+
|
| 252 |
+
Args:
|
| 253 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 254 |
+
prompt to be encoded
|
| 255 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 256 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 257 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 258 |
+
less than `1`).
|
| 259 |
+
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
|
| 260 |
+
Whether to use classifier free guidance or not.
|
| 261 |
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
| 262 |
+
Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
|
| 263 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 264 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 265 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 266 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 267 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 268 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 269 |
+
argument.
|
| 270 |
+
device: (`torch.device`, *optional*):
|
| 271 |
+
torch device
|
| 272 |
+
dtype: (`torch.dtype`, *optional*):
|
| 273 |
+
torch dtype
|
| 274 |
+
"""
|
| 275 |
+
device = device or self._execution_device
|
| 276 |
+
|
| 277 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 278 |
+
if prompt is not None:
|
| 279 |
+
batch_size = len(prompt)
|
| 280 |
+
else:
|
| 281 |
+
batch_size = prompt_embeds.shape[0]
|
| 282 |
+
|
| 283 |
+
if prompt_embeds is None:
|
| 284 |
+
prompt_embeds = self._get_t5_prompt_embeds(
|
| 285 |
+
prompt=prompt,
|
| 286 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 287 |
+
max_sequence_length=max_sequence_length,
|
| 288 |
+
device=device,
|
| 289 |
+
dtype=dtype,
|
| 290 |
+
)
|
| 291 |
+
|
| 292 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 293 |
+
negative_prompt = negative_prompt or ""
|
| 294 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 295 |
+
|
| 296 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 297 |
+
raise TypeError(
|
| 298 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 299 |
+
f" {type(prompt)}."
|
| 300 |
+
)
|
| 301 |
+
elif batch_size != len(negative_prompt):
|
| 302 |
+
raise ValueError(
|
| 303 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 304 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 305 |
+
" the batch size of `prompt`."
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
negative_prompt_embeds = self._get_t5_prompt_embeds(
|
| 309 |
+
prompt=negative_prompt,
|
| 310 |
+
num_videos_per_prompt=num_videos_per_prompt,
|
| 311 |
+
max_sequence_length=max_sequence_length,
|
| 312 |
+
device=device,
|
| 313 |
+
dtype=dtype,
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
return prompt_embeds, negative_prompt_embeds
|
| 317 |
+
|
| 318 |
+
def check_inputs(
|
| 319 |
+
self,
|
| 320 |
+
prompt,
|
| 321 |
+
negative_prompt,
|
| 322 |
+
image,
|
| 323 |
+
height,
|
| 324 |
+
width,
|
| 325 |
+
prompt_embeds=None,
|
| 326 |
+
negative_prompt_embeds=None,
|
| 327 |
+
image_embeds=None,
|
| 328 |
+
callback_on_step_end_tensor_inputs=None,
|
| 329 |
+
):
|
| 330 |
+
if image is not None and image_embeds is not None:
|
| 331 |
+
raise ValueError(
|
| 332 |
+
f"Cannot forward both `image`: {image} and `image_embeds`: {image_embeds}. Please make sure to"
|
| 333 |
+
" only forward one of the two."
|
| 334 |
+
)
|
| 335 |
+
if image is None and image_embeds is None:
|
| 336 |
+
raise ValueError(
|
| 337 |
+
"Provide either `image` or `prompt_embeds`. Cannot leave both `image` and `image_embeds` undefined."
|
| 338 |
+
)
|
| 339 |
+
if image is not None and not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image):
|
| 340 |
+
raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}")
|
| 341 |
+
if height % 16 != 0 or width % 16 != 0:
|
| 342 |
+
raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")
|
| 343 |
+
|
| 344 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 345 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 346 |
+
):
|
| 347 |
+
raise ValueError(
|
| 348 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
if prompt is not None and prompt_embeds is not None:
|
| 352 |
+
raise ValueError(
|
| 353 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 354 |
+
" only forward one of the two."
|
| 355 |
+
)
|
| 356 |
+
elif negative_prompt is not None and negative_prompt_embeds is not None:
|
| 357 |
+
raise ValueError(
|
| 358 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to"
|
| 359 |
+
" only forward one of the two."
|
| 360 |
+
)
|
| 361 |
+
elif prompt is None and prompt_embeds is None:
|
| 362 |
+
raise ValueError(
|
| 363 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 364 |
+
)
|
| 365 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 366 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 367 |
+
elif negative_prompt is not None and (
|
| 368 |
+
not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
|
| 369 |
+
):
|
| 370 |
+
raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
|
| 371 |
+
|
| 372 |
+
def prepare_latents(
|
| 373 |
+
self,
|
| 374 |
+
image: PipelineImageInput,
|
| 375 |
+
batch_size: int,
|
| 376 |
+
num_channels_latents: int = 16,
|
| 377 |
+
height: int = 480,
|
| 378 |
+
width: int = 832,
|
| 379 |
+
num_frames: int = 81,
|
| 380 |
+
dtype: Optional[torch.dtype] = None,
|
| 381 |
+
device: Optional[torch.device] = None,
|
| 382 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 383 |
+
latents: Optional[torch.Tensor] = None,
|
| 384 |
+
last_image: Optional[torch.Tensor] = None,
|
| 385 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 386 |
+
num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
|
| 387 |
+
latent_height = height // self.vae_scale_factor_spatial
|
| 388 |
+
latent_width = width // self.vae_scale_factor_spatial
|
| 389 |
+
|
| 390 |
+
shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)
|
| 391 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 392 |
+
raise ValueError(
|
| 393 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 394 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
if latents is None:
|
| 398 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 399 |
+
else:
|
| 400 |
+
latents = latents.to(device=device, dtype=dtype)
|
| 401 |
+
|
| 402 |
+
image = image.unsqueeze(2)
|
| 403 |
+
if last_image is None:
|
| 404 |
+
video_condition = torch.cat(
|
| 405 |
+
[image, image.new_zeros(image.shape[0], image.shape[1], num_frames - 1, height, width)], dim=2
|
| 406 |
+
)
|
| 407 |
+
else:
|
| 408 |
+
last_image = last_image.unsqueeze(2)
|
| 409 |
+
video_condition = torch.cat(
|
| 410 |
+
[image, image.new_zeros(image.shape[0], image.shape[1], num_frames - 2, height, width), last_image],
|
| 411 |
+
dim=2,
|
| 412 |
+
)
|
| 413 |
+
video_condition = video_condition.to(device=device, dtype=self.vae.dtype)
|
| 414 |
+
|
| 415 |
+
latents_mean = (
|
| 416 |
+
torch.tensor(self.vae.config.latents_mean)
|
| 417 |
+
.view(1, self.vae.config.z_dim, 1, 1, 1)
|
| 418 |
+
.to(latents.device, latents.dtype)
|
| 419 |
+
)
|
| 420 |
+
latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
|
| 421 |
+
latents.device, latents.dtype
|
| 422 |
+
)
|
| 423 |
+
|
| 424 |
+
if isinstance(generator, list):
|
| 425 |
+
latent_condition = [
|
| 426 |
+
retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") for _ in generator
|
| 427 |
+
]
|
| 428 |
+
latent_condition = torch.cat(latent_condition)
|
| 429 |
+
else:
|
| 430 |
+
latent_condition = retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax")
|
| 431 |
+
latent_condition = latent_condition.repeat(batch_size, 1, 1, 1, 1)
|
| 432 |
+
|
| 433 |
+
latent_condition = latent_condition.to(dtype)
|
| 434 |
+
latent_condition = (latent_condition - latents_mean) * latents_std
|
| 435 |
+
|
| 436 |
+
mask_lat_size = torch.ones(batch_size, 1, num_frames, latent_height, latent_width)
|
| 437 |
+
|
| 438 |
+
if last_image is None:
|
| 439 |
+
mask_lat_size[:, :, list(range(1, num_frames))] = 0
|
| 440 |
+
else:
|
| 441 |
+
mask_lat_size[:, :, list(range(1, num_frames - 1))] = 0
|
| 442 |
+
first_frame_mask = mask_lat_size[:, :, 0:1]
|
| 443 |
+
first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal)
|
| 444 |
+
mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2)
|
| 445 |
+
mask_lat_size = mask_lat_size.view(batch_size, -1, self.vae_scale_factor_temporal, latent_height, latent_width)
|
| 446 |
+
mask_lat_size = mask_lat_size.transpose(1, 2)
|
| 447 |
+
mask_lat_size = mask_lat_size.to(latent_condition.device)
|
| 448 |
+
|
| 449 |
+
return latents, torch.concat([mask_lat_size, latent_condition], dim=1)
|
| 450 |
+
|
| 451 |
+
def prepare_lp(
|
| 452 |
+
self,
|
| 453 |
+
# --- Filter Selection & Strength ---
|
| 454 |
+
lp_filter_type: str,
|
| 455 |
+
lp_blur_sigma: float,
|
| 456 |
+
lp_blur_kernel_size: float,
|
| 457 |
+
lp_resize_factor: float,
|
| 458 |
+
# --- Contextual Info ---
|
| 459 |
+
generator: torch.Generator,
|
| 460 |
+
num_frames: int,
|
| 461 |
+
use_low_pass_guidance: bool,
|
| 462 |
+
lp_filter_in_latent: bool,
|
| 463 |
+
# --- Inputs to filter ---
|
| 464 |
+
orig_image_latents: torch.Tensor,
|
| 465 |
+
orig_image_tensor: torch.Tensor,
|
| 466 |
+
) -> Optional[torch.Tensor]:
|
| 467 |
+
"""
|
| 468 |
+
Prepares a low-pass filtered version of the initial image condition for guidance. (Wan 2.1)
|
| 469 |
+
The resulting low-pass filtered latents are padded to match the required number of frames and temporal
|
| 470 |
+
patch size for the transformer model.
|
| 471 |
+
|
| 472 |
+
Args:
|
| 473 |
+
lp_filter_type (`str`): The type of low-pass filter to apply, e.g., 'gaussian_blur', 'down_up'.
|
| 474 |
+
lp_blur_sigma (`float`): The sigma value for the Gaussian blur filter.
|
| 475 |
+
lp_blur_kernel_size (`float`): The kernel size for the Gaussian blur filter.
|
| 476 |
+
lp_resize_factor (`float`): The resizing factor for the 'down_up' filter.
|
| 477 |
+
generator (`torch.Generator`): A random generator, used for VAE sampling when filtering in image space.
|
| 478 |
+
num_frames (`int`): The target number of frames for the final video, used to determine padding.
|
| 479 |
+
use_low_pass_guidance (`bool`): If `False`, the function returns `None` immediately.
|
| 480 |
+
lp_filter_in_latent (`bool`): If `True`, filtering is applied in latent space. Otherwise, in image space.
|
| 481 |
+
orig_image_latents (`torch.Tensor`): The VAE-encoded latents of the original image. Used when
|
| 482 |
+
`lp_filter_in_latent` is `True`. Shape: `(batch_size, num_frames_padded, channels, height, width)`.
|
| 483 |
+
orig_image_tensor (`torch.Tensor`): The preprocessed original image tensor (RGB). Used when
|
| 484 |
+
`lp_filter_in_latent` is `False`. Shape: `(batch_size, channels, height, width)`.
|
| 485 |
+
|
| 486 |
+
Returns:
|
| 487 |
+
`Optional[torch.Tensor]`: A tensor containing the low-pass filtered image latents, correctly shaped and
|
| 488 |
+
padded for the transformer, or `None` if `use_low_pass_guidance` is `False`.
|
| 489 |
+
"""
|
| 490 |
+
if not use_low_pass_guidance:
|
| 491 |
+
return None
|
| 492 |
+
|
| 493 |
+
if not lp_filter_in_latent:
|
| 494 |
+
# --- Filter in Image (RGB) Space ---
|
| 495 |
+
image_lp = lp_utils.apply_low_pass_filter(
|
| 496 |
+
orig_image_tensor,
|
| 497 |
+
filter_type=lp_filter_type,
|
| 498 |
+
blur_sigma=lp_blur_sigma,
|
| 499 |
+
blur_kernel_size=lp_blur_kernel_size,
|
| 500 |
+
resize_factor=lp_resize_factor,
|
| 501 |
+
)
|
| 502 |
+
image_lp_vae_input = image_lp.unsqueeze(2)
|
| 503 |
+
|
| 504 |
+
batch_size, _, height, width = orig_image_tensor.shape
|
| 505 |
+
latent_height = height // self.vae_scale_factor_spatial
|
| 506 |
+
latent_width = width // self.vae_scale_factor_spatial
|
| 507 |
+
|
| 508 |
+
# --- Zero padding ---
|
| 509 |
+
video_condition = torch.cat(
|
| 510 |
+
[
|
| 511 |
+
image_lp_vae_input,
|
| 512 |
+
image_lp_vae_input.new_zeros(
|
| 513 |
+
image_lp_vae_input.shape[0], image_lp_vae_input.shape[1], num_frames - 1, height, width
|
| 514 |
+
),
|
| 515 |
+
],
|
| 516 |
+
dim=2,
|
| 517 |
+
)
|
| 518 |
+
latents_mean = (
|
| 519 |
+
torch.tensor(self.vae.config.latents_mean)
|
| 520 |
+
.view(1, self.vae.config.z_dim, 1, 1, 1)
|
| 521 |
+
.to(image_lp.device, image_lp.dtype)
|
| 522 |
+
)
|
| 523 |
+
latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(
|
| 524 |
+
1, self.vae.config.z_dim, 1, 1, 1
|
| 525 |
+
).to(image_lp.device, image_lp.dtype)
|
| 526 |
+
encoded_lp = self.vae.encode(video_condition).latent_dist.sample(generator=generator)
|
| 527 |
+
latent_condition = (encoded_lp - latents_mean) * latents_std
|
| 528 |
+
|
| 529 |
+
mask_lat_size = torch.ones(batch_size, 1, num_frames, latent_height, latent_width)
|
| 530 |
+
mask_lat_size[:, :, list(range(1, num_frames))] = 0
|
| 531 |
+
first_frame_mask = mask_lat_size[:, :, 0:1]
|
| 532 |
+
first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal)
|
| 533 |
+
mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2)
|
| 534 |
+
mask_lat_size = mask_lat_size.view(
|
| 535 |
+
batch_size, -1, self.vae_scale_factor_temporal, latent_height, latent_width
|
| 536 |
+
)
|
| 537 |
+
mask_lat_size = mask_lat_size.transpose(1, 2)
|
| 538 |
+
mask_lat_size = mask_lat_size.to(latent_condition.device)
|
| 539 |
+
|
| 540 |
+
lp_image_latents = torch.concat([mask_lat_size, latent_condition], dim=1)
|
| 541 |
+
else:
|
| 542 |
+
lp_image_latents = lp_utils.apply_low_pass_filter(
|
| 543 |
+
orig_image_latents,
|
| 544 |
+
filter_type=lp_filter_type,
|
| 545 |
+
blur_sigma=lp_blur_sigma,
|
| 546 |
+
blur_kernel_size=lp_blur_kernel_size,
|
| 547 |
+
resize_factor=lp_resize_factor,
|
| 548 |
+
)
|
| 549 |
+
# Ensure the temporal dimension is divisible by the transformer's temporal patch size.
|
| 550 |
+
if self.transformer.config.patch_size is not None:
|
| 551 |
+
remainder = lp_image_latents.size(1) % self.transformer.config.patch_size[0]
|
| 552 |
+
if remainder != 0:
|
| 553 |
+
num_to_prepend = self.transformer.config.patch_size[0] - remainder
|
| 554 |
+
num_to_prepend = min(num_to_prepend, lp_image_latents.shape[1])
|
| 555 |
+
first_frames_to_prepend = lp_image_latents[:, :num_to_prepend, ...]
|
| 556 |
+
lp_image_latents = torch.cat([first_frames_to_prepend, lp_image_latents], dim=1)
|
| 557 |
+
|
| 558 |
+
lp_image_latents = lp_image_latents.to(dtype=orig_image_latents.dtype)
|
| 559 |
+
return lp_image_latents
|
| 560 |
+
|
| 561 |
+
@property
def guidance_scale(self):
    """Classifier-free guidance scale set by the most recent `__call__` invocation."""
    return self._guidance_scale
|
| 564 |
+
|
| 565 |
+
@property
def do_classifier_free_guidance(self):
    """Whether classifier-free guidance is active (guidance scale strictly greater than 1)."""
    return self._guidance_scale > 1
|
| 568 |
+
|
| 569 |
+
@property
def num_timesteps(self):
    """Number of scheduler timesteps used in the most recent `__call__` invocation."""
    return self._num_timesteps
|
| 572 |
+
|
| 573 |
+
@property
def current_timestep(self):
    """Timestep currently being denoised, or `None` outside the denoising loop."""
    return self._current_timestep
|
| 576 |
+
|
| 577 |
+
@property
def interrupt(self):
    """Flag checked each denoising step; when truthy, remaining steps are skipped."""
    return self._interrupt
|
| 580 |
+
|
| 581 |
+
@property
def attention_kwargs(self):
    """Extra kwargs forwarded to the transformer's attention processors for this run."""
    return self._attention_kwargs
|
| 584 |
+
|
| 585 |
+
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    image: PipelineImageInput,
    prompt: Union[str, List[str]] = None,
    negative_prompt: Union[str, List[str]] = None,
    height: int = 480,
    width: int = 832,
    num_frames: int = 81,
    num_inference_steps: int = 50,
    guidance_scale: float = 5.0,
    num_videos_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.Tensor] = None,
    prompt_embeds: Optional[torch.Tensor] = None,
    negative_prompt_embeds: Optional[torch.Tensor] = None,
    image_embeds: Optional[torch.Tensor] = None,
    last_image: Optional[torch.Tensor] = None,
    output_type: Optional[str] = "np",
    return_dict: bool = True,
    attention_kwargs: Optional[Dict[str, Any]] = None,
    callback_on_step_end: Optional[
        Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
    ] = None,
    callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    max_sequence_length: int = 512,
    use_low_pass_guidance: bool = False,
    lp_filter_type: str = "none",  # {'gaussian_blur', 'down_up'}
    lp_filter_in_latent: bool = False,  # When set to True, low-pass filter is done after encoder. If False, low-pass filter is applied to image directly before encoder.
    lp_blur_sigma: float = 15.0,  # Used with 'gaussian_blur'. Gaussian filter sigma value.
    lp_blur_kernel_size: float = 0.02734375,  # Used with 'gaussian_blur'. Gaussian filter size. When set to int, used directly as kernel size. When set to float, H * `lp_blur_kernel_size` is used as kernel size.
    lp_resize_factor: float = 0.25,  # Used with 'down_up'. Image is bilinearly downsized to (`lp_resize_factor` * WIDTH, `lp_resize_factor` * HEIGHT) and then back to original.

    lp_strength_schedule_type: str = "none",  # Scheduling type for low-pass filtering strength. Options: {"none", "linear", "interval", "exponential"}
    schedule_blur_kernel_size: bool = False,  # If True, schedule blur kernel size as well. Otherwise, fix to initial value.

    # --- Constant Interval Scheduling Params for LP Strength ---
    schedule_interval_start_time: float = 0.0,  # Starting timestep for interval scheduling
    schedule_interval_end_time: float = 0.05,  # Ending timestep for interval scheduling

    # --- Linear Scheduling Params for LP Strength ---
    schedule_linear_start_weight: float = 1.0,  # Starting LP weight for linear scheduling at t=T (step 0)
    schedule_linear_end_weight: float = 0.0,  # Ending LP weight for linear scheduling at t=T * schedule_linear_end_time
    schedule_linear_end_time: float = 0.5,  # Timestep fraction at which schedule_linear_end is reached

    # --- Exponential Scheduling Params for LP Strength ---
    schedule_exp_decay_rate: float = 10.0,  # Decay rate for 'exponential' schedule. Higher values decay faster. Strength = exp(-rate * time_fraction).
):
    r"""
    The call function to the pipeline for generation.

    Args:
        image (`PipelineImageInput`):
            The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
        prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
            instead.
        negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts not to guide the image generation. If not defined, one has to pass
            `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
            less than `1`).
        height (`int`, defaults to `480`):
            The height of the generated video.
        width (`int`, defaults to `832`):
            The width of the generated video.
        num_frames (`int`, defaults to `81`):
            The number of frames in the generated video.
        num_inference_steps (`int`, defaults to `50`):
            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
            expense of slower inference.
        guidance_scale (`float`, defaults to `5.0`):
            Guidance scale as defined in [Classifier-Free Diffusion
            Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
            of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
            `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
            the text `prompt`, usually at the expense of lower image quality.
        num_videos_per_prompt (`int`, *optional*, defaults to 1):
            The number of images to generate per prompt.
        generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        latents (`torch.Tensor`, *optional*):
            Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
            generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
            tensor is generated by sampling using the supplied random `generator`.
        prompt_embeds (`torch.Tensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
            provided, text embeddings are generated from the `prompt` input argument.
        negative_prompt_embeds (`torch.Tensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
            provided, text embeddings are generated from the `negative_prompt` input argument.
        image_embeds (`torch.Tensor`, *optional*):
            Pre-generated image embeddings. Can be used to easily tweak image inputs (weighting). If not provided,
            image embeddings are generated from the `image` input argument.
        last_image (`torch.Tensor`, *optional*):
            Optional final frame to condition on; when given, both first and last frames are encoded together.
        output_type (`str`, *optional*, defaults to `"np"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple.
        attention_kwargs (`dict`, *optional*):
            A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
            `self.processor` in
            [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
        callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
            A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
            each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
            DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
            list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
        callback_on_step_end_tensor_inputs (`List`, *optional*):
            The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
            will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
            `._callback_tensor_inputs` attribute of your pipeline class.
        max_sequence_length (`int`, *optional*, defaults to `512`):
            The maximum sequence length of the prompt.
        use_low_pass_guidance (`bool`, *optional*, defaults to `False`):
            Whether to use low-pass guidance. This can help to improve the temporal consistency of the generated
            video.
        lp_filter_type (`str`, *optional*, defaults to `"none"`):
            The type of low-pass filter to apply. Can be one of `gaussian_blur` or `down_up`.
        lp_filter_in_latent (`bool`, *optional*, defaults to `False`):
            If `True`, the low-pass filter is applied to the latent representation of the image. If `False`, it is
            applied to the image in pixel space before encoding.
        lp_blur_sigma (`float`, *optional*, defaults to `15.0`):
            The sigma value for the Gaussian blur filter. Only used if `lp_filter_type` is `gaussian_blur`.
        lp_blur_kernel_size (`float`, *optional*, defaults to `0.02734375`):
            The kernel size for the Gaussian blur filter. If an `int`, it's used directly. If a `float`, the kernel
            size is calculated as `height * lp_blur_kernel_size`. Only used if `lp_filter_type` is `gaussian_blur`.
        lp_resize_factor (`float`, *optional*, defaults to `0.25`):
            The resize factor for the down-sampling and up-sampling filter. Only used if `lp_filter_type` is
            `down_up`.
        lp_strength_schedule_type (`str`, *optional*, defaults to `"none"`):
            The scheduling type for the low-pass filter strength. Can be one of `none`, `linear`, `interval`, or
            `exponential`.
        schedule_blur_kernel_size (`bool`, *optional*, defaults to `False`):
            If `True`, the blur kernel size is also scheduled along with the strength. Otherwise, it remains fixed.
        schedule_interval_start_time (`float`, *optional*, defaults to `0.0`):
            The starting timestep fraction for interval scheduling. Only used if `lp_strength_schedule_type` is
            `interval`.
        schedule_interval_end_time (`float`, *optional*, defaults to `0.05`):
            The ending timestep fraction for interval scheduling. Only used if `lp_strength_schedule_type` is
            `interval`.
        schedule_linear_start_weight (`float`, *optional*, defaults to `1.0`):
            The starting weight for the low-pass filter strength in a linear schedule. Corresponds to the first
            timestep. Only used if `lp_strength_schedule_type` is `linear`.
        schedule_linear_end_weight (`float`, *optional*, defaults to `0.0`):
            The ending weight for the low-pass filter strength in a linear schedule. Only used if
            `lp_strength_schedule_type` is `linear`.
        schedule_linear_end_time (`float`, *optional*, defaults to `0.5`):
            The timestep fraction at which `schedule_linear_end_weight` is reached in a linear schedule. Only used
            if `lp_strength_schedule_type` is `linear`.
        schedule_exp_decay_rate (`float`, *optional*, defaults to `10.0`):
            The decay rate for the exponential schedule. Higher values lead to faster decay. Only used if
            `lp_strength_schedule_type` is `exponential`.

    Examples:

    Returns:
        [`~WanPipelineOutput`] or `tuple`:
            If `return_dict` is `True`, a [`WanPipelineOutput`] whose `frames` field holds the generated video is
            returned; otherwise a `tuple` whose first element is the generated video (or raw latents when
            `output_type == "latent"`).
    """
    # A PipelineCallback carries its own declaration of which tensors it wants.
    if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
        callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

    # 1. Check inputs. Raise error if not correct
    self.check_inputs(
        prompt,
        negative_prompt,
        image,
        height,
        width,
        prompt_embeds,
        negative_prompt_embeds,
        image_embeds,
        callback_on_step_end_tensor_inputs,
    )

    # The temporal VAE compresses (num_frames - 1) frames plus the first frame,
    # so num_frames must be ≡ 1 (mod vae_scale_factor_temporal); round if not.
    if num_frames % self.vae_scale_factor_temporal != 1:
        logger.warning(
            f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number."
        )
        num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1
    num_frames = max(num_frames, 1)

    # Stash per-run state read back through the corresponding properties.
    self._guidance_scale = guidance_scale
    self._attention_kwargs = attention_kwargs
    self._current_timestep = None
    self._interrupt = False

    device = self._execution_device

    # 2. Define call parameters
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    # 3. Encode input prompt
    prompt_embeds, negative_prompt_embeds = self.encode_prompt(
        prompt=prompt,
        negative_prompt=negative_prompt,
        do_classifier_free_guidance=self.do_classifier_free_guidance,
        num_videos_per_prompt=num_videos_per_prompt,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        max_sequence_length=max_sequence_length,
        device=device,
    )

    # Encode image embedding
    transformer_dtype = self.transformer.dtype
    prompt_embeds = prompt_embeds.to(transformer_dtype)
    if negative_prompt_embeds is not None:
        negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)

    if image_embeds is None:
        if last_image is None:
            image_embeds = self.encode_image(image, device)
        else:
            # First and last frames are encoded jointly, then flattened into a
            # single sequence of length 2*l per sample.
            image_embeds = self.encode_image([image, last_image], device)
            dup_b, l, d = image_embeds.shape
            image_embeds = image_embeds.reshape(-1, 2 * l, d)
        image_embeds = image_embeds.repeat(batch_size, 1, 1)
        image_embeds = image_embeds.to(transformer_dtype)

    # 4. Prepare timesteps
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = self.scheduler.timesteps

    # 5. Prepare latent variables
    num_channels_latents = self.vae.config.z_dim
    image = self.video_processor.preprocess(image, height=height, width=width).to(device, dtype=torch.float32)
    if last_image is not None:
        last_image = self.video_processor.preprocess(last_image, height=height, width=width).to(
            device, dtype=torch.float32
        )
    # `condition` is the unfiltered image-conditioning latent used on the vanilla passes.
    latents, condition = self.prepare_latents(
        image,
        batch_size * num_videos_per_prompt,
        num_channels_latents,
        height,
        width,
        num_frames,
        torch.float32,
        device,
        generator,
        latents,
        last_image,
    )

    # 6. Denoising loop
    num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
    self._num_timesteps = len(timesteps)

    with self.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            if self.interrupt:
                continue

            self._current_timestep = t

            if self.do_classifier_free_guidance and use_low_pass_guidance:  # low-pass filtering
                # Per-step LP strength in [0, 1] according to the chosen schedule.
                lp_strength = lp_utils.get_lp_strength(
                    step_index=i,
                    total_steps=num_inference_steps,
                    lp_strength_schedule_type=lp_strength_schedule_type,
                    schedule_interval_start_time=schedule_interval_start_time,
                    schedule_interval_end_time=schedule_interval_end_time,
                    schedule_linear_start_weight=schedule_linear_start_weight,
                    schedule_linear_end_weight=schedule_linear_end_weight,
                    schedule_linear_end_time=schedule_linear_end_time,
                    schedule_exp_decay_rate=schedule_exp_decay_rate,
                )

                # Modulate filter parameters by the current strength. For the
                # resize factor, strength 0 maps to 1.0 (no resize) and
                # strength 1 maps to lp_resize_factor (full filtering).
                modulated_lp_blur_sigma = lp_blur_sigma * lp_strength
                modulated_lp_blur_kernel_size = (
                    lp_blur_kernel_size * lp_strength if schedule_blur_kernel_size else lp_blur_kernel_size
                )
                modulated_lp_resize_factor = 1.0 - (1.0 - lp_resize_factor) * lp_strength

                lp_image_latents = self.prepare_lp(
                    lp_filter_type=lp_filter_type,
                    lp_blur_sigma=modulated_lp_blur_sigma,
                    lp_blur_kernel_size=modulated_lp_blur_kernel_size,
                    lp_resize_factor=modulated_lp_resize_factor,
                    generator=generator,
                    num_frames=num_frames,
                    use_low_pass_guidance=use_low_pass_guidance,
                    lp_filter_in_latent=lp_filter_in_latent,
                    orig_image_latents=condition,
                    orig_image_tensor=image,
                )

                if lp_strength == 0.0:  # equivalent to vanilla
                    # Strength exactly zero: fall back to the standard 2-pass CFG batch.
                    latent_model_input = torch.cat([latents] * 2)
                    latent_model_input = torch.cat(
                        [latent_model_input, torch.cat([condition, condition], dim=0)], dim=1
                    ).to(transformer_dtype)
                    concat_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
                else:  # three passes
                    # ALG batch: (uncond + clean image, uncond + LP image, text + LP image).
                    latent_model_input = torch.cat([latents] * 3)
                    img_cond = torch.cat([condition, lp_image_latents, lp_image_latents], dim=0)
                    latent_model_input = torch.cat([latent_model_input, img_cond], dim=1).to(transformer_dtype)
                    concat_prompt_embeds = torch.cat(
                        [negative_prompt_embeds, negative_prompt_embeds, prompt_embeds], dim=0
                    )

            elif self.do_classifier_free_guidance:  # no low-pass filtering
                latent_model_input = torch.cat([latents] * 2)
                latent_model_input = torch.cat(
                    [latent_model_input, torch.cat([condition, condition], dim=0)], dim=1
                ).to(transformer_dtype)
                concat_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

            # NOTE(review): if guidance_scale <= 1 neither branch above runs and
            # `latent_model_input`/`concat_prompt_embeds` are undefined — this
            # pipeline appears to assume CFG is always enabled; confirm upstream.
            timestep = t.expand(latent_model_input.shape[0])
            concat_image_embeds = (
                image_embeds.repeat(latent_model_input.shape[0], 1, 1)
                if image_embeds.shape[0] != latent_model_input.shape[0]
                else image_embeds
            )

            noise_pred = self.transformer(
                hidden_states=latent_model_input,
                timestep=timestep,
                encoder_hidden_states=concat_prompt_embeds,
                encoder_hidden_states_image=concat_image_embeds,
                attention_kwargs=attention_kwargs,
                return_dict=False,
            )[0]

            if noise_pred.shape[0] == 3:  # three chunks
                # ALG guidance: anchor on the clean-image unconditional pass, steer
                # with the (text - uncond) difference computed on the LP-image passes.
                noise_pred_uncond_init, noise_pred_uncond, noise_pred_text = noise_pred.chunk(3)
                noise_pred = noise_pred_uncond_init + guidance_scale * (noise_pred_text - noise_pred_uncond)
            else:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

            if callback_on_step_end is not None:
                callback_kwargs = {}
                # `locals()[k]` looks up the requested tensors by name in this frame.
                for k in callback_on_step_end_tensor_inputs:
                    callback_kwargs[k] = locals()[k]
                callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                latents = callback_outputs.pop("latents", latents)
                prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

            # call the callback, if provided
            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                progress_bar.update()

            if XLA_AVAILABLE:
                xm.mark_step()

    self._current_timestep = None

    if not output_type == "latent":
        # Undo the VAE latent normalization before decoding to pixels.
        latents = latents.to(self.vae.dtype)
        latents_mean = (
            torch.tensor(self.vae.config.latents_mean)
            .view(1, self.vae.config.z_dim, 1, 1, 1)
            .to(latents.device, latents.dtype)
        )
        latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
            latents.device, latents.dtype
        )
        latents = latents / latents_std + latents_mean
        video = self.vae.decode(latents, return_dict=False)[0]
        video = self.video_processor.postprocess_video(video, output_type=output_type)
    else:
        video = latents

    # Offload all models
    self.maybe_free_model_hooks()

    if not return_dict:
        return (video,)

    return WanPipelineOutput(frames=video)
|
exp_code/1_benchmark/ALG/readme.md
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Enhancing Motion Dynamics of Image-to-Video Models via Adaptive Low-Pass Guidance
|
| 2 |
+
|
| 3 |
+
[<u>`Project Page`</u>](https://choi403.github.io/ALG/) | [<u>`arXiv`</u>](https://arxiv.org/abs/2506.08456) | [<u>`Gallery`</u>](https://choi403.github.io/ALG/gallery/)
|
| 4 |
+
|
| 5 |
+
Official implementation for [<u><b>Enhancing Motion Dynamics of Image-to-Video Models via Adaptive Low-Pass Guidance</b></u>](https://arxiv.org/abs/2506.08456)
|
| 6 |
+
<br>
|
| 7 |
+
<a href="https://choi403.github.io/"><u>June Suk Choi</u></a>,
|
| 8 |
+
<a href="https://kyungmnlee.github.io/"><u>Kyungmin Lee</u></a>,
|
| 9 |
+
<a href="https://sihyun.me"><u>Sihyun Yu</u></a>,
|
| 10 |
+
<a href="https://scholar.google.com/citations?user=pM4aZGYAAAAJ&hl=en"><u>Yisol Choi</u></a>,
|
| 11 |
+
<a href="https://alinlab.kaist.ac.kr/shin.html"><u>Jinwoo Shin</u></a>,
|
| 12 |
+
<a href="https://sites.google.com/view/kiminlee"><u>Kimin Lee</u></a>
|
| 13 |
+
|
| 14 |
+
https://github.com/user-attachments/assets/a1faada7-624a-4259-8b40-dcef50700346
|
| 15 |
+
|
| 16 |
+
**Summary**: We propose **Adaptive Low-pass Guidance (ALG)**, a simple yet effective sampling method for pre-trained Image-to-Video (I2V) models. ALG mitigates the common issue of motion suppression by adaptively applying low-pass filtering to the conditioning image during the early stages of the denoising process. This encourages the generation of more dynamic videos without compromising the visual quality or fidelity to the input image.
|
| 17 |
+
|
| 18 |
+
## 1. Setup
|
| 19 |
+
```bash
|
| 20 |
+
conda create -n alg python=3.11 -y
|
| 21 |
+
conda activate alg
|
| 22 |
+
pip install -r requirements.txt # We recommend using torch version 2.5.1 and CUDA version 12.2 for the best compatibility.
|
| 23 |
+
```
|
| 24 |
+
|
| 25 |
+
## 2. How to Run
|
| 26 |
+
|
| 27 |
+
You can use the main script `run.py` to generate videos using our method. Configuration files are located in `./configs`.
|
| 28 |
+
|
| 29 |
+
### Basic Usage
|
| 30 |
+
|
| 31 |
+
You can generate a video using the following command with your image file and prompt.
|
| 32 |
+
|
| 33 |
+
```bash
|
| 34 |
+
python run.py \
|
| 35 |
+
--config [PATH_TO_CONFIG_FILE] \
|
| 36 |
+
--image_path [PATH_TO_INPUT_IMAGE] \
|
| 37 |
+
--prompt "[YOUR_PROMPT]" \
|
| 38 |
+
--output_path [PATH_TO_SAVE_VIDEO]
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
### Examples
|
| 42 |
+
We include a few example images in the asset folder, coupled with their corresponding prompts below.
|
| 43 |
+
|
| 44 |
+
**Generate a video with ALG enabled (more dynamic)**
|
| 45 |
+
```bash
|
| 46 |
+
python run.py \
|
| 47 |
+
--config ./configs/wan_alg.yaml \
|
| 48 |
+
--image_path ./assets/city.png \
|
| 49 |
+
--prompt "A car chase through narrow city streets at night." \
|
| 50 |
+
--output_path city_alg.mp4
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
**Generate a video without ALG (more static)**
|
| 54 |
+
```bash
|
| 55 |
+
python run.py \
|
| 56 |
+
--config ./configs/wan_default.yaml \
|
| 57 |
+
--image_path ./assets/city.png \
|
| 58 |
+
--prompt "A car chase through narrow city streets at night." \
|
| 59 |
+
--output_path city_baseline.mp4
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
**Example prompts**
|
| 63 |
+
```
|
| 64 |
+
city.png: "A car chase through narrow city streets at night."
|
| 65 |
+
snowboard.png: "A snowboarder doing a backflip off a jump."
|
| 66 |
+
boat.png: "A group of people whitewater rafting in a canyon."
|
| 67 |
+
helicopter.png: "A helicopter hovering over a rescue site."
|
| 68 |
+
tennis.png: "A man swinging a tennis racquet at a tennis ball."
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## Configuration
|
| 72 |
+
|
| 73 |
+
All generation and ALG parameters are defined in a single yaml config file (e.g., `configs/wan_alg.yaml`).
|
| 74 |
+
|
| 75 |
+
### Model configuration
|
| 76 |
+
```yaml
|
| 77 |
+
# configs/cogvideox_alg.yaml
|
| 78 |
+
|
| 79 |
+
model:
|
| 80 |
+
path: "THUDM/CogVideoX-5b-I2V" # Hugging Face model path
|
| 81 |
+
dtype: "bfloat16" # Dtype for the model (e.g., float16, bfloat16, float32)
|
| 82 |
+
|
| 83 |
+
generation:
|
| 84 |
+
height: null # Output video height (null for model default)
|
| 85 |
+
width: null # Output video width (null for model default)
|
| 86 |
+
num_frames: 49 # Number of frames to generate
|
| 87 |
+
num_inference_steps: 50 # Denoising steps
|
| 88 |
+
guidance_scale: 6.0 # Classifier-Free Guidance scale
|
| 89 |
+
|
| 90 |
+
video:
|
| 91 |
+
fps: 12 # FPS for the output video file
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
### ALG configuration (low-pass filtering)
|
| 95 |
+
* `use_low_pass_guidance` (`bool`): Enable (`true`) or disable ALG for inference.
|
| 96 |
+
|
| 97 |
+
* **Filter Settings**: Low-pass filtering characteristics.
|
| 98 |
+
|
| 99 |
+
* `lp_filter_type` (`str`): Specifies the type of low-pass filter to use.
|
| 100 |
+
* `"down_up"`: (Recommended) Bilinearly downsamples the image by `lp_resize_factor` and then upsamples it back to the original size.
|
| 101 |
+
* `"gaussian_blur"`: Applies Gaussian blur.
|
| 102 |
+
|
| 103 |
+
* `lp_filter_in_latent` (`bool`): Determines whether the filter is applied in pixel space or latent space.
|
| 104 |
+
* `true`: (Recommended) The filter is applied to the image's latent representation after it has been encoded by the VAE.
|
| 105 |
+
* `false`: The filter is applied directly to the RGB image *before* it is encoded by the VAE.
|
| 106 |
+
|
| 107 |
+
* `lp_resize_factor` (`float`): (for `"down_up"`)
|
| 108 |
+
* The factor by which to downsample the image (e.g., `0.25` means resizing to 25% of the original dimensions). Smaller value means stronger low-pass filtering, and potentially more motion.
|
| 109 |
+
|
| 110 |
+
* `lp_blur_sigma` (`float`): (for `"gaussian_blur"`)
|
| 111 |
+
* The standard deviation (sigma) for the Gaussian kernel. Larger values result in a stronger blur.
|
| 112 |
+
|
| 113 |
+
* `lp_blur_kernel_size` (`float` | `int`): (for `"gaussian_blur"`)
|
| 114 |
+
* The size of the blurring kernel. If a float, it's interpreted as a fraction of the image height.
|
| 115 |
+
|
| 116 |
+
* **Adaptive Scheduling**: Controls how the strength of the low-pass filter changes over the denoising timesteps.
|
| 117 |
+
|
| 118 |
+
* `lp_strength_schedule_type` (`str`): The scheduling strategy. Strength is a multiplier from 0.0 (off) to 1.0 (full).
|
| 119 |
+
* `"interval"`: (Recommended) Applies the filter at full strength (`1.0`) for a specified portion of the denoising process and turns it off (`0.0`) for the rest.
|
| 120 |
+
* `"linear"`: Linearly decays the filter strength from a starting value to an ending value.
|
| 121 |
+
* `"exponential"`: Exponentially decays the filter strength from the beginning.
|
| 122 |
+
* `"none"`: Applies filter at a constant strength throughout.
|
| 123 |
+
|
| 124 |
+
* Parameters for `"interval"` schedule:
|
| 125 |
+
* `schedule_interval_start_time` (`float`): The point to turn the filter on, as a fraction of total steps [`0.0`,`1.0`]. `0.0` is the first step.
|
| 126 |
+
* `schedule_interval_end_time` (`float`): The point to turn the filter off. With 50 steps, `0.06` means the filter is active for the first `50 * 0.06 = 3` steps.
|
| 127 |
+
|
| 128 |
+
* Parameters for `"linear"` schedule:
|
| 129 |
+
* `schedule_linear_start_weight` (`float`): The filter strength at the first timestep (usually `1.0`).
|
| 130 |
+
* `schedule_linear_end_weight` (`float`): The final filter strength to decay towards (usually `0.0`).
|
| 131 |
+
* `schedule_linear_end_time` (`float`): The point in the process (as a fraction of total steps) at which the `end_weight` is reached. The strength remains at `end_weight` after this point.
|
| 132 |
+
|
| 133 |
+
* Parameters for `"exponential"` schedule:
|
| 134 |
+
* `schedule_exp_decay_rate` (`float`): The decay rate `r` for the formula `strength = exp(-r * time_fraction)`. Higher values cause strength to decay more quickly.
|
| 135 |
+
|
| 136 |
+
* `schedule_blur_kernel_size` (`bool`): If `true` and using a scheduler with the `"gaussian_blur"` filter, the blur kernel size will also be scaled down along with the filter strength.
|
| 137 |
+
|
| 138 |
+
## 3. Supported Models
|
| 139 |
+
|
| 140 |
+
We provide implementations and configurations for the following models:
|
| 141 |
+
|
| 142 |
+
* **[CogVideoX](https://huggingface.co/THUDM/CogVideoX-5b-I2V)**: `THUDM/CogVideoX-5b-I2V`
|
| 143 |
+
* **[Wan 2.1](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-480P-Diffusers)**: `Wan-AI/Wan2.1-I2V-14B-480P-Diffusers`
|
| 144 |
+
* **[HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo-I2V)**: `tencent/HunyuanVideo-I2V`
|
| 145 |
+
* [LTX-Video](https://huggingface.co/Lightricks/LTX-Video): `Lightricks/LTX-Video` (Not available yet, coming soon!)
|
| 146 |
+
|
| 147 |
+
We plan to add ALG implementation for LTX-Video as soon as possible!
|
| 148 |
+
|
| 149 |
+
You can create new configuration files for these models by modifying the `model.path` and adjusting the `generation` and `alg` parameters accordingly. Example configs are provided in the `./configs` directory.
|
| 150 |
+
|
| 151 |
+
## 4. More Examples
|
| 152 |
+
|
| 153 |
+
For more qualitative results and video comparisons, please visit the **[Gallery](https://choi403.github.io/ALG/gallery/)** on our project page.
|
| 154 |
+
|
| 155 |
+
## Acknowledgement
|
| 156 |
+
|
| 157 |
+
This code is built upon [Hugging Face Diffusers](https://github.com/huggingface/diffusers) library. We thank the authors of the open-source Image-to-Video models used in our work for making their code and models publicly available.
|
| 158 |
+
|
| 159 |
+
## BibTeX
|
| 160 |
+
|
| 161 |
+
If you find our work useful for your research, please consider citing our paper:
|
| 162 |
+
|
| 163 |
+
```bibtex
|
| 164 |
+
@article{choi2025alg,
|
| 165 |
+
title={Enhancing Motion Dynamics of Image-to-Video Models via Adaptive Low-Pass Guidance},
|
| 166 |
+
author={Choi, June Suk and Lee, Kyungmin and Yu, Sihyun and Choi, Yisol and Shin, Jinwoo and Lee, Kimin},
|
| 167 |
+
year={2025},
|
| 168 |
+
journal={arXiv preprint arXiv:2506.08456},
|
| 169 |
+
}
|
| 170 |
+
```
|
exp_code/1_benchmark/ALG/requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
accelerate==1.3.0
|
| 2 |
+
huggingface-hub
|
| 3 |
+
imageio-ffmpeg
|
| 4 |
+
open_clip_torch
|
| 5 |
+
openai-clip
|
| 6 |
+
opencv-python
|
| 7 |
+
peft==0.15.0
|
| 8 |
+
sentencepiece
|
| 9 |
+
torchvision
|
| 10 |
+
transformers==4.48.1
|
| 11 |
+
xformers==0.0.29.post1
|
| 12 |
+
av==12.0.0
|
| 13 |
+
diffusers @ git+https://github.com/huggingface/diffusers.git@be2fb77dc164083bf8f033874b066c96bc6752b8
|
exp_code/1_benchmark/ALG/run.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
|
| 2 |
+
import argparse
|
| 3 |
+
import torch
|
| 4 |
+
import torchvision
|
| 5 |
+
from PIL import Image
|
| 6 |
+
import logging
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
# --- Diffusers and Transformers Imports ---
|
| 10 |
+
from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, HunyuanVideoTransformer3DModel, FlowMatchEulerDiscreteScheduler
|
| 11 |
+
from diffusers.utils import load_image
|
| 12 |
+
from transformers import CLIPVisionModel
|
| 13 |
+
|
| 14 |
+
# --- Low-pass Pipelines ---
|
| 15 |
+
from pipeline_wan_image2video_lowpass import WanImageToVideoPipeline
|
| 16 |
+
from pipeline_cogvideox_image2video_lowpass import CogVideoXImageToVideoPipeline
|
| 17 |
+
from pipeline_hunyuan_video_image2video_lowpass import HunyuanVideoImageToVideoPipeline
|
| 18 |
+
|
| 19 |
+
from lp_utils import get_hunyuan_video_size
|
| 20 |
+
|
| 21 |
+
from diffusers.utils import export_to_video
|
| 22 |
+
|
| 23 |
+
# --- Basic Logging Setup ---
|
| 24 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout)
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def main(args):
    """Load an I2V diffusion pipeline from a YAML config and generate a video.

    Args:
        args: Parsed CLI namespace with ``config``, ``image_path``, ``prompt``,
            ``output_path`` and ``model_cache_dir`` attributes.

    Raises:
        ValueError: If ``config['model']['path']`` matches none of the
            supported model families (Wan / CogVideoX / HunyuanVideo).
    """
    # 1. Configuration
    IMAGE_PATH = args.image_path
    PROMPT = args.prompt
    OUTPUT_PATH = args.output_path
    MODEL_CACHE_DIR = args.model_cache_dir

    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)

    model_path = config['model']['path']
    model_dtype_str = config['model']['dtype']
    # Resolve e.g. "bfloat16" -> torch.bfloat16.
    model_dtype = getattr(torch, model_dtype_str)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    logger.info(f"Using device: {device}")

    # 2. Pipeline preparation
    if "Wan" in model_path:
        # Image encoder and VAE are loaded in float32 (full precision),
        # while the main pipeline uses the configured dtype.
        image_encoder = CLIPVisionModel.from_pretrained(
            model_path,
            subfolder="image_encoder",
            torch_dtype=torch.float32,
            cache_dir=MODEL_CACHE_DIR,
        )
        vae = AutoencoderKLWan.from_pretrained(
            model_path,
            subfolder="vae",
            torch_dtype=torch.float32,
            cache_dir=MODEL_CACHE_DIR,
        )
        pipe = WanImageToVideoPipeline.from_pretrained(
            model_path,
            vae=vae,
            image_encoder=image_encoder,
            torch_dtype=model_dtype,
            cache_dir=MODEL_CACHE_DIR,
        )
        # Recommended setup (See https://github.com/huggingface/diffusers/blob/3c8b67b3711b668a6e7867e08b54280e51454eb5/src/diffusers/pipelines/wan/pipeline_wan.py#L58C13-L58C23)
        # BUGFIX: YAML usually parses `height` as an int, so the original
        # comparison `== '480'` was always False; normalize via str() so the
        # recommended 480p flow_shift=3.0 is actually selected.
        pipe.scheduler = UniPCMultistepScheduler.from_config(
            pipe.scheduler.config,
            flow_shift=3.0 if str(config['generation']['height']) == '480' else 5.0,
        )
    elif "CogVideoX" in model_path:
        pipe = CogVideoXImageToVideoPipeline.from_pretrained(
            model_path,
            torch_dtype=model_dtype,
            cache_dir=MODEL_CACHE_DIR,
        )
    elif "HunyuanVideo" in model_path:
        # Transformer is loaded in bfloat16; the rest of the pipeline in
        # float16, matching the upstream HunyuanVideo recipe.
        transformer = HunyuanVideoTransformer3DModel.from_pretrained(
            model_path,
            subfolder="transformer",
            torch_dtype=torch.bfloat16,
            cache_dir=MODEL_CACHE_DIR,
        )
        pipe = HunyuanVideoImageToVideoPipeline.from_pretrained(
            model_path, transformer=transformer,
            torch_dtype=torch.float16,
            cache_dir=MODEL_CACHE_DIR,
        )
        pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
            pipe.scheduler.config,
            flow_shift=config['model']['flow_shift'],
            invert_sigmas=config['model']['flow_reverse'],
        )
    else:
        # BUGFIX: previously an unknown model path fell through and crashed
        # later with a NameError on `pipe`; fail fast with a clear message.
        raise ValueError(
            f"Unsupported model path: {model_path!r}. "
            "Expected one of Wan / CogVideoX / HunyuanVideo."
        )
    pipe.to(device)

    logger.info("Pipeline loaded successfully.")

    # 3. Prepare inputs
    input_image = load_image(Image.open(IMAGE_PATH))

    # Fixed seed for reproducible generation.
    generator = torch.Generator(device=device).manual_seed(42)

    pipe_kwargs = {
        "image": input_image,
        "prompt": PROMPT,
        "generator": generator,
    }

    # Forward all non-None generation/ALG parameters from the config.
    params_from_config = {**config.get('generation', {}), **config.get('alg', {})}

    for key, value in params_from_config.items():
        if value is not None:
            pipe_kwargs[key] = value

    logger.info("Starting video generation...")
    log_subset = {k: v for k, v in pipe_kwargs.items() if k not in ['image', 'generator']}
    logger.info(f"Pipeline arguments: {log_subset}")

    if "HunyuanVideo" in model_path:
        # HunyuanVideo derives its output size from the target resolution and
        # the input image's aspect ratio, overriding any configured values.
        pipe_kwargs["height"], pipe_kwargs["width"] = get_hunyuan_video_size(config['video']['resolution'], input_image)

    # 4. Generate video
    video_output = pipe(**pipe_kwargs)
    video_frames = video_output.frames[0]  # Output is a list containing a list of PIL Images
    logger.info(f"Video generation complete. Received {len(video_frames)} frames.")

    # 5. Save video
    export_to_video(video_frames, OUTPUT_PATH, fps=config['video']['fps'])
    logger.info("Video saved successfully. Run complete.")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
if __name__ == '__main__':
    # CLI entry point: declare the string-valued flags in one table, parse,
    # and hand the namespace to main().
    cli = argparse.ArgumentParser(description="Arguments")
    for flag, default in (
        ("--config", "./configs/hunyuan_video_alg.yaml"),
        ("--image_path", "./assets/a red double decker bus driving down a street.jpg"),
        ("--prompt", "a red double decker bus driving down a street"),
        ("--output_path", "output.mp4"),
        ("--model_cache_dir", None),
    ):
        cli.add_argument(flag, type=str, default=default)

    main(cli.parse_args())
|
exp_code/1_benchmark/ALG/run.sh
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Example invocation: generate a video with the HunyuanVideo ALG config.
python run.py \
    --config ./configs/hunyuan_video_alg.yaml \
    --image_path ./assets/city.png \
    --prompt "A car chase through narrow city streets at night." \
    --output_path city_alg.mp4
|
exp_code/1_benchmark/AccVideo/LICENSE.txt
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT
|
| 2 |
+
Tencent HunyuanVideo Release Date: December 3, 2024
|
| 3 |
+
THIS LICENSE AGREEMENT DOES NOT APPLY IN THE EUROPEAN UNION, UNITED KINGDOM AND SOUTH KOREA AND IS EXPRESSLY LIMITED TO THE TERRITORY, AS DEFINED BELOW.
|
| 4 |
+
By clicking to agree or by using, reproducing, modifying, distributing, performing or displaying any portion or element of the Tencent Hunyuan Works, including via any Hosted Service, You will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
|
| 5 |
+
1. DEFINITIONS.
|
| 6 |
+
a. “Acceptable Use Policy” shall mean the policy made available by Tencent as set forth in the Exhibit A.
|
| 7 |
+
b. “Agreement” shall mean the terms and conditions for use, reproduction, distribution, modification, performance and displaying of Tencent Hunyuan Works or any portion or element thereof set forth herein.
|
| 8 |
+
c. “Documentation” shall mean the specifications, manuals and documentation for Tencent Hunyuan made publicly available by Tencent.
|
| 9 |
+
d. “Hosted Service” shall mean a hosted service offered via an application programming interface (API), web access, or any other electronic or remote means.
|
| 10 |
+
e. “Licensee,” “You” or “Your” shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Tencent Hunyuan Works for any purpose and in any field of use.
|
| 11 |
+
f. “Materials” shall mean, collectively, Tencent’s proprietary Tencent Hunyuan and Documentation (and any portion thereof) as made available by Tencent under this Agreement.
|
| 12 |
+
g. “Model Derivatives” shall mean all: (i) modifications to Tencent Hunyuan or any Model Derivative of Tencent Hunyuan; (ii) works based on Tencent Hunyuan or any Model Derivative of Tencent Hunyuan; or (iii) any other machine learning model which is created by transfer of patterns of the weights, parameters, operations, or Output of Tencent Hunyuan or any Model Derivative of Tencent Hunyuan, to that model in order to cause that model to perform similarly to Tencent Hunyuan or a Model Derivative of Tencent Hunyuan, including distillation methods, methods that use intermediate data representations, or methods based on the generation of synthetic data Outputs by Tencent Hunyuan or a Model Derivative of Tencent Hunyuan for training that model. For clarity, Outputs by themselves are not deemed Model Derivatives.
|
| 13 |
+
h. “Output” shall mean the information and/or content output of Tencent Hunyuan or a Model Derivative that results from operating or otherwise using Tencent Hunyuan or a Model Derivative, including via a Hosted Service.
|
| 14 |
+
i. “Tencent,” “We” or “Us” shall mean THL A29 Limited.
|
| 15 |
+
j. “Tencent Hunyuan” shall mean the large language models, text/image/video/audio/3D generation models, and multimodal large language models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Us, including, without limitation to, Tencent HunyuanVideo released at [https://github.com/Tencent/HunyuanVideo].
|
| 16 |
+
k. “Tencent Hunyuan Works” shall mean: (i) the Materials; (ii) Model Derivatives; and (iii) all derivative works thereof.
|
| 17 |
+
l. “Territory” shall mean the worldwide territory, excluding the territory of the European Union, United Kingdom and South Korea.
|
| 18 |
+
m. “Third Party” or “Third Parties” shall mean individuals or legal entities that are not under common control with Us or You.
|
| 19 |
+
n. “including” shall mean including but not limited to.
|
| 20 |
+
2. GRANT OF RIGHTS.
|
| 21 |
+
We grant You, for the Territory only, a non-exclusive, non-transferable and royalty-free limited license under Tencent’s intellectual property or other rights owned by Us embodied in or utilized by the Materials to use, reproduce, distribute, create derivative works of (including Model Derivatives), and make modifications to the Materials, only in accordance with the terms of this Agreement and the Acceptable Use Policy, and You must not violate (or encourage or permit anyone else to violate) any term of this Agreement or the Acceptable Use Policy.
|
| 22 |
+
3. DISTRIBUTION.
|
| 23 |
+
You may, subject to Your compliance with this Agreement, distribute or make available to Third Parties the Tencent Hunyuan Works, exclusively in the Territory, provided that You meet all of the following conditions:
|
| 24 |
+
a. You must provide all such Third Party recipients of the Tencent Hunyuan Works or products or services using them a copy of this Agreement;
|
| 25 |
+
b. You must cause any modified files to carry prominent notices stating that You changed the files;
|
| 26 |
+
c. You are encouraged to: (i) publish at least one technology introduction blogpost or one public statement expressing Your experience of using the Tencent Hunyuan Works; and (ii) mark the products or services developed by using the Tencent Hunyuan Works to indicate that the product/service is “Powered by Tencent Hunyuan”; and
|
| 27 |
+
d. All distributions to Third Parties (other than through a Hosted Service) must be accompanied by a “Notice” text file that contains the following notice: “Tencent Hunyuan is licensed under the Tencent Hunyuan Community License Agreement, Copyright © 2024 Tencent. All Rights Reserved. The trademark rights of “Tencent Hunyuan” are owned by Tencent or its affiliate.”
|
| 28 |
+
You may add Your own copyright statement to Your modifications and, except as set forth in this Section and in Section 5, may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Model Derivatives as a whole, provided Your use, reproduction, modification, distribution, performance and display of the work otherwise complies with the terms and conditions of this Agreement (including as regards the Territory). If You receive Tencent Hunyuan Works from a Licensee as part of an integrated end user product, then this Section 3 of this Agreement will not apply to You.
|
| 29 |
+
4. ADDITIONAL COMMERCIAL TERMS.
|
| 30 |
+
If, on the Tencent Hunyuan version release date, the monthly active users of all products or services made available by or for Licensee is greater than 100 million monthly active users in the preceding calendar month, You must request a license from Tencent, which Tencent may grant to You in its sole discretion, and You are not authorized to exercise any of the rights under this Agreement unless or until Tencent otherwise expressly grants You such rights.
|
| 31 |
+
5. RULES OF USE.
|
| 32 |
+
a. Your use of the Tencent Hunyuan Works must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Tencent Hunyuan Works, which is hereby incorporated by reference into this Agreement. You must include the use restrictions referenced in these Sections 5(a) and 5(b) as an enforceable provision in any agreement (e.g., license agreement, terms of use, etc.) governing the use and/or distribution of Tencent Hunyuan Works and You must provide notice to subsequent users to whom You distribute that Tencent Hunyuan Works are subject to the use restrictions in these Sections 5(a) and 5(b).
|
| 33 |
+
b. You must not use the Tencent Hunyuan Works or any Output or results of the Tencent Hunyuan Works to improve any other AI model (other than Tencent Hunyuan or Model Derivatives thereof).
|
| 34 |
+
c. You must not use, reproduce, modify, distribute, or display the Tencent Hunyuan Works, Output or results of the Tencent Hunyuan Works outside the Territory. Any such use outside the Territory is unlicensed and unauthorized under this Agreement.
|
| 35 |
+
6. INTELLECTUAL PROPERTY.
|
| 36 |
+
a. Subject to Tencent’s ownership of Tencent Hunyuan Works made by or for Tencent and intellectual property rights therein, conditioned upon Your compliance with the terms and conditions of this Agreement, as between You and Tencent, You will be the owner of any derivative works and modifications of the Materials and any Model Derivatives that are made by or for You.
|
| 37 |
+
b. No trademark licenses are granted under this Agreement, and in connection with the Tencent Hunyuan Works, Licensee may not use any name or mark owned by or associated with Tencent or any of its affiliates, except as required for reasonable and customary use in describing and distributing the Tencent Hunyuan Works. Tencent hereby grants You a license to use “Tencent Hunyuan” (the “Mark”) in the Territory solely as required to comply with the provisions of Section 3(c), provided that You comply with any applicable laws related to trademark protection. All goodwill arising out of Your use of the Mark will inure to the benefit of Tencent.
|
| 38 |
+
c. If You commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any person or entity alleging that the Materials or any Output, or any portion of any of the foregoing, infringe any intellectual property or other right owned or licensable by You, then all licenses granted to You under this Agreement shall terminate as of the date such lawsuit or other proceeding is filed. You will defend, indemnify and hold harmless Us from and against any claim by any Third Party arising out of or related to Your or the Third Party’s use or distribution of the Tencent Hunyuan Works.
|
| 39 |
+
d. Tencent claims no rights in Outputs You generate. You and Your users are solely responsible for Outputs and their subsequent uses.
|
| 40 |
+
7. DISCLAIMERS OF WARRANTY AND LIMITATIONS OF LIABILITY.
|
| 41 |
+
a. We are not obligated to support, update, provide training for, or develop any further version of the Tencent Hunyuan Works or to grant any license thereto.
|
| 42 |
+
b. UNLESS AND ONLY TO THE EXTENT REQUIRED BY APPLICABLE LAW, THE TENCENT HUNYUAN WORKS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED “AS IS” WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES OF ANY KIND INCLUDING ANY WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, COURSE OF DEALING, USAGE OF TRADE, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING, REPRODUCING, MODIFYING, PERFORMING, DISPLAYING OR DISTRIBUTING ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH YOUR OR A THIRD PARTY’S USE OR DISTRIBUTION OF ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS AND YOUR EXERCISE OF RIGHTS AND PERMISSIONS UNDER THIS AGREEMENT.
|
| 43 |
+
c. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL TENCENT OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, FOR ANY DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR LOST PROFITS OF ANY KIND ARISING FROM THIS AGREEMENT OR RELATED TO ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS, EVEN IF TENCENT OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
|
| 44 |
+
8. SURVIVAL AND TERMINATION.
|
| 45 |
+
a. The term of this Agreement shall commence upon Your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
|
| 46 |
+
b. We may terminate this Agreement if You breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, You must promptly delete and cease use of the Tencent Hunyuan Works. Sections 6(a), 6(c), 7 and 9 shall survive the termination of this Agreement.
|
| 47 |
+
9. GOVERNING LAW AND JURISDICTION.
|
| 48 |
+
a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the Hong Kong Special Administrative Region of the People’s Republic of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
|
| 49 |
+
b. Exclusive jurisdiction and venue for any dispute arising out of or relating to this Agreement will be a court of competent jurisdiction in the Hong Kong Special Administrative Region of the People’s Republic of China, and Tencent and Licensee consent to the exclusive jurisdiction of such court with respect to any such dispute.
|
| 50 |
+
|
| 51 |
+
EXHIBIT A
|
| 52 |
+
ACCEPTABLE USE POLICY
|
| 53 |
+
|
| 54 |
+
Tencent reserves the right to update this Acceptable Use Policy from time to time.
|
| 55 |
+
Last modified: November 5, 2024
|
| 56 |
+
|
| 57 |
+
Tencent endeavors to promote safe and fair use of its tools and features, including Tencent Hunyuan. You agree not to use Tencent Hunyuan or Model Derivatives:
|
| 58 |
+
1. Outside the Territory;
|
| 59 |
+
2. In any way that violates any applicable national, federal, state, local, international or any other law or regulation;
|
| 60 |
+
3. To harm Yourself or others;
|
| 61 |
+
4. To repurpose or distribute output from Tencent Hunyuan or any Model Derivatives to harm Yourself or others;
|
| 62 |
+
5. To override or circumvent the safety guardrails and safeguards We have put in place;
|
| 63 |
+
6. For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
|
| 64 |
+
7. To generate or disseminate verifiably false information and/or content with the purpose of harming others or influencing elections;
|
| 65 |
+
8. To generate or facilitate false online engagement, including fake reviews and other means of fake online engagement;
|
| 66 |
+
9. To intentionally defame, disparage or otherwise harass others;
|
| 67 |
+
10. To generate and/or disseminate malware (including ransomware) or any other content to be used for the purpose of harming electronic systems;
|
| 68 |
+
11. To generate or disseminate personal identifiable information with the purpose of harming others;
|
| 69 |
+
12. To generate or disseminate information (including images, code, posts, articles), and place the information in any public context (including –through the use of bot generated tweets), without expressly and conspicuously identifying that the information and/or content is machine generated;
|
| 70 |
+
13. To impersonate another individual without consent, authorization, or legal right;
|
| 71 |
+
14. To make high-stakes automated decisions in domains that affect an individual’s safety, rights or wellbeing (e.g., law enforcement, migration, medicine/health, management of critical infrastructure, safety components of products, essential services, credit, employment, housing, education, social scoring, or insurance);
|
| 72 |
+
15. In a manner that violates or disrespects the social ethics and moral standards of other countries or regions;
|
| 73 |
+
16. To perform, facilitate, threaten, incite, plan, promote or encourage violent extremism or terrorism;
|
| 74 |
+
17. For any use intended to discriminate against or harm individuals or groups based on protected characteristics or categories, online or offline social behavior or known or predicted personal or personality characteristics;
|
| 75 |
+
18. To intentionally exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
|
| 76 |
+
19. For military purposes;
|
| 77 |
+
20. To engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or other professional practices.
|
exp_code/1_benchmark/AccVideo/README.md
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AccVideo: Accelerating Video Diffusion Model with Synthetic Dataset
|
| 2 |
+
|
| 3 |
+
This repository is the official PyTorch implementation of [AccVideo](https://arxiv.org/abs/2503.19462). AccVideo is a novel efficient distillation method to accelerate video diffusion models with a synthetic dataset. Our method is 8.5x faster than HunyuanVideo.
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
[](https://arxiv.org/abs/2503.19462)
|
| 7 |
+
[](https://aejion.github.io/accvideo/)
|
| 8 |
+
[](https://huggingface.co/aejion/AccVideo)
|
| 9 |
+
|
| 10 |
+
## 🔥🔥🔥 News
|
| 11 |
+
|
| 12 |
+
* May 26, 2025: We release the inference code and [model weights](https://huggingface.co/aejion/AccVideo-WanX-T2V-14B) of AccVideo based on WanXT2V-14B.
|
| 13 |
+
* Mar 31, 2025: [ComfyUI-Kijai (FP8 Inference)](https://huggingface.co/Kijai/HunyuanVideo_comfy/blob/main/accvideo-t2v-5-steps_fp8_e4m3fn.safetensors): ComfyUI-Integration by [Kijai](https://huggingface.co/Kijai)
|
| 14 |
+
* Mar 26, 2025: We release the inference code and [model weights](https://huggingface.co/aejion/AccVideo) of AccVideo based on HunyuanT2V.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
## 🎥 Demo (Based on HunyuanT2V)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
https://github.com/user-attachments/assets/59f3c5db-d585-4773-8d92-366c1eb040f0
|
| 21 |
+
|
| 22 |
+
## 🎥 Demo (Based on WanXT2V-14B)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
https://github.com/user-attachments/assets/ff9724da-b76c-478d-a9bf-0ee7240494b2
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
## 📑 Open-source Plan
|
| 30 |
+
|
| 31 |
+
- [x] Inference
|
| 32 |
+
- [x] Checkpoints
|
| 33 |
+
- [ ] Multi-GPU Inference
|
| 34 |
+
- [ ] Synthetic Video Dataset, SynVid
|
| 35 |
+
- [ ] Training
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
## 🔧 Installation
|
| 39 |
+
The code is tested on Python 3.10.0, CUDA 11.8 and A100.
|
| 40 |
+
```
|
| 41 |
+
conda create -n accvideo python==3.10.0
|
| 42 |
+
conda activate accvideo
|
| 43 |
+
|
| 44 |
+
pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu118
|
| 45 |
+
pip install -r requirements.txt
|
| 46 |
+
pip install flash-attn==2.7.3 --no-build-isolation
|
| 47 |
+
pip install "huggingface_hub[cli]"
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
## 🤗 Checkpoints
|
| 51 |
+
To download the checkpoints (based on HunyuanT2V), use the following command:
|
| 52 |
+
```bash
|
| 53 |
+
# Download the model weight
|
| 54 |
+
huggingface-cli download aejion/AccVideo --local-dir ./ckpts
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
To download the checkpoints (based on WanX-T2V-14B), use the following command:
|
| 58 |
+
```bash
|
| 59 |
+
# Download the model weight
|
| 60 |
+
huggingface-cli download aejion/AccVideo-WanX-T2V-14B --local-dir ./wanx_t2v_ckpts
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
## 🚀 Inference
|
| 64 |
+
We recommend using a GPU with 80GB of memory. We use AccVideo to distill Hunyuan and WanX.
|
| 65 |
+
|
| 66 |
+
### Inference for HunyuanT2V
|
| 67 |
+
|
| 68 |
+
To run the inference, use the following command:
|
| 69 |
+
```bash
|
| 70 |
+
export MODEL_BASE=./ckpts
|
| 71 |
+
python sample_t2v.py \
|
| 72 |
+
--height 544 \
|
| 73 |
+
--width 960 \
|
| 74 |
+
--num_frames 93 \
|
| 75 |
+
--num_inference_steps 5 \
|
| 76 |
+
--guidance_scale 1 \
|
| 77 |
+
--embedded_cfg_scale 6 \
|
| 78 |
+
--flow_shift 7 \
|
| 79 |
+
--flow-reverse \
|
| 80 |
+
--prompt_file ./assets/prompt.txt \
|
| 81 |
+
--seed 1024 \
|
| 82 |
+
--output_path ./results/accvideo-544p \
|
| 83 |
+
--model_path ./ckpts \
|
| 84 |
+
--dit-weight ./ckpts/accvideo-t2v-5-steps/diffusion_pytorch_model.pt
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
The following table shows the comparisons on inference time using a single A100 GPU:
|
| 88 |
+
|
| 89 |
+
| Model | Setting(height/width/frame) | Inference Time(s) |
|
| 90 |
+
|:------------:|:---------------------------:|:-----------------:|
|
| 91 |
+
| HunyuanVideo | 720px1280px129f | 3234 |
|
| 92 |
+
| Ours | 720px1280px129f | 380(8.5x faster) |
|
| 93 |
+
| HunyuanVideo | 544px960px93f | 704 |
|
| 94 |
+
| Ours | 544px960px93f | 91(7.7x faster) |
|
| 95 |
+
|
| 96 |
+
### Inference for WanXT2V
|
| 97 |
+
|
| 98 |
+
To run the inference, use the following command:
|
| 99 |
+
```bash
|
| 100 |
+
python sample_wanx_t2v.py \
|
| 101 |
+
--task t2v-14B \
|
| 102 |
+
--size 832*480 \
|
| 103 |
+
--ckpt_dir ./wanx_t2v_ckpts \
|
| 104 |
+
--sample_solver 'unipc' \
|
| 105 |
+
--save_dir ./results/accvideo_wanx_14B \
|
| 106 |
+
--sample_steps 10
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
The following table shows the comparisons on inference time using a single A100 GPU:
|
| 110 |
+
|
| 111 |
+
| Model | Setting(height/width/frame) | Inference Time(s) |
|
| 112 |
+
|:-----:|:---------------------------:|:-----------------:|
|
| 113 |
+
| Wanx | 480px832px81f | 932 |
|
| 114 |
+
| Ours | 480px832px81f | 97(9.6x faster) |
|
| 115 |
+
|
| 116 |
+
## 🔗 BibTeX
|
| 117 |
+
|
| 118 |
+
If you find [AccVideo](https://arxiv.org/abs/2503.19462) useful for your research and applications, please cite using this BibTeX:
|
| 119 |
+
|
| 120 |
+
```BibTeX
|
| 121 |
+
@article{zhang2025accvideo,
|
| 122 |
+
title={AccVideo: Accelerating Video Diffusion Model with Synthetic Dataset},
|
| 123 |
+
author={Zhang, Haiyu and Chen, Xinyuan and Wang, Yaohui and Liu, Xihui and Wang, Yunhong and Qiao, Yu},
|
| 124 |
+
journal={arXiv preprint arXiv:2503.19462},
|
| 125 |
+
year={2025}
|
| 126 |
+
}
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
## Acknowledgements
|
| 130 |
+
The code is built upon [FastVideo](https://github.com/hao-ai-lab/FastVideo) and [HunyuanVideo](https://github.com/Tencent/HunyuanVideo), we thank all the contributors for open-sourcing.
|
exp_code/1_benchmark/AccVideo/assets/prompt.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
A honeybee drifting between lavender blossoms. Each wingbeat slowed to a gentle wave, pollen particles floating in still air. In super slow motion, even the bee's compound eyes shimmer, revealing details normally invisible to the human eye.
|
| 2 |
+
A hand with delicate fingers picks up a bright yellow lemon from a wooden bowl filled with lemons and sprigs of mint against a peach-colored background. The hand gently tosses the lemon up and catches it, showcasing its smooth texture. A beige string bag sits beside the bowl, adding a rustic touch to the scene. Additional lemons, one halved, are scattered around the base of the bowl. The even lighting enhances the vibrant colors and creates a fresh, inviting atmosphere.
|
| 3 |
+
The camera follows behind a white vintage SUV with a black roof rack as it speeds up a steep dirt road surrounded by pine trees on a steep mountain slope.
|
exp_code/1_benchmark/AccVideo/models/__init__.py
ADDED
|
File without changes
|
exp_code/1_benchmark/AccVideo/models/hunyuan/__init__.py
ADDED
|
File without changes
|
exp_code/1_benchmark/AccVideo/models/hunyuan/constants.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
__all__ = [
|
| 5 |
+
"C_SCALE",
|
| 6 |
+
"PROMPT_TEMPLATE",
|
| 7 |
+
"MODEL_BASE",
|
| 8 |
+
"PRECISIONS",
|
| 9 |
+
"NORMALIZATION_TYPE",
|
| 10 |
+
"ACTIVATION_TYPE",
|
| 11 |
+
"VAE_PATH",
|
| 12 |
+
"TEXT_ENCODER_PATH",
|
| 13 |
+
"TOKENIZER_PATH",
|
| 14 |
+
"TEXT_PROJECTION",
|
| 15 |
+
"DATA_TYPE",
|
| 16 |
+
"NEGATIVE_PROMPT",
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
PRECISION_TO_TYPE = {
|
| 20 |
+
"fp32": torch.float32,
|
| 21 |
+
"fp16": torch.float16,
|
| 22 |
+
"bf16": torch.bfloat16,
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
# =================== Constant Values =====================
|
| 26 |
+
# Computation scale factor, 1P = 1_000_000_000_000_000. Tensorboard will display the value in PetaFLOPS to avoid
|
| 27 |
+
# overflow error when tensorboard logging values.
|
| 28 |
+
C_SCALE = 1_000_000_000_000_000
|
| 29 |
+
|
| 30 |
+
# When using decoder-only models, we must provide a prompt template to instruct the text encoder
|
| 31 |
+
# on how to generate the text.
|
| 32 |
+
# --------------------------------------------------------------------
|
| 33 |
+
PROMPT_TEMPLATE_ENCODE = (
|
| 34 |
+
"<|start_header_id|>system<|end_header_id|>\n\nDescribe the image by detailing the color, shape, size, texture, "
|
| 35 |
+
"quantity, text, spatial relationships of the objects and background:<|eot_id|>"
|
| 36 |
+
"<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
|
| 37 |
+
)
|
| 38 |
+
PROMPT_TEMPLATE_ENCODE_VIDEO = (
|
| 39 |
+
"<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
|
| 40 |
+
"1. The main content and theme of the video."
|
| 41 |
+
"2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
|
| 42 |
+
"3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
|
| 43 |
+
"4. background environment, light, style and atmosphere."
|
| 44 |
+
"5. camera angles, movements, and transitions used in the video:<|eot_id|>"
|
| 45 |
+
"<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
NEGATIVE_PROMPT = "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion"
|
| 49 |
+
|
| 50 |
+
PROMPT_TEMPLATE = {
|
| 51 |
+
"dit-llm-encode": {"template": PROMPT_TEMPLATE_ENCODE, "crop_start": 36,},
|
| 52 |
+
"dit-llm-encode-video": {
|
| 53 |
+
"template": PROMPT_TEMPLATE_ENCODE_VIDEO,
|
| 54 |
+
"crop_start": 95,
|
| 55 |
+
},
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
# ======================= Model ======================
|
| 59 |
+
PRECISIONS = {"fp32", "fp16", "bf16"}
|
| 60 |
+
NORMALIZATION_TYPE = {"layer", "rms"}
|
| 61 |
+
ACTIVATION_TYPE = {"relu", "silu", "gelu", "gelu_tanh"}
|
| 62 |
+
|
| 63 |
+
# =================== Model Path =====================
|
| 64 |
+
MODEL_BASE = os.getenv("MODEL_BASE", "./ckpts")
|
| 65 |
+
|
| 66 |
+
# =================== Data =======================
|
| 67 |
+
DATA_TYPE = {"image", "video", "image_video"}
|
| 68 |
+
|
| 69 |
+
# 3D VAE
|
| 70 |
+
VAE_PATH = {"884-16c-hy": f"{MODEL_BASE}/hunyuan-video-t2v-720p/vae"}
|
| 71 |
+
|
| 72 |
+
# Text Encoder
|
| 73 |
+
TEXT_ENCODER_PATH = {
|
| 74 |
+
"clipL": f"{MODEL_BASE}/text_encoder_2",
|
| 75 |
+
"llm": f"{MODEL_BASE}/text_encoder",
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
# Tokenizer
|
| 79 |
+
TOKENIZER_PATH = {
|
| 80 |
+
"clipL": f"{MODEL_BASE}/text_encoder_2",
|
| 81 |
+
"llm": f"{MODEL_BASE}/text_encoder",
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
TEXT_PROJECTION = {
|
| 85 |
+
"linear", # Default, an nn.Linear() layer
|
| 86 |
+
"single_refiner", # Single TokenRefiner. Refer to LI-DiT
|
| 87 |
+
}
|
exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .pipelines import HunyuanVideoPipeline
|
| 2 |
+
from .schedulers import FlowMatchDiscreteScheduler
|
exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/pipelines/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .pipeline_hunyuan_video import HunyuanVideoPipeline
|
exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/pipelines/pipeline_hunyuan_video.py
ADDED
|
@@ -0,0 +1,1114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
#
|
| 16 |
+
# Modified from diffusers==0.29.2
|
| 17 |
+
#
|
| 18 |
+
# ==============================================================================
|
| 19 |
+
import inspect
|
| 20 |
+
import math
|
| 21 |
+
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
|
| 22 |
+
import torch
|
| 23 |
+
import torch.distributed as dist
|
| 24 |
+
import numpy as np
|
| 25 |
+
from dataclasses import dataclass
|
| 26 |
+
from packaging import version
|
| 27 |
+
|
| 28 |
+
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
|
| 29 |
+
from diffusers.configuration_utils import FrozenDict
|
| 30 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 31 |
+
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
|
| 32 |
+
from diffusers.models import AutoencoderKL
|
| 33 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 34 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 35 |
+
from diffusers.utils import (
|
| 36 |
+
USE_PEFT_BACKEND,
|
| 37 |
+
deprecate,
|
| 38 |
+
logging,
|
| 39 |
+
replace_example_docstring,
|
| 40 |
+
scale_lora_layers,
|
| 41 |
+
unscale_lora_layers,
|
| 42 |
+
)
|
| 43 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 44 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
| 45 |
+
from diffusers.utils import BaseOutput
|
| 46 |
+
|
| 47 |
+
from ...constants import PRECISION_TO_TYPE
|
| 48 |
+
from ...vae.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
|
| 49 |
+
from ...text_encoder import TextEncoder
|
| 50 |
+
from ...modules import HYVideoDiffusionTransformer
|
| 51 |
+
|
| 52 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 53 |
+
|
| 54 |
+
EXAMPLE_DOC_STRING = """"""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 58 |
+
"""
|
| 59 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 60 |
+
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
|
| 61 |
+
"""
|
| 62 |
+
std_text = noise_pred_text.std(
|
| 63 |
+
dim=list(range(1, noise_pred_text.ndim)), keepdim=True
|
| 64 |
+
)
|
| 65 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 66 |
+
# rescale the results from guidance (fixes overexposure)
|
| 67 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 68 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 69 |
+
noise_cfg = (
|
| 70 |
+
guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 71 |
+
)
|
| 72 |
+
return noise_cfg
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def retrieve_timesteps(
|
| 76 |
+
scheduler,
|
| 77 |
+
num_inference_steps: Optional[int] = None,
|
| 78 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 79 |
+
timesteps: Optional[List[int]] = None,
|
| 80 |
+
sigmas: Optional[List[float]] = None,
|
| 81 |
+
**kwargs,
|
| 82 |
+
):
|
| 83 |
+
"""
|
| 84 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 85 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 86 |
+
|
| 87 |
+
Args:
|
| 88 |
+
scheduler (`SchedulerMixin`):
|
| 89 |
+
The scheduler to get timesteps from.
|
| 90 |
+
num_inference_steps (`int`):
|
| 91 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
| 92 |
+
must be `None`.
|
| 93 |
+
device (`str` or `torch.device`, *optional*):
|
| 94 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 95 |
+
timesteps (`List[int]`, *optional*):
|
| 96 |
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
| 97 |
+
`num_inference_steps` and `sigmas` must be `None`.
|
| 98 |
+
sigmas (`List[float]`, *optional*):
|
| 99 |
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
| 100 |
+
`num_inference_steps` and `timesteps` must be `None`.
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 104 |
+
second element is the number of inference steps.
|
| 105 |
+
"""
|
| 106 |
+
if timesteps is not None and sigmas is not None:
|
| 107 |
+
raise ValueError(
|
| 108 |
+
"Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values"
|
| 109 |
+
)
|
| 110 |
+
if timesteps is not None:
|
| 111 |
+
accepts_timesteps = "timesteps" in set(
|
| 112 |
+
inspect.signature(scheduler.set_timesteps).parameters.keys()
|
| 113 |
+
)
|
| 114 |
+
if not accepts_timesteps:
|
| 115 |
+
raise ValueError(
|
| 116 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 117 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 118 |
+
)
|
| 119 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 120 |
+
timesteps = scheduler.timesteps
|
| 121 |
+
num_inference_steps = len(timesteps)
|
| 122 |
+
elif sigmas is not None:
|
| 123 |
+
accept_sigmas = "sigmas" in set(
|
| 124 |
+
inspect.signature(scheduler.set_timesteps).parameters.keys()
|
| 125 |
+
)
|
| 126 |
+
if not accept_sigmas:
|
| 127 |
+
raise ValueError(
|
| 128 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 129 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
| 130 |
+
)
|
| 131 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
| 132 |
+
timesteps = scheduler.timesteps
|
| 133 |
+
num_inference_steps = len(timesteps)
|
| 134 |
+
else:
|
| 135 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 136 |
+
timesteps = scheduler.timesteps
|
| 137 |
+
return timesteps, num_inference_steps
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@dataclass
|
| 141 |
+
class HunyuanVideoPipelineOutput(BaseOutput):
|
| 142 |
+
videos: Union[torch.Tensor, np.ndarray]
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class HunyuanVideoPipeline(DiffusionPipeline):
|
| 146 |
+
r"""
|
| 147 |
+
Pipeline for text-to-video generation using HunyuanVideo.
|
| 148 |
+
|
| 149 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 150 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
vae ([`AutoencoderKL`]):
|
| 154 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 155 |
+
text_encoder ([`TextEncoder`]):
|
| 156 |
+
Frozen text-encoder.
|
| 157 |
+
text_encoder_2 ([`TextEncoder`]):
|
| 158 |
+
Frozen text-encoder_2.
|
| 159 |
+
transformer ([`HYVideoDiffusionTransformer`]):
|
| 160 |
+
A `HYVideoDiffusionTransformer` to denoise the encoded video latents.
|
| 161 |
+
scheduler ([`SchedulerMixin`]):
|
| 162 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
|
| 163 |
+
"""
|
| 164 |
+
|
| 165 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
|
| 166 |
+
_optional_components = ["text_encoder_2"]
|
| 167 |
+
_exclude_from_cpu_offload = ["transformer"]
|
| 168 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 169 |
+
|
| 170 |
+
def __init__(
|
| 171 |
+
self,
|
| 172 |
+
vae: AutoencoderKL,
|
| 173 |
+
text_encoder: TextEncoder,
|
| 174 |
+
transformer: HYVideoDiffusionTransformer,
|
| 175 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 176 |
+
text_encoder_2: Optional[TextEncoder] = None,
|
| 177 |
+
progress_bar_config: Dict[str, Any] = None,
|
| 178 |
+
args=None,
|
| 179 |
+
):
|
| 180 |
+
super().__init__()
|
| 181 |
+
|
| 182 |
+
# ==========================================================================================
|
| 183 |
+
if progress_bar_config is None:
|
| 184 |
+
progress_bar_config = {}
|
| 185 |
+
if not hasattr(self, "_progress_bar_config"):
|
| 186 |
+
self._progress_bar_config = {}
|
| 187 |
+
self._progress_bar_config.update(progress_bar_config)
|
| 188 |
+
|
| 189 |
+
self.args = args
|
| 190 |
+
# ==========================================================================================
|
| 191 |
+
|
| 192 |
+
if (
|
| 193 |
+
hasattr(scheduler.config, "steps_offset")
|
| 194 |
+
and scheduler.config.steps_offset != 1
|
| 195 |
+
):
|
| 196 |
+
deprecation_message = (
|
| 197 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 198 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 199 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 200 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 201 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 202 |
+
" file"
|
| 203 |
+
)
|
| 204 |
+
deprecate(
|
| 205 |
+
"steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False
|
| 206 |
+
)
|
| 207 |
+
new_config = dict(scheduler.config)
|
| 208 |
+
new_config["steps_offset"] = 1
|
| 209 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 210 |
+
|
| 211 |
+
if (
|
| 212 |
+
hasattr(scheduler.config, "clip_sample")
|
| 213 |
+
and scheduler.config.clip_sample is True
|
| 214 |
+
):
|
| 215 |
+
deprecation_message = (
|
| 216 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 217 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 218 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 219 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 220 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 221 |
+
)
|
| 222 |
+
deprecate(
|
| 223 |
+
"clip_sample not set", "1.0.0", deprecation_message, standard_warn=False
|
| 224 |
+
)
|
| 225 |
+
new_config = dict(scheduler.config)
|
| 226 |
+
new_config["clip_sample"] = False
|
| 227 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 228 |
+
|
| 229 |
+
self.register_modules(
|
| 230 |
+
vae=vae,
|
| 231 |
+
text_encoder=text_encoder,
|
| 232 |
+
transformer=transformer,
|
| 233 |
+
scheduler=scheduler,
|
| 234 |
+
text_encoder_2=text_encoder_2,
|
| 235 |
+
)
|
| 236 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 237 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 238 |
+
self.noise_pertub = 0
|
| 239 |
+
|
| 240 |
+
def encode_prompt(
|
| 241 |
+
self,
|
| 242 |
+
prompt,
|
| 243 |
+
device,
|
| 244 |
+
num_videos_per_prompt,
|
| 245 |
+
do_classifier_free_guidance,
|
| 246 |
+
negative_prompt=None,
|
| 247 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 248 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 249 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 250 |
+
negative_attention_mask: Optional[torch.Tensor] = None,
|
| 251 |
+
lora_scale: Optional[float] = None,
|
| 252 |
+
clip_skip: Optional[int] = None,
|
| 253 |
+
text_encoder: Optional[TextEncoder] = None,
|
| 254 |
+
data_type: Optional[str] = "image",
|
| 255 |
+
):
|
| 256 |
+
r"""
|
| 257 |
+
Encodes the prompt into text encoder hidden states.
|
| 258 |
+
|
| 259 |
+
Args:
|
| 260 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 261 |
+
prompt to be encoded
|
| 262 |
+
device: (`torch.device`):
|
| 263 |
+
torch device
|
| 264 |
+
num_videos_per_prompt (`int`):
|
| 265 |
+
number of videos that should be generated per prompt
|
| 266 |
+
do_classifier_free_guidance (`bool`):
|
| 267 |
+
whether to use classifier free guidance or not
|
| 268 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 269 |
+
The prompt or prompts not to guide the video generation. If not defined, one has to pass
|
| 270 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 271 |
+
less than `1`).
|
| 272 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 273 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 274 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 275 |
+
attention_mask (`torch.Tensor`, *optional*):
|
| 276 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 277 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 278 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 279 |
+
argument.
|
| 280 |
+
negative_attention_mask (`torch.Tensor`, *optional*):
|
| 281 |
+
lora_scale (`float`, *optional*):
|
| 282 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 283 |
+
clip_skip (`int`, *optional*):
|
| 284 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 285 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 286 |
+
text_encoder (TextEncoder, *optional*):
|
| 287 |
+
data_type (`str`, *optional*):
|
| 288 |
+
"""
|
| 289 |
+
if text_encoder is None:
|
| 290 |
+
text_encoder = self.text_encoder
|
| 291 |
+
|
| 292 |
+
# set lora scale so that monkey patched LoRA
|
| 293 |
+
# function of text encoder can correctly access it
|
| 294 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 295 |
+
self._lora_scale = lora_scale
|
| 296 |
+
|
| 297 |
+
# dynamically adjust the LoRA scale
|
| 298 |
+
if not USE_PEFT_BACKEND:
|
| 299 |
+
adjust_lora_scale_text_encoder(text_encoder.model, lora_scale)
|
| 300 |
+
else:
|
| 301 |
+
scale_lora_layers(text_encoder.model, lora_scale)
|
| 302 |
+
|
| 303 |
+
if prompt is not None and isinstance(prompt, str):
|
| 304 |
+
batch_size = 1
|
| 305 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 306 |
+
batch_size = len(prompt)
|
| 307 |
+
else:
|
| 308 |
+
batch_size = prompt_embeds.shape[0]
|
| 309 |
+
|
| 310 |
+
if prompt_embeds is None:
|
| 311 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 312 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 313 |
+
prompt = self.maybe_convert_prompt(prompt, text_encoder.tokenizer)
|
| 314 |
+
|
| 315 |
+
text_inputs = text_encoder.text2tokens(prompt, data_type=data_type)
|
| 316 |
+
|
| 317 |
+
if clip_skip is None:
|
| 318 |
+
prompt_outputs = text_encoder.encode(
|
| 319 |
+
text_inputs, data_type=data_type, device=device
|
| 320 |
+
)
|
| 321 |
+
prompt_embeds = prompt_outputs.hidden_state
|
| 322 |
+
else:
|
| 323 |
+
prompt_outputs = text_encoder.encode(
|
| 324 |
+
text_inputs,
|
| 325 |
+
output_hidden_states=True,
|
| 326 |
+
data_type=data_type,
|
| 327 |
+
device=device,
|
| 328 |
+
)
|
| 329 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 330 |
+
# all the hidden states from the encoder layers. Then index into
|
| 331 |
+
# the tuple to access the hidden states from the desired layer.
|
| 332 |
+
prompt_embeds = prompt_outputs.hidden_states_list[-(clip_skip + 1)]
|
| 333 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 334 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 335 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 336 |
+
# layer.
|
| 337 |
+
prompt_embeds = text_encoder.model.text_model.final_layer_norm(
|
| 338 |
+
prompt_embeds
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
attention_mask = prompt_outputs.attention_mask
|
| 342 |
+
if attention_mask is not None:
|
| 343 |
+
attention_mask = attention_mask.to(device)
|
| 344 |
+
bs_embed, seq_len = attention_mask.shape
|
| 345 |
+
attention_mask = attention_mask.repeat(1, num_videos_per_prompt)
|
| 346 |
+
attention_mask = attention_mask.view(
|
| 347 |
+
bs_embed * num_videos_per_prompt, seq_len
|
| 348 |
+
)
|
| 349 |
+
|
| 350 |
+
if text_encoder is not None:
|
| 351 |
+
prompt_embeds_dtype = text_encoder.dtype
|
| 352 |
+
elif self.transformer is not None:
|
| 353 |
+
prompt_embeds_dtype = self.transformer.dtype
|
| 354 |
+
else:
|
| 355 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 356 |
+
|
| 357 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 358 |
+
|
| 359 |
+
if prompt_embeds.ndim == 2:
|
| 360 |
+
bs_embed, _ = prompt_embeds.shape
|
| 361 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 362 |
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt)
|
| 363 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, -1)
|
| 364 |
+
else:
|
| 365 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 366 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 367 |
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
|
| 368 |
+
prompt_embeds = prompt_embeds.view(
|
| 369 |
+
bs_embed * num_videos_per_prompt, seq_len, -1
|
| 370 |
+
)
|
| 371 |
+
|
| 372 |
+
# get unconditional embeddings for classifier free guidance
|
| 373 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 374 |
+
uncond_tokens: List[str]
|
| 375 |
+
if negative_prompt is None:
|
| 376 |
+
uncond_tokens = [""] * batch_size
|
| 377 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 378 |
+
raise TypeError(
|
| 379 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 380 |
+
f" {type(prompt)}."
|
| 381 |
+
)
|
| 382 |
+
elif isinstance(negative_prompt, str):
|
| 383 |
+
uncond_tokens = [negative_prompt]
|
| 384 |
+
elif batch_size != len(negative_prompt):
|
| 385 |
+
raise ValueError(
|
| 386 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 387 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 388 |
+
" the batch size of `prompt`."
|
| 389 |
+
)
|
| 390 |
+
else:
|
| 391 |
+
uncond_tokens = negative_prompt
|
| 392 |
+
|
| 393 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 394 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 395 |
+
uncond_tokens = self.maybe_convert_prompt(
|
| 396 |
+
uncond_tokens, text_encoder.tokenizer
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
# max_length = prompt_embeds.shape[1]
|
| 400 |
+
uncond_input = text_encoder.text2tokens(uncond_tokens, data_type=data_type)
|
| 401 |
+
|
| 402 |
+
negative_prompt_outputs = text_encoder.encode(
|
| 403 |
+
uncond_input, data_type=data_type, device=device
|
| 404 |
+
)
|
| 405 |
+
negative_prompt_embeds = negative_prompt_outputs.hidden_state
|
| 406 |
+
|
| 407 |
+
negative_attention_mask = negative_prompt_outputs.attention_mask
|
| 408 |
+
if negative_attention_mask is not None:
|
| 409 |
+
negative_attention_mask = negative_attention_mask.to(device)
|
| 410 |
+
_, seq_len = negative_attention_mask.shape
|
| 411 |
+
negative_attention_mask = negative_attention_mask.repeat(
|
| 412 |
+
1, num_videos_per_prompt
|
| 413 |
+
)
|
| 414 |
+
negative_attention_mask = negative_attention_mask.view(
|
| 415 |
+
batch_size * num_videos_per_prompt, seq_len
|
| 416 |
+
)
|
| 417 |
+
|
| 418 |
+
if do_classifier_free_guidance:
|
| 419 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 420 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 421 |
+
|
| 422 |
+
negative_prompt_embeds = negative_prompt_embeds.to(
|
| 423 |
+
dtype=prompt_embeds_dtype, device=device
|
| 424 |
+
)
|
| 425 |
+
|
| 426 |
+
if negative_prompt_embeds.ndim == 2:
|
| 427 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(
|
| 428 |
+
1, num_videos_per_prompt
|
| 429 |
+
)
|
| 430 |
+
negative_prompt_embeds = negative_prompt_embeds.view(
|
| 431 |
+
batch_size * num_videos_per_prompt, -1
|
| 432 |
+
)
|
| 433 |
+
else:
|
| 434 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(
|
| 435 |
+
1, num_videos_per_prompt, 1
|
| 436 |
+
)
|
| 437 |
+
negative_prompt_embeds = negative_prompt_embeds.view(
|
| 438 |
+
batch_size * num_videos_per_prompt, seq_len, -1
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
if text_encoder is not None:
|
| 442 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 443 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 444 |
+
unscale_lora_layers(text_encoder.model, lora_scale)
|
| 445 |
+
|
| 446 |
+
return (
|
| 447 |
+
prompt_embeds,
|
| 448 |
+
negative_prompt_embeds,
|
| 449 |
+
attention_mask,
|
| 450 |
+
negative_attention_mask,
|
| 451 |
+
)
|
| 452 |
+
|
| 453 |
+
def decode_latents(self, latents, enable_tiling=True):
|
| 454 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 455 |
+
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
|
| 456 |
+
|
| 457 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 458 |
+
if enable_tiling:
|
| 459 |
+
self.vae.enable_tiling()
|
| 460 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 461 |
+
else:
|
| 462 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 463 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 464 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 465 |
+
if image.ndim == 4:
|
| 466 |
+
image = image.cpu().permute(0, 2, 3, 1).float()
|
| 467 |
+
else:
|
| 468 |
+
image = image.cpu().float()
|
| 469 |
+
return image
|
| 470 |
+
|
| 471 |
+
def prepare_extra_func_kwargs(self, func, kwargs):
|
| 472 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 473 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 474 |
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
| 475 |
+
# and should be between [0, 1]
|
| 476 |
+
extra_step_kwargs = {}
|
| 477 |
+
|
| 478 |
+
for k, v in kwargs.items():
|
| 479 |
+
accepts = k in set(inspect.signature(func).parameters.keys())
|
| 480 |
+
if accepts:
|
| 481 |
+
extra_step_kwargs[k] = v
|
| 482 |
+
return extra_step_kwargs
|
| 483 |
+
|
| 484 |
+
def check_inputs(
|
| 485 |
+
self,
|
| 486 |
+
prompt,
|
| 487 |
+
height,
|
| 488 |
+
width,
|
| 489 |
+
video_length,
|
| 490 |
+
callback_steps,
|
| 491 |
+
negative_prompt=None,
|
| 492 |
+
prompt_embeds=None,
|
| 493 |
+
negative_prompt_embeds=None,
|
| 494 |
+
callback_on_step_end_tensor_inputs=None,
|
| 495 |
+
vae_ver="88-4c-sd",
|
| 496 |
+
):
|
| 497 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 498 |
+
raise ValueError(
|
| 499 |
+
f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
if video_length is not None:
|
| 503 |
+
if "884" in vae_ver:
|
| 504 |
+
if video_length != 1 and (video_length - 1) % 4 != 0:
|
| 505 |
+
raise ValueError(
|
| 506 |
+
f"`video_length` has to be 1 or a multiple of 4 but is {video_length}."
|
| 507 |
+
)
|
| 508 |
+
elif "888" in vae_ver:
|
| 509 |
+
if video_length != 1 and (video_length - 1) % 8 != 0:
|
| 510 |
+
raise ValueError(
|
| 511 |
+
f"`video_length` has to be 1 or a multiple of 8 but is {video_length}."
|
| 512 |
+
)
|
| 513 |
+
|
| 514 |
+
if callback_steps is not None and (
|
| 515 |
+
not isinstance(callback_steps, int) or callback_steps <= 0
|
| 516 |
+
):
|
| 517 |
+
raise ValueError(
|
| 518 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 519 |
+
f" {type(callback_steps)}."
|
| 520 |
+
)
|
| 521 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 522 |
+
k in self._callback_tensor_inputs
|
| 523 |
+
for k in callback_on_step_end_tensor_inputs
|
| 524 |
+
):
|
| 525 |
+
raise ValueError(
|
| 526 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 527 |
+
)
|
| 528 |
+
|
| 529 |
+
if prompt is not None and prompt_embeds is not None:
|
| 530 |
+
raise ValueError(
|
| 531 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 532 |
+
" only forward one of the two."
|
| 533 |
+
)
|
| 534 |
+
elif prompt is None and prompt_embeds is None:
|
| 535 |
+
raise ValueError(
|
| 536 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 537 |
+
)
|
| 538 |
+
elif prompt is not None and (
|
| 539 |
+
not isinstance(prompt, str) and not isinstance(prompt, list)
|
| 540 |
+
):
|
| 541 |
+
raise ValueError(
|
| 542 |
+
f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 546 |
+
raise ValueError(
|
| 547 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 548 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 549 |
+
)
|
| 550 |
+
|
| 551 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 552 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 553 |
+
raise ValueError(
|
| 554 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 555 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 556 |
+
f" {negative_prompt_embeds.shape}."
|
| 557 |
+
)
|
| 558 |
+
|
| 559 |
+
def prepare_latents(
|
| 560 |
+
self,
|
| 561 |
+
batch_size,
|
| 562 |
+
num_channels_latents,
|
| 563 |
+
height,
|
| 564 |
+
width,
|
| 565 |
+
video_length,
|
| 566 |
+
dtype,
|
| 567 |
+
device,
|
| 568 |
+
generator,
|
| 569 |
+
latents=None,
|
| 570 |
+
):
|
| 571 |
+
shape = (
|
| 572 |
+
batch_size,
|
| 573 |
+
num_channels_latents,
|
| 574 |
+
video_length,
|
| 575 |
+
int(height) // self.vae_scale_factor,
|
| 576 |
+
int(width) // self.vae_scale_factor,
|
| 577 |
+
)
|
| 578 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 579 |
+
raise ValueError(
|
| 580 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 581 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 582 |
+
)
|
| 583 |
+
|
| 584 |
+
if latents is None:
|
| 585 |
+
latents = randn_tensor(
|
| 586 |
+
shape, generator=generator, device=device, dtype=dtype
|
| 587 |
+
)
|
| 588 |
+
else:
|
| 589 |
+
latents = latents.to(device)
|
| 590 |
+
|
| 591 |
+
# Check existence to make it compatible with FlowMatchEulerDiscreteScheduler
|
| 592 |
+
if hasattr(self.scheduler, "init_noise_sigma"):
|
| 593 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 594 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 595 |
+
|
| 596 |
+
# noise_ = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 597 |
+
# latents = math.sqrt(1 - self.noise_pertub) * latents + noise_ * math.sqrt(self.noise_pertub)
|
| 598 |
+
# self.noise_pertub += 0.05
|
| 599 |
+
return latents
|
| 600 |
+
|
| 601 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 602 |
+
def get_guidance_scale_embedding(
|
| 603 |
+
self,
|
| 604 |
+
w: torch.Tensor,
|
| 605 |
+
embedding_dim: int = 512,
|
| 606 |
+
dtype: torch.dtype = torch.float32,
|
| 607 |
+
) -> torch.Tensor:
|
| 608 |
+
"""
|
| 609 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 610 |
+
|
| 611 |
+
Args:
|
| 612 |
+
w (`torch.Tensor`):
|
| 613 |
+
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
|
| 614 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 615 |
+
Dimension of the embeddings to generate.
|
| 616 |
+
dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
|
| 617 |
+
Data type of the generated embeddings.
|
| 618 |
+
|
| 619 |
+
Returns:
|
| 620 |
+
`torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
|
| 621 |
+
"""
|
| 622 |
+
assert len(w.shape) == 1
|
| 623 |
+
w = w * 1000.0
|
| 624 |
+
|
| 625 |
+
half_dim = embedding_dim // 2
|
| 626 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 627 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 628 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 629 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 630 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 631 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 632 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 633 |
+
return emb
|
| 634 |
+
|
| 635 |
+
@property
|
| 636 |
+
def guidance_scale(self):
|
| 637 |
+
return self._guidance_scale
|
| 638 |
+
|
| 639 |
+
@property
|
| 640 |
+
def guidance_rescale(self):
|
| 641 |
+
return self._guidance_rescale
|
| 642 |
+
|
| 643 |
+
@property
|
| 644 |
+
def clip_skip(self):
|
| 645 |
+
return self._clip_skip
|
| 646 |
+
|
| 647 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 648 |
+
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
| 649 |
+
# corresponds to doing no classifier free guidance.
|
| 650 |
+
@property
|
| 651 |
+
def do_classifier_free_guidance(self):
|
| 652 |
+
# return self._guidance_scale > 1 and self.transformer.config.time_cond_proj_dim is None
|
| 653 |
+
return self._guidance_scale > 1
|
| 654 |
+
|
| 655 |
+
@property
|
| 656 |
+
def cross_attention_kwargs(self):
|
| 657 |
+
return self._cross_attention_kwargs
|
| 658 |
+
|
| 659 |
+
@property
|
| 660 |
+
def num_timesteps(self):
|
| 661 |
+
return self._num_timesteps
|
| 662 |
+
|
| 663 |
+
@property
|
| 664 |
+
def interrupt(self):
|
| 665 |
+
return self._interrupt
|
| 666 |
+
|
| 667 |
+
@torch.no_grad()
|
| 668 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 669 |
+
def __call__(
|
| 670 |
+
self,
|
| 671 |
+
prompt: Union[str, List[str]],
|
| 672 |
+
height: int,
|
| 673 |
+
width: int,
|
| 674 |
+
video_length: int,
|
| 675 |
+
data_type: str = "video",
|
| 676 |
+
num_inference_steps: int = 50,
|
| 677 |
+
timesteps: List[int] = None,
|
| 678 |
+
sigmas: List[float] = None,
|
| 679 |
+
guidance_scale: float = 7.5,
|
| 680 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 681 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 682 |
+
eta: float = 0.0,
|
| 683 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 684 |
+
latents: Optional[torch.Tensor] = None,
|
| 685 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 686 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 687 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 688 |
+
negative_attention_mask: Optional[torch.Tensor] = None,
|
| 689 |
+
output_type: Optional[str] = "pil",
|
| 690 |
+
return_dict: bool = True,
|
| 691 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 692 |
+
guidance_rescale: float = 0.0,
|
| 693 |
+
clip_skip: Optional[int] = None,
|
| 694 |
+
callback_on_step_end: Optional[
|
| 695 |
+
Union[
|
| 696 |
+
Callable[[int, int, Dict], None],
|
| 697 |
+
PipelineCallback,
|
| 698 |
+
MultiPipelineCallbacks,
|
| 699 |
+
]
|
| 700 |
+
] = None,
|
| 701 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 702 |
+
freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None,
|
| 703 |
+
vae_ver: str = "88-4c-sd",
|
| 704 |
+
enable_tiling: bool = False,
|
| 705 |
+
n_tokens: Optional[int] = None,
|
| 706 |
+
embedded_guidance_scale: Optional[float] = None,
|
| 707 |
+
few_step: bool = False,
|
| 708 |
+
**kwargs,
|
| 709 |
+
):
|
| 710 |
+
r"""
|
| 711 |
+
The call function to the pipeline for generation.
|
| 712 |
+
|
| 713 |
+
Args:
|
| 714 |
+
prompt (`str` or `List[str]`):
|
| 715 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 716 |
+
height (`int`):
|
| 717 |
+
The height in pixels of the generated image.
|
| 718 |
+
width (`int`):
|
| 719 |
+
The width in pixels of the generated image.
|
| 720 |
+
video_length (`int`):
|
| 721 |
+
The number of frames in the generated video.
|
| 722 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 723 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 724 |
+
expense of slower inference.
|
| 725 |
+
timesteps (`List[int]`, *optional*):
|
| 726 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 727 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 728 |
+
passed will be used. Must be in descending order.
|
| 729 |
+
sigmas (`List[float]`, *optional*):
|
| 730 |
+
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
|
| 731 |
+
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
|
| 732 |
+
will be used.
|
| 733 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 734 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 735 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 736 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 737 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 738 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 739 |
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
| 740 |
+
The number of images to generate per prompt.
|
| 741 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 742 |
+
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
| 743 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 744 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 745 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 746 |
+
generation deterministic.
|
| 747 |
+
latents (`torch.Tensor`, *optional*):
|
| 748 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 749 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 750 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 751 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 752 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 753 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 754 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 755 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 756 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 757 |
+
|
| 758 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 759 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 760 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 761 |
+
Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a
|
| 762 |
+
plain tuple.
|
| 763 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 764 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 765 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 766 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 767 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 768 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 769 |
+
using zero terminal SNR.
|
| 770 |
+
clip_skip (`int`, *optional*):
|
| 771 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 772 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 773 |
+
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
|
| 774 |
+
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
|
| 775 |
+
each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
|
| 776 |
+
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
|
| 777 |
+
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
|
| 778 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 779 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 780 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 781 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 782 |
+
|
| 783 |
+
Examples:
|
| 784 |
+
|
| 785 |
+
Returns:
|
| 786 |
+
[`~HunyuanVideoPipelineOutput`] or `tuple`:
|
| 787 |
+
If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned,
|
| 788 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 789 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 790 |
+
"not-safe-for-work" (nsfw) content.
|
| 791 |
+
"""
|
| 792 |
+
callback = kwargs.pop("callback", None)
|
| 793 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 794 |
+
|
| 795 |
+
if callback is not None:
|
| 796 |
+
deprecate(
|
| 797 |
+
"callback",
|
| 798 |
+
"1.0.0",
|
| 799 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 800 |
+
)
|
| 801 |
+
if callback_steps is not None:
|
| 802 |
+
deprecate(
|
| 803 |
+
"callback_steps",
|
| 804 |
+
"1.0.0",
|
| 805 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 806 |
+
)
|
| 807 |
+
|
| 808 |
+
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
|
| 809 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 810 |
+
|
| 811 |
+
# 0. Default height and width to unet
|
| 812 |
+
# height = height or self.transformer.config.sample_size * self.vae_scale_factor
|
| 813 |
+
# width = width or self.transformer.config.sample_size * self.vae_scale_factor
|
| 814 |
+
# to deal with lora scaling and other possible forward hooks
|
| 815 |
+
|
| 816 |
+
# 1. Check inputs. Raise error if not correct
|
| 817 |
+
self.check_inputs(
|
| 818 |
+
prompt,
|
| 819 |
+
height,
|
| 820 |
+
width,
|
| 821 |
+
video_length,
|
| 822 |
+
callback_steps,
|
| 823 |
+
negative_prompt,
|
| 824 |
+
prompt_embeds,
|
| 825 |
+
negative_prompt_embeds,
|
| 826 |
+
callback_on_step_end_tensor_inputs,
|
| 827 |
+
vae_ver=vae_ver,
|
| 828 |
+
)
|
| 829 |
+
|
| 830 |
+
self._guidance_scale = guidance_scale
|
| 831 |
+
self._guidance_rescale = guidance_rescale
|
| 832 |
+
self._clip_skip = clip_skip
|
| 833 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 834 |
+
self._interrupt = False
|
| 835 |
+
|
| 836 |
+
# 2. Define call parameters
|
| 837 |
+
if prompt is not None and isinstance(prompt, str):
|
| 838 |
+
batch_size = 1
|
| 839 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 840 |
+
batch_size = len(prompt)
|
| 841 |
+
else:
|
| 842 |
+
batch_size = prompt_embeds.shape[0]
|
| 843 |
+
|
| 844 |
+
device = torch.device(f"cuda:{dist.get_rank()}") if dist.is_initialized() else self._execution_device
|
| 845 |
+
|
| 846 |
+
# 3. Encode input prompt
|
| 847 |
+
lora_scale = (
|
| 848 |
+
self.cross_attention_kwargs.get("scale", None)
|
| 849 |
+
if self.cross_attention_kwargs is not None
|
| 850 |
+
else None
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
(
|
| 854 |
+
prompt_embeds,
|
| 855 |
+
negative_prompt_embeds,
|
| 856 |
+
prompt_mask,
|
| 857 |
+
negative_prompt_mask,
|
| 858 |
+
) = self.encode_prompt(
|
| 859 |
+
prompt,
|
| 860 |
+
device,
|
| 861 |
+
num_videos_per_prompt,
|
| 862 |
+
self.do_classifier_free_guidance,
|
| 863 |
+
negative_prompt,
|
| 864 |
+
prompt_embeds=prompt_embeds,
|
| 865 |
+
attention_mask=attention_mask,
|
| 866 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 867 |
+
negative_attention_mask=negative_attention_mask,
|
| 868 |
+
lora_scale=lora_scale,
|
| 869 |
+
clip_skip=self.clip_skip,
|
| 870 |
+
data_type=data_type,
|
| 871 |
+
)
|
| 872 |
+
if self.text_encoder_2 is not None:
|
| 873 |
+
(
|
| 874 |
+
prompt_embeds_2,
|
| 875 |
+
negative_prompt_embeds_2,
|
| 876 |
+
prompt_mask_2,
|
| 877 |
+
negative_prompt_mask_2,
|
| 878 |
+
) = self.encode_prompt(
|
| 879 |
+
prompt,
|
| 880 |
+
device,
|
| 881 |
+
num_videos_per_prompt,
|
| 882 |
+
self.do_classifier_free_guidance,
|
| 883 |
+
negative_prompt,
|
| 884 |
+
prompt_embeds=None,
|
| 885 |
+
attention_mask=None,
|
| 886 |
+
negative_prompt_embeds=None,
|
| 887 |
+
negative_attention_mask=None,
|
| 888 |
+
lora_scale=lora_scale,
|
| 889 |
+
clip_skip=self.clip_skip,
|
| 890 |
+
text_encoder=self.text_encoder_2,
|
| 891 |
+
data_type=data_type,
|
| 892 |
+
)
|
| 893 |
+
else:
|
| 894 |
+
prompt_embeds_2 = None
|
| 895 |
+
negative_prompt_embeds_2 = None
|
| 896 |
+
prompt_mask_2 = None
|
| 897 |
+
negative_prompt_mask_2 = None
|
| 898 |
+
|
| 899 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 900 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 901 |
+
# to avoid doing two forward passes
|
| 902 |
+
if self.do_classifier_free_guidance:
|
| 903 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 904 |
+
if prompt_mask is not None:
|
| 905 |
+
prompt_mask = torch.cat([negative_prompt_mask, prompt_mask])
|
| 906 |
+
if prompt_embeds_2 is not None:
|
| 907 |
+
prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2])
|
| 908 |
+
if prompt_mask_2 is not None:
|
| 909 |
+
prompt_mask_2 = torch.cat([negative_prompt_mask_2, prompt_mask_2])
|
| 910 |
+
|
| 911 |
+
# 4. Prepare timesteps
|
| 912 |
+
extra_set_timesteps_kwargs = self.prepare_extra_func_kwargs(
|
| 913 |
+
self.scheduler.set_timesteps, {"n_tokens": n_tokens}
|
| 914 |
+
)
|
| 915 |
+
timesteps, num_inference_steps = retrieve_timesteps(
|
| 916 |
+
self.scheduler,
|
| 917 |
+
num_inference_steps,
|
| 918 |
+
device,
|
| 919 |
+
timesteps,
|
| 920 |
+
sigmas,
|
| 921 |
+
**extra_set_timesteps_kwargs,
|
| 922 |
+
)
|
| 923 |
+
|
| 924 |
+
if "884" in vae_ver:
|
| 925 |
+
video_length = (video_length - 1) // 4 + 1
|
| 926 |
+
elif "888" in vae_ver:
|
| 927 |
+
video_length = (video_length - 1) // 8 + 1
|
| 928 |
+
else:
|
| 929 |
+
video_length = video_length
|
| 930 |
+
|
| 931 |
+
# 5. Prepare latent variables
|
| 932 |
+
num_channels_latents = self.transformer.config.in_channels
|
| 933 |
+
latents = self.prepare_latents(
|
| 934 |
+
batch_size * num_videos_per_prompt,
|
| 935 |
+
num_channels_latents,
|
| 936 |
+
height,
|
| 937 |
+
width,
|
| 938 |
+
video_length,
|
| 939 |
+
prompt_embeds.dtype,
|
| 940 |
+
device,
|
| 941 |
+
generator,
|
| 942 |
+
latents,
|
| 943 |
+
)
|
| 944 |
+
|
| 945 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 946 |
+
extra_step_kwargs = self.prepare_extra_func_kwargs(
|
| 947 |
+
self.scheduler.step,
|
| 948 |
+
{"generator": generator, "eta": eta},
|
| 949 |
+
)
|
| 950 |
+
|
| 951 |
+
target_dtype = PRECISION_TO_TYPE[self.args.precision]
|
| 952 |
+
autocast_enabled = (
|
| 953 |
+
target_dtype != torch.float32
|
| 954 |
+
) and not self.args.disable_autocast
|
| 955 |
+
vae_dtype = PRECISION_TO_TYPE[self.args.vae_precision]
|
| 956 |
+
vae_autocast_enabled = (
|
| 957 |
+
vae_dtype != torch.float32
|
| 958 |
+
) and not self.args.disable_autocast
|
| 959 |
+
|
| 960 |
+
# 7. Denoising loop
|
| 961 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 962 |
+
self._num_timesteps = len(timesteps)
|
| 963 |
+
|
| 964 |
+
# if few_step:
|
| 965 |
+
# start_latent_list = [0, 10, 20, 30, 40, 50]
|
| 966 |
+
# self.scheduler.sigmas = self.scheduler.sigmas[start_latent_list]
|
| 967 |
+
# num_inference_steps = 5
|
| 968 |
+
# timesteps = timesteps[start_latent_list[:num_inference_steps]]
|
| 969 |
+
|
| 970 |
+
print('sigmas used in generation:', self.scheduler.sigmas)
|
| 971 |
+
print('inference timesteps used in generation:', timesteps)
|
| 972 |
+
|
| 973 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 974 |
+
for i, t in enumerate(timesteps):
|
| 975 |
+
if self.interrupt:
|
| 976 |
+
continue
|
| 977 |
+
# expand the latents if we are doing classifier free guidance
|
| 978 |
+
latent_model_input = (
|
| 979 |
+
torch.cat([latents] * 2)
|
| 980 |
+
if self.do_classifier_free_guidance
|
| 981 |
+
else latents
|
| 982 |
+
)
|
| 983 |
+
latent_model_input = self.scheduler.scale_model_input(
|
| 984 |
+
latent_model_input, t
|
| 985 |
+
)
|
| 986 |
+
|
| 987 |
+
t_expand = t.repeat(latent_model_input.shape[0])
|
| 988 |
+
guidance_expand = (
|
| 989 |
+
torch.tensor(
|
| 990 |
+
[embedded_guidance_scale] * latent_model_input.shape[0],
|
| 991 |
+
dtype=torch.float32,
|
| 992 |
+
device=device,
|
| 993 |
+
).to(target_dtype)
|
| 994 |
+
* 1000.0
|
| 995 |
+
if embedded_guidance_scale is not None
|
| 996 |
+
else None
|
| 997 |
+
)
|
| 998 |
+
|
| 999 |
+
# predict the noise residual
|
| 1000 |
+
with torch.autocast(
|
| 1001 |
+
device_type="cuda", dtype=target_dtype, enabled=autocast_enabled
|
| 1002 |
+
):
|
| 1003 |
+
noise_pred = self.transformer( # For an input image (129, 192, 336) (1, 256, 256)
|
| 1004 |
+
latent_model_input, # [2, 16, 33, 24, 42]
|
| 1005 |
+
t_expand, # [2]
|
| 1006 |
+
text_states=prompt_embeds, # [2, 256, 4096]
|
| 1007 |
+
text_mask=prompt_mask, # [2, 256]
|
| 1008 |
+
text_states_2=prompt_embeds_2, # [2, 768]
|
| 1009 |
+
freqs_cos=freqs_cis[0], # [seqlen, head_dim]
|
| 1010 |
+
freqs_sin=freqs_cis[1], # [seqlen, head_dim]
|
| 1011 |
+
guidance=guidance_expand,
|
| 1012 |
+
return_dict=True,
|
| 1013 |
+
)[
|
| 1014 |
+
"x"
|
| 1015 |
+
]
|
| 1016 |
+
|
| 1017 |
+
# perform guidance
|
| 1018 |
+
if self.do_classifier_free_guidance:
|
| 1019 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1020 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (
|
| 1021 |
+
noise_pred_text - noise_pred_uncond
|
| 1022 |
+
)
|
| 1023 |
+
|
| 1024 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 1025 |
+
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
| 1026 |
+
noise_pred = rescale_noise_cfg(
|
| 1027 |
+
noise_pred,
|
| 1028 |
+
noise_pred_text,
|
| 1029 |
+
guidance_rescale=self.guidance_rescale,
|
| 1030 |
+
)
|
| 1031 |
+
|
| 1032 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1033 |
+
latents = self.scheduler.step(
|
| 1034 |
+
noise_pred, t, latents, **extra_step_kwargs, return_dict=False
|
| 1035 |
+
)[0]
|
| 1036 |
+
|
| 1037 |
+
if callback_on_step_end is not None:
|
| 1038 |
+
callback_kwargs = {}
|
| 1039 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1040 |
+
callback_kwargs[k] = locals()[k]
|
| 1041 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1042 |
+
|
| 1043 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1044 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1045 |
+
negative_prompt_embeds = callback_outputs.pop(
|
| 1046 |
+
"negative_prompt_embeds", negative_prompt_embeds
|
| 1047 |
+
)
|
| 1048 |
+
|
| 1049 |
+
# call the callback, if provided
|
| 1050 |
+
if i == len(timesteps) - 1 or (
|
| 1051 |
+
(i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
|
| 1052 |
+
):
|
| 1053 |
+
if progress_bar is not None:
|
| 1054 |
+
progress_bar.update()
|
| 1055 |
+
if callback is not None and i % callback_steps == 0:
|
| 1056 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1057 |
+
callback(step_idx, t, latents)
|
| 1058 |
+
|
| 1059 |
+
|
| 1060 |
+
if not output_type == "latent":
|
| 1061 |
+
expand_temporal_dim = False
|
| 1062 |
+
if len(latents.shape) == 4:
|
| 1063 |
+
if isinstance(self.vae, AutoencoderKLCausal3D):
|
| 1064 |
+
latents = latents.unsqueeze(2)
|
| 1065 |
+
expand_temporal_dim = True
|
| 1066 |
+
elif len(latents.shape) == 5:
|
| 1067 |
+
pass
|
| 1068 |
+
else:
|
| 1069 |
+
raise ValueError(
|
| 1070 |
+
f"Only support latents with shape (b, c, h, w) or (b, c, f, h, w), but got {latents.shape}."
|
| 1071 |
+
)
|
| 1072 |
+
|
| 1073 |
+
if (
|
| 1074 |
+
hasattr(self.vae.config, "shift_factor")
|
| 1075 |
+
and self.vae.config.shift_factor
|
| 1076 |
+
):
|
| 1077 |
+
latents = (
|
| 1078 |
+
latents / self.vae.config.scaling_factor
|
| 1079 |
+
+ self.vae.config.shift_factor
|
| 1080 |
+
)
|
| 1081 |
+
else:
|
| 1082 |
+
latents = latents / self.vae.config.scaling_factor
|
| 1083 |
+
|
| 1084 |
+
with torch.autocast(
|
| 1085 |
+
device_type="cuda", dtype=vae_dtype, enabled=vae_autocast_enabled
|
| 1086 |
+
):
|
| 1087 |
+
if enable_tiling:
|
| 1088 |
+
self.vae.enable_tiling()
|
| 1089 |
+
image = self.vae.decode(
|
| 1090 |
+
latents, return_dict=False, generator=generator
|
| 1091 |
+
)[0]
|
| 1092 |
+
else:
|
| 1093 |
+
image = self.vae.decode(
|
| 1094 |
+
latents, return_dict=False, generator=generator
|
| 1095 |
+
)[0]
|
| 1096 |
+
|
| 1097 |
+
if expand_temporal_dim or image.shape[2] == 1:
|
| 1098 |
+
image = image.squeeze(2)
|
| 1099 |
+
|
| 1100 |
+
else:
|
| 1101 |
+
image = latents
|
| 1102 |
+
|
| 1103 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 1104 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
|
| 1105 |
+
image = image.cpu().float()
|
| 1106 |
+
print(image.shape)
|
| 1107 |
+
|
| 1108 |
+
# Offload all models
|
| 1109 |
+
self.maybe_free_model_hooks()
|
| 1110 |
+
|
| 1111 |
+
if not return_dict:
|
| 1112 |
+
return image
|
| 1113 |
+
|
| 1114 |
+
return HunyuanVideoPipelineOutput(videos=image)
|
exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/schedulers/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .scheduling_flow_match_discrete import FlowMatchDiscreteScheduler
|
exp_code/1_benchmark/AccVideo/models/hunyuan/diffusion/schedulers/scheduling_flow_match_discrete.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
#
|
| 16 |
+
# Modified from diffusers==0.29.2
|
| 17 |
+
#
|
| 18 |
+
# ==============================================================================
|
| 19 |
+
|
| 20 |
+
from dataclasses import dataclass
|
| 21 |
+
from typing import Optional, Tuple, Union
|
| 22 |
+
|
| 23 |
+
import numpy as np
|
| 24 |
+
import torch
|
| 25 |
+
|
| 26 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| 27 |
+
from diffusers.utils import BaseOutput, logging
|
| 28 |
+
from diffusers.schedulers.scheduling_utils import SchedulerMixin
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class FlowMatchDiscreteSchedulerOutput(BaseOutput):
    """
    Output container returned by the scheduler's `step` method.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
    """

    prev_sample: torch.FloatTensor
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class FlowMatchDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Euler scheduler for discrete flow matching.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        shift (`float`, defaults to 1.0):
            The shift value for the timestep schedule (applied via `sd3_time_shift`).
        reverse (`bool`, defaults to `True`):
            Whether to reverse the timestep schedule (sigmas run 1 -> 0 when True).
        solver (`str`, defaults to `"euler"`):
            ODE solver used in `step`. Only `"euler"` is supported.
        n_tokens (`int`, *optional*):
            Number of tokens in the input sequence. Accepted for API compatibility;
            the schedule computation in this class does not use it.
    """

    _compatibles = []
    order = 1

    # NOTE: `register_to_config` records the __init__ arguments on `self.config`,
    # which is why the methods below read e.g. `self.config.num_train_timesteps`.
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        shift: float = 1.0,
        reverse: bool = True,
        solver: str = "euler",
        n_tokens: Optional[int] = None,
    ):
        # num_train_timesteps + 1 evenly spaced sigmas from 1 down to 0.
        sigmas = torch.linspace(1, 0, num_train_timesteps + 1)

        # Non-reverse mode flips the schedule to run 0 -> 1 instead.
        if not reverse:
            sigmas = sigmas.flip(0)

        self.sigmas = sigmas
        # the value fed to model: sigma scaled into the [0, num_train_timesteps) range
        self.timesteps = (sigmas[:-1] * num_train_timesteps).to(dtype=torch.float32)

        # Step-index bookkeeping; populated lazily on the first `step` call.
        self._step_index = None
        self._begin_index = None

        self.supported_solver = ["euler"]
        if solver not in self.supported_solver:
            raise ValueError(
                f"Solver {solver} not supported. Supported solvers: {self.supported_solver}"
            )

    @property
    def step_index(self):
        """
        The index counter for current timestep. It will increase 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def _sigma_to_t(self, sigma):
        # Linear map: sigma in [0, 1] -> timestep in [0, num_train_timesteps].
        return sigma * self.config.num_train_timesteps

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        n_tokens: int = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
            n_tokens (`int`, *optional*):
                Number of tokens in the input sequence. Accepted for API compatibility; unused here.
        """
        self.num_inference_steps = num_inference_steps

        # Fresh linear schedule for inference, then apply the SD3-style shift.
        sigmas = torch.linspace(1, 0, num_inference_steps + 1)
        sigmas = self.sd3_time_shift(sigmas)

        if not self.config.reverse:
            sigmas = 1 - sigmas

        self.sigmas = sigmas
        self.timesteps = (sigmas[:-1] * self.config.num_train_timesteps).to(
            dtype=torch.float32, device=device
        )

        # Reset step index
        self._step_index = None

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        # Locate `timestep` within the schedule to recover its step index.
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def _init_step_index(self, timestep):
        # Initialize `_step_index` either from the pipeline-provided begin index
        # or by searching the schedule for the given timestep.
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def scale_model_input(
        self, sample: torch.Tensor, timestep: Optional[int] = None
    ) -> torch.Tensor:
        # No-op: flow-matching Euler needs no input scaling; kept for API parity.
        return sample

    def sd3_time_shift(self, t: torch.Tensor):
        # SD3 time shift: t' = shift*t / (1 + (shift-1)*t); identity when shift == 1.
        return (self.config.shift * t) / (1 + (self.config.shift - 1) * t)

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[FlowMatchDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            return_dict (`bool`):
                Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or
                tuple.

        Returns:
            [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is
                returned, otherwise a tuple is returned where the first element is the sample tensor.
        """

        # Integer timesteps are almost always an indexing mistake (enumerate(timesteps)).
        if (
            isinstance(timestep, int)
            or isinstance(timestep, torch.IntTensor)
            or isinstance(timestep, torch.LongTensor)
        ):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` as a timestep."
                ),
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # Upcast to avoid precision issues when computing prev_sample
        sample = sample.to(torch.float32)

        # Signed step size along the sigma schedule (negative when reverse=True).
        dt = self.sigmas[self.step_index + 1] - self.sigmas[self.step_index]

        if self.config.solver == "euler":
            # Explicit Euler update: x_{next} = x + v * dt, with v = model_output.
            prev_sample = sample + model_output.to(torch.float32) * dt
        else:
            raise ValueError(
                f"Solver {self.config.solver} not supported. Supported solvers: {self.supported_solver}"
            )

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample,)

        return FlowMatchDiscreteSchedulerOutput(prev_sample=prev_sample)

    def __len__(self):
        # Length of the scheduler is the size of its training schedule.
        return self.config.num_train_timesteps
|
exp_code/1_benchmark/AccVideo/models/hunyuan/idle_config.py
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from .constants import *
|
| 3 |
+
import re
|
| 4 |
+
from .modules.models import HUNYUAN_VIDEO_CONFIG
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def parse_args(namespace=None):
    """Build the full HunyuanVideo CLI parser, parse argv, and validate the result."""
    parser = argparse.ArgumentParser(description="HunyuanVideo inference script")

    # Each helper attaches one argument group and hands the parser back.
    for attach in (
        add_network_args,
        add_extra_models_args,
        add_denoise_schedule_args,
        add_inference_args,
        add_parallel_args,
    ):
        parser = attach(parser)

    return sanity_check_args(parser.parse_args(namespace=namespace))
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def add_network_args(parser: argparse.ArgumentParser):
    """Register backbone (DiT) network arguments on *parser* and return it."""
    group = parser.add_argument_group(title="HunyuanVideo network args")

    # Main model
    group.add_argument(
        "--model",
        type=str,
        choices=list(HUNYUAN_VIDEO_CONFIG.keys()),
        default="HYVideo-T/2-cfgdistill",
    )
    group.add_argument(
        "--latent-channels",
        # BUG FIX: this was `type=str`, so CLI-supplied values arrived as
        # strings and broke the integer comparison against the VAE channel
        # count in `sanity_check_args`. Latent channels are a count: parse int.
        type=int,
        default=16,
        help="Number of latent channels of DiT. If None, it will be determined by `vae`. If provided, "
        "it still needs to match the latent channels of the VAE model.",
    )
    group.add_argument(
        "--precision",
        type=str,
        default="bf16",
        choices=PRECISIONS,
        help="Precision mode. Options: fp32, fp16, bf16. Applied to the backbone model and optimizer.",
    )

    # RoPE
    group.add_argument(
        "--rope-theta", type=int, default=256, help="Theta used in RoPE."
    )
    return parser
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def add_extra_models_args(parser: argparse.ArgumentParser):
    """Register VAE, text-encoder, and tokenizer arguments on *parser* and return it."""
    g = parser.add_argument_group(
        title="Extra models args, including vae, text encoders and tokenizers)"
    )

    # --- VAE ---
    g.add_argument("--vae", type=str, default="884-16c-hy",
                   choices=list(VAE_PATH), help="Name of the VAE model.")
    g.add_argument("--vae-precision", type=str, default="fp16",
                   choices=PRECISIONS, help="Precision mode for the VAE model.")
    g.add_argument("--vae-tiling", action="store_true",
                   help="Enable tiling for the VAE model to save GPU memory.")
    # Tiling is on by default even though the flag is store_true.
    g.set_defaults(vae_tiling=True)

    # --- Primary (LLM) text encoder ---
    g.add_argument("--text-encoder", type=str, default="llm",
                   choices=list(TEXT_ENCODER_PATH),
                   help="Name of the text encoder model.")
    g.add_argument("--text-encoder-precision", type=str, default="fp16",
                   choices=PRECISIONS,
                   help="Precision mode for the text encoder model.")
    g.add_argument("--text-states-dim", type=int, default=4096,
                   help="Dimension of the text encoder hidden states.")
    g.add_argument("--text-len", type=int, default=256,
                   help="Maximum length of the text input.")
    g.add_argument("--tokenizer", type=str, default="llm",
                   choices=list(TOKENIZER_PATH),
                   help="Name of the tokenizer model.")
    g.add_argument("--prompt-template", type=str, default="dit-llm-encode",
                   choices=PROMPT_TEMPLATE,
                   help="Image prompt template for the decoder-only text encoder model.")
    g.add_argument("--prompt-template-video", type=str, default="dit-llm-encode-video",
                   choices=PROMPT_TEMPLATE,
                   help="Video prompt template for the decoder-only text encoder model.")
    g.add_argument("--hidden-state-skip-layer", type=int, default=2,
                   help="Skip layer for hidden states.")
    g.add_argument("--apply-final-norm", action="store_true",
                   help="Apply final normalization to the used text encoder hidden states.")

    # --- Secondary (CLIP) text encoder ---
    g.add_argument("--text-encoder-2", type=str, default="clipL",
                   choices=list(TEXT_ENCODER_PATH),
                   help="Name of the second text encoder model.")
    g.add_argument("--text-encoder-precision-2", type=str, default="fp16",
                   choices=PRECISIONS,
                   help="Precision mode for the second text encoder model.")
    g.add_argument("--text-states-dim-2", type=int, default=768,
                   help="Dimension of the second text encoder hidden states.")
    g.add_argument("--tokenizer-2", type=str, default="clipL",
                   choices=list(TOKENIZER_PATH),
                   help="Name of the second tokenizer model.")
    g.add_argument("--text-len-2", type=int, default=77,
                   help="Maximum length of the second text input.")

    return parser
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def add_denoise_schedule_args(parser: argparse.ArgumentParser):
    """Register denoise / flow-matching schedule arguments on *parser* and return it."""
    g = parser.add_argument_group(title="Denoise schedule args")

    g.add_argument("--denoise-type", type=str, default="flow",
                   help="Denoise type for noised inputs.")

    # Flow-matching specific knobs.
    g.add_argument("--flow-shift", type=float, default=7.0,
                   help="Shift factor for flow matching schedulers.")
    g.add_argument("--flow-reverse", action="store_true",
                   help="If reverse, learning/sampling from t=1 -> t=0.")
    g.add_argument("--flow-solver", type=str, default="euler",
                   help="Solver for flow matching.")
    g.add_argument("--use-linear-quadratic-schedule", action="store_true",
                   help="Use linear quadratic schedule for flow matching."
                   "Following MovieGen (https://ai.meta.com/static-resource/movie-gen-research-paper)")
    g.add_argument("--linear-schedule-end", type=int, default=25,
                   help="End step for linear quadratic schedule for flow matching.")

    return parser
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def add_inference_args(parser: argparse.ArgumentParser):
    """Register model-loading and inference runtime arguments on *parser* and return it."""
    g = parser.add_argument_group(title="Inference args")

    # ======================== Model loads ========================
    g.add_argument("--model-base", type=str, default="ckpts",
                   help="Root path of all the models, including t2v models and extra models.")
    g.add_argument("--dit-weight", type=str,
                   default="ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt",
                   help="Path to the HunyuanVideo model. If None, search the model in the args.model_root."
                   "1. If it is a file, load the model directly."
                   "2. If it is a directory, search the model in the directory. Support two types of models: "
                   "1) named `pytorch_model_*.pt`"
                   "2) named `*_model_states.pt`, where * can be `mp_rank_00`.")
    g.add_argument("--model-resolution", type=str, default="540p",
                   choices=["540p", "720p"],
                   help="Root path of all the models, including t2v models and extra models.")
    g.add_argument("--load-key", type=str, default="module",
                   help="Key to load the model states. 'module' for the main model, 'ema' for the EMA model.")
    g.add_argument("--use-cpu-offload", action="store_true",
                   help="Use CPU offload for the model load.")

    # ======================== Inference general setting ========================
    g.add_argument("--batch-size", type=int, default=1,
                   help="Batch size for inference and evaluation.")
    g.add_argument("--infer-steps", type=int, default=50,
                   help="Number of denoising steps for inference.")
    g.add_argument("--disable-autocast", action="store_true",
                   help="Disable autocast for denoising loop and vae decoding in pipeline sampling.")
    g.add_argument("--save-path", type=str, default="./results",
                   help="Path to save the generated samples.")
    g.add_argument("--save-path-suffix", type=str, default="",
                   help="Suffix for the directory of saved samples.")
    g.add_argument("--name-suffix", type=str, default="",
                   help="Suffix for the names of saved samples.")
    g.add_argument("--num-videos", type=int, default=1,
                   help="Number of videos to generate for each prompt.")

    # ---sample size---
    g.add_argument("--video-size", type=int, nargs="+", default=(720, 1280),
                   help="Video size for training. If a single value is provided, it will be used for both height "
                   "and width. If two values are provided, they will be used for height and width "
                   "respectively.")
    g.add_argument("--video-length", type=int, default=129,
                   help="How many frames to sample from a video. if using 3d vae, the number should be 4n+1")

    # --- prompt ---
    g.add_argument("--prompt", type=str, default=None,
                   help="Prompt for sampling during evaluation.")
    g.add_argument("--seed-type", type=str, default="auto",
                   choices=["file", "random", "fixed", "auto"],
                   help="Seed type for evaluation. If file, use the seed from the CSV file. If random, generate a "
                   "random seed. If fixed, use the fixed seed given by `--seed`. If auto, `csv` will use the "
                   "seed column if available, otherwise use the fixed `seed` value. `prompt` will use the "
                   "fixed `seed` value.")
    g.add_argument("--seed", type=int, default=None, help="Seed for evaluation.")

    # Classifier-Free Guidance
    g.add_argument("--neg-prompt", type=str, default=None,
                   help="Negative prompt for sampling.")
    g.add_argument("--cfg-scale", type=float, default=1.0,
                   help="Classifier free guidance scale.")
    g.add_argument("--embedded-cfg-scale", type=float, default=6.0,
                   help="Embeded classifier free guidance scale.")

    g.add_argument("--reproduce", action="store_true",
                   help="Enable reproducibility by setting random seeds and deterministic algorithms.")

    return parser
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def add_parallel_args(parser: argparse.ArgumentParser):
    """Register sequence-parallelism (xfuser) arguments on *parser* and return it."""
    group = parser.add_argument_group(title="Parallel args")

    # ======================== Model loads ========================
    group.add_argument(
        "--ulysses-degree", type=int, default=1, help="Ulysses degree.",
    )
    group.add_argument(
        # BUG FIX: help text previously said "Ulysses degree." (copy-paste);
        # this flag controls the ring-attention parallel degree.
        "--ring-degree", type=int, default=1, help="Ring degree.",
    )

    return parser
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def sanity_check_args(args):
    """Validate parsed CLI args; fill `latent_channels` from the VAE name if unset."""
    # VAE names look like e.g. "884-16c-hy": <compression>-<channels>c-<variant>.
    vae_pattern = r"\d{2,3}-\d{1,2}c-\w+"
    if re.match(vae_pattern, args.vae) is None:
        raise ValueError(
            f"Invalid VAE model: {args.vae}. Must be in the format of '{vae_pattern}'."
        )

    # Middle token minus its trailing "c" is the latent channel count.
    channel_token = args.vae.split("-")[1]
    vae_channels = int(channel_token[:-1])

    if args.latent_channels is None:
        args.latent_channels = vae_channels
    if args.latent_channels != vae_channels:
        raise ValueError(
            f"Latent channels ({args.latent_channels}) must match the VAE channels ({vae_channels})."
        )
    return args
|
exp_code/1_benchmark/AccVideo/models/hunyuan/inference.py
ADDED
|
@@ -0,0 +1,687 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import random
|
| 4 |
+
import functools
|
| 5 |
+
from typing import List, Optional, Tuple, Union
|
| 6 |
+
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from loguru import logger
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.distributed as dist
|
| 12 |
+
from models.hunyuan.constants import PROMPT_TEMPLATE, NEGATIVE_PROMPT, PRECISION_TO_TYPE
|
| 13 |
+
from models.hunyuan.vae import load_vae
|
| 14 |
+
from models.hunyuan.modules import load_model
|
| 15 |
+
from models.hunyuan.text_encoder import TextEncoder
|
| 16 |
+
from models.hunyuan.utils.data_utils import align_to
|
| 17 |
+
from models.hunyuan.modules.posemb_layers import get_nd_rotary_pos_embed
|
| 18 |
+
from models.hunyuan.modules.fp8_optimization import convert_fp8_linear
|
| 19 |
+
from models.hunyuan.diffusion.schedulers import FlowMatchDiscreteScheduler
|
| 20 |
+
from models.hunyuan.diffusion.pipelines import HunyuanVideoPipeline
|
| 21 |
+
|
| 22 |
+
# xfuser provides Ulysses/ring sequence parallelism. It is an optional
# dependency: fall back to single-GPU mode (all hooks None) when missing.
try:
    import xfuser
    from xfuser.core.distributed import (
        get_sequence_parallel_world_size,
        get_sequence_parallel_rank,
        get_sp_group,
        initialize_model_parallel,
        init_distributed_environment
    )
except ImportError:
    # BUG FIX: this was a bare `except:`, which swallowed *every* exception
    # (including KeyboardInterrupt and genuine bugs inside xfuser). Only a
    # missing package should be tolerated here.
    xfuser = None
    get_sequence_parallel_world_size = None
    get_sequence_parallel_rank = None
    get_sp_group = None
    initialize_model_parallel = None
    init_distributed_environment = None
|
| 38 |
+
|
| 39 |
+
from safetensors import safe_open
|
| 40 |
+
import io
|
| 41 |
+
|
| 42 |
+
def parallelize_transformer(pipe):
    """Patch ``pipe.transformer`` to run its forward pass under xDiT sequence
    parallelism (Ulysses / ring attention).

    The latent video tensor is sharded along height or width across the
    sequence-parallel group, each rank runs the original forward on its shard,
    and the per-rank outputs are all-gathered back into the full tensor.

    Args:
        pipe: a pipeline object exposing a ``transformer`` with
            ``double_blocks``/``single_blocks`` and the HunyuanVideo forward
            signature.
    """
    transformer = pipe.transformer
    original_forward = transformer.forward

    # Install the hybrid sequence-parallel attention once, at patch time.
    # The original code re-created these objects for every block on every
    # forward call, which is pure per-step overhead.
    from xfuser.core.long_ctx_attention import xFuserLongContextAttention

    for block in transformer.double_blocks + transformer.single_blocks:
        block.hybrid_seq_parallel_attn = xFuserLongContextAttention()

    @functools.wraps(transformer.__class__.forward)
    def new_forward(
        self,
        x: torch.Tensor,
        t: torch.Tensor,  # Should be in range(0, 1000).
        text_states: torch.Tensor = None,
        text_mask: torch.Tensor = None,  # Now we don't use it.
        text_states_2: Optional[torch.Tensor] = None,  # Text embedding for modulation.
        freqs_cos: Optional[torch.Tensor] = None,
        freqs_sin: Optional[torch.Tensor] = None,
        guidance: torch.Tensor = None,  # Guidance for modulation, should be cfg_scale x 1000.
        return_dict: bool = True,
    ):
        # Prefer splitting along height, then width. The factor of 2 is the
        # spatial patch size, so every rank receives whole patches.
        if x.shape[-2] // 2 % get_sequence_parallel_world_size() == 0:
            # try to split x by height
            split_dim = -2
        elif x.shape[-1] // 2 % get_sequence_parallel_world_size() == 0:
            # try to split x by width
            split_dim = -1
        else:
            raise ValueError(
                f"Cannot split video sequence into ulysses_degree x ring_degree ({get_sequence_parallel_world_size()}) parts evenly")

        # patch sizes for the temporal, height, and width dimensions are 1, 2, and 2.
        temporal_size, h, w = x.shape[2], x.shape[3] // 2, x.shape[4] // 2

        x = torch.chunk(x, get_sequence_parallel_world_size(), dim=split_dim)[get_sequence_parallel_rank()]

        # Shard the rotary tables the same way as the latents: reshape the
        # flat (t*h*w, dim) tables to (t, h, w, dim) so the spatial split
        # axis lines up (split_dim - 1 because x also has batch + channel dims).
        dim_thw = freqs_cos.shape[-1]
        freqs_cos = freqs_cos.reshape(temporal_size, h, w, dim_thw)
        freqs_cos = torch.chunk(freqs_cos, get_sequence_parallel_world_size(), dim=split_dim - 1)[
            get_sequence_parallel_rank()]
        freqs_cos = freqs_cos.reshape(-1, dim_thw)
        dim_thw = freqs_sin.shape[-1]
        freqs_sin = freqs_sin.reshape(temporal_size, h, w, dim_thw)
        freqs_sin = torch.chunk(freqs_sin, get_sequence_parallel_world_size(), dim=split_dim - 1)[
            get_sequence_parallel_rank()]
        freqs_sin = freqs_sin.reshape(-1, dim_thw)

        output = original_forward(
            x,
            t,
            text_states,
            text_mask,
            text_states_2,
            freqs_cos,
            freqs_sin,
            guidance,
            return_dict,
        )

        # Reassemble the full sample from the per-rank shards.
        sample = output["x"]
        sample = get_sp_group().all_gather(sample, dim=split_dim)
        output["x"] = sample
        return output

    # Bind as a method on this transformer instance only.
    new_forward = new_forward.__get__(transformer)
    transformer.forward = new_forward
| 111 |
+
|
| 112 |
+
class Inference(object):
    """Container that loads and holds the HunyuanVideo components (DiT model,
    VAE, text encoders) needed for sampling."""

    def __init__(
        self,
        args,
        vae,
        vae_kwargs,
        text_encoder,
        model,
        text_encoder_2=None,
        pipeline=None,
        use_cpu_offload=False,
        device=None,
        logger=None,
        parallel_args=None,
    ):
        self.vae = vae
        self.vae_kwargs = vae_kwargs

        self.text_encoder = text_encoder
        self.text_encoder_2 = text_encoder_2

        self.model = model
        self.pipeline = pipeline
        self.use_cpu_offload = use_cpu_offload

        self.args = args
        # Default to CUDA when available, otherwise CPU.
        self.device = (
            device
            if device is not None
            else "cuda"
            if torch.cuda.is_available()
            else "cpu"
        )
        self.logger = logger
        self.parallel_args = parallel_args

    @classmethod
    def from_pretrained(cls, pretrained_model_path, args, device=None, **kwargs):
        """
        Initialize the Inference pipeline.

        Args:
            pretrained_model_path (str or pathlib.Path): The model path, including t2v, text encoder and vae checkpoints.
            args (argparse.Namespace): The arguments for the pipeline.
            device (int): The device for inference. Default is 0.
        """
        # ========================================================================
        logger.info(f"Got text-to-video model root path: {pretrained_model_path}")

        # ==================== Initialize Distributed Environment ================
        if args.ulysses_degree > 1 or args.ring_degree > 1:
            assert xfuser is not None, \
                "Ulysses Attention and Ring Attention requires xfuser package."

            assert args.use_cpu_offload is False, \
                "Cannot enable use_cpu_offload in the distributed environment."

            dist.init_process_group("nccl")

            assert dist.get_world_size() == args.ring_degree * args.ulysses_degree, \
                "number of GPUs should be equal to ring_degree * ulysses_degree."

            init_distributed_environment(rank=dist.get_rank(), world_size=dist.get_world_size())

            initialize_model_parallel(
                sequence_parallel_degree=dist.get_world_size(),
                ring_degree=args.ring_degree,
                ulysses_degree=args.ulysses_degree,
            )
            device = torch.device(f"cuda:{os.environ['LOCAL_RANK']}")
        else:
            if device is None:
                device = "cuda" if torch.cuda.is_available() else "cpu"

        parallel_args = {"ulysses_degree": args.ulysses_degree, "ring_degree": args.ring_degree}

        # Disable gradient tracking globally; this class only does inference.
        torch.set_grad_enabled(False)

        # =========================== Build main model ===========================
        logger.info("Building model...")
        factor_kwargs = {"device": device, "dtype": PRECISION_TO_TYPE[args.precision]}
        in_channels = args.latent_channels
        out_channels = args.latent_channels

        model = load_model(
            args,
            in_channels=in_channels,
            out_channels=out_channels,
            factor_kwargs=factor_kwargs,
        )
        if args.use_fp8:
            convert_fp8_linear(model, args.dit_weight, original_dtype=PRECISION_TO_TYPE[args.precision])
        model = model.to(device)
        model = Inference.load_state_dict(args, model, pretrained_model_path)
        model.eval()

        # ============================= Build extra models ========================
        # VAE (kept on CPU when cpu-offload is requested).
        vae, _, s_ratio, t_ratio = load_vae(
            args.vae,
            args.vae_precision,
            logger=logger,
            device=device if not args.use_cpu_offload else "cpu",
        )
        vae_kwargs = {"s_ratio": s_ratio, "t_ratio": t_ratio}

        # Text encoder: the template may reserve a prefix that must be cropped
        # from the encoded sequence, so extend max_length by crop_start.
        if args.prompt_template_video is not None:
            crop_start = PROMPT_TEMPLATE[args.prompt_template_video].get(
                "crop_start", 0
            )
        elif args.prompt_template is not None:
            crop_start = PROMPT_TEMPLATE[args.prompt_template].get("crop_start", 0)
        else:
            crop_start = 0
        max_length = args.text_len + crop_start

        # prompt_template
        prompt_template = (
            PROMPT_TEMPLATE[args.prompt_template]
            if args.prompt_template is not None
            else None
        )

        # prompt_template_video
        prompt_template_video = (
            PROMPT_TEMPLATE[args.prompt_template_video]
            if args.prompt_template_video is not None
            else None
        )

        text_encoder = TextEncoder(
            text_encoder_type=args.text_encoder,
            max_length=max_length,
            text_encoder_precision=args.text_encoder_precision,
            tokenizer_type=args.tokenizer,
            prompt_template=prompt_template,
            prompt_template_video=prompt_template_video,
            hidden_state_skip_layer=args.hidden_state_skip_layer,
            apply_final_norm=args.apply_final_norm,
            reproduce=args.reproduce,
            logger=logger,
            device=device if not args.use_cpu_offload else "cpu",
        )
        text_encoder_2 = None
        if args.text_encoder_2 is not None:
            text_encoder_2 = TextEncoder(
                text_encoder_type=args.text_encoder_2,
                max_length=args.text_len_2,
                text_encoder_precision=args.text_encoder_precision_2,
                tokenizer_type=args.tokenizer_2,
                reproduce=args.reproduce,
                logger=logger,
                device=device if not args.use_cpu_offload else "cpu",
            )

        return cls(
            args=args,
            vae=vae,
            vae_kwargs=vae_kwargs,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            model=model,
            use_cpu_offload=args.use_cpu_offload,
            device=device,
            logger=logger,
            parallel_args=parallel_args
        )

    @staticmethod
    def _resolve_weight_from_dir(dit_weight, load_key):
        """Pick the checkpoint file inside directory *dit_weight*.

        Recognizes the HunyuanDiT official layout (``pytorch_model_*.pt``)
        and deepspeed checkpoints (``*_model_states.pt``).

        Returns:
            tuple: (model_path, bare_model) where bare_model is True when the
            file holds a bare state dict and False when it must be indexed by
            *load_key*.

        Raises:
            ValueError: if the directory is empty or in an unknown format.
        """
        files = list(dit_weight.glob("*.pt"))
        if len(files) == 0:
            raise ValueError(f"No model weights found in {dit_weight}")
        # Compare against the file *name*: the original compared the full path
        # string, so this branch could never match.
        if files[0].name.startswith("pytorch_model_"):
            model_path = dit_weight / f"pytorch_model_{load_key}.pt"
            bare_model = True
        elif any(f.name.endswith("_model_states.pt") for f in files):
            files = [f for f in files if f.name.endswith("_model_states.pt")]
            model_path = files[0]
            if len(files) > 1:
                logger.warning(
                    f"Multiple model weights found in {dit_weight}, using {model_path}"
                )
            bare_model = False
        else:
            raise ValueError(
                f"Invalid model path: {dit_weight} with unrecognized weight format: "
                f"{list(map(str, files))}. When given a directory as --dit-weight, only "
                f"`pytorch_model_*.pt`(provided by HunyuanDiT official) and "
                f"`*_model_states.pt`(saved by deepspeed) can be parsed. If you want to load a "
                f"specific weight file, please provide the full path to the file."
            )
        return model_path, bare_model

    @staticmethod
    def load_state_dict(args, model, pretrained_model_path):
        """Load DiT weights into *model* and return it.

        Supports ``.safetensors`` and torch ``.pt`` files, bare state dicts as
        well as wrapped checkpoints indexed by ``args.load_key``.
        """
        load_key = args.load_key
        # Test for None *before* wrapping in Path: the original built
        # Path(args.dit_weight) first and then checked the Path for None, so
        # the fallback branch was unreachable (Path(None) raises TypeError).
        if args.dit_weight is None:
            model_dir = pretrained_model_path / f"t2v_{args.model_resolution}"
            model_path, bare_model = Inference._resolve_weight_from_dir(
                model_dir, load_key
            )
        else:
            dit_weight = Path(args.dit_weight)
            if dit_weight.is_dir():
                model_path, bare_model = Inference._resolve_weight_from_dir(
                    dit_weight, load_key
                )
            elif dit_weight.is_file():
                model_path = dit_weight
                # "unknown": decide from the checkpoint contents below.
                bare_model = "unknown"
            else:
                # Path does not exist locally; pass it through unchanged and
                # let the loader surface the error (kept from the original).
                model_path = args.dit_weight
                bare_model = "unknown"

        logger.info(f"Loading torch model {model_path}...")
        if str(model_path).endswith(".safetensors"):
            state_dict = {}
            with safe_open(str(model_path), framework="pt", device="cpu") as file:
                for k in file.keys():
                    state_dict[k] = file.get_tensor(k)
        else:
            state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)

        # A wrapped checkpoint stores the weights under "ema"/"module" keys.
        if bare_model == "unknown" and ("ema" in state_dict or "module" in state_dict):
            bare_model = False
        if bare_model is False:
            if load_key in state_dict:
                state_dict = state_dict[load_key]
            else:
                raise KeyError(
                    f"Missing key: `{load_key}` in the checkpoint: {model_path}. The keys in the checkpoint "
                    f"are: {list(state_dict.keys())}."
                )
        model.load_state_dict(state_dict, strict=True)
        return model

    @staticmethod
    def parse_size(size):
        """Normalize *size* to a [height, width] pair.

        Accepts a single int (square) or a 1- or 2-element list/tuple.

        Raises:
            ValueError: for any other shape or type.
        """
        if isinstance(size, int):
            size = [size]
        if not isinstance(size, (list, tuple)):
            raise ValueError(f"Size must be an integer or (height, width), got {size}.")
        if len(size) == 1:
            size = [size[0], size[0]]
        if len(size) != 2:
            raise ValueError(f"Size must be an integer or (height, width), got {size}.")
        return size
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class HunyuanVideoSampler(Inference):
    """Text-to-video sampler built on the HunyuanVideo flow-matching pipeline."""

    def __init__(
        self,
        args,
        vae,
        vae_kwargs,
        text_encoder,
        model,
        text_encoder_2=None,
        pipeline=None,
        use_cpu_offload=False,
        device=0,
        logger=None,
        parallel_args=None
    ):
        super().__init__(
            args,
            vae,
            vae_kwargs,
            text_encoder,
            model,
            text_encoder_2=text_encoder_2,
            pipeline=pipeline,
            use_cpu_offload=use_cpu_offload,
            device=device,
            logger=logger,
            parallel_args=parallel_args
        )

        self.pipeline = self.load_diffusion_pipeline(
            args=args,
            vae=self.vae,
            text_encoder=self.text_encoder,
            text_encoder_2=self.text_encoder_2,
            model=self.model,
            device=self.device,
        )

        self.default_negative_prompt = NEGATIVE_PROMPT
        # Patch the transformer for sequence parallelism in multi-GPU runs.
        if self.parallel_args['ulysses_degree'] > 1 or self.parallel_args['ring_degree'] > 1:
            parallelize_transformer(self.pipeline)

    def load_diffusion_pipeline(
        self,
        args,
        vae,
        text_encoder,
        text_encoder_2,
        model,
        scheduler=None,
        device=None,
        progress_bar_config=None,
        data_type="video",
    ):
        """Load the denoising scheduler for inference."""
        if scheduler is None:
            if args.denoise_type == "flow":
                scheduler = FlowMatchDiscreteScheduler(
                    shift=args.flow_shift,
                    reverse=args.flow_reverse,
                    solver=args.flow_solver,
                )
            else:
                raise ValueError(f"Invalid denoise type {args.denoise_type}")

        pipeline = HunyuanVideoPipeline(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            transformer=model,
            scheduler=scheduler,
            progress_bar_config=progress_bar_config,
            args=args,
        )
        if self.use_cpu_offload:
            pipeline.enable_sequential_cpu_offload()
        else:
            pipeline = pipeline.to(device)

        return pipeline

    def get_rotary_pos_embed(self, video_length, height, width):
        """Build the RoPE cos/sin tables for a (video_length, height, width)
        sample, sized to the latent grid after VAE and patch downsampling.

        Returns:
            tuple: (freqs_cos, freqs_sin), each of shape (n_tokens, head_dim).
        """
        target_ndim = 3
        ndim = 5 - 2
        # The VAE name encodes its compression: "884" means 4x temporal and
        # 8x spatial downsampling, "888" is 8x on all three axes.
        if "884" in self.args.vae:
            latents_size = [(video_length - 1) // 4 + 1, height // 8, width // 8]
        elif "888" in self.args.vae:
            latents_size = [(video_length - 1) // 8 + 1, height // 8, width // 8]
        else:
            latents_size = [video_length, height // 8, width // 8]

        if isinstance(self.model.patch_size, int):
            assert all(s % self.model.patch_size == 0 for s in latents_size), (
                f"Latent size(last {ndim} dimensions) should be divisible by patch size({self.model.patch_size}), "
                f"but got {latents_size}."
            )
            rope_sizes = [s // self.model.patch_size for s in latents_size]
        elif isinstance(self.model.patch_size, list):
            assert all(
                s % self.model.patch_size[idx] == 0
                for idx, s in enumerate(latents_size)
            ), (
                f"Latent size(last {ndim} dimensions) should be divisible by patch size({self.model.patch_size}), "
                f"but got {latents_size}."
            )
            rope_sizes = [
                s // self.model.patch_size[idx] for idx, s in enumerate(latents_size)
            ]
        else:
            # The original fell through with `rope_sizes` unbound and crashed
            # later with a NameError; fail fast with a clear message instead.
            raise ValueError(
                f"Unsupported patch_size type: {type(self.model.patch_size)}"
            )

        if len(rope_sizes) != target_ndim:
            rope_sizes = [1] * (target_ndim - len(rope_sizes)) + rope_sizes  # time axis
        head_dim = self.model.hidden_size // self.model.heads_num
        rope_dim_list = self.model.rope_dim_list
        if rope_dim_list is None:
            rope_dim_list = [head_dim // target_ndim for _ in range(target_ndim)]
        assert (
            sum(rope_dim_list) == head_dim
        ), "sum(rope_dim_list) should equal to head_dim of attention layer"
        freqs_cos, freqs_sin = get_nd_rotary_pos_embed(
            rope_dim_list,
            rope_sizes,
            theta=self.args.rope_theta,
            use_real=True,
            theta_rescale_factor=1,
        )
        return freqs_cos, freqs_sin

    @torch.no_grad()
    def predict(
        self,
        prompt,
        height=192,
        width=336,
        video_length=129,
        seed=None,
        negative_prompt=None,
        infer_steps=50,
        guidance_scale=6,
        flow_shift=5.0,
        embedded_guidance_scale=None,
        batch_size=1,
        num_videos_per_prompt=1,
        few_step=False,
        **kwargs,
    ):
        """
        Predict the image/video from the given text.

        Args:
            prompt (str or List[str]): The input text.
            kwargs:
                height (int): The height of the output video. Default is 192.
                width (int): The width of the output video. Default is 336.
                video_length (int): The frame number of the output video. Default is 129.
                seed (int or List[str]): The random seed for the generation. Default is a random integer.
                negative_prompt (str or List[str]): The negative text prompt. Default is an empty string.
                guidance_scale (float): The guidance scale for the generation. Default is 6.0.
                num_images_per_prompt (int): The number of images per prompt. Default is 1.
                infer_steps (int): The number of inference steps. Default is 100.

        Returns:
            dict: with keys "seeds", "size" and "samples".
        """
        out_dict = dict()

        # ========================================================================
        # Arguments: seed
        # ========================================================================
        if isinstance(seed, torch.Tensor):
            seed = seed.tolist()
        if seed is None:
            seeds = [
                random.randint(0, 1_000_000)
                for _ in range(batch_size * num_videos_per_prompt)
            ]
        elif isinstance(seed, int):
            # NOTE(review): every batch element reuses seed..seed+n-1, so
            # different prompts in one batch share seeds — confirm intended.
            seeds = [
                seed + i
                for _ in range(batch_size)
                for i in range(num_videos_per_prompt)
            ]
        elif isinstance(seed, (list, tuple)):
            if len(seed) == batch_size:
                seeds = [
                    int(seed[i]) + j
                    for i in range(batch_size)
                    for j in range(num_videos_per_prompt)
                ]
            elif len(seed) == batch_size * num_videos_per_prompt:
                seeds = [int(s) for s in seed]
            else:
                raise ValueError(
                    f"Length of seed must be equal to number of prompt(batch_size) or "
                    f"batch_size * num_videos_per_prompt ({batch_size} * {num_videos_per_prompt}), got {seed}."
                )
        else:
            raise ValueError(
                f"Seed must be an integer, a list of integers, or None, got {seed}."
            )
        # One generator per output video (renamed loop var so it no longer
        # shadows the `seed` parameter used in the debug output below).
        generator = [torch.Generator(self.device).manual_seed(s) for s in seeds]
        out_dict["seeds"] = seeds

        # ========================================================================
        # Arguments: target_width, target_height, target_video_length
        # ========================================================================
        if width <= 0 or height <= 0 or video_length <= 0:
            raise ValueError(
                f"`height` and `width` and `video_length` must be positive integers, got height={height}, width={width}, video_length={video_length}"
            )
        if (video_length - 1) % 4 != 0:
            raise ValueError(
                f"`video_length-1` must be a multiple of 4, got {video_length}"
            )

        logger.info(
            f"Input (height, width, video_length) = ({height}, {width}, {video_length})"
        )

        # Round spatial dims up to the next multiple of 16 (model requirement).
        target_height = align_to(height, 16)
        target_width = align_to(width, 16)
        target_video_length = video_length

        out_dict["size"] = (target_height, target_width, target_video_length)

        # ========================================================================
        # Arguments: prompt, new_prompt, negative_prompt
        # ========================================================================
        if not isinstance(prompt, str):
            raise TypeError(f"`prompt` must be a string, but got {type(prompt)}")
        prompt = [prompt.strip()]

        # negative prompt
        if negative_prompt is None or negative_prompt == "":
            negative_prompt = self.default_negative_prompt
        if not isinstance(negative_prompt, str):
            raise TypeError(
                f"`negative_prompt` must be a string, but got {type(negative_prompt)}"
            )
        negative_prompt = [negative_prompt.strip()]

        # ========================================================================
        # Scheduler
        # ========================================================================
        # Fresh scheduler per call so the requested flow_shift takes effect.
        scheduler = FlowMatchDiscreteScheduler(
            shift=flow_shift,
            reverse=self.args.flow_reverse,
            solver=self.args.flow_solver
        )
        self.pipeline.scheduler = scheduler

        # ========================================================================
        # Build Rope freqs
        # ========================================================================
        freqs_cos, freqs_sin = self.get_rotary_pos_embed(
            target_video_length, target_height, target_width
        )
        n_tokens = freqs_cos.shape[0]

        # ========================================================================
        # Print infer args
        # ========================================================================
        debug_str = f"""
                        height: {target_height}
                         width: {target_width}
                  video_length: {target_video_length}
                        prompt: {prompt}
                    neg_prompt: {negative_prompt}
                          seed: {seed}
                   infer_steps: {infer_steps}
         num_videos_per_prompt: {num_videos_per_prompt}
                guidance_scale: {guidance_scale}
                      n_tokens: {n_tokens}
                    flow_shift: {flow_shift}
                      few_step: {few_step}
       embedded_guidance_scale: {embedded_guidance_scale}"""
        logger.debug(debug_str)

        # ========================================================================
        # Pipeline inference
        # ========================================================================
        start_time = time.time()
        samples = self.pipeline(
            prompt=prompt,
            height=target_height,
            width=target_width,
            video_length=target_video_length,
            num_inference_steps=infer_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_videos_per_prompt=num_videos_per_prompt,
            generator=generator,
            output_type="pil",
            freqs_cis=(freqs_cos, freqs_sin),
            n_tokens=n_tokens,
            embedded_guidance_scale=embedded_guidance_scale,
            data_type="video" if target_video_length > 1 else "image",
            is_progress_bar=True,
            vae_ver=self.args.vae,
            enable_tiling=self.args.vae_tiling,
            return_dict=False,
            few_step=few_step,
        )
        out_dict["samples"] = samples
        gen_time = time.time() - start_time
        logger.info(f"Success, time: {gen_time}")

        return out_dict
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/__init__.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .models import HYVideoDiffusionTransformer, HUNYUAN_VIDEO_CONFIG


def load_model(args, in_channels, out_channels, factor_kwargs):
    """load hunyuan video model

    Args:
        args (dict): model args
        in_channels (int): input channels number
        out_channels (int): output channels number
        factor_kwargs (dict): factor kwargs

    Returns:
        model (nn.Module): The hunyuan video model
    """
    # Guard clause: only model names with a registered config are supported.
    if args.model not in HUNYUAN_VIDEO_CONFIG:
        raise NotImplementedError()
    return HYVideoDiffusionTransformer(
        args,
        in_channels=in_channels,
        out_channels=out_channels,
        **HUNYUAN_VIDEO_CONFIG[args.model],
        **factor_kwargs,
    )
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/activation_layers.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def get_activation_layer(act_type):
|
| 5 |
+
"""get activation layer
|
| 6 |
+
|
| 7 |
+
Args:
|
| 8 |
+
act_type (str): the activation type
|
| 9 |
+
|
| 10 |
+
Returns:
|
| 11 |
+
torch.nn.functional: the activation layer
|
| 12 |
+
"""
|
| 13 |
+
if act_type == "gelu":
|
| 14 |
+
return lambda: nn.GELU()
|
| 15 |
+
elif act_type == "gelu_tanh":
|
| 16 |
+
# Approximate `tanh` requires torch >= 1.13
|
| 17 |
+
return lambda: nn.GELU(approximate="tanh")
|
| 18 |
+
elif act_type == "relu":
|
| 19 |
+
return nn.ReLU
|
| 20 |
+
elif act_type == "silu":
|
| 21 |
+
return nn.SiLU
|
| 22 |
+
else:
|
| 23 |
+
raise ValueError(f"Unknown activation type: {act_type}")
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/attenion.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib.metadata
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
import flash_attn
|
| 10 |
+
from flash_attn.flash_attn_interface import _flash_attn_forward
|
| 11 |
+
from flash_attn.flash_attn_interface import flash_attn_varlen_func
|
| 12 |
+
except ImportError:
|
| 13 |
+
flash_attn = None
|
| 14 |
+
flash_attn_varlen_func = None
|
| 15 |
+
_flash_attn_forward = None
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _flatten_batch(x):
    # flash-attn varlen kernels expect (total_tokens, heads, dim).
    return x.view(x.shape[0] * x.shape[1], *x.shape[2:])


def _identity(x):
    return x


def _heads_first(x):
    # torch SDPA / vanilla attention expect (batch, heads, seq, dim).
    return x.transpose(1, 2)


# mode -> (pre-attention layout transform, post-attention layout transform)
MEMORY_LAYOUT = {
    "flash": (_flatten_batch, _identity),
    "torch": (_heads_first, _heads_first),
    "vanilla": (_heads_first, _heads_first),
}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_cu_seqlens(text_mask, img_len):
    """Calculate cu_seqlens_q, cu_seqlens_kv using text_mask and img_len

    Args:
        text_mask (torch.Tensor): the mask of text, shape (batch, text_len);
            nonzero marks a valid token.
        img_len (int): the length of image

    Returns:
        torch.Tensor: the calculated cu_seqlens for flash attention
            (int32, length 2*batch+1).
    """
    batch_size = text_mask.shape[0]
    text_len = text_mask.sum(dim=1)
    max_len = text_mask.shape[1] + img_len

    # Allocate on the mask's device instead of hard-coding "cuda", so the
    # helper also works with CPU tensors.
    cu_seqlens = torch.zeros(
        [2 * batch_size + 1], dtype=torch.int32, device=text_mask.device
    )

    # Entries come in pairs per batch element: the end of the valid
    # (image + text) tokens, then the end of the padded slot.
    for i in range(batch_size):
        s = text_len[i] + img_len
        s1 = i * max_len + s
        s2 = (i + 1) * max_len
        cu_seqlens[2 * i + 1] = s1
        cu_seqlens[2 * i + 2] = s2

    return cu_seqlens
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def attention(
|
| 61 |
+
q,
|
| 62 |
+
k,
|
| 63 |
+
v,
|
| 64 |
+
mode="flash",
|
| 65 |
+
drop_rate=0,
|
| 66 |
+
attn_mask=None,
|
| 67 |
+
causal=False,
|
| 68 |
+
cu_seqlens_q=None,
|
| 69 |
+
cu_seqlens_kv=None,
|
| 70 |
+
max_seqlen_q=None,
|
| 71 |
+
max_seqlen_kv=None,
|
| 72 |
+
batch_size=1,
|
| 73 |
+
):
|
| 74 |
+
"""
|
| 75 |
+
Perform QKV self attention.
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
q (torch.Tensor): Query tensor with shape [b, s, a, d], where a is the number of heads.
|
| 79 |
+
k (torch.Tensor): Key tensor with shape [b, s1, a, d]
|
| 80 |
+
v (torch.Tensor): Value tensor with shape [b, s1, a, d]
|
| 81 |
+
mode (str): Attention mode. Choose from 'self_flash', 'cross_flash', 'torch', and 'vanilla'.
|
| 82 |
+
drop_rate (float): Dropout rate in attention map. (default: 0)
|
| 83 |
+
attn_mask (torch.Tensor): Attention mask with shape [b, s1] (cross_attn), or [b, a, s, s1] (torch or vanilla).
|
| 84 |
+
(default: None)
|
| 85 |
+
causal (bool): Whether to use causal attention. (default: False)
|
| 86 |
+
cu_seqlens_q (torch.Tensor): dtype torch.int32. The cumulative sequence lengths of the sequences in the batch,
|
| 87 |
+
used to index into q.
|
| 88 |
+
cu_seqlens_kv (torch.Tensor): dtype torch.int32. The cumulative sequence lengths of the sequences in the batch,
|
| 89 |
+
used to index into kv.
|
| 90 |
+
max_seqlen_q (int): The maximum sequence length in the batch of q.
|
| 91 |
+
max_seqlen_kv (int): The maximum sequence length in the batch of k and v.
|
| 92 |
+
|
| 93 |
+
Returns:
|
| 94 |
+
torch.Tensor: Output tensor after self attention with shape [b, s, ad]
|
| 95 |
+
"""
|
| 96 |
+
pre_attn_layout, post_attn_layout = MEMORY_LAYOUT[mode]
|
| 97 |
+
q = pre_attn_layout(q)
|
| 98 |
+
k = pre_attn_layout(k)
|
| 99 |
+
v = pre_attn_layout(v)
|
| 100 |
+
|
| 101 |
+
if mode == "torch":
|
| 102 |
+
if attn_mask is not None and attn_mask.dtype != torch.bool:
|
| 103 |
+
attn_mask = attn_mask.to(q.dtype)
|
| 104 |
+
x = F.scaled_dot_product_attention(
|
| 105 |
+
q, k, v, attn_mask=attn_mask, dropout_p=drop_rate, is_causal=causal
|
| 106 |
+
)
|
| 107 |
+
elif mode == "flash":
|
| 108 |
+
x = flash_attn_varlen_func(
|
| 109 |
+
q,
|
| 110 |
+
k,
|
| 111 |
+
v,
|
| 112 |
+
cu_seqlens_q,
|
| 113 |
+
cu_seqlens_kv,
|
| 114 |
+
max_seqlen_q,
|
| 115 |
+
max_seqlen_kv,
|
| 116 |
+
)
|
| 117 |
+
# x with shape [(bxs), a, d]
|
| 118 |
+
x = x.view(
|
| 119 |
+
batch_size, max_seqlen_q, x.shape[-2], x.shape[-1]
|
| 120 |
+
) # reshape x to [b, s, a, d]
|
| 121 |
+
elif mode == "vanilla":
|
| 122 |
+
scale_factor = 1 / math.sqrt(q.size(-1))
|
| 123 |
+
|
| 124 |
+
b, a, s, _ = q.shape
|
| 125 |
+
s1 = k.size(2)
|
| 126 |
+
attn_bias = torch.zeros(b, a, s, s1, dtype=q.dtype, device=q.device)
|
| 127 |
+
if causal:
|
| 128 |
+
# Only applied to self attention
|
| 129 |
+
assert (
|
| 130 |
+
attn_mask is None
|
| 131 |
+
), "Causal mask and attn_mask cannot be used together"
|
| 132 |
+
temp_mask = torch.ones(b, a, s, s, dtype=torch.bool, device=q.device).tril(
|
| 133 |
+
diagonal=0
|
| 134 |
+
)
|
| 135 |
+
attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
|
| 136 |
+
attn_bias.to(q.dtype)
|
| 137 |
+
|
| 138 |
+
if attn_mask is not None:
|
| 139 |
+
if attn_mask.dtype == torch.bool:
|
| 140 |
+
attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
|
| 141 |
+
else:
|
| 142 |
+
attn_bias += attn_mask
|
| 143 |
+
|
| 144 |
+
# TODO: Maybe force q and k to be float32 to avoid numerical overflow
|
| 145 |
+
attn = (q @ k.transpose(-2, -1)) * scale_factor
|
| 146 |
+
attn += attn_bias
|
| 147 |
+
attn = attn.softmax(dim=-1)
|
| 148 |
+
attn = torch.dropout(attn, p=drop_rate, train=True)
|
| 149 |
+
x = attn @ v
|
| 150 |
+
else:
|
| 151 |
+
raise NotImplementedError(f"Unsupported attention mode: {mode}")
|
| 152 |
+
|
| 153 |
+
x = post_attn_layout(x)
|
| 154 |
+
b, s, a, d = x.shape
|
| 155 |
+
out = x.reshape(b, s, -1)
|
| 156 |
+
return out
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def parallel_attention(
|
| 160 |
+
hybrid_seq_parallel_attn,
|
| 161 |
+
q,
|
| 162 |
+
k,
|
| 163 |
+
v,
|
| 164 |
+
img_q_len,
|
| 165 |
+
img_kv_len,
|
| 166 |
+
cu_seqlens_q,
|
| 167 |
+
cu_seqlens_kv
|
| 168 |
+
):
|
| 169 |
+
attn1 = hybrid_seq_parallel_attn(
|
| 170 |
+
None,
|
| 171 |
+
q[:, :img_q_len, :, :],
|
| 172 |
+
k[:, :img_kv_len, :, :],
|
| 173 |
+
v[:, :img_kv_len, :, :],
|
| 174 |
+
dropout_p=0.0,
|
| 175 |
+
causal=False,
|
| 176 |
+
joint_tensor_query=q[:,img_q_len:cu_seqlens_q[1]],
|
| 177 |
+
joint_tensor_key=k[:,img_kv_len:cu_seqlens_kv[1]],
|
| 178 |
+
joint_tensor_value=v[:,img_kv_len:cu_seqlens_kv[1]],
|
| 179 |
+
joint_strategy="rear",
|
| 180 |
+
)
|
| 181 |
+
if flash_attn.__version__ >= '2.7.0':
|
| 182 |
+
attn2, *_ = _flash_attn_forward(
|
| 183 |
+
q[:,cu_seqlens_q[1]:],
|
| 184 |
+
k[:,cu_seqlens_kv[1]:],
|
| 185 |
+
v[:,cu_seqlens_kv[1]:],
|
| 186 |
+
dropout_p=0.0,
|
| 187 |
+
softmax_scale=q.shape[-1] ** (-0.5),
|
| 188 |
+
causal=False,
|
| 189 |
+
window_size_left=-1,
|
| 190 |
+
window_size_right=-1,
|
| 191 |
+
softcap=0.0,
|
| 192 |
+
alibi_slopes=None,
|
| 193 |
+
return_softmax=False,
|
| 194 |
+
)
|
| 195 |
+
else:
|
| 196 |
+
attn2, *_ = _flash_attn_forward(
|
| 197 |
+
q[:,cu_seqlens_q[1]:],
|
| 198 |
+
k[:,cu_seqlens_kv[1]:],
|
| 199 |
+
v[:,cu_seqlens_kv[1]:],
|
| 200 |
+
dropout_p=0.0,
|
| 201 |
+
softmax_scale=q.shape[-1] ** (-0.5),
|
| 202 |
+
causal=False,
|
| 203 |
+
window_size=(-1, -1),
|
| 204 |
+
softcap=0.0,
|
| 205 |
+
alibi_slopes=None,
|
| 206 |
+
return_softmax=False,
|
| 207 |
+
)
|
| 208 |
+
attn = torch.cat([attn1, attn2], dim=1)
|
| 209 |
+
b, s, a, d = attn.shape
|
| 210 |
+
attn = attn.reshape(b, s, -1)
|
| 211 |
+
|
| 212 |
+
return attn
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/embed_layers.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from einops import rearrange, repeat
|
| 5 |
+
|
| 6 |
+
from ..utils.helpers import to_2tuple
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class PatchEmbed(nn.Module):
    """Patchify input via a strided convolution and project to `embed_dim`.

    Derived from the timm 2-D PatchEmbed (Ross Wightman), but this variant
    uses nn.Conv3d, so the patch grid covers (t, h, w) for video latents
    (callers pass a 3-tuple patch size). The shape `_assert` from timm is
    removed so multi-resolution inputs are accepted.
    """

    def __init__(
        self,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
        bias=True,
        dtype=None,
        device=None,
    ):
        tensor_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        # to_2tuple passes iterables through unchanged — presumably a 3-tuple
        # is supplied for the Conv3d kernel; verify against callers.
        self.patch_size = to_2tuple(patch_size)
        self.flatten = flatten

        self.proj = nn.Conv3d(
            in_chans,
            embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=bias,
            **tensor_kwargs,
        )
        # Xavier init over the flattened kernel; zero the bias when present.
        nn.init.xavier_uniform_(self.proj.weight.view(self.proj.weight.size(0), -1))
        if bias:
            nn.init.zeros_(self.proj.bias)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        x = self.proj(x)
        if self.flatten:
            # (B, C, T, H, W) -> (B, T*H*W, C)
            x = x.flatten(2).transpose(1, 2)
        return self.norm(x)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class TextProjection(nn.Module):
    """Two-layer MLP that maps text-encoder features into the model width.

    Adapted from PixArt-alpha's caption projection. Note: no dropout is
    applied in this module despite the upstream comment about
    classifier-free-guidance dropout.
    """

    def __init__(self, in_channels, hidden_size, act_layer, dtype=None, device=None):
        super().__init__()
        tensor_kwargs = {"dtype": dtype, "device": device}
        self.linear_1 = nn.Linear(
            in_features=in_channels,
            out_features=hidden_size,
            bias=True,
            **tensor_kwargs,
        )
        self.act_1 = act_layer()
        self.linear_2 = nn.Linear(
            in_features=hidden_size,
            out_features=hidden_size,
            bias=True,
            **tensor_kwargs,
        )

    def forward(self, caption):
        # project -> nonlinearity -> project
        return self.linear_2(self.act_1(self.linear_1(caption)))
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def timestep_embedding(t, dim, max_period=10000):
    """Create sinusoidal timestep embeddings (GLIDE-style).

    Args:
        t (torch.Tensor): 1-D tensor of N timestep indices (may be fractional).
        dim (int): output embedding dimension.
        max_period (int): controls the minimum frequency of the embeddings.

    Returns:
        torch.Tensor: (N, dim) tensor; cosine terms first, then sine terms,
        zero-padded by one column when `dim` is odd.

    .. ref_link: https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down toward 1/max_period.
    exponents = torch.arange(start=0, end=half, dtype=torch.float32) / half
    freqs = torch.exp(-math.log(max_period) * exponents).to(device=t.device)
    phases = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
    if dim % 2:
        pad = torch.zeros_like(embedding[:, :1])
        embedding = torch.cat([embedding, pad], dim=-1)
    return embedding
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class TimestepEmbedder(nn.Module):
    """Embed scalar timesteps: sinusoidal features followed by a small MLP."""

    def __init__(
        self,
        hidden_size,
        act_layer,
        frequency_embedding_size=256,
        max_period=10000,
        out_size=None,
        dtype=None,
        device=None,
    ):
        super().__init__()
        tensor_kwargs = {"dtype": dtype, "device": device}
        self.frequency_embedding_size = frequency_embedding_size
        self.max_period = max_period
        out_size = hidden_size if out_size is None else out_size

        self.mlp = nn.Sequential(
            nn.Linear(
                frequency_embedding_size, hidden_size, bias=True, **tensor_kwargs
            ),
            act_layer(),
            nn.Linear(hidden_size, out_size, bias=True, **tensor_kwargs),
        )
        # Small-std init on both linear weights, as in the reference DiT code.
        for linear in (self.mlp[0], self.mlp[2]):
            nn.init.normal_(linear.weight, std=0.02)

    def forward(self, t):
        # Match the MLP's parameter dtype before projecting.
        target_dtype = self.mlp[0].weight.dtype
        freq = timestep_embedding(
            t, self.frequency_embedding_size, self.max_period
        )
        return self.mlp(freq.type(target_dtype))
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/fp8_optimization.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
def get_fp_maxval(bits=8, mantissa_bit=3, sign_bits=1):
    """Largest representable magnitude of a low-bit float format.

    The default (8 bits, 3 mantissa bits, 1 sign bit) is E4M3, whose maximum
    under this formula is 448. Returns a 0-dim torch tensor.
    """
    n_bits = torch.tensor(bits)
    n_mantissa = torch.tensor(mantissa_bit)
    n_sign = torch.tensor(sign_bits)
    M = torch.clamp(torch.round(n_mantissa), 1, n_bits - n_sign)
    E = n_bits - n_sign - M
    bias = 2 ** (E - 1) - 1
    # Implicit leading one plus (mantissa_bit - 1) fractional bits, all set.
    frac = 1
    for k in range(mantissa_bit - 1):
        frac += 1 / (2 ** (k + 1))
    return frac * 2 ** (2 ** E - 1 - bias)
|
| 19 |
+
|
| 20 |
+
def quantize_to_fp8(x, bits=8, mantissa_bit=3, sign_bits=1):
    """Fake-quantize `x` to a low-bit float format. Default is E4M3.

    Values are clamped to the representable range, then rounded to the
    nearest representable mantissa step within their own power-of-two bin.

    Args:
        x (torch.Tensor): input tensor.
        bits / mantissa_bit / sign_bits (int): format parameters.

    Returns:
        (qdq_out, log_scales): the quantize-dequantized tensor and the
        per-element power-of-two step sizes used for rounding.
    """
    bits = torch.tensor(bits)
    mantissa_bit = torch.tensor(mantissa_bit)
    sign_bits = torch.tensor(sign_bits)
    M = torch.clamp(torch.round(mantissa_bit), 1, bits - sign_bits)
    E = bits - sign_bits - M
    bias = 2 ** (E - 1) - 1
    mantissa = 1
    for i in range(mantissa_bit - 1):
        mantissa += 1 / (2 ** (i + 1))
    maxval = mantissa * 2 ** (2 ** E - 1 - bias)
    # Signed formats are symmetric; unsigned ones bottom out at zero.
    # (Removed the dead duplicate `minval = -maxval` assignment that the
    # original overwrote on the very next line.)
    minval = -maxval if sign_bits == 1 else torch.zeros_like(maxval)
    input_clamp = torch.min(torch.max(x, minval), maxval)
    # Per-element exponent (floored, biased, clamped to >= 1 for subnormals).
    log_scales = torch.clamp(
        (torch.floor(torch.log2(torch.abs(input_clamp)) + bias)).detach(), 1.0
    )
    # Convert the exponent to the actual mantissa step size 2^(e - M - bias).
    log_scales = 2.0 ** (log_scales - M - bias.type(x.dtype))
    # Quantize-dequantize in one step.
    qdq_out = torch.round(input_clamp / log_scales) * log_scales
    return qdq_out, log_scales
|
| 42 |
+
|
| 43 |
+
def fp8_tensor_quant(x, scale, bits=8, mantissa_bit=3, sign_bits=1):
    """Divide `x` by `scale` (broadcast over trailing dims) and fake-quantize.

    Returns the quantize-dequantized tensor, the reshaped scale, and the
    per-element step sizes from `quantize_to_fp8`.
    """
    # Append x.dim() - 1 singleton dims so `scale` broadcasts against x.
    extra_dims = len(x.shape) - 1
    for _ in range(extra_dims):
        scale = scale.unsqueeze(-1)
    scaled = x / scale
    quant_dequant_x, log_scales = quantize_to_fp8(
        scaled, bits=bits, mantissa_bit=mantissa_bit, sign_bits=sign_bits
    )
    return quant_dequant_x, scale, log_scales
|
| 49 |
+
|
| 50 |
+
def fp8_activation_dequant(qdq_out, scale, dtype):
    """Cast a fake-quantized tensor to `dtype` and multiply its scale back in."""
    return qdq_out.type(dtype) * scale.to(dtype)
|
| 54 |
+
|
| 55 |
+
def fp8_linear_forward(cls, original_dtype, input):
    """Replacement forward for an fp8-patched nn.Linear.

    If the weight is not yet in fp8, quantize it on the fly from its max-abs
    scale; otherwise use the precomputed `cls.fp8_scale`. The fp8 weight is
    dequantized to `original_dtype` before the matmul. Falls back to the
    layer's original forward when the weight is all zeros (treated upstream
    as "not really fp8" — NOTE(review): confirm that sentinel).

    Args:
        cls: the patched nn.Linear instance (carries fp8_scale /
            original_forward attributes set by `convert_fp8_linear`).
        original_dtype (torch.dtype): activation/dequant dtype.
        input (torch.Tensor): layer input.

    Returns:
        torch.Tensor: linear layer output.
    """
    weight_dtype = cls.weight.dtype
    if cls.weight.dtype != torch.float8_e4m3fn:
        # Weight still high-precision: derive a max-abs scale and quantize.
        maxval = get_fp_maxval()
        scale = torch.max(torch.abs(cls.weight.flatten())) / maxval
        linear_weight, scale, log_scales = fp8_tensor_quant(cls.weight, scale)
        linear_weight = linear_weight.to(torch.float8_e4m3fn)
        weight_dtype = linear_weight.dtype
    else:
        scale = cls.fp8_scale.to(cls.weight.device)
        linear_weight = cls.weight

    if weight_dtype == torch.float8_e4m3fn and cls.weight.sum() != 0:
        # Dequantize to the activation dtype and run the matmul there.
        # (Removed the original's always-true `if True or len(input.shape) == 3`
        # guard and its unreachable else branch.)
        cls_dequant = fp8_activation_dequant(linear_weight, scale, original_dtype)
        # Use `is not None`: `!=` on a tensor parameter triggers elementwise
        # comparison semantics rather than a null check.
        if cls.bias is not None:
            return F.linear(input, cls_dequant, cls.bias)
        return F.linear(input, cls_dequant)
    return cls.original_forward(input)
|
| 81 |
+
|
| 82 |
+
def convert_fp8_linear(module, dit_weight_path, original_dtype, params_to_keep={}):
    """Convert eligible nn.Linear weights of `module` to fp8 (e4m3) in place.

    Loads a per-layer scale map from the companion file obtained by replacing
    `.pt` with `_map.pt` in `dit_weight_path`, casts the weight of every
    Linear whose module path contains 'double_blocks' or 'single_blocks' to
    torch.float8_e4m3fn, and swaps that layer's `forward` for
    `fp8_linear_forward`, which dequantizes on the fly.

    Args:
        module: root nn.Module to patch (mutated in place).
        dit_weight_path (str): checkpoint path; used only to locate the
            fp8 scale-map file next to it.
        original_dtype (torch.dtype): dtype for scales/activations.
        params_to_keep: never read in this body. NOTE(review): mutable
            default dict and unused — presumably kept for API compatibility;
            confirm against callers.

    Raises:
        ValueError: if the fp8 scale-map file does not exist.
    """
    # Flag consumers can check to see fp8 matmul has been enabled.
    setattr(module, "fp8_matmul_enabled", True)

    # loading fp8 mapping file
    fp8_map_path = dit_weight_path.replace('.pt', '_map.pt')
    if os.path.exists(fp8_map_path):
        # map_location lambda keeps tensors on their storage device (CPU load).
        fp8_map = torch.load(fp8_map_path, map_location=lambda storage, loc: storage)
    else:
        raise ValueError(f"Invalid fp8_map path: {fp8_map_path}.")

    fp8_layers = []
    for key, layer in module.named_modules():
        if isinstance(layer, nn.Linear) and ('double_blocks' in key or 'single_blocks' in key):
            fp8_layers.append(key)
            original_forward = layer.forward
            layer.weight = torch.nn.Parameter(layer.weight.to(torch.float8_e4m3fn))
            setattr(layer, "fp8_scale", fp8_map[key].to(dtype=original_dtype))
            setattr(layer, "original_forward", original_forward)
            # `m=layer` binds the current layer at definition time, avoiding
            # the late-binding-closure-in-a-loop pitfall.
            setattr(layer, "forward", lambda input, m=layer: fp8_linear_forward(m, original_dtype, input))
|
| 101 |
+
|
| 102 |
+
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/mlp_layers.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Modified from timm library:
|
| 2 |
+
# https://github.com/huggingface/pytorch-image-models/blob/648aaa41233ba83eb38faf5ba9d415d574823241/timm/layers/mlp.py#L13
|
| 3 |
+
|
| 4 |
+
from functools import partial
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
|
| 9 |
+
from .modulate_layers import modulate
|
| 10 |
+
from ..utils.helpers import to_2tuple
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MLP(nn.Module):
    """MLP as used in Vision Transformer and MLP-Mixer:
    fc1 -> act -> dropout -> (norm) -> fc2 -> dropout.

    Modified from the timm implementation.
    """

    def __init__(
        self,
        in_channels,
        hidden_channels=None,
        out_features=None,
        act_layer=nn.GELU,
        norm_layer=None,
        bias=True,
        drop=0.0,
        use_conv=False,
        device=None,
        dtype=None,
    ):
        tensor_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        hidden_channels = hidden_channels or in_channels
        out_features = out_features or in_channels
        # bias/drop may be given once or per-layer; expand to pairs.
        bias_pair = to_2tuple(bias)
        drop_pair = to_2tuple(drop)
        # A 1x1 conv acts as a per-position linear layer when requested.
        make_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear

        self.fc1 = make_layer(
            in_channels, hidden_channels, bias=bias_pair[0], **tensor_kwargs
        )
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_pair[0])
        self.norm = (
            nn.Identity()
            if norm_layer is None
            else norm_layer(hidden_channels, **tensor_kwargs)
        )
        self.fc2 = make_layer(
            hidden_channels, out_features, bias=bias_pair[1], **tensor_kwargs
        )
        self.drop2 = nn.Dropout(drop_pair[1])

    def forward(self, x):
        hidden = self.drop1(self.act(self.fc1(x)))
        return self.drop2(self.fc2(self.norm(hidden)))
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
#
|
| 63 |
+
class MLPEmbedder(nn.Module):
    """Two-layer SiLU MLP embedder.

    copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/modules/layers.py
    """

    def __init__(self, in_dim: int, hidden_dim: int, device=None, dtype=None):
        super().__init__()
        tensor_kwargs = {"device": device, "dtype": dtype}
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True, **tensor_kwargs)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True, **tensor_kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.silu(self.in_layer(x))
        return self.out_layer(hidden)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class FinalLayer(nn.Module):
    """The final layer of DiT: adaLN-modulated LayerNorm followed by a linear
    projection from hidden size back to patch pixels.

    Both the linear head and the adaLN modulation are zero-initialized so the
    block starts out as a no-op contribution.
    """

    def __init__(
        self, hidden_size, patch_size, out_channels, act_layer, device=None, dtype=None
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()

        # Just use LayerNorm for the final layer; scale/shift come from adaLN.
        self.norm_final = nn.LayerNorm(
            hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs
        )
        # Output dim = prod(patch dims) * out_channels: square 2-D patch for
        # an int patch_size, (t, h, w) for a 3-tuple.
        if isinstance(patch_size, int):
            out_dim = patch_size * patch_size * out_channels
        else:
            out_dim = patch_size[0] * patch_size[1] * patch_size[2] * out_channels
        # Fix: the tuple branch previously omitted **factory_kwargs, so the
        # linear head silently ignored the requested device/dtype.
        self.linear = nn.Linear(hidden_size, out_dim, bias=True, **factory_kwargs)
        nn.init.zeros_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

        # Here we don't distinguish between the modulate types. Just use the simple one.
        self.adaLN_modulation = nn.Sequential(
            act_layer(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True, **factory_kwargs),
        )
        # Zero-initialize the modulation
        nn.init.zeros_(self.adaLN_modulation[1].weight)
        nn.init.zeros_(self.adaLN_modulation[1].bias)

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        x = modulate(self.norm_final(x), shift=shift, scale=scale)
        x = self.linear(x)
        return x
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/models.py
ADDED
|
@@ -0,0 +1,816 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, List, Tuple, Optional, Union, Dict
|
| 2 |
+
from einops import rearrange
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
from diffusers.models import ModelMixin
|
| 9 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| 10 |
+
|
| 11 |
+
from .activation_layers import get_activation_layer
|
| 12 |
+
from .norm_layers import get_norm_layer
|
| 13 |
+
from .embed_layers import TimestepEmbedder, PatchEmbed, TextProjection
|
| 14 |
+
from .attenion import attention, parallel_attention, get_cu_seqlens
|
| 15 |
+
from .posemb_layers import apply_rotary_emb
|
| 16 |
+
from .mlp_layers import MLP, MLPEmbedder, FinalLayer
|
| 17 |
+
from .modulate_layers import ModulateDiT, modulate, apply_gate
|
| 18 |
+
from .token_refiner import SingleTokenRefiner
|
| 19 |
+
|
| 20 |
+
from ..parallel_states import nccl_info
|
| 21 |
+
from .posemb_layers import get_nd_rotary_pos_embed
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class MMDoubleStreamBlock(nn.Module):
|
| 25 |
+
"""
|
| 26 |
+
A multimodal dit block with seperate modulation for
|
| 27 |
+
text and image/video, see more details (SD3): https://arxiv.org/abs/2403.03206
|
| 28 |
+
(Flux.1): https://github.com/black-forest-labs/flux
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
def __init__(
|
| 32 |
+
self,
|
| 33 |
+
hidden_size: int,
|
| 34 |
+
heads_num: int,
|
| 35 |
+
mlp_width_ratio: float,
|
| 36 |
+
mlp_act_type: str = "gelu_tanh",
|
| 37 |
+
qk_norm: bool = True,
|
| 38 |
+
qk_norm_type: str = "rms",
|
| 39 |
+
qkv_bias: bool = False,
|
| 40 |
+
dtype: Optional[torch.dtype] = None,
|
| 41 |
+
device: Optional[torch.device] = None,
|
| 42 |
+
):
|
| 43 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 44 |
+
super().__init__()
|
| 45 |
+
|
| 46 |
+
self.deterministic = False
|
| 47 |
+
self.heads_num = heads_num
|
| 48 |
+
head_dim = hidden_size // heads_num
|
| 49 |
+
mlp_hidden_dim = int(hidden_size * mlp_width_ratio)
|
| 50 |
+
|
| 51 |
+
self.img_mod = ModulateDiT(
|
| 52 |
+
hidden_size,
|
| 53 |
+
factor=6,
|
| 54 |
+
act_layer=get_activation_layer("silu"),
|
| 55 |
+
**factory_kwargs,
|
| 56 |
+
)
|
| 57 |
+
self.img_norm1 = nn.LayerNorm(
|
| 58 |
+
hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
self.img_attn_qkv = nn.Linear(
|
| 62 |
+
hidden_size, hidden_size * 3, bias=qkv_bias, **factory_kwargs
|
| 63 |
+
)
|
| 64 |
+
qk_norm_layer = get_norm_layer(qk_norm_type)
|
| 65 |
+
self.img_attn_q_norm = (
|
| 66 |
+
qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
|
| 67 |
+
if qk_norm
|
| 68 |
+
else nn.Identity()
|
| 69 |
+
)
|
| 70 |
+
self.img_attn_k_norm = (
|
| 71 |
+
qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
|
| 72 |
+
if qk_norm
|
| 73 |
+
else nn.Identity()
|
| 74 |
+
)
|
| 75 |
+
self.img_attn_proj = nn.Linear(
|
| 76 |
+
hidden_size, hidden_size, bias=qkv_bias, **factory_kwargs
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
self.img_norm2 = nn.LayerNorm(
|
| 80 |
+
hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs
|
| 81 |
+
)
|
| 82 |
+
self.img_mlp = MLP(
|
| 83 |
+
hidden_size,
|
| 84 |
+
mlp_hidden_dim,
|
| 85 |
+
act_layer=get_activation_layer(mlp_act_type),
|
| 86 |
+
bias=True,
|
| 87 |
+
**factory_kwargs,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
self.txt_mod = ModulateDiT(
|
| 91 |
+
hidden_size,
|
| 92 |
+
factor=6,
|
| 93 |
+
act_layer=get_activation_layer("silu"),
|
| 94 |
+
**factory_kwargs,
|
| 95 |
+
)
|
| 96 |
+
self.txt_norm1 = nn.LayerNorm(
|
| 97 |
+
hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
self.txt_attn_qkv = nn.Linear(
|
| 101 |
+
hidden_size, hidden_size * 3, bias=qkv_bias, **factory_kwargs
|
| 102 |
+
)
|
| 103 |
+
self.txt_attn_q_norm = (
|
| 104 |
+
qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
|
| 105 |
+
if qk_norm
|
| 106 |
+
else nn.Identity()
|
| 107 |
+
)
|
| 108 |
+
self.txt_attn_k_norm = (
|
| 109 |
+
qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
|
| 110 |
+
if qk_norm
|
| 111 |
+
else nn.Identity()
|
| 112 |
+
)
|
| 113 |
+
self.txt_attn_proj = nn.Linear(
|
| 114 |
+
hidden_size, hidden_size, bias=qkv_bias, **factory_kwargs
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
self.txt_norm2 = nn.LayerNorm(
|
| 118 |
+
hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs
|
| 119 |
+
)
|
| 120 |
+
self.txt_mlp = MLP(
|
| 121 |
+
hidden_size,
|
| 122 |
+
mlp_hidden_dim,
|
| 123 |
+
act_layer=get_activation_layer(mlp_act_type),
|
| 124 |
+
bias=True,
|
| 125 |
+
**factory_kwargs,
|
| 126 |
+
)
|
| 127 |
+
self.hybrid_seq_parallel_attn = None
|
| 128 |
+
|
| 129 |
+
def enable_deterministic(self):
|
| 130 |
+
self.deterministic = True
|
| 131 |
+
|
| 132 |
+
def disable_deterministic(self):
|
| 133 |
+
self.deterministic = False
|
| 134 |
+
|
| 135 |
+
def forward(
|
| 136 |
+
self,
|
| 137 |
+
img: torch.Tensor,
|
| 138 |
+
txt: torch.Tensor,
|
| 139 |
+
vec: torch.Tensor,
|
| 140 |
+
cu_seqlens_q: Optional[torch.Tensor] = None,
|
| 141 |
+
cu_seqlens_kv: Optional[torch.Tensor] = None,
|
| 142 |
+
max_seqlen_q: Optional[int] = None,
|
| 143 |
+
max_seqlen_kv: Optional[int] = None,
|
| 144 |
+
freqs_cis: tuple = None,
|
| 145 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 146 |
+
(
|
| 147 |
+
img_mod1_shift,
|
| 148 |
+
img_mod1_scale,
|
| 149 |
+
img_mod1_gate,
|
| 150 |
+
img_mod2_shift,
|
| 151 |
+
img_mod2_scale,
|
| 152 |
+
img_mod2_gate,
|
| 153 |
+
) = self.img_mod(vec).chunk(6, dim=-1)
|
| 154 |
+
(
|
| 155 |
+
txt_mod1_shift,
|
| 156 |
+
txt_mod1_scale,
|
| 157 |
+
txt_mod1_gate,
|
| 158 |
+
txt_mod2_shift,
|
| 159 |
+
txt_mod2_scale,
|
| 160 |
+
txt_mod2_gate,
|
| 161 |
+
) = self.txt_mod(vec).chunk(6, dim=-1)
|
| 162 |
+
|
| 163 |
+
# Prepare image for attention.
|
| 164 |
+
img_modulated = self.img_norm1(img)
|
| 165 |
+
img_modulated = modulate(
|
| 166 |
+
img_modulated, shift=img_mod1_shift, scale=img_mod1_scale
|
| 167 |
+
)
|
| 168 |
+
img_qkv = self.img_attn_qkv(img_modulated)
|
| 169 |
+
img_q, img_k, img_v = rearrange(
|
| 170 |
+
img_qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num
|
| 171 |
+
)
|
| 172 |
+
# Apply QK-Norm if needed
|
| 173 |
+
img_q = self.img_attn_q_norm(img_q).to(img_v)
|
| 174 |
+
img_k = self.img_attn_k_norm(img_k).to(img_v)
|
| 175 |
+
|
| 176 |
+
# Apply RoPE if needed.
|
| 177 |
+
if freqs_cis is not None:
|
| 178 |
+
img_qq, img_kk = apply_rotary_emb(img_q, img_k, freqs_cis, head_first=False)
|
| 179 |
+
assert (
|
| 180 |
+
img_qq.shape == img_q.shape and img_kk.shape == img_k.shape
|
| 181 |
+
), f"img_kk: {img_qq.shape}, img_q: {img_q.shape}, img_kk: {img_kk.shape}, img_k: {img_k.shape}"
|
| 182 |
+
img_q, img_k = img_qq, img_kk
|
| 183 |
+
|
| 184 |
+
# Prepare txt for attention.
|
| 185 |
+
txt_modulated = self.txt_norm1(txt)
|
| 186 |
+
txt_modulated = modulate(
|
| 187 |
+
txt_modulated, shift=txt_mod1_shift, scale=txt_mod1_scale
|
| 188 |
+
)
|
| 189 |
+
txt_qkv = self.txt_attn_qkv(txt_modulated)
|
| 190 |
+
txt_q, txt_k, txt_v = rearrange(
|
| 191 |
+
txt_qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num
|
| 192 |
+
)
|
| 193 |
+
# Apply QK-Norm if needed.
|
| 194 |
+
txt_q = self.txt_attn_q_norm(txt_q).to(txt_v)
|
| 195 |
+
txt_k = self.txt_attn_k_norm(txt_k).to(txt_v)
|
| 196 |
+
|
| 197 |
+
# Run actual attention.
|
| 198 |
+
q = torch.cat((img_q, txt_q), dim=1)
|
| 199 |
+
k = torch.cat((img_k, txt_k), dim=1)
|
| 200 |
+
v = torch.cat((img_v, txt_v), dim=1)
|
| 201 |
+
assert (
|
| 202 |
+
cu_seqlens_q.shape[0] == 2 * img.shape[0] + 1
|
| 203 |
+
), f"cu_seqlens_q.shape:{cu_seqlens_q.shape}, img.shape[0]:{img.shape[0]}"
|
| 204 |
+
|
| 205 |
+
# attention computation start
|
| 206 |
+
if not self.hybrid_seq_parallel_attn:
|
| 207 |
+
attn = attention(
|
| 208 |
+
q,
|
| 209 |
+
k,
|
| 210 |
+
v,
|
| 211 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 212 |
+
cu_seqlens_kv=cu_seqlens_kv,
|
| 213 |
+
max_seqlen_q=max_seqlen_q,
|
| 214 |
+
max_seqlen_kv=max_seqlen_kv,
|
| 215 |
+
batch_size=img_k.shape[0],
|
| 216 |
+
)
|
| 217 |
+
else:
|
| 218 |
+
attn = parallel_attention(
|
| 219 |
+
self.hybrid_seq_parallel_attn,
|
| 220 |
+
q,
|
| 221 |
+
k,
|
| 222 |
+
v,
|
| 223 |
+
img_q_len=img_q.shape[1],
|
| 224 |
+
img_kv_len=img_k.shape[1],
|
| 225 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 226 |
+
cu_seqlens_kv=cu_seqlens_kv
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
# attention computation end
|
| 230 |
+
|
| 231 |
+
img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1] :]
|
| 232 |
+
|
| 233 |
+
# Calculate the img bloks.
|
| 234 |
+
img = img + apply_gate(self.img_attn_proj(img_attn), gate=img_mod1_gate)
|
| 235 |
+
img = img + apply_gate(
|
| 236 |
+
self.img_mlp(
|
| 237 |
+
modulate(
|
| 238 |
+
self.img_norm2(img), shift=img_mod2_shift, scale=img_mod2_scale
|
| 239 |
+
)
|
| 240 |
+
),
|
| 241 |
+
gate=img_mod2_gate,
|
| 242 |
+
)
|
| 243 |
+
|
| 244 |
+
# Calculate the txt bloks.
|
| 245 |
+
txt = txt + apply_gate(self.txt_attn_proj(txt_attn), gate=txt_mod1_gate)
|
| 246 |
+
txt = txt + apply_gate(
|
| 247 |
+
self.txt_mlp(
|
| 248 |
+
modulate(
|
| 249 |
+
self.txt_norm2(txt), shift=txt_mod2_shift, scale=txt_mod2_scale
|
| 250 |
+
)
|
| 251 |
+
),
|
| 252 |
+
gate=txt_mod2_gate,
|
| 253 |
+
)
|
| 254 |
+
|
| 255 |
+
return img, txt
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
class MMSingleStreamBlock(nn.Module):
|
| 259 |
+
"""
|
| 260 |
+
A DiT block with parallel linear layers as described in
|
| 261 |
+
https://arxiv.org/abs/2302.05442 and adapted modulation interface.
|
| 262 |
+
Also refer to (SD3): https://arxiv.org/abs/2403.03206
|
| 263 |
+
(Flux.1): https://github.com/black-forest-labs/flux
|
| 264 |
+
"""
|
| 265 |
+
|
| 266 |
+
def __init__(
|
| 267 |
+
self,
|
| 268 |
+
hidden_size: int,
|
| 269 |
+
heads_num: int,
|
| 270 |
+
mlp_width_ratio: float = 4.0,
|
| 271 |
+
mlp_act_type: str = "gelu_tanh",
|
| 272 |
+
qk_norm: bool = True,
|
| 273 |
+
qk_norm_type: str = "rms",
|
| 274 |
+
qk_scale: float = None,
|
| 275 |
+
dtype: Optional[torch.dtype] = None,
|
| 276 |
+
device: Optional[torch.device] = None,
|
| 277 |
+
):
|
| 278 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 279 |
+
super().__init__()
|
| 280 |
+
|
| 281 |
+
self.deterministic = False
|
| 282 |
+
self.hidden_size = hidden_size
|
| 283 |
+
self.heads_num = heads_num
|
| 284 |
+
head_dim = hidden_size // heads_num
|
| 285 |
+
mlp_hidden_dim = int(hidden_size * mlp_width_ratio)
|
| 286 |
+
self.mlp_hidden_dim = mlp_hidden_dim
|
| 287 |
+
self.scale = qk_scale or head_dim ** -0.5
|
| 288 |
+
|
| 289 |
+
# qkv and mlp_in
|
| 290 |
+
self.linear1 = nn.Linear(
|
| 291 |
+
hidden_size, hidden_size * 3 + mlp_hidden_dim, **factory_kwargs
|
| 292 |
+
)
|
| 293 |
+
# proj and mlp_out
|
| 294 |
+
self.linear2 = nn.Linear(
|
| 295 |
+
hidden_size + mlp_hidden_dim, hidden_size, **factory_kwargs
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
+
qk_norm_layer = get_norm_layer(qk_norm_type)
|
| 299 |
+
self.q_norm = (
|
| 300 |
+
qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
|
| 301 |
+
if qk_norm
|
| 302 |
+
else nn.Identity()
|
| 303 |
+
)
|
| 304 |
+
self.k_norm = (
|
| 305 |
+
qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
|
| 306 |
+
if qk_norm
|
| 307 |
+
else nn.Identity()
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
self.pre_norm = nn.LayerNorm(
|
| 311 |
+
hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs
|
| 312 |
+
)
|
| 313 |
+
|
| 314 |
+
self.mlp_act = get_activation_layer(mlp_act_type)()
|
| 315 |
+
self.modulation = ModulateDiT(
|
| 316 |
+
hidden_size,
|
| 317 |
+
factor=3,
|
| 318 |
+
act_layer=get_activation_layer("silu"),
|
| 319 |
+
**factory_kwargs,
|
| 320 |
+
)
|
| 321 |
+
self.hybrid_seq_parallel_attn = None
|
| 322 |
+
|
| 323 |
+
def enable_deterministic(self):
|
| 324 |
+
self.deterministic = True
|
| 325 |
+
|
| 326 |
+
def disable_deterministic(self):
|
| 327 |
+
self.deterministic = False
|
| 328 |
+
|
| 329 |
+
def forward(
|
| 330 |
+
self,
|
| 331 |
+
x: torch.Tensor,
|
| 332 |
+
vec: torch.Tensor,
|
| 333 |
+
txt_len: int,
|
| 334 |
+
cu_seqlens_q: Optional[torch.Tensor] = None,
|
| 335 |
+
cu_seqlens_kv: Optional[torch.Tensor] = None,
|
| 336 |
+
max_seqlen_q: Optional[int] = None,
|
| 337 |
+
max_seqlen_kv: Optional[int] = None,
|
| 338 |
+
freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None,
|
| 339 |
+
) -> torch.Tensor:
|
| 340 |
+
mod_shift, mod_scale, mod_gate = self.modulation(vec).chunk(3, dim=-1)
|
| 341 |
+
x_mod = modulate(self.pre_norm(x), shift=mod_shift, scale=mod_scale)
|
| 342 |
+
qkv, mlp = torch.split(
|
| 343 |
+
self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
q, k, v = rearrange(qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num)
|
| 347 |
+
|
| 348 |
+
# Apply QK-Norm if needed.
|
| 349 |
+
q = self.q_norm(q).to(v)
|
| 350 |
+
k = self.k_norm(k).to(v)
|
| 351 |
+
|
| 352 |
+
# Apply RoPE if needed.
|
| 353 |
+
if freqs_cis is not None:
|
| 354 |
+
img_q, txt_q = q[:, :-txt_len, :, :], q[:, -txt_len:, :, :]
|
| 355 |
+
img_k, txt_k = k[:, :-txt_len, :, :], k[:, -txt_len:, :, :]
|
| 356 |
+
img_qq, img_kk = apply_rotary_emb(img_q, img_k, freqs_cis, head_first=False)
|
| 357 |
+
assert (
|
| 358 |
+
img_qq.shape == img_q.shape and img_kk.shape == img_k.shape
|
| 359 |
+
), f"img_kk: {img_qq.shape}, img_q: {img_q.shape}, img_kk: {img_kk.shape}, img_k: {img_k.shape}"
|
| 360 |
+
img_q, img_k = img_qq, img_kk
|
| 361 |
+
q = torch.cat((img_q, txt_q), dim=1)
|
| 362 |
+
k = torch.cat((img_k, txt_k), dim=1)
|
| 363 |
+
|
| 364 |
+
# Compute attention.
|
| 365 |
+
assert (
|
| 366 |
+
cu_seqlens_q.shape[0] == 2 * x.shape[0] + 1
|
| 367 |
+
), f"cu_seqlens_q.shape:{cu_seqlens_q.shape}, x.shape[0]:{x.shape[0]}"
|
| 368 |
+
|
| 369 |
+
# attention computation start
|
| 370 |
+
if not self.hybrid_seq_parallel_attn:
|
| 371 |
+
attn = attention(
|
| 372 |
+
q,
|
| 373 |
+
k,
|
| 374 |
+
v,
|
| 375 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 376 |
+
cu_seqlens_kv=cu_seqlens_kv,
|
| 377 |
+
max_seqlen_q=max_seqlen_q,
|
| 378 |
+
max_seqlen_kv=max_seqlen_kv,
|
| 379 |
+
batch_size=x.shape[0],
|
| 380 |
+
)
|
| 381 |
+
else:
|
| 382 |
+
attn = parallel_attention(
|
| 383 |
+
self.hybrid_seq_parallel_attn,
|
| 384 |
+
q,
|
| 385 |
+
k,
|
| 386 |
+
v,
|
| 387 |
+
img_q_len=img_q.shape[1],
|
| 388 |
+
img_kv_len=img_k.shape[1],
|
| 389 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 390 |
+
cu_seqlens_kv=cu_seqlens_kv
|
| 391 |
+
)
|
| 392 |
+
# attention computation end
|
| 393 |
+
|
| 394 |
+
# Compute activation in mlp stream, cat again and run second linear layer.
|
| 395 |
+
output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
|
| 396 |
+
return x + apply_gate(output, gate=mod_gate)
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class HYVideoDiffusionTransformer(ModelMixin, ConfigMixin):
|
| 400 |
+
"""
|
| 401 |
+
HunyuanVideo Transformer backbone
|
| 402 |
+
|
| 403 |
+
Inherited from ModelMixin and ConfigMixin for compatibility with diffusers' sampler StableDiffusionPipeline.
|
| 404 |
+
|
| 405 |
+
Reference:
|
| 406 |
+
[1] Flux.1: https://github.com/black-forest-labs/flux
|
| 407 |
+
[2] MMDiT: http://arxiv.org/abs/2403.03206
|
| 408 |
+
|
| 409 |
+
Parameters
|
| 410 |
+
----------
|
| 411 |
+
args: argparse.Namespace
|
| 412 |
+
The arguments parsed by argparse.
|
| 413 |
+
patch_size: list
|
| 414 |
+
The size of the patch.
|
| 415 |
+
in_channels: int
|
| 416 |
+
The number of input channels.
|
| 417 |
+
out_channels: int
|
| 418 |
+
The number of output channels.
|
| 419 |
+
hidden_size: int
|
| 420 |
+
The hidden size of the transformer backbone.
|
| 421 |
+
heads_num: int
|
| 422 |
+
The number of attention heads.
|
| 423 |
+
mlp_width_ratio: float
|
| 424 |
+
The ratio of the hidden size of the MLP in the transformer block.
|
| 425 |
+
mlp_act_type: str
|
| 426 |
+
The activation function of the MLP in the transformer block.
|
| 427 |
+
depth_double_blocks: int
|
| 428 |
+
The number of transformer blocks in the double blocks.
|
| 429 |
+
depth_single_blocks: int
|
| 430 |
+
The number of transformer blocks in the single blocks.
|
| 431 |
+
rope_dim_list: list
|
| 432 |
+
The dimension of the rotary embedding for t, h, w.
|
| 433 |
+
qkv_bias: bool
|
| 434 |
+
Whether to use bias in the qkv linear layer.
|
| 435 |
+
qk_norm: bool
|
| 436 |
+
Whether to use qk norm.
|
| 437 |
+
qk_norm_type: str
|
| 438 |
+
The type of qk norm.
|
| 439 |
+
guidance_embed: bool
|
| 440 |
+
Whether to use guidance embedding for distillation.
|
| 441 |
+
text_projection: str
|
| 442 |
+
The type of the text projection, default is single_refiner.
|
| 443 |
+
use_attention_mask: bool
|
| 444 |
+
Whether to use attention mask for text encoder.
|
| 445 |
+
dtype: torch.dtype
|
| 446 |
+
The dtype of the model.
|
| 447 |
+
device: torch.device
|
| 448 |
+
The device of the model.
|
| 449 |
+
"""
|
| 450 |
+
|
| 451 |
+
@register_to_config
|
| 452 |
+
def __init__(
|
| 453 |
+
self,
|
| 454 |
+
args: Any,
|
| 455 |
+
patch_size: list = [1, 2, 2],
|
| 456 |
+
in_channels: int = 4, # Should be VAE.config.latent_channels.
|
| 457 |
+
out_channels: int = None,
|
| 458 |
+
hidden_size: int = 3072,
|
| 459 |
+
heads_num: int = 24,
|
| 460 |
+
mlp_width_ratio: float = 4.0,
|
| 461 |
+
mlp_act_type: str = "gelu_tanh",
|
| 462 |
+
mm_double_blocks_depth: int = 20,
|
| 463 |
+
mm_single_blocks_depth: int = 40,
|
| 464 |
+
rope_dim_list: List[int] = [16, 56, 56],
|
| 465 |
+
qkv_bias: bool = True,
|
| 466 |
+
qk_norm: bool = True,
|
| 467 |
+
qk_norm_type: str = "rms",
|
| 468 |
+
guidance_embed: bool = False, # For modulation.
|
| 469 |
+
text_projection: str = "single_refiner",
|
| 470 |
+
use_attention_mask: bool = True,
|
| 471 |
+
dtype: Optional[torch.dtype] = None,
|
| 472 |
+
device: Optional[torch.device] = None,
|
| 473 |
+
):
|
| 474 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 475 |
+
super().__init__()
|
| 476 |
+
|
| 477 |
+
self.patch_size = patch_size
|
| 478 |
+
self.in_channels = in_channels
|
| 479 |
+
self.out_channels = in_channels if out_channels is None else out_channels
|
| 480 |
+
self.unpatchify_channels = self.out_channels
|
| 481 |
+
self.guidance_embed = guidance_embed
|
| 482 |
+
self.rope_dim_list = rope_dim_list
|
| 483 |
+
|
| 484 |
+
# Text projection. Default to linear projection.
|
| 485 |
+
# Alternative: TokenRefiner. See more details (LI-DiT): http://arxiv.org/abs/2406.11831
|
| 486 |
+
self.use_attention_mask = use_attention_mask
|
| 487 |
+
self.text_projection = text_projection
|
| 488 |
+
|
| 489 |
+
self.text_states_dim = args.text_states_dim
|
| 490 |
+
self.text_states_dim_2 = args.text_states_dim_2
|
| 491 |
+
self.rope_theta = args.rope_theta
|
| 492 |
+
|
| 493 |
+
if hidden_size % heads_num != 0:
|
| 494 |
+
raise ValueError(
|
| 495 |
+
f"Hidden size {hidden_size} must be divisible by heads_num {heads_num}"
|
| 496 |
+
)
|
| 497 |
+
pe_dim = hidden_size // heads_num
|
| 498 |
+
if sum(rope_dim_list) != pe_dim:
|
| 499 |
+
raise ValueError(
|
| 500 |
+
f"Got {rope_dim_list} but expected positional dim {pe_dim}"
|
| 501 |
+
)
|
| 502 |
+
self.hidden_size = hidden_size
|
| 503 |
+
self.heads_num = heads_num
|
| 504 |
+
|
| 505 |
+
# image projection
|
| 506 |
+
self.img_in = PatchEmbed(
|
| 507 |
+
self.patch_size, self.in_channels, self.hidden_size, **factory_kwargs
|
| 508 |
+
)
|
| 509 |
+
|
| 510 |
+
# text projection
|
| 511 |
+
if self.text_projection == "linear":
|
| 512 |
+
self.txt_in = TextProjection(
|
| 513 |
+
self.text_states_dim,
|
| 514 |
+
self.hidden_size,
|
| 515 |
+
get_activation_layer("silu"),
|
| 516 |
+
**factory_kwargs,
|
| 517 |
+
)
|
| 518 |
+
elif self.text_projection == "single_refiner":
|
| 519 |
+
self.txt_in = SingleTokenRefiner(
|
| 520 |
+
self.text_states_dim, hidden_size, heads_num, depth=2, **factory_kwargs
|
| 521 |
+
)
|
| 522 |
+
else:
|
| 523 |
+
raise NotImplementedError(
|
| 524 |
+
f"Unsupported text_projection: {self.text_projection}"
|
| 525 |
+
)
|
| 526 |
+
|
| 527 |
+
# time modulation
|
| 528 |
+
self.time_in = TimestepEmbedder(
|
| 529 |
+
self.hidden_size, get_activation_layer("silu"), **factory_kwargs
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
# text modulation
|
| 533 |
+
self.vector_in = MLPEmbedder(
|
| 534 |
+
self.text_states_dim_2, self.hidden_size, **factory_kwargs
|
| 535 |
+
)
|
| 536 |
+
|
| 537 |
+
# guidance modulation
|
| 538 |
+
self.guidance_in = (
|
| 539 |
+
TimestepEmbedder(
|
| 540 |
+
self.hidden_size, get_activation_layer("silu"), **factory_kwargs
|
| 541 |
+
)
|
| 542 |
+
if guidance_embed
|
| 543 |
+
else None
|
| 544 |
+
)
|
| 545 |
+
|
| 546 |
+
# double blocks
|
| 547 |
+
self.double_blocks = nn.ModuleList(
|
| 548 |
+
[
|
| 549 |
+
MMDoubleStreamBlock(
|
| 550 |
+
self.hidden_size,
|
| 551 |
+
self.heads_num,
|
| 552 |
+
mlp_width_ratio=mlp_width_ratio,
|
| 553 |
+
mlp_act_type=mlp_act_type,
|
| 554 |
+
qk_norm=qk_norm,
|
| 555 |
+
qk_norm_type=qk_norm_type,
|
| 556 |
+
qkv_bias=qkv_bias,
|
| 557 |
+
**factory_kwargs,
|
| 558 |
+
)
|
| 559 |
+
for _ in range(mm_double_blocks_depth)
|
| 560 |
+
]
|
| 561 |
+
)
|
| 562 |
+
|
| 563 |
+
# single blocks
|
| 564 |
+
self.single_blocks = nn.ModuleList(
|
| 565 |
+
[
|
| 566 |
+
MMSingleStreamBlock(
|
| 567 |
+
self.hidden_size,
|
| 568 |
+
self.heads_num,
|
| 569 |
+
mlp_width_ratio=mlp_width_ratio,
|
| 570 |
+
mlp_act_type=mlp_act_type,
|
| 571 |
+
qk_norm=qk_norm,
|
| 572 |
+
qk_norm_type=qk_norm_type,
|
| 573 |
+
**factory_kwargs,
|
| 574 |
+
)
|
| 575 |
+
for _ in range(mm_single_blocks_depth)
|
| 576 |
+
]
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
self.final_layer = FinalLayer(
|
| 580 |
+
self.hidden_size,
|
| 581 |
+
self.patch_size,
|
| 582 |
+
self.out_channels,
|
| 583 |
+
get_activation_layer("silu"),
|
| 584 |
+
**factory_kwargs,
|
| 585 |
+
)
|
| 586 |
+
|
| 587 |
+
def enable_deterministic(self):
|
| 588 |
+
for block in self.double_blocks:
|
| 589 |
+
block.enable_deterministic()
|
| 590 |
+
for block in self.single_blocks:
|
| 591 |
+
block.enable_deterministic()
|
| 592 |
+
|
| 593 |
+
def disable_deterministic(self):
|
| 594 |
+
for block in self.double_blocks:
|
| 595 |
+
block.disable_deterministic()
|
| 596 |
+
for block in self.single_blocks:
|
| 597 |
+
block.disable_deterministic()
|
| 598 |
+
|
| 599 |
+
def get_rotary_pos_embed(self, rope_sizes):
|
| 600 |
+
target_ndim = 3
|
| 601 |
+
# ndim = 5 - 2
|
| 602 |
+
head_dim = self.hidden_size // self.heads_num
|
| 603 |
+
rope_dim_list = self.rope_dim_list
|
| 604 |
+
if rope_dim_list is None:
|
| 605 |
+
rope_dim_list = [head_dim // target_ndim for _ in range(target_ndim)]
|
| 606 |
+
assert (
|
| 607 |
+
sum(rope_dim_list) == head_dim
|
| 608 |
+
), "sum(rope_dim_list) should equal to head_dim of attention layer"
|
| 609 |
+
freqs_cos, freqs_sin = get_nd_rotary_pos_embed(
|
| 610 |
+
rope_dim_list,
|
| 611 |
+
rope_sizes,
|
| 612 |
+
theta=self.rope_theta,
|
| 613 |
+
use_real=True,
|
| 614 |
+
theta_rescale_factor=1,
|
| 615 |
+
)
|
| 616 |
+
return freqs_cos, freqs_sin
|
| 617 |
+
|
| 618 |
+
def forward(
|
| 619 |
+
self,
|
| 620 |
+
x: torch.Tensor,
|
| 621 |
+
t: torch.Tensor, # Should be in range(0, 1000).
|
| 622 |
+
text_states: torch.Tensor = None,
|
| 623 |
+
text_mask: torch.Tensor = None, # Now we don't use it.
|
| 624 |
+
text_states_2: Optional[torch.Tensor] = None, # Text embedding for modulation.
|
| 625 |
+
freqs_cos: Optional[torch.Tensor] = None,
|
| 626 |
+
freqs_sin: Optional[torch.Tensor] = None,
|
| 627 |
+
guidance: torch.Tensor = None, # Guidance for modulation, should be cfg_scale x 1000.
|
| 628 |
+
return_dict: bool = True,
|
| 629 |
+
output_features=False,
|
| 630 |
+
output_features_stride=39,
|
| 631 |
+
output_intermediate_features=False,
|
| 632 |
+
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 633 |
+
|
| 634 |
+
out = {}
|
| 635 |
+
img = x
|
| 636 |
+
txt = text_states
|
| 637 |
+
_, _, ot, oh, ow = x.shape
|
| 638 |
+
tt, th, tw = (
|
| 639 |
+
ot // self.patch_size[0],
|
| 640 |
+
oh // self.patch_size[1],
|
| 641 |
+
ow // self.patch_size[2],
|
| 642 |
+
)
|
| 643 |
+
|
| 644 |
+
if freqs_cos is None:
|
| 645 |
+
original_tt = nccl_info.sp_size * tt
|
| 646 |
+
freqs_cos, freqs_sin = self.get_rotary_pos_embed((original_tt, th, tw))
|
| 647 |
+
|
| 648 |
+
# Prepare modulation vectors.
|
| 649 |
+
vec = self.time_in(t)
|
| 650 |
+
|
| 651 |
+
# text modulation
|
| 652 |
+
vec = vec + self.vector_in(text_states_2)
|
| 653 |
+
|
| 654 |
+
# guidance modulation
|
| 655 |
+
if self.guidance_embed:
|
| 656 |
+
if guidance is None:
|
| 657 |
+
raise ValueError(
|
| 658 |
+
"Didn't get guidance strength for guidance distilled model."
|
| 659 |
+
)
|
| 660 |
+
|
| 661 |
+
# our timestep_embedding is merged into guidance_in(TimestepEmbedder)
|
| 662 |
+
vec = vec + self.guidance_in(guidance)
|
| 663 |
+
|
| 664 |
+
# Embed image and text.
|
| 665 |
+
img = self.img_in(img)
|
| 666 |
+
if self.text_projection == "linear":
|
| 667 |
+
txt = self.txt_in(txt)
|
| 668 |
+
elif self.text_projection == "single_refiner":
|
| 669 |
+
txt = self.txt_in(txt, t, text_mask if self.use_attention_mask else None)
|
| 670 |
+
else:
|
| 671 |
+
raise NotImplementedError(
|
| 672 |
+
f"Unsupported text_projection: {self.text_projection}"
|
| 673 |
+
)
|
| 674 |
+
|
| 675 |
+
txt_seq_len = txt.shape[1]
|
| 676 |
+
img_seq_len = img.shape[1]
|
| 677 |
+
|
| 678 |
+
# Compute cu_squlens and max_seqlen for flash attention
|
| 679 |
+
cu_seqlens_q = get_cu_seqlens(text_mask, img_seq_len)
|
| 680 |
+
cu_seqlens_kv = cu_seqlens_q
|
| 681 |
+
max_seqlen_q = img_seq_len + txt_seq_len
|
| 682 |
+
max_seqlen_kv = max_seqlen_q
|
| 683 |
+
|
| 684 |
+
freqs_cis = (freqs_cos, freqs_sin) if freqs_cos is not None else None
|
| 685 |
+
if output_intermediate_features:
|
| 686 |
+
intermediate_features_list = []
|
| 687 |
+
# --------------------- Pass through DiT blocks ------------------------
|
| 688 |
+
for _, block in enumerate(self.double_blocks):
|
| 689 |
+
double_block_args = [
|
| 690 |
+
img,
|
| 691 |
+
txt,
|
| 692 |
+
vec,
|
| 693 |
+
cu_seqlens_q,
|
| 694 |
+
cu_seqlens_kv,
|
| 695 |
+
max_seqlen_q,
|
| 696 |
+
max_seqlen_kv,
|
| 697 |
+
freqs_cis,
|
| 698 |
+
]
|
| 699 |
+
|
| 700 |
+
img, txt = block(*double_block_args)
|
| 701 |
+
if output_intermediate_features:
|
| 702 |
+
intermediate_features_list.append(img)
|
| 703 |
+
|
| 704 |
+
# Merge txt and img to pass through single stream blocks.
|
| 705 |
+
if output_features:
|
| 706 |
+
features_list = []
|
| 707 |
+
|
| 708 |
+
x = torch.cat((img, txt), 1)
|
| 709 |
+
if len(self.single_blocks) > 0:
|
| 710 |
+
for _, block in enumerate(self.single_blocks):
|
| 711 |
+
single_block_args = [
|
| 712 |
+
x,
|
| 713 |
+
vec,
|
| 714 |
+
txt_seq_len,
|
| 715 |
+
cu_seqlens_q,
|
| 716 |
+
cu_seqlens_kv,
|
| 717 |
+
max_seqlen_q,
|
| 718 |
+
max_seqlen_kv,
|
| 719 |
+
(freqs_cos, freqs_sin),
|
| 720 |
+
]
|
| 721 |
+
|
| 722 |
+
x = block(*single_block_args)
|
| 723 |
+
# if output_features and _ % output_features_stride == 0:
|
| 724 |
+
# features_list.append(x[:, :img_seq_len, ...])
|
| 725 |
+
if _ == output_features_stride and output_features:
|
| 726 |
+
features_list.append(x[:, :img_seq_len, ...])
|
| 727 |
+
features_list = torch.stack(features_list, dim=0)
|
| 728 |
+
return (None, features_list)
|
| 729 |
+
if output_intermediate_features:
|
| 730 |
+
intermediate_features_list.append(x[:, :img_seq_len, ...])
|
| 731 |
+
|
| 732 |
+
img = x[:, :img_seq_len, ...]
|
| 733 |
+
|
| 734 |
+
# ---------------------------- Final layer ------------------------------
|
| 735 |
+
img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
|
| 736 |
+
|
| 737 |
+
img = self.unpatchify(img, tt, th, tw)
|
| 738 |
+
|
| 739 |
+
if output_features:
|
| 740 |
+
features_list = torch.stack(features_list, dim=0)
|
| 741 |
+
else:
|
| 742 |
+
features_list = None
|
| 743 |
+
|
| 744 |
+
if return_dict:
|
| 745 |
+
out["x"] = img
|
| 746 |
+
return out
|
| 747 |
+
if output_features:
|
| 748 |
+
return (img, features_list)
|
| 749 |
+
if output_intermediate_features:
|
| 750 |
+
return intermediate_features_list
|
| 751 |
+
return img
|
| 752 |
+
|
| 753 |
+
def unpatchify(self, x, t, h, w):
|
| 754 |
+
"""
|
| 755 |
+
x: (N, T, patch_size**2 * C)
|
| 756 |
+
imgs: (N, H, W, C)
|
| 757 |
+
"""
|
| 758 |
+
c = self.unpatchify_channels
|
| 759 |
+
pt, ph, pw = self.patch_size
|
| 760 |
+
assert t * h * w == x.shape[1]
|
| 761 |
+
|
| 762 |
+
x = x.reshape(shape=(x.shape[0], t, h, w, c, pt, ph, pw))
|
| 763 |
+
x = torch.einsum("nthwcopq->nctohpwq", x)
|
| 764 |
+
imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw))
|
| 765 |
+
|
| 766 |
+
return imgs
|
| 767 |
+
|
| 768 |
+
def params_count(self):
|
| 769 |
+
counts = {
|
| 770 |
+
"double": sum(
|
| 771 |
+
[
|
| 772 |
+
sum(p.numel() for p in block.img_attn_qkv.parameters())
|
| 773 |
+
+ sum(p.numel() for p in block.img_attn_proj.parameters())
|
| 774 |
+
+ sum(p.numel() for p in block.img_mlp.parameters())
|
| 775 |
+
+ sum(p.numel() for p in block.txt_attn_qkv.parameters())
|
| 776 |
+
+ sum(p.numel() for p in block.txt_attn_proj.parameters())
|
| 777 |
+
+ sum(p.numel() for p in block.txt_mlp.parameters())
|
| 778 |
+
for block in self.double_blocks
|
| 779 |
+
]
|
| 780 |
+
),
|
| 781 |
+
"single": sum(
|
| 782 |
+
[
|
| 783 |
+
sum(p.numel() for p in block.linear1.parameters())
|
| 784 |
+
+ sum(p.numel() for p in block.linear2.parameters())
|
| 785 |
+
for block in self.single_blocks
|
| 786 |
+
]
|
| 787 |
+
),
|
| 788 |
+
"total": sum(p.numel() for p in self.parameters()),
|
| 789 |
+
}
|
| 790 |
+
counts["attn+mlp"] = counts["double"] + counts["single"]
|
| 791 |
+
return counts
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
#################################################################################
|
| 795 |
+
# HunyuanVideo Configs #
|
| 796 |
+
#################################################################################
|
| 797 |
+
|
| 798 |
+
HUNYUAN_VIDEO_CONFIG = {
|
| 799 |
+
"HYVideo-T/2": {
|
| 800 |
+
"mm_double_blocks_depth": 20,
|
| 801 |
+
"mm_single_blocks_depth": 40,
|
| 802 |
+
"rope_dim_list": [16, 56, 56],
|
| 803 |
+
"hidden_size": 3072,
|
| 804 |
+
"heads_num": 24,
|
| 805 |
+
"mlp_width_ratio": 4,
|
| 806 |
+
},
|
| 807 |
+
"HYVideo-T/2-cfgdistill": {
|
| 808 |
+
"mm_double_blocks_depth": 20,
|
| 809 |
+
"mm_single_blocks_depth": 40,
|
| 810 |
+
"rope_dim_list": [16, 56, 56],
|
| 811 |
+
"hidden_size": 3072,
|
| 812 |
+
"heads_num": 24,
|
| 813 |
+
"mlp_width_ratio": 4,
|
| 814 |
+
"guidance_embed": True,
|
| 815 |
+
},
|
| 816 |
+
}
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/modulate_layers.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ModulateDiT(nn.Module):
|
| 8 |
+
"""Modulation layer for DiT."""
|
| 9 |
+
def __init__(
|
| 10 |
+
self,
|
| 11 |
+
hidden_size: int,
|
| 12 |
+
factor: int,
|
| 13 |
+
act_layer: Callable,
|
| 14 |
+
dtype=None,
|
| 15 |
+
device=None,
|
| 16 |
+
):
|
| 17 |
+
factory_kwargs = {"dtype": dtype, "device": device}
|
| 18 |
+
super().__init__()
|
| 19 |
+
self.act = act_layer()
|
| 20 |
+
self.linear = nn.Linear(
|
| 21 |
+
hidden_size, factor * hidden_size, bias=True, **factory_kwargs
|
| 22 |
+
)
|
| 23 |
+
# Zero-initialize the modulation
|
| 24 |
+
nn.init.zeros_(self.linear.weight)
|
| 25 |
+
nn.init.zeros_(self.linear.bias)
|
| 26 |
+
|
| 27 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 28 |
+
return self.linear(self.act(x))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def modulate(x, shift=None, scale=None):
|
| 32 |
+
"""modulate by shift and scale
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
x (torch.Tensor): input tensor.
|
| 36 |
+
shift (torch.Tensor, optional): shift tensor. Defaults to None.
|
| 37 |
+
scale (torch.Tensor, optional): scale tensor. Defaults to None.
|
| 38 |
+
|
| 39 |
+
Returns:
|
| 40 |
+
torch.Tensor: the output tensor after modulate.
|
| 41 |
+
"""
|
| 42 |
+
if scale is None and shift is None:
|
| 43 |
+
return x
|
| 44 |
+
elif shift is None:
|
| 45 |
+
return x * (1 + scale.unsqueeze(1))
|
| 46 |
+
elif scale is None:
|
| 47 |
+
return x + shift.unsqueeze(1)
|
| 48 |
+
else:
|
| 49 |
+
return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def apply_gate(x, gate=None, tanh=False):
|
| 53 |
+
"""AI is creating summary for apply_gate
|
| 54 |
+
|
| 55 |
+
Args:
|
| 56 |
+
x (torch.Tensor): input tensor.
|
| 57 |
+
gate (torch.Tensor, optional): gate tensor. Defaults to None.
|
| 58 |
+
tanh (bool, optional): whether to use tanh function. Defaults to False.
|
| 59 |
+
|
| 60 |
+
Returns:
|
| 61 |
+
torch.Tensor: the output tensor after apply gate.
|
| 62 |
+
"""
|
| 63 |
+
if gate is None:
|
| 64 |
+
return x
|
| 65 |
+
if tanh:
|
| 66 |
+
return x * gate.unsqueeze(1).tanh()
|
| 67 |
+
else:
|
| 68 |
+
return x * gate.unsqueeze(1)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def ckpt_wrapper(module):
|
| 72 |
+
def ckpt_forward(*inputs):
|
| 73 |
+
outputs = module(*inputs)
|
| 74 |
+
return outputs
|
| 75 |
+
|
| 76 |
+
return ckpt_forward
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/norm_layers.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class RMSNorm(nn.Module):
|
| 6 |
+
def __init__(
|
| 7 |
+
self,
|
| 8 |
+
dim: int,
|
| 9 |
+
elementwise_affine=True,
|
| 10 |
+
eps: float = 1e-6,
|
| 11 |
+
device=None,
|
| 12 |
+
dtype=None,
|
| 13 |
+
):
|
| 14 |
+
"""
|
| 15 |
+
Initialize the RMSNorm normalization layer.
|
| 16 |
+
|
| 17 |
+
Args:
|
| 18 |
+
dim (int): The dimension of the input tensor.
|
| 19 |
+
eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.
|
| 20 |
+
|
| 21 |
+
Attributes:
|
| 22 |
+
eps (float): A small value added to the denominator for numerical stability.
|
| 23 |
+
weight (nn.Parameter): Learnable scaling parameter.
|
| 24 |
+
|
| 25 |
+
"""
|
| 26 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 27 |
+
super().__init__()
|
| 28 |
+
self.eps = eps
|
| 29 |
+
if elementwise_affine:
|
| 30 |
+
self.weight = nn.Parameter(torch.ones(dim, **factory_kwargs))
|
| 31 |
+
|
| 32 |
+
def _norm(self, x):
|
| 33 |
+
"""
|
| 34 |
+
Apply the RMSNorm normalization to the input tensor.
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
x (torch.Tensor): The input tensor.
|
| 38 |
+
|
| 39 |
+
Returns:
|
| 40 |
+
torch.Tensor: The normalized tensor.
|
| 41 |
+
|
| 42 |
+
"""
|
| 43 |
+
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
|
| 44 |
+
|
| 45 |
+
def forward(self, x):
|
| 46 |
+
"""
|
| 47 |
+
Forward pass through the RMSNorm layer.
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
x (torch.Tensor): The input tensor.
|
| 51 |
+
|
| 52 |
+
Returns:
|
| 53 |
+
torch.Tensor: The output tensor after applying RMSNorm.
|
| 54 |
+
|
| 55 |
+
"""
|
| 56 |
+
output = self._norm(x.float()).type_as(x)
|
| 57 |
+
if hasattr(self, "weight"):
|
| 58 |
+
output = output * self.weight
|
| 59 |
+
return output
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_norm_layer(norm_layer):
|
| 63 |
+
"""
|
| 64 |
+
Get the normalization layer.
|
| 65 |
+
|
| 66 |
+
Args:
|
| 67 |
+
norm_layer (str): The type of normalization layer.
|
| 68 |
+
|
| 69 |
+
Returns:
|
| 70 |
+
norm_layer (nn.Module): The normalization layer.
|
| 71 |
+
"""
|
| 72 |
+
if norm_layer == "layer":
|
| 73 |
+
return nn.LayerNorm
|
| 74 |
+
elif norm_layer == "rms":
|
| 75 |
+
return RMSNorm
|
| 76 |
+
else:
|
| 77 |
+
raise NotImplementedError(f"Norm layer {norm_layer} is not implemented")
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/posemb_layers.py
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from typing import Union, Tuple, List
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def _to_tuple(x, dim=2):
|
| 6 |
+
if isinstance(x, int):
|
| 7 |
+
return (x,) * dim
|
| 8 |
+
elif len(x) == dim:
|
| 9 |
+
return x
|
| 10 |
+
else:
|
| 11 |
+
raise ValueError(f"Expected length {dim} or int, but got {x}")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def get_meshgrid_nd(start, *args, dim=2):
    """Build an n-D meshgrid from start / stop / num specifications.

    Call forms (each spec may be an int or an n-tuple):
        get_meshgrid_nd(num)               -> start 0, stop num
        get_meshgrid_nd(start, stop)       -> step 1 per axis
        get_meshgrid_nd(start, stop, num)  -> num samples per axis

    Args:
        start: See call forms above.
        *args: See call forms above.
        dim (int): Number of axes. Defaults to 2.

    Returns:
        torch.Tensor: Stacked grid of shape [dim, *per-axis sizes].
    """
    n_extra = len(args)
    if n_extra == 0:
        # start is the grid size; sample [0, size) on each axis
        num = _to_tuple(start, dim=dim)
        start = (0,) * dim
        stop = num
    elif n_extra == 1:
        # (start, stop) with implicit step 1
        start = _to_tuple(start, dim=dim)
        stop = _to_tuple(args[0], dim=dim)
        num = [stop[i] - start[i] for i in range(dim)]
    elif n_extra == 2:
        # (start, stop, num), e.g. crop corners plus target size
        start = _to_tuple(start, dim=dim)
        stop = _to_tuple(args[0], dim=dim)
        num = _to_tuple(args[1], dim=dim)
    else:
        raise ValueError(f"len(args) should be 0, 1 or 2, but got {len(args)}")

    # Equivalent of np.linspace(start, stop, num, endpoint=False) per axis:
    # sample num+1 points including the endpoint, then drop the endpoint.
    axis_grid = [
        torch.linspace(start[i], stop[i], num[i] + 1, dtype=torch.float32)[: num[i]]
        for i in range(dim)
    ]
    mesh = torch.meshgrid(*axis_grid, indexing="ij")  # dim tensors of grid shape
    return torch.stack(mesh, dim=0)  # [dim, ...]
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
#################################################################################
|
| 60 |
+
# Rotary Positional Embedding Functions #
|
| 61 |
+
#################################################################################
|
| 62 |
+
# https://github.com/meta-llama/llama/blob/be327c427cc5e89cc1d3ab3d3fec4484df771245/llama/model.py#L80
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def reshape_for_broadcast(
    freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]],
    x: torch.Tensor,
    head_first=False,
):
    """
    Reshape frequency tensor for broadcasting it with another tensor.

    This function reshapes the frequency tensor to have the same shape as the target tensor 'x'
    for the purpose of broadcasting the frequency tensor during element-wise operations.

    Notes:
        When using FlashMHAModified, head_first should be False.
        When using Attention, head_first should be True.

    Args:
        freqs_cis (Union[torch.Tensor, Tuple[torch.Tensor]]): Frequency tensor to be reshaped.
            A tuple means (cos, sin) in real space; a single tensor means complex values.
        x (torch.Tensor): Target tensor for broadcasting compatibility.
        head_first (bool): head dimension first (except batch dim) or not.

    Returns:
        torch.Tensor: Reshaped frequency tensor (or a (cos, sin) pair for tuple input).

    Raises:
        AssertionError: If the frequency tensor doesn't match the expected shape.
        AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions.
    """
    ndim = x.ndim
    # x needs at least 2 dims so a sequence axis exists to broadcast over.
    assert 0 <= 1 < ndim

    if isinstance(freqs_cis, tuple):
        # freqs_cis: (cos, sin) in real space
        if head_first:
            # head-first layout [..., S, D]: sequence axis is the second-to-last dim
            assert freqs_cis[0].shape == (
                x.shape[-2],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}"
            # keep last two axes, collapse everything else to 1 for broadcasting
            shape = [
                d if i == ndim - 2 or i == ndim - 1 else 1
                for i, d in enumerate(x.shape)
            ]
        else:
            # sequence-first layout [B, S, ..., D]: sequence axis is dim 1
            assert freqs_cis[0].shape == (
                x.shape[1],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}"
            shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        return freqs_cis[0].view(*shape), freqs_cis[1].view(*shape)
    else:
        # freqs_cis: values in complex space
        if head_first:
            assert freqs_cis.shape == (
                x.shape[-2],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}"
            shape = [
                d if i == ndim - 2 or i == ndim - 1 else 1
                for i, d in enumerate(x.shape)
            ]
        else:
            assert freqs_cis.shape == (
                x.shape[1],
                x.shape[-1],
            ), f"freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}"
            shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        return freqs_cis.view(*shape)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def rotate_half(x):
    """Rotate channel pairs by 90 degrees: (a, b) -> (-b, a).

    Interprets the last dim of *x* as interleaved (real, imag) pairs and
    multiplies each pair by i. Assumes a 4-D input [B, S, H, D] (the final
    ``flatten(3)`` merges the pair axis back into D).

    Returns:
        torch.Tensor: float tensor, same shape as *x*.
    """
    paired = x.float().reshape(*x.shape[:-1], -1, 2)  # [B, S, H, D//2, 2]
    real, imag = paired.unbind(dim=-1)
    rotated = torch.stack((-imag, real), dim=-1)
    return rotated.flatten(3)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def apply_rotary_emb(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
    head_first: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Apply rotary embeddings to input tensors using the given frequency tensor.

    This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided
    frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor
    is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are
    returned as real tensors.

    Args:
        xq (torch.Tensor): Query tensor to apply rotary embeddings. [B, S, H, D]
        xk (torch.Tensor): Key tensor to apply rotary embeddings. [B, S, H, D]
        freqs_cis (torch.Tensor or tuple): Precomputed frequency tensor for complex exponential.
            A (cos, sin) tuple selects the real-valued path; a single tensor the complex path.
        head_first (bool): head dimension first (except batch dim) or not.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.

    """
    xk_out = None
    if isinstance(freqs_cis, tuple):
        # Real-valued path: rotation expressed as x*cos + rotate_half(x)*sin.
        cos, sin = reshape_for_broadcast(freqs_cis, xq, head_first)  # [S, D]
        cos, sin = cos.to(xq.device), sin.to(xq.device)
        # real * cos - imag * sin
        # imag * cos + real * sin
        # Math done in float32, result cast back to the input dtype.
        xq_out = (xq.float() * cos + rotate_half(xq.float()) * sin).type_as(xq)
        xk_out = (xk.float() * cos + rotate_half(xk.float()) * sin).type_as(xk)
    else:
        # Complex path: multiply by unit complex exponentials.
        # view_as_complex will pack [..., D/2, 2](real) to [..., D/2](complex)
        xq_ = torch.view_as_complex(
            xq.float().reshape(*xq.shape[:-1], -1, 2)
        )  # [B, S, H, D//2]
        freqs_cis = reshape_for_broadcast(freqs_cis, xq_, head_first).to(
            xq.device
        )  # [S, D//2] --> [1, S, 1, D//2]
        # (real, imag) * (cos, sin) = (real * cos - imag * sin, imag * cos + real * sin)
        # view_as_real will expand [..., D/2](complex) to [..., D/2, 2](real)
        xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3).type_as(xq)
        # The reshaped freqs_cis is reused for the key projection below.
        xk_ = torch.view_as_complex(
            xk.float().reshape(*xk.shape[:-1], -1, 2)
        )  # [B, S, H, D//2]
        xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3).type_as(xk)

    return xq_out, xk_out
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def get_nd_rotary_pos_embed(
    rope_dim_list,
    start,
    *args,
    theta=10000.0,
    use_real=False,
    theta_rescale_factor: Union[float, List[float]] = 1.0,
    interpolation_factor: Union[float, List[float]] = 1.0,
):
    """N-dimensional RoPE: one 1-D rotary embedding per spatial axis, concatenated.

    Args:
        rope_dim_list (list of int): Per-axis embedding dims; len(rope_dim_list)
            is the number of axes n, sum(rope_dim_list) should equal the
            attention head_dim.
        start (int | tuple | list): Passed through to get_meshgrid_nd together
            with *args (num / start,stop / start,stop,num forms).
        *args: See above.
        theta (float): Base for frequency computation. Defaults to 10000.0.
        use_real (bool): If True, return (cos, sin) instead of complex values
            (useful where complex64 is unsupported, e.g. TensorRT).
        theta_rescale_factor (float | list): NTK-style theta rescale, scalar or per axis.
        interpolation_factor (float | list): Position interpolation factor, scalar or per axis.

    Returns:
        torch.Tensor or (cos, sin) pair of shape [prod(grid sizes), sum(dims)/2 or sum(dims)].
    """
    n_axes = len(rope_dim_list)
    grid = get_meshgrid_nd(start, *args, dim=n_axes)  # [3, W, H, D] / [2, W, H]

    def _per_axis(factor, name):
        # Broadcast a scalar (or singleton list) factor to one value per axis.
        if isinstance(factor, (int, float)):
            factor = [factor] * n_axes
        elif isinstance(factor, list) and len(factor) == 1:
            factor = [factor[0]] * n_axes
        assert len(factor) == n_axes, f"len({name}) should equal to len(rope_dim_list)"
        return factor

    theta_rescale_factor = _per_axis(theta_rescale_factor, "theta_rescale_factor")
    interpolation_factor = _per_axis(interpolation_factor, "interpolation_factor")

    # Each axis encodes its (flattened) grid coordinates with its own share of dims.
    embs = [
        get_1d_rotary_pos_embed(
            rope_dim_list[i],
            grid[i].reshape(-1),
            theta,
            use_real=use_real,
            theta_rescale_factor=theta_rescale_factor[i],
            interpolation_factor=interpolation_factor[i],
        )  # 2 x [WHD, rope_dim_list[i]]
        for i in range(n_axes)
    ]

    if use_real:
        cos = torch.cat([e[0] for e in embs], dim=1)  # (WHD, D/2)
        sin = torch.cat([e[1] for e in embs], dim=1)  # (WHD, D/2)
        return cos, sin
    return torch.cat(embs, dim=1)  # (WHD, D/2)
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def get_1d_rotary_pos_embed(
    dim: int,
    pos: Union[torch.FloatTensor, int],
    theta: float = 10000.0,
    use_real: bool = False,
    theta_rescale_factor: float = 1.0,
    interpolation_factor: float = 1.0,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """Precompute 1-D rotary frequencies (`cis` = cos + i*sin).

    Args:
        dim (int): Embedding dimension; dim//2 distinct frequencies are used.
        pos (int or torch.FloatTensor): Positions [S], or an int meaning arange(pos).
        theta (float): Frequency base. Defaults to 10000.0.
        use_real (bool): Return (cos, sin) real pair instead of complex values.
        theta_rescale_factor (float): NTK-style rescale for longer sequences
            without fine-tuning. Defaults to 1.0.
        interpolation_factor (float): Multiplied into positions before the
            outer product. Defaults to 1.0.

    Returns:
        Complex tensor [S, D/2], or (cos, sin) each [S, D] when use_real.
    """
    if isinstance(pos, int):
        pos = torch.arange(pos).float()

    # NTK-aware theta rescaling (proposed by reddit user bloc97) to extend
    # the usable sequence length without fine-tuning.
    if theta_rescale_factor != 1.0:
        theta *= theta_rescale_factor ** (dim / (dim - 2))

    exponents = torch.arange(0, dim, 2)[: (dim // 2)].float() / dim
    freqs = 1.0 / (theta ** exponents)  # [D/2]
    angles = torch.outer(pos * interpolation_factor, freqs)  # [S, D/2]

    if use_real:
        # Duplicate each frequency so cos/sin align with interleaved channel pairs.
        freqs_cos = angles.cos().repeat_interleave(2, dim=1)  # [S, D]
        freqs_sin = angles.sin().repeat_interleave(2, dim=1)  # [S, D]
        return freqs_cos, freqs_sin
    return torch.polar(torch.ones_like(angles), angles)  # complex64 [S, D/2]
|
exp_code/1_benchmark/AccVideo/models/hunyuan/modules/token_refiner.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
from einops import rearrange
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from .activation_layers import get_activation_layer
|
| 8 |
+
from .attenion import attention
|
| 9 |
+
from .norm_layers import get_norm_layer
|
| 10 |
+
from .embed_layers import TimestepEmbedder, TextProjection
|
| 11 |
+
from .attenion import attention
|
| 12 |
+
from .mlp_layers import MLP
|
| 13 |
+
from .modulate_layers import modulate, apply_gate
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class IndividualTokenRefinerBlock(nn.Module):
    """Pre-LN self-attention + MLP block with adaLN-style residual gating.

    The conditioning vector ``c`` produces one gate per branch via
    ``adaLN_modulation``; both branches are residual additions onto ``x``.
    """

    def __init__(
        self,
        hidden_size,
        heads_num,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        act_type: str = "silu",
        qk_norm: bool = False,
        qk_norm_type: str = "layer",
        qkv_bias: bool = True,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.heads_num = heads_num
        head_dim = hidden_size // heads_num
        mlp_hidden_dim = int(hidden_size * mlp_width_ratio)

        self.norm1 = nn.LayerNorm(
            hidden_size, elementwise_affine=True, eps=1e-6, **factory_kwargs
        )
        # Fused QKV projection; split into three heads-first tensors in forward().
        self.self_attn_qkv = nn.Linear(
            hidden_size, hidden_size * 3, bias=qkv_bias, **factory_kwargs
        )
        qk_norm_layer = get_norm_layer(qk_norm_type)
        # Optional per-head Q/K normalization (identity when qk_norm=False).
        self.self_attn_q_norm = (
            qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
            if qk_norm
            else nn.Identity()
        )
        self.self_attn_k_norm = (
            qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs)
            if qk_norm
            else nn.Identity()
        )
        self.self_attn_proj = nn.Linear(
            hidden_size, hidden_size, bias=qkv_bias, **factory_kwargs
        )

        self.norm2 = nn.LayerNorm(
            hidden_size, elementwise_affine=True, eps=1e-6, **factory_kwargs
        )
        act_layer = get_activation_layer(act_type)
        self.mlp = MLP(
            in_channels=hidden_size,
            hidden_channels=mlp_hidden_dim,
            act_layer=act_layer,
            drop=mlp_drop_rate,
            **factory_kwargs,
        )

        # Produces (gate_msa, gate_mlp) from the conditioning vector.
        self.adaLN_modulation = nn.Sequential(
            act_layer(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True, **factory_kwargs),
        )
        # Zero-initialize the modulation so both branches start fully gated off.
        nn.init.zeros_(self.adaLN_modulation[1].weight)
        nn.init.zeros_(self.adaLN_modulation[1].bias)

    def forward(
        self,
        x: torch.Tensor,
        c: torch.Tensor,  # timestep_aware_representations + context_aware_representations
        attn_mask: torch.Tensor = None,
    ):
        # One gate per residual branch, derived from the conditioning vector.
        gate_msa, gate_mlp = self.adaLN_modulation(c).chunk(2, dim=1)

        norm_x = self.norm1(x)
        qkv = self.self_attn_qkv(norm_x)
        q, k, v = rearrange(qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num)
        # Apply QK-Norm if needed
        q = self.self_attn_q_norm(q).to(v)
        k = self.self_attn_k_norm(k).to(v)

        # Self-Attention
        attn = attention(q, k, v, mode="torch", attn_mask=attn_mask)

        x = x + apply_gate(self.self_attn_proj(attn), gate_msa)

        # FFN Layer
        x = x + apply_gate(self.mlp(self.norm2(x)), gate_mlp)

        return x
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class IndividualTokenRefiner(nn.Module):
    """A stack of ``depth`` IndividualTokenRefinerBlock layers sharing one
    padding-aware self-attention mask."""

    def __init__(
        self,
        hidden_size,
        heads_num,
        depth,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        act_type: str = "silu",
        qk_norm: bool = False,
        qk_norm_type: str = "layer",
        qkv_bias: bool = True,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.blocks = nn.ModuleList(
            [
                IndividualTokenRefinerBlock(
                    hidden_size=hidden_size,
                    heads_num=heads_num,
                    mlp_width_ratio=mlp_width_ratio,
                    mlp_drop_rate=mlp_drop_rate,
                    act_type=act_type,
                    qk_norm=qk_norm,
                    qk_norm_type=qk_norm_type,
                    qkv_bias=qkv_bias,
                    **factory_kwargs,
                )
                for _ in range(depth)
            ]
        )

    def forward(
        self,
        x: torch.Tensor,
        c: torch.LongTensor,
        mask: Optional[torch.Tensor] = None,
    ):
        # Build a [B, 1, S, S] boolean mask where position (i, j) is attendable
        # only when BOTH token i and token j are valid (non-padding).
        self_attn_mask = None
        if mask is not None:
            batch_size = mask.shape[0]
            seq_len = mask.shape[1]
            mask = mask.to(x.device)
            # batch_size x 1 x seq_len x seq_len
            self_attn_mask_1 = mask.view(batch_size, 1, 1, seq_len).repeat(
                1, 1, seq_len, 1
            )
            # batch_size x 1 x seq_len x seq_len
            self_attn_mask_2 = self_attn_mask_1.transpose(2, 3)
            # batch_size x 1 x seq_len x seq_len, 1 for broadcasting of heads_num
            self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool()
            # avoids self-attention weight being NaN for padding tokens:
            # every row keeps at least column 0 attendable.
            self_attn_mask[:, :, :, 0] = True

        for block in self.blocks:
            x = block(x, c, self_attn_mask)
        return x
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class SingleTokenRefiner(nn.Module):
    """
    A single token refiner block for llm text embedding refine.

    Projects LLM text embeddings into the model's hidden space and refines
    them with a stack of conditioned transformer blocks, where conditioning
    combines the timestep embedding with a masked mean-pool of the input.
    """

    def __init__(
        self,
        in_channels,
        hidden_size,
        heads_num,
        depth,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        act_type: str = "silu",
        qk_norm: bool = False,
        qk_norm_type: str = "layer",
        qkv_bias: bool = True,
        attn_mode: str = "torch",
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.attn_mode = attn_mode
        assert self.attn_mode == "torch", "Only support 'torch' mode for token refiner."

        # Maps raw text-encoder features into the refiner's hidden size.
        self.input_embedder = nn.Linear(
            in_channels, hidden_size, bias=True, **factory_kwargs
        )

        act_layer = get_activation_layer(act_type)
        # Build timestep embedding layer
        self.t_embedder = TimestepEmbedder(hidden_size, act_layer, **factory_kwargs)
        # Build context embedding layer
        self.c_embedder = TextProjection(
            in_channels, hidden_size, act_layer, **factory_kwargs
        )

        self.individual_token_refiner = IndividualTokenRefiner(
            hidden_size=hidden_size,
            heads_num=heads_num,
            depth=depth,
            mlp_width_ratio=mlp_width_ratio,
            mlp_drop_rate=mlp_drop_rate,
            act_type=act_type,
            qk_norm=qk_norm,
            qk_norm_type=qk_norm_type,
            qkv_bias=qkv_bias,
            **factory_kwargs,
        )

    def forward(
        self,
        x: torch.Tensor,
        t: torch.LongTensor,
        mask: Optional[torch.LongTensor] = None,
    ):
        timestep_aware_representations = self.t_embedder(t)

        # Pool the sequence into one context vector; with a mask, average
        # only over valid (non-padding) tokens.
        if mask is None:
            context_aware_representations = x.mean(dim=1)
        else:
            mask_float = mask.float().unsqueeze(-1)  # [b, s1, 1]
            context_aware_representations = (x * mask_float).sum(
                dim=1
            ) / mask_float.sum(dim=1)
        context_aware_representations = self.c_embedder(context_aware_representations)
        # Conditioning vector fed to every refiner block.
        c = timestep_aware_representations + context_aware_representations

        x = self.input_embedder(x)

        x = self.individual_token_refiner(x, c, mask)

        return x
|
exp_code/1_benchmark/AccVideo/models/hunyuan/parallel_states.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.distributed as dist
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class COMM_INFO:
    """Mutable holder describing this process's sequence-parallel placement.

    Populated by initialize_sequence_parallel_state() /
    initialize_sequence_parallel_group(); defaults describe the
    no-parallelism case.
    """

    def __init__(self):
        self.group = None  # torch.distributed process group; None until created
        self.sp_size = 1  # number of ranks in the sequence-parallel group
        self.global_rank = 0  # rank within the whole world
        self.rank_within_group = 0  # rank inside the sequence-parallel group
        self.group_id = 0  # index of the sequence-parallel group
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Module-level singleton: all helpers below read/write this shared state.
nccl_info = COMM_INFO()
# Whether sequence parallelism is active; managed by
# initialize_sequence_parallel_state() / set_sequence_parallel_state().
_SEQUENCE_PARALLEL_STATE = False
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def initialize_sequence_parallel_state(sequence_parallel_size):
    """Enable and set up sequence parallelism when size > 1.

    For size <= 1 no process groups are created; ``nccl_info`` is filled in
    from the RANK environment variable and the module flag stays False.

    Args:
        sequence_parallel_size (int): Ranks per sequence-parallel group.
    """
    global _SEQUENCE_PARALLEL_STATE
    if sequence_parallel_size <= 1:
        # Degenerate case: each rank is its own group of one.
        nccl_info.sp_size = 1
        nccl_info.global_rank = int(os.getenv("RANK", "0"))
        nccl_info.rank_within_group = 0
        nccl_info.group_id = int(os.getenv("RANK", "0"))
    else:
        _SEQUENCE_PARALLEL_STATE = True
        initialize_sequence_parallel_group(sequence_parallel_size)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def set_sequence_parallel_state(state):
    """Force the module-level sequence-parallel flag to *state* (bool)."""
    global _SEQUENCE_PARALLEL_STATE
    _SEQUENCE_PARALLEL_STATE = state
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_sequence_parallel_state():
    """Return True when sequence parallelism has been enabled."""
    return _SEQUENCE_PARALLEL_STATE
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def initialize_sequence_parallel_group(sequence_parallel_size):
    """Initialize the sequence parallel group.

    Partitions the world into consecutive groups of
    ``sequence_parallel_size`` ranks and records this rank's group in the
    shared ``nccl_info``. Every rank must execute every ``dist.new_group``
    call (a torch.distributed requirement), even for groups it is not in.
    """
    rank = int(os.getenv("RANK", "0"))
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    assert (
        world_size % sequence_parallel_size == 0
    ), "world_size must be divisible by sequence_parallel_size, but got world_size: {}, sequence_parallel_size: {}".format(
        world_size, sequence_parallel_size
    )
    nccl_info.sp_size = sequence_parallel_size
    nccl_info.global_rank = rank
    num_sequence_parallel_groups: int = world_size // sequence_parallel_size
    for i in range(num_sequence_parallel_groups):
        # Group i covers ranks [i*size, (i+1)*size).
        ranks = range(i * sequence_parallel_size, (i + 1) * sequence_parallel_size)
        group = dist.new_group(ranks)
        if rank in ranks:
            # Remember only the group this rank belongs to.
            nccl_info.group = group
            nccl_info.rank_within_group = rank - i * sequence_parallel_size
            nccl_info.group_id = i
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def destroy_sequence_parallel_group():
    """Destroy the sequence parallel group."""
    # NOTE(review): called without a group argument, so this tears down the
    # default process group (and with it all groups), not just the
    # sequence-parallel one — confirm this is intended at shutdown.
    dist.destroy_process_group()
|
exp_code/1_benchmark/AccVideo/models/hunyuan/prompt_rewrite.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Rewrite-instruction template for "Normal" mode; `{input}` is filled by
# get_rewrite_prompt(). NOTE(review): apart from the mode label on the first
# line, this template is currently identical to master_mode_prompt below —
# confirm whether Master mode was meant to differ.
normal_mode_prompt = """Normal mode - Video Recaption Task:

You are a large language model specialized in rewriting video descriptions. Your task is to modify the input description.

0. Preserve ALL information, including style words and technical terms.

1. If the input is in Chinese, translate the entire description to English.

2. If the input is just one or two words describing an object or person, provide a brief, simple description focusing on basic visual characteristics. Limit the description to 1-2 short sentences.

3. If the input does not include style, lighting, atmosphere, you can make reasonable associations.

4. Output ALL must be in English.

Given Input:
input: "{input}"
"""


# Rewrite-instruction template for "Master" mode (see NOTE above).
master_mode_prompt = """Master mode - Video Recaption Task:

You are a large language model specialized in rewriting video descriptions. Your task is to modify the input description.

0. Preserve ALL information, including style words and technical terms.

1. If the input is in Chinese, translate the entire description to English.

2. If the input is just one or two words describing an object or person, provide a brief, simple description focusing on basic visual characteristics. Limit the description to 1-2 short sentences.

3. If the input does not include style, lighting, atmosphere, you can make reasonable associations.

4. Output ALL must be in English.

Given Input:
input: "{input}"
"""
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def get_rewrite_prompt(ori_prompt, mode="Normal"):
    """Fill the rewrite prompt template for the requested mode.

    Args:
        ori_prompt (str): Original caption to be rewritten (may be Chinese).
        mode (str): Either "Normal" or "Master", selecting the template.

    Returns:
        str: The full prompt with ``ori_prompt`` substituted for ``{input}``.

    Raises:
        ValueError: If ``mode`` is neither "Normal" nor "Master".
    """
    if mode == "Normal":
        prompt = normal_mode_prompt.format(input=ori_prompt)
    elif mode == "Master":
        prompt = master_mode_prompt.format(input=ori_prompt)
    else:
        # Bug fix: the original message read "Only supports Normal and Normal".
        # ValueError is a subclass of Exception, so callers catching Exception
        # still work.
        raise ValueError(f"Only supports Normal and Master modes, got: {mode}")
    return prompt
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Example usage: build both prompt variants for a Chinese caption.
ori_prompt = "一只小狗在草地上奔跑。"
normal_prompt = get_rewrite_prompt(ori_prompt, mode="Normal")
master_prompt = get_rewrite_prompt(ori_prompt, mode="Master")

# Then you can use the normal_prompt or master_prompt to access the hunyuan-large rewrite model to get the final prompt.
|
exp_code/1_benchmark/AccVideo/models/hunyuan/text_encoder/__init__.py
ADDED
|
@@ -0,0 +1,357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Optional, Tuple
|
| 3 |
+
from copy import deepcopy
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
from transformers import CLIPTextModel, CLIPTokenizer, AutoTokenizer, AutoModel
|
| 8 |
+
from transformers.utils import ModelOutput
|
| 9 |
+
|
| 10 |
+
from ..constants import TEXT_ENCODER_PATH, TOKENIZER_PATH
|
| 11 |
+
from ..constants import PRECISION_TO_TYPE
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def use_default(value, default):
    """Return ``value`` unless it is None, in which case return ``default``."""
    if value is None:
        return default
    return value
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def load_text_encoder(
    text_encoder_type,
    text_encoder_precision=None,
    text_encoder_path=None,
    logger=None,
    device=None,
):
    """Instantiate a frozen (no-grad) text encoder of the requested type.

    Args:
        text_encoder_type (str): "clipL" or "llm".
        text_encoder_precision (str, optional): key into ``PRECISION_TO_TYPE``;
            when given, the model weights are cast to that dtype.
        text_encoder_path (str, optional): checkpoint path; falls back to
            ``TEXT_ENCODER_PATH[text_encoder_type]``.
        logger: optional logger for progress messages.
        device: optional device to move the model onto.

    Returns:
        tuple: (encoder module with gradients disabled, resolved path).

    Raises:
        ValueError: for an unsupported ``text_encoder_type``.
    """
    resolved_path = (
        TEXT_ENCODER_PATH[text_encoder_type]
        if text_encoder_path is None
        else text_encoder_path
    )
    if logger is not None:
        logger.info(
            f"Loading text encoder model ({text_encoder_type}) from: {resolved_path}"
        )

    # from_pretrained will ensure that the model is in eval mode.
    if text_encoder_type == "clipL":
        encoder = CLIPTextModel.from_pretrained(resolved_path)
        # Expose the final LayerNorm at the top level for later reuse.
        encoder.final_layer_norm = encoder.text_model.final_layer_norm
    elif text_encoder_type == "llm":
        encoder = AutoModel.from_pretrained(
            resolved_path, low_cpu_mem_usage=True
        )
        encoder.final_layer_norm = encoder.norm
    else:
        raise ValueError(f"Unsupported text encoder type: {text_encoder_type}")

    # Optional precision cast happens before freezing / device placement.
    if text_encoder_precision is not None:
        encoder = encoder.to(dtype=PRECISION_TO_TYPE[text_encoder_precision])

    encoder.requires_grad_(False)

    if logger is not None:
        logger.info(f"Text encoder to dtype: {encoder.dtype}")

    if device is not None:
        encoder = encoder.to(device)

    return encoder, resolved_path
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def load_tokenizer(
    tokenizer_type, tokenizer_path=None, padding_side="right", logger=None
):
    """Load the tokenizer matching a text-encoder type.

    Args:
        tokenizer_type (str): "clipL" or "llm".
        tokenizer_path (str, optional): falls back to
            ``TOKENIZER_PATH[tokenizer_type]``.
        padding_side (str): padding side for the LLM tokenizer.
        logger: optional logger for progress messages.

    Returns:
        tuple: (tokenizer, resolved path).

    Raises:
        ValueError: for an unsupported ``tokenizer_type``.
    """
    resolved_path = (
        TOKENIZER_PATH[tokenizer_type] if tokenizer_path is None else tokenizer_path
    )
    if logger is not None:
        logger.info(f"Loading tokenizer ({tokenizer_type}) from: {resolved_path}")

    if tokenizer_type == "clipL":
        tokenizer = CLIPTokenizer.from_pretrained(resolved_path, max_length=77)
    elif tokenizer_type == "llm":
        tokenizer = AutoTokenizer.from_pretrained(
            resolved_path, padding_side=padding_side
        )
    else:
        raise ValueError(f"Unsupported tokenizer type: {tokenizer_type}")

    return tokenizer, resolved_path
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@dataclass
class TextEncoderModelOutput(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
            1 for tokens that are not masked, 0 for masked (padding) tokens.
        hidden_states_list (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        text_outputs (`list`, *optional*, returned when `return_texts=True` is passed):
            List of decoded texts.
    """

    hidden_state: torch.FloatTensor = None
    attention_mask: Optional[torch.LongTensor] = None
    hidden_states_list: Optional[Tuple[torch.FloatTensor, ...]] = None
    text_outputs: Optional[list] = None
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class TextEncoder(nn.Module):
    """Bundles a tokenizer and a frozen text encoder (CLIP-L or a decoder-only
    LLM) and turns prompts into hidden-state embeddings.

    Supports optional prompt templates for image/video captions (with
    instruction-token cropping via ``crop_start``) and reading hidden states
    from an intermediate layer (``hidden_state_skip_layer``).
    """

    def __init__(
        self,
        text_encoder_type: str,
        max_length: int,
        text_encoder_precision: Optional[str] = None,
        text_encoder_path: Optional[str] = None,
        tokenizer_type: Optional[str] = None,
        tokenizer_path: Optional[str] = None,
        output_key: Optional[str] = None,
        use_attention_mask: bool = True,
        input_max_length: Optional[int] = None,
        prompt_template: Optional[dict] = None,
        prompt_template_video: Optional[dict] = None,
        hidden_state_skip_layer: Optional[int] = None,
        apply_final_norm: bool = False,
        reproduce: bool = False,
        logger=None,
        device=None,
    ):
        """
        Args:
            text_encoder_type: "clipL" or "llm" ("t5"/"glm" substrings only
                affect the default ``output_key`` selection below).
            max_length: tokenizer truncation/padding length.
            text_encoder_precision: optional precision key; weights are cast
                to the corresponding dtype.
            text_encoder_path / tokenizer_path: optional checkpoint overrides;
                tokenizer settings default to the encoder's.
            output_key: model-output field returned when no intermediate layer
                is requested; defaults per encoder family.
            use_attention_mask: pass the attention mask to the model; must be
                True when a video prompt template is used.
            input_max_length: alternative max length; defaults to ``max_length``.
            prompt_template / prompt_template_video: dicts with a "template"
                string containing a `{}` placeholder, and optionally
                "crop_start" marking where the user prompt begins.
            hidden_state_skip_layer: take hidden states this many layers
                before the last one (0 = last layer).
            apply_final_norm: apply the model's final layer norm to
                intermediate hidden states.
            reproduce: when True, disables sampling by default (deterministic).
        """
        super().__init__()
        self.text_encoder_type = text_encoder_type
        self.max_length = max_length
        self.precision = text_encoder_precision
        self.model_path = text_encoder_path
        # Tokenizer type/path default to the encoder's own values.
        self.tokenizer_type = (
            tokenizer_type if tokenizer_type is not None else text_encoder_type
        )
        self.tokenizer_path = (
            tokenizer_path if tokenizer_path is not None else text_encoder_path
        )
        self.use_attention_mask = use_attention_mask
        if prompt_template_video is not None:
            assert (
                use_attention_mask is True
            ), "Attention mask is True required when training videos."
        self.input_max_length = (
            input_max_length if input_max_length is not None else max_length
        )
        self.prompt_template = prompt_template
        self.prompt_template_video = prompt_template_video
        self.hidden_state_skip_layer = hidden_state_skip_layer
        self.apply_final_norm = apply_final_norm
        self.reproduce = reproduce
        self.logger = logger

        # Validate the (optional) image prompt template.
        self.use_template = self.prompt_template is not None
        if self.use_template:
            assert (
                isinstance(self.prompt_template, dict)
                and "template" in self.prompt_template
            ), f"`prompt_template` must be a dictionary with a key 'template', got {self.prompt_template}"
            assert "{}" in str(self.prompt_template["template"]), (
                "`prompt_template['template']` must contain a placeholder `{}` for the input text, "
                f"got {self.prompt_template['template']}"
            )

        # Validate the (optional) video prompt template.
        self.use_video_template = self.prompt_template_video is not None
        if self.use_video_template:
            if self.prompt_template_video is not None:
                assert (
                    isinstance(self.prompt_template_video, dict)
                    and "template" in self.prompt_template_video
                ), f"`prompt_template_video` must be a dictionary with a key 'template', got {self.prompt_template_video}"
            assert "{}" in str(self.prompt_template_video["template"]), (
                "`prompt_template_video['template']` must contain a placeholder `{}` for the input text, "
                f"got {self.prompt_template_video['template']}"
            )

        # Default output key per encoder family (overridable via `output_key`).
        if "t5" in text_encoder_type:
            self.output_key = output_key or "last_hidden_state"
        elif "clip" in text_encoder_type:
            self.output_key = output_key or "pooler_output"
        elif "llm" in text_encoder_type or "glm" in text_encoder_type:
            self.output_key = output_key or "last_hidden_state"
        else:
            raise ValueError(f"Unsupported text encoder type: {text_encoder_type}")

        self.model, self.model_path = load_text_encoder(
            text_encoder_type=self.text_encoder_type,
            text_encoder_precision=self.precision,
            text_encoder_path=self.model_path,
            logger=self.logger,
            device=device,
        )
        self.dtype = self.model.dtype
        self.device = self.model.device

        self.tokenizer, self.tokenizer_path = load_tokenizer(
            tokenizer_type=self.tokenizer_type,
            tokenizer_path=self.tokenizer_path,
            padding_side="right",
            logger=self.logger,
        )

    def __repr__(self):
        return f"{self.text_encoder_type} ({self.precision} - {self.model_path})"

    @staticmethod
    def apply_text_to_template(text, template, prevent_empty_text=True):
        """
        Apply text to template.

        Args:
            text (str): Input text.
            template (str or list): Template string or list of chat conversation.
            prevent_empty_text (bool): If True, we will prevent the user text from being empty
                by adding a space. Defaults to True.
                NOTE(review): this flag is unused in the string-template path below.
        """
        if isinstance(template, str):
            # Will send string to tokenizer. Used for llm
            return template.format(text)
        else:
            raise TypeError(f"Unsupported template type: {type(template)}")

    def text2tokens(self, text, data_type="image"):
        """
        Tokenize the input text.

        Args:
            text (str or list): Input text.
            data_type (str): "image" or "video"; selects which prompt template
                to apply when templates are configured.
        """
        tokenize_input_type = "str"
        if self.use_template:
            # Pick the template matching the data type.
            if data_type == "image":
                prompt_template = self.prompt_template["template"]
            elif data_type == "video":
                prompt_template = self.prompt_template_video["template"]
            else:
                raise ValueError(f"Unsupported data type: {data_type}")
            if isinstance(text, (list, tuple)):
                text = [
                    self.apply_text_to_template(one_text, prompt_template)
                    for one_text in text
                ]
                if isinstance(text[0], list):
                    # Chat-style conversations -> apply_chat_template path.
                    tokenize_input_type = "list"
            elif isinstance(text, str):
                text = self.apply_text_to_template(text, prompt_template)
                if isinstance(text, list):
                    tokenize_input_type = "list"
            else:
                raise TypeError(f"Unsupported text type: {type(text)}")

        kwargs = dict(
            truncation=True,
            max_length=self.max_length,
            padding="max_length",
            return_tensors="pt",
        )
        if tokenize_input_type == "str":
            return self.tokenizer(
                text,
                return_length=False,
                return_overflowing_tokens=False,
                return_attention_mask=True,
                **kwargs,
            )
        elif tokenize_input_type == "list":
            return self.tokenizer.apply_chat_template(
                text,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                **kwargs,
            )
        else:
            raise ValueError(f"Unsupported tokenize_input_type: {tokenize_input_type}")

    def encode(
        self,
        batch_encoding,
        use_attention_mask=None,
        output_hidden_states=False,
        do_sample=None,
        hidden_state_skip_layer=None,
        return_texts=False,
        data_type="image",
        device=None,
    ):
        """
        Run the text encoder on a tokenized batch and post-process the output.

        Args:
            batch_encoding (dict): Batch encoding from tokenizer.
            use_attention_mask (bool): Whether to use attention mask. If None, use self.use_attention_mask.
                Defaults to None.
            output_hidden_states (bool): Whether to output hidden states. If False, return the value of
                self.output_key. If True, return the entire output. If set self.hidden_state_skip_layer,
                output_hidden_states will be set True. Defaults to False.
            do_sample (bool): Whether to sample from the model. Used for Decoder-Only LLMs. Defaults to None.
                When self.reproduce is False, do_sample is set to True by default.
                NOTE(review): do_sample is resolved below but never passed to the
                model call — confirm whether that is intentional.
            hidden_state_skip_layer (int): Number of hidden states to hidden_state_skip_layer. 0 means the last layer.
                If None, self.output_key will be used. Defaults to None.
            return_texts (bool): Whether to return the decoded texts. Defaults to False.
        """
        device = self.model.device if device is None else device
        use_attention_mask = use_default(use_attention_mask, self.use_attention_mask)
        hidden_state_skip_layer = use_default(
            hidden_state_skip_layer, self.hidden_state_skip_layer
        )
        do_sample = use_default(do_sample, not self.reproduce)
        attention_mask = (
            batch_encoding["attention_mask"].to(device) if use_attention_mask else None
        )
        outputs = self.model(
            input_ids=batch_encoding["input_ids"].to(device),
            attention_mask=attention_mask,
            output_hidden_states=output_hidden_states
            or hidden_state_skip_layer is not None,
        )
        if hidden_state_skip_layer is not None:
            # Index from the end: skip layer k means hidden_states[-(k+1)].
            last_hidden_state = outputs.hidden_states[-(hidden_state_skip_layer + 1)]
            # Real last hidden state already has layer norm applied. So here we only apply it
            # for intermediate layers.
            if hidden_state_skip_layer > 0 and self.apply_final_norm:
                last_hidden_state = self.model.final_layer_norm(last_hidden_state)
        else:
            last_hidden_state = outputs[self.output_key]

        # Remove hidden states of instruction tokens, only keep prompt tokens.
        if self.use_template:
            if data_type == "image":
                crop_start = self.prompt_template.get("crop_start", -1)
            elif data_type == "video":
                crop_start = self.prompt_template_video.get("crop_start", -1)
            else:
                raise ValueError(f"Unsupported data type: {data_type}")
            if crop_start > 0:
                last_hidden_state = last_hidden_state[:, crop_start:]
                attention_mask = (
                    attention_mask[:, crop_start:] if use_attention_mask else None
                )

        if output_hidden_states:
            return TextEncoderModelOutput(
                last_hidden_state, attention_mask, outputs.hidden_states
            )
        return TextEncoderModelOutput(last_hidden_state, attention_mask)

    def forward(
        self,
        text,
        use_attention_mask=None,
        output_hidden_states=False,
        do_sample=False,
        hidden_state_skip_layer=None,
        return_texts=False,
    ):
        """Tokenize ``text`` and encode it; see ``encode`` for argument semantics."""
        batch_encoding = self.text2tokens(text)
        return self.encode(
            batch_encoding,
            use_attention_mask=use_attention_mask,
            output_hidden_states=output_hidden_states,
            do_sample=do_sample,
            hidden_state_skip_layer=hidden_state_skip_layer,
            return_texts=return_texts,
        )
|