|
|
import os
import sys

import numpy as np
import torch
from diffusers import (CogVideoXDDIMScheduler, DDIMScheduler,
                       DPMSolverMultistepScheduler,
                       EulerAncestralDiscreteScheduler, EulerDiscreteScheduler,
                       PNDMScheduler)
from PIL import Image

# Make the repository importable whether this script is run from the project
# root or from a nested examples directory.
current_file_path = os.path.abspath(__file__)
project_roots = [
    os.path.dirname(current_file_path),
    os.path.dirname(os.path.dirname(current_file_path)),
    os.path.dirname(os.path.dirname(os.path.dirname(current_file_path))),
]
for project_root in project_roots:
    if project_root not in sys.path:
        sys.path.insert(0, project_root)

from videox_fun.dist import set_multi_gpus_devices, shard_model
from videox_fun.models import (AutoencoderKLCogVideoX,
                               CogVideoXTransformer3DModel, T5EncoderModel,
                               T5Tokenizer)
from videox_fun.pipeline import (CogVideoXFunInpaintPipeline,
                                 CogVideoXFunPipeline)
from videox_fun.utils.fp8_optimization import (convert_model_weight_to_float8,
                                               convert_weight_dtype_wrapper,
                                               replace_parameters_by_name)
from videox_fun.utils.lora_utils import merge_lora, unmerge_lora
from videox_fun.utils.utils import get_image_to_video_latent, save_videos_grid

# GPU memory mode. One of:
#   "model_cpu_offload"             - offload whole models to CPU when idle.
#   "model_cpu_offload_and_qfloat8" - CPU offload plus float8 transformer weights.
#   "model_full_load_and_qfloat8"   - keep on GPU with float8 transformer weights.
#   "sequential_cpu_offload"        - offload layer by layer (lowest VRAM, slowest).
# Any other value loads the full pipeline onto the GPU.
GPU_memory_mode = "model_cpu_offload_and_qfloat8"

# Sequence-parallel degrees for multi-GPU inference; both 1 means single GPU.
ulysses_degree = 1
ring_degree = 1
# Whether to shard the DiT / text encoder with FSDP in multi-GPU runs.
fsdp_dit = False
fsdp_text_encoder = True
# Whether to wrap the transformer blocks with torch.compile.
compile_dit = False

model_name = "models/Diffusion_Transformer/CogVideoX-Fun-V1.1-2b-InP"

# Sampler: "Euler", "Euler A", "DPM++", "PNDM", "DDIM_Cog" or "DDIM_Origin".
sampler_name = "DDIM_Origin"

# Optional checkpoints to load on top of the pretrained weights (None to skip).
transformer_path = None
vae_path = None
lora_path = None

# Output resolution [height, width], frame count, and saved fps.
sample_size = [384, 672]
video_length = 49
fps = 8

# For videos longer than one model window, set partial_video_length; windows
# are then stitched together with overlap_video_length overlapping frames.
partial_video_length = None
overlap_video_length = 4

weight_dtype = torch.bfloat16
# Start image (and optional end image) for image-to-video conditioning.
validation_image_start = "asset/1.png"
validation_image_end = None

prompt = "The dog is shaking head. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic."
negative_prompt = "The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. "
guidance_scale = 6.0
seed = 43
num_inference_steps = 50
lora_weight = 0.55
save_path = "samples/cogvideox-fun-videos_i2v"

# Select the device; with ulysses/ring degree > 1 this also sets up
# multi-GPU inference.
device = set_multi_gpus_devices(ulysses_degree, ring_degree)

transformer = CogVideoXTransformer3DModel.from_pretrained(
    model_name,
    subfolder="transformer",
    low_cpu_mem_usage=True,
    torch_dtype=weight_dtype,
).to(weight_dtype)

# Optionally load a fine-tuned transformer checkpoint over the pretrained weights.
if transformer_path is not None:
    print(f"From checkpoint: {transformer_path}")
    if transformer_path.endswith("safetensors"):
        from safetensors.torch import load_file
        state_dict = load_file(transformer_path)
    else:
        state_dict = torch.load(transformer_path, map_location="cpu")
    state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict

    m, u = transformer.load_state_dict(state_dict, strict=False)
    print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")

vae = AutoencoderKLCogVideoX.from_pretrained(
    model_name,
    subfolder="vae"
).to(weight_dtype)

# Optionally load a fine-tuned VAE checkpoint over the pretrained weights.
if vae_path is not None:
    print(f"From checkpoint: {vae_path}")
    if vae_path.endswith("safetensors"):
        from safetensors.torch import load_file
        state_dict = load_file(vae_path)
    else:
        state_dict = torch.load(vae_path, map_location="cpu")
    state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict

    m, u = vae.load_state_dict(state_dict, strict=False)
    print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")

tokenizer = T5Tokenizer.from_pretrained(
    model_name, subfolder="tokenizer"
)
text_encoder = T5EncoderModel.from_pretrained(
    model_name, subfolder="text_encoder", torch_dtype=weight_dtype
)

# Map the sampler name to its diffusers scheduler class.
Chosen_Scheduler = {
    "Euler": EulerDiscreteScheduler,
    "Euler A": EulerAncestralDiscreteScheduler,
    "DPM++": DPMSolverMultistepScheduler,
    "PNDM": PNDMScheduler,
    "DDIM_Cog": CogVideoXDDIMScheduler,
    "DDIM_Origin": DDIMScheduler,
}[sampler_name]
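# "DDIM_Cog" is the CogVideoX-specific DDIM variant; "DDIM_Origin" is the
# stock diffusers DDIMScheduler.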
|
|
scheduler = Chosen_Scheduler.from_pretrained(
    model_name,
    subfolder="scheduler"
)

if transformer.config.in_channels != vae.config.latent_channels:
    pipeline = CogVideoXFunInpaintPipeline(
        vae=vae,
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        transformer=transformer,
        scheduler=scheduler,
    )
else:
    pipeline = CogVideoXFunPipeline(
        vae=vae,
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        transformer=transformer,
        scheduler=scheduler,
    )
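# The channel comparison above routes InP (inpaint) checkpoints, whose
# transformer takes extra conditioning channels beyond the VAE latents, to
# the inpaint pipeline; plain checkpoints use the text-to-video pipeline.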
|
|
if ulysses_degree > 1 or ring_degree > 1:
    from functools import partial
    transformer.enable_multi_gpus_inference()
    if fsdp_dit:
        shard_fn = partial(shard_model, device_id=device, param_dtype=weight_dtype)
        pipeline.transformer = shard_fn(pipeline.transformer)
        print("Add FSDP DIT")
    if fsdp_text_encoder:
        shard_fn = partial(shard_model, device_id=device, param_dtype=weight_dtype)
        pipeline.text_encoder = shard_fn(pipeline.text_encoder)
        print("Add FSDP TEXT ENCODER")

if compile_dit:
    for i in range(len(pipeline.transformer.transformer_blocks)):
        pipeline.transformer.transformer_blocks[i] = torch.compile(pipeline.transformer.transformer_blocks[i])
    print("Add Compile")
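# torch.compile incurs one-time compilation overhead, so the first sampling
# call will be noticeably slower than later ones.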
|
|
|
|
|
if GPU_memory_mode == "sequential_cpu_offload":
    pipeline.enable_sequential_cpu_offload(device=device)
elif GPU_memory_mode == "model_cpu_offload_and_qfloat8":
    convert_model_weight_to_float8(transformer, exclude_module_name=[], device=device)
    convert_weight_dtype_wrapper(transformer, weight_dtype)
    pipeline.enable_model_cpu_offload(device=device)
elif GPU_memory_mode == "model_cpu_offload":
    pipeline.enable_model_cpu_offload(device=device)
elif GPU_memory_mode == "model_full_load_and_qfloat8":
    convert_model_weight_to_float8(transformer, exclude_module_name=[], device=device)
    convert_weight_dtype_wrapper(transformer, weight_dtype)
    pipeline.to(device=device)
else:
    pipeline.to(device=device)
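# NOTE: the qfloat8 modes store the transformer weights in float8 to save
# VRAM, with convert_weight_dtype_wrapper handling the dtype at compute time.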
|
|
|
|
|
# Fixed seed so runs are reproducible for the same settings.
generator = torch.Generator(device=device).manual_seed(seed)

if lora_path is not None:
    pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype)
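# The LoRA is merged into the weights for sampling and unmerged again after
# generation (see unmerge_lora below), restoring the base model.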
|
|
|
|
|
if partial_video_length is not None:
    # Long-video mode: generate the video in overlapping windows.
    # Round the window length to a whole number of latent frames, then pad so
    # the latent count is divisible by the transformer's temporal patch size.
    partial_video_length = int((partial_video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
    latent_frames = (partial_video_length - 1) // vae.config.temporal_compression_ratio + 1
    if partial_video_length != 1 and transformer.config.patch_size_t is not None and latent_frames % transformer.config.patch_size_t != 0:
        additional_frames = transformer.config.patch_size_t - latent_frames % transformer.config.patch_size_t
        partial_video_length += additional_frames * vae.config.temporal_compression_ratio

    # The first window is conditioned on the start image; later windows are
    # conditioned on the last frames of the previous window.
    validation_image = validation_image_start
    init_frames = 0
    last_frames = init_frames + partial_video_length

    while init_frames < video_length:
        if last_frames >= video_length:
            # Final window: shrink it to the frames that remain, re-applying
            # the latent-frame rounding and temporal-patch padding.
            _partial_video_length = video_length - init_frames
            _partial_video_length = int((_partial_video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1
            latent_frames = (_partial_video_length - 1) // vae.config.temporal_compression_ratio + 1
            if _partial_video_length != 1 and transformer.config.patch_size_t is not None and latent_frames % transformer.config.patch_size_t != 0:
                additional_frames = transformer.config.patch_size_t - latent_frames % transformer.config.patch_size_t
                _partial_video_length += additional_frames * vae.config.temporal_compression_ratio

            if _partial_video_length <= 0:
                break
        else:
            _partial_video_length = partial_video_length

        input_video, input_video_mask, clip_image = get_image_to_video_latent(validation_image, None, video_length=_partial_video_length, sample_size=sample_size)
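        # input_video carries the conditioning frames; input_video_mask marks
        # the frames that remain to be generated.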
|
|
|
|
|
        with torch.no_grad():
            sample = pipeline(
                prompt,
                num_frames=_partial_video_length,
                negative_prompt=negative_prompt,
                height=sample_size[0],
                width=sample_size[1],
                generator=generator,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                video=input_video,
                mask_video=input_video_mask,
            ).videos

        if init_frames != 0:
            # Linearly cross-fade the overlapping frames of consecutive windows.
            mix_ratio = torch.from_numpy(
                np.array([float(_index) / float(overlap_video_length) for _index in range(overlap_video_length)], np.float32)
            ).unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
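            # Shaped (1, 1, overlap, 1, 1) so it broadcasts over the
            # (B, C, T, H, W) sample tensors.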
|
|
|
|
|
            new_sample[:, :, -overlap_video_length:] = new_sample[:, :, -overlap_video_length:] * (1 - mix_ratio) + \
                sample[:, :, :overlap_video_length] * mix_ratio
            new_sample = torch.cat([new_sample, sample[:, :, overlap_video_length:]], dim=2)

            sample = new_sample
        else:
            new_sample = sample

        if last_frames >= video_length:
            break

        # Use the last overlap frames of this window as the start images for
        # the next window.
        validation_image = [
            Image.fromarray(
                (sample[0, :, _index].transpose(0, 1).transpose(1, 2) * 255).numpy().astype(np.uint8)
            ) for _index in range(-overlap_video_length, 0)
        ]

        init_frames = init_frames + _partial_video_length - overlap_video_length
        last_frames = init_frames + _partial_video_length

else:
    # Single-pass mode: round video_length to a whole number of latent frames
    # and pad so the latent count is divisible by the temporal patch size.
    video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
    latent_frames = (video_length - 1) // vae.config.temporal_compression_ratio + 1
    if video_length != 1 and transformer.config.patch_size_t is not None and latent_frames % transformer.config.patch_size_t != 0:
        additional_frames = transformer.config.patch_size_t - latent_frames % transformer.config.patch_size_t
        video_length += additional_frames * vae.config.temporal_compression_ratio
    input_video, input_video_mask, clip_image = get_image_to_video_latent(validation_image_start, validation_image_end, video_length=video_length, sample_size=sample_size)

    with torch.no_grad():
        sample = pipeline(
            prompt,
            num_frames=video_length,
            negative_prompt=negative_prompt,
            height=sample_size[0],
            width=sample_size[1],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            video=input_video,
            mask_video=input_video_mask,
        ).videos

if lora_path is not None:
    pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype)

def save_results():
    if not os.path.exists(save_path):
        os.makedirs(save_path, exist_ok=True)

    # Name outputs with a zero-padded running index inside save_path.
    index = len(os.listdir(save_path)) + 1
    prefix = str(index).zfill(8)
    if video_length == 1:
        # A single frame is saved as a PNG: (C, H, W) -> (H, W, C) uint8.
        video_path = os.path.join(save_path, prefix + ".png")

        image = sample[0, :, 0]
        image = image.transpose(0, 1).transpose(1, 2)
        image = (image * 255).numpy().astype(np.uint8)
        image = Image.fromarray(image)
        image.save(video_path)
    else:
        video_path = os.path.join(save_path, prefix + ".mp4")
        save_videos_grid(sample, video_path, fps=fps)
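# In multi-GPU runs only rank 0 writes to disk, avoiding duplicate files;
# single-GPU runs save directly.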
|
|
|
|
|
if ulysses_degree * ring_degree > 1:
    import torch.distributed as dist
    if dist.get_rank() == 0:
        save_results()
else:
    save_results()