import cv2
import numpy as np
import os
from glob import glob
import copy
from typing import Optional, Dict
from tqdm.auto import tqdm
from omegaconf import OmegaConf
import click
import json
import torch
import torch.utils.data
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
)
from diffusers.utils.import_utils import is_xformers_available
from transformers import AutoTokenizer, CLIPTextModel
from einops import rearrange
from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel
from video_diffusion.data.dataset import ImageSequenceDataset
from video_diffusion.common.util import get_time_string, get_function_args
from video_diffusion.common.logger import get_logger_config_path
from video_diffusion.common.image_util import log_train_samples
from video_diffusion.common.instantiate_from_config import instantiate_from_config
from video_diffusion.pipelines.p2p_validation_loop import P2pSampleLogger
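

# Collate a list of dataset examples into batched "prompt_ids" and "images" tensors.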
def collate_fn(examples):
    batch = {
        "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0),
        "images": torch.stack([example["images"] for example in examples]),
    }
    return batch
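

# Wraps a single video file as a one-item dataset: frames are decoded with OpenCV,
# resized, normalized to [-1, 1], and returned as a (C, F, H, W) tensor.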
class VideoDataset(torch.utils.data.Dataset):
    def __init__(self, video_path, prompt_ids, frame_stride=1, target_size=(512, 512)):
        self.cap = cv2.VideoCapture(video_path)
        self.frame_stride = frame_stride
        self.target_size = target_size
        self.prompt_ids = prompt_ids
        self.frames = []
        self._preprocess_video()

    def _preprocess_video(self):
        frame_count = 0
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if frame_count % self.frame_stride == 0:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = cv2.resize(frame, self.target_size)
                frame = torch.from_numpy(frame).float() / 127.5 - 1.0
                self.frames.append(frame.permute(2, 0, 1))
            frame_count += 1
        self.cap.release()
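
    # The whole clip is exposed as a single example: __len__ is 1 and __getitem__
    # returns every decoded frame stacked into a (C, F, H, W) tensor.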
    def __len__(self):
        return 1

    def __getitem__(self, idx):
        return {
            "prompt_ids": self.prompt_ids[0],
            "images": torch.stack(self.frames).permute(1, 0, 2, 3)
        }
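

# Inference entry point: load the pretrained Stable Diffusion checkpoint, build the
# spatio-temporal pipeline, DDIM-invert the source video, and log edited samples.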
def test(
    config: str,
    pretrained_model_path: str,
    dataset_config: Dict,
    logdir: str = None,
    editing_config: Optional[Dict] = None,
    test_pipeline_config: Optional[Dict] = None,
    gradient_accumulation_steps: int = 1,
    seed: Optional[int] = None,
    mixed_precision: Optional[str] = "fp16",
    batch_size: int = 1,
    model_config: dict = {},
    verbose: bool = True,
    **kwargs
):
    args = get_function_args()
    time_string = get_time_string()
    if logdir is None:
        logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '')
    logdir += f"_{time_string}"
    accelerator = Accelerator(
        gradient_accumulation_steps=gradient_accumulation_steps,
        mixed_precision=mixed_precision,
    )
    if accelerator.is_main_process:
        os.makedirs(logdir, exist_ok=True)
        OmegaConf.save(args, os.path.join(logdir, "config.yml"))
    logger = get_logger_config_path(logdir)
    if seed is not None:
        set_seed(seed)
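
    # Load the pretrained tokenizer, text encoder, and VAE, and inflate the 2D UNet
    # into a pseudo-3D UNet for temporal modelling.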
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_path,
        subfolder="tokenizer",
        use_fast=False,
    )
    text_encoder = CLIPTextModel.from_pretrained(
        pretrained_model_path,
        subfolder="text_encoder",
    )
    vae = AutoencoderKL.from_pretrained(
        pretrained_model_path,
        subfolder="vae",
    )
    unet = UNetPseudo3DConditionModel.from_2d_model(
        os.path.join(pretrained_model_path, "unet"), model_config=model_config
    )
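
    # Assemble the spatio-temporal editing pipeline around the loaded components and a DDIM scheduler.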
    if test_pipeline_config is None:
        test_pipeline_config = {}
    if 'target' not in test_pipeline_config:
        test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionPipeline'
    pipeline = instantiate_from_config(
        test_pipeline_config,
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        scheduler=DDIMScheduler.from_pretrained(
            pretrained_model_path,
            subfolder="scheduler",
        ),
        disk_store=kwargs.get('disk_store', False)
    )
    pipeline.scheduler.set_timesteps(editing_config['num_inference_steps'])
    pipeline.set_progress_bar_config(disable=True)
    pipeline.print_pipeline(logger)
    if is_xformers_available():
        try:
            pipeline.enable_xformers_memory_efficient_attention()
        except Exception as e:
            logger.warning(
                "Could not enable memory efficient attention. Make sure xformers is installed"
                f" correctly and a GPU is available: {e}"
            )
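
    # Inference only: keep every module frozen.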
    vae.requires_grad_(False)
    unet.requires_grad_(False)
    text_encoder.requires_grad_(False)
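
    # Tokenize the source prompt and wrap the source video in a one-sample dataset and dataloader.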
    prompt_ids = tokenizer(
        dataset_config["prompt"],
        truncation=True,
        padding="max_length",
        max_length=tokenizer.model_max_length,
        return_tensors="pt",
    ).input_ids
    video_dataset = VideoDataset(
        video_path=dataset_config["video_path"],
        prompt_ids=prompt_ids,
        frame_stride=dataset_config.get("frame_stride", 1),
        target_size=tuple(dataset_config.get("target_size", [512, 512]))
    )
    train_dataloader = torch.utils.data.DataLoader(
        video_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn,
    )
    train_sample_save_path = os.path.join(logdir, "train_samples.gif")
    log_train_samples(save_path=train_sample_save_path, train_dataloader=train_dataloader)
    unet, train_dataloader = accelerator.prepare(
        unet, train_dataloader
    )
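
    # Cast the frozen VAE and text encoder to the mixed-precision dtype selected by the accelerator.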
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
    vae.to(accelerator.device, dtype=weight_dtype)
    text_encoder.to(accelerator.device, dtype=weight_dtype)
    if accelerator.is_main_process:
        accelerator.init_trackers("video")
    logger.info("***** wait to fix the logger path *****")
    validation_sample_logger = None
    if editing_config is not None and accelerator.is_main_process:
        validation_sample_logger = P2pSampleLogger(**editing_config, logdir=logdir, source_prompt=dataset_config['prompt'])
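
    # Infinite generator over the dataloader; only a single batch (the whole clip) is drawn below.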
    def make_data_yielder(dataloader):
        while True:
            for batch in dataloader:
                yield batch

    accelerator.wait_for_everyone()
    train_data_yielder = make_data_yielder(train_dataloader)
    batch = next(train_data_yielder)
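
    # Optionally DDIM-invert the source video to obtain the initial latents (and, if requested,
    # cached attention maps) that the editing pass starts from.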
    if editing_config.get('use_invertion_latents', False):
        assert batch["images"].shape[0] == 1, "Only overfitting on a single video is supported"
        vae.eval()
        text_encoder.eval()
        unet.eval()
        text_embeddings = pipeline._encode_prompt(
            dataset_config["prompt"],
            device=accelerator.device,
            num_images_per_prompt=1,
            do_classifier_free_guidance=True,
            negative_prompt=None
        )
        use_inversion_attention = editing_config.get('use_inversion_attention', False)
        batch['latents_all_step'] = pipeline.prepare_latents_ddim_inverted(
            rearrange(batch["images"].to(dtype=weight_dtype), "b c f h w -> (b f) c h w"),
            batch_size=1,
            num_images_per_prompt=1,
            text_embeddings=text_embeddings,
            prompt=dataset_config["prompt"],
            store_attention=use_inversion_attention,
            LOW_RESOURCE=True,
            save_path=logdir if verbose else None
        )
        batch['ddim_init_latents'] = batch['latents_all_step'][-1]
    else:
        batch['ddim_init_latents'] = None
    vae.eval()
    text_encoder.eval()
    unet.eval()
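
    # Flatten the video batch to individual frames and run the editing pass on the main process.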
images = batch["images"].to(dtype=weight_dtype)
images = rearrange(images, "b c f h w -> (b f) c h w")
if accelerator.is_main_process:
if validation_sample_logger is not None:
unet.eval()
validation_sample_logger.log_sample_images(
image=images,
pipeline=pipeline,
device=accelerator.device,
step=0,
latents=batch['ddim_init_latents'],
save_dir=logdir if verbose else None
)
accelerator.end_training()
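

# CLI driver: read a JSONL manifest of source videos and prompts, then run `test` once per video
# with the base config overridden by the per-video fields.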
@click.command()
@click.option("--config", type=str, default="config/sample.yml")
@click.option("--jsonl_path", type=str, required=True, help="Path to the JSONL file containing video information")
def run(config, jsonl_path):
    base_config = OmegaConf.load(config)
    root_logdir = base_config.get('logdir', 'results')
    with open(jsonl_path, 'r') as f:
        video_infos = [json.loads(line) for line in f]
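
    # Each manifest line is expected to provide 'video', 'prompt', and 'edit_prompt'
    # (the keys read below), e.g. {"video": "clip.mp4", "prompt": "...", "edit_prompt": [...]}.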
    for video_info in video_infos:
        video_path = os.path.join("/home/wangjuntong/video_editing_dataset/all_sourse/", video_info['video'])
        prompt = video_info['prompt']
        editing_prompts = video_info['edit_prompt']
        Omegadict = copy.deepcopy(base_config)
        Omegadict['dataset_config']['video_path'] = video_path
        Omegadict['dataset_config']['prompt'] = prompt
        Omegadict['editing_config']['editing_prompts'] = [editing_prompts] if isinstance(editing_prompts, str) else editing_prompts
        video_name = os.path.basename(video_path).split('.')[0]
        logdir = os.path.join(root_logdir, video_name)
        Omegadict['logdir'] = logdir
        test(config=config, **Omegadict)
if __name__ == "__main__":
    run()