"""Slicedit: edit-friendly DDPM inversion over a video latent volume.

Per-frame (x-y) denoising with extended attention is combined with denoising
of temporal (spatiotemporal) slices of the latent volume.
"""
import os
from math import ceil

import numpy as np
import torch
from einops import rearrange
from PIL import Image
from tqdm import tqdm

from Slicedit.slicedit_attention_utils import (
    register_conv_injection,
    register_denoiser_xy,
    register_extended_attention,
    register_n_keyframes,
    register_time,
)
from Slicedit.video_utils import create_video_from_frames, image_grid, save_video


def sample_xts_from_x0_and_eps(model, x0, num_inference_steps=50, eps=None):
    """Samples a noising trajectory from P(x_1:T | x_0).

    If `eps` is given it supplies the per-step noise; otherwise fresh Gaussian
    noise is drawn independently for every timestep. Returns a tensor of shape
    (num_inference_steps + 1, C, H, W) whose last entry is x_0 itself.
    """
    alpha_bar = model.scheduler.alphas_cumprod
    sqrt_one_minus_alpha_bar = (1 - alpha_bar) ** 0.5
    variance_noise_shape = (
        num_inference_steps,
        model.unet.config.in_channels,
        model.unet.config.sample_size,
        model.unet.config.sample_size,
    )
    timesteps = model.scheduler.timesteps.to(model.device)
    t_to_idx = {int(t): idx for idx, t in enumerate(timesteps)}

    xts = torch.zeros(variance_noise_shape).to(x0.device)
    for t in reversed(timesteps):
        idx = t_to_idx[int(t)]
        noise = torch.randn_like(x0) if eps is None else eps[idx]
        xts[idx] = x0 * (alpha_bar[t] ** 0.5) + noise * sqrt_one_minus_alpha_bar[t]
    # Append the clean latent so that xts[-1] == x0.
    xts = torch.cat([xts, x0], dim=0)

    return xts

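# Shape sanity check (a hedged sketch; assumes `model` is an SD pipeline with
# scheduler timesteps already set and `x0` is a (1, 4, 64, 64) latent):
#
#     eps = torch.randn(50, 4, 64, 64, device=x0.device)
#     xts = sample_xts_from_x0_and_eps(model, x0, num_inference_steps=50, eps=eps)
#     assert xts.shape == (51, 4, 64, 64)   # T noisy latents plus the clean x0
#     assert torch.equal(xts[-1], x0[0])    # the appended last entry is x0
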
def encode_text(model, prompts):
    """Encodes `prompts` with the pipeline's CLIP text encoder."""
    text_input = model.tokenizer(
        prompts,
        padding="max_length",
        max_length=model.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    with torch.no_grad():
        text_encoding = model.text_encoder(text_input.input_ids.to(model.device))[0]
    return text_encoding

def get_variance(model, timestep):
    """DDIM posterior variance sigma_t^2 at the given timestep."""
    prev_timestep = timestep - model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps
    alpha_prod_t = model.scheduler.alphas_cumprod[timestep]
    alpha_prod_t_prev = (
        model.scheduler.alphas_cumprod[prev_timestep]
        if prev_timestep >= 0
        else model.scheduler.final_alpha_cumprod
    )
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
    return variance

def calc_mu(model, model_output, timestep, sample, eta=0):
    """Computes the DDIM mean mu_t(x_t) and the posterior variance.

    `sample` is x_t and `model_output` is the predicted noise; `eta`
    interpolates between deterministic DDIM (eta=0) and DDPM (eta=1).
    """
    prev_timestep = timestep - model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps

    alpha_prod_t = model.scheduler.alphas_cumprod[timestep]
    alpha_prod_t_prev = (
        model.scheduler.alphas_cumprod[prev_timestep]
        if prev_timestep >= 0
        else model.scheduler.final_alpha_cumprod
    )
    beta_prod_t = 1 - alpha_prod_t

    # Predicted x_0 (Eq. 12 of the DDIM paper).
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    variance = get_variance(model, timestep)

    # Direction pointing back to x_t, with eta-scaled variance removed.
    pred_sample_direction = (1 - alpha_prod_t_prev - eta * variance) ** 0.5 * model_output

    mu = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    return mu, variance

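# Together, get_variance and calc_mu implement the stochastic DDIM update
# (sketch; z is standard Gaussian noise):
#
#     mu, variance = calc_mu(model, noise_pred, t, x_t, eta=eta)
#     x_prev = mu + eta * variance ** 0.5 * z
#
# The inversion loop below runs this relation in reverse: given consecutive
# latents x_t and x_{t-1} of the source trajectory it solves for z, then
# reuses that z when generating the edited (target) trajectory.
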
def slicedit_ddpm_inversion_loop(model, x0_video,
                                 etas=None,
                                 prog_bar=False,
                                 src_prompt="",
                                 tar_prompt="",
                                 prompt_time="",
                                 cfg_scale=3.5,
                                 alpha=0.5,
                                 num_inference_steps=50,
                                 eps=None,
                                 n_frames=64,
                                 skip=0,
                                 cfg_scale_tar=1,
                                 save_path="",
                                 orig_frame_size=(512, 512),
                                 qk_injection_t=-1,
                                 conv_injection_t=-1,
                                 negative_time_prompt="",
                                 cfg_scale_time=1,
                                 frame_batch_size=6,
                                 use_negative_tar_prompt=False,
                                 x_and_y=False):
    """Edit-friendly DDPM inversion and regeneration of a video.

    The video latents are inverted under `src_prompt` and regenerated under
    `tar_prompt`, mixing per-frame (x-y) noise predictions with temporal-slice
    predictions weighted by sqrt(alpha) / sqrt(1 - alpha). Edited frames and
    videos are written to `save_path`.
    """
    # Trajectories for the source (index 0) and target (index 1) videos:
    # (num_inference_steps + 1, 2, n_frames, C, H, W).
    xts = torch.zeros((num_inference_steps + 1, 2, n_frames, 4, 64, 64), device=model.device)
    volume_t = torch.zeros(2, n_frames, 4, 64, 64).to(model.device)
    noise_pred_xy = torch.zeros(2, n_frames, 4, 64, 64).to(model.device)
    # Encoding an empty prompt yields the unconditional embedding, so all
    # prompts are encoded unconditionally (the original guards left these
    # names undefined for empty prompts, which crashed later in the loop).
    text_embeddings = encode_text(model, src_prompt)
    text_embeddings_time = encode_text(model, prompt_time)
    tar_emb = encode_text(model, tar_prompt)

    negative_tar_prompt = "ugly, blurry, low res" if use_negative_tar_prompt else ""
    neg_target_embedding = encode_text(model, negative_tar_prompt)

    negative_time_embedding = encode_text(model, negative_time_prompt)
    uncond_embedding = encode_text(model, "")
    timesteps = model.scheduler.timesteps.to(model.device)

    # `etas[idx]` is used below, so None would crash; default to 1 (fully
    # stochastic, the usual choice for edit-friendly DDPM inversion).
    if etas is None:
        etas = 1.0
    if isinstance(etas, (int, float)):
        etas = [etas] * model.scheduler.num_inference_steps
    # Sample a full noising trajectory for every frame; the source and target
    # trajectories start out identical.
    for i in range(n_frames):
        xts[:, 0, i] = sample_xts_from_x0_and_eps(model, x0_video[i], num_inference_steps=num_inference_steps, eps=eps)
        xts[:, 1, i] = xts[:, 0, i]
    # Injection thresholds are given as a percentage of the denoising steps
    # actually run; -1 means "inject at every step".
    if qk_injection_t == -1:
        qk_injection_t = num_inference_steps - skip
    else:
        qk_injection_t = (num_inference_steps - skip) * qk_injection_t // 100
    if conv_injection_t == -1:
        conv_injection_t = num_inference_steps - skip
    else:
        conv_injection_t = (num_inference_steps - skip) * conv_injection_t // 100
    qk_injection_timesteps = timesteps[skip:skip + qk_injection_t] if qk_injection_t >= 0 else []
    conv_injection_timesteps = timesteps[skip:skip + conv_injection_t] if conv_injection_t >= 0 else []

    register_extended_attention(model, qk_injection_timesteps)
    register_conv_injection(model, conv_injection_timesteps)
    n_keyframes = 3  # extended attention currently supports exactly 3 keyframes
    register_n_keyframes(model, n_keyframes)

    t_to_idx = {int(t): idx for idx, t in enumerate(timesteps[-(num_inference_steps - skip):])}
    op = tqdm(timesteps[-(num_inference_steps - skip):]) if prog_bar else timesteps[-(num_inference_steps - skip):]

    # The target trajectory starts from the source latents at the skip level.
    volume_t[1] = xts[skip, 0]
    for k, t in enumerate(op):
        idx = t_to_idx[int(t)]

        # Per-frame (x-y) noise prediction for source and target.
        noise_pred_xy = pred_noise_xy_func(xts, t, idx, uncond_embedding, neg_target_embedding, text_embeddings, model, cfg_scale, frame_batch_size, tar_emb, cfg_scale_tar, skip)
        volume_t[0] = xts[skip + idx, 0]

        if alpha > 0:
            # Temporal-slice prediction, mixed with the per-frame prediction
            # using variance-preserving sqrt weights.
            st_batch_size = 8
            noise_pred_t = temporal_volume_denoise(model, n_frames, volume_t, t, negative_time_embedding, text_embeddings_time, st_batch_size, cfg_scale_time, x_and_y=x_and_y)

            noise_pred_src = np.sqrt(alpha) * noise_pred_t[0] + np.sqrt(1 - alpha) * noise_pred_xy[0]
            noise_pred_tar = np.sqrt(alpha) * noise_pred_t[1] + np.sqrt(1 - alpha) * noise_pred_xy[1]
        else:
            noise_pred_src = noise_pred_xy[0]
            noise_pred_tar = noise_pred_xy[1]
        # Recover the edit-friendly noise z_t from the source trajectory.
        xtm1_src = xts[skip + idx + 1, 0]
        xt_src = xts[skip + idx, 0]

        mu_xt_src, variance = calc_mu(model, noise_pred_src, t, xt_src, eta=etas[idx])
        z_src = (xtm1_src - mu_xt_src) / (etas[idx] * variance ** 0.5)

        # No noise is injected at the final step. (The original test,
        # k == num_inference_steps - 1, could never fire when skip > 0.)
        if k == num_inference_steps - skip - 1:
            z_src = torch.zeros_like(z_src)

        # Reuse z_t to advance the target trajectory.
        mu_xt_tar, variance = calc_mu(model, noise_pred_tar, t, volume_t[1], eta=etas[idx])
        sigma_z = etas[idx] * variance ** 0.5 * z_src
        xt = mu_xt_tar + sigma_z
        volume_t[1] = xt
        xts[skip + idx + 1, 1] = xt

        # Re-synthesize the source step with the same z_t so both
        # trajectories stay aligned.
        xtm1 = mu_xt_src + (etas[idx] * variance ** 0.5) * z_src
        xts[skip + idx + 1, 0] = xtm1
    # Decode the edited latents frame by frame and save frames plus videos.
    decoded_frames = []
    os.makedirs(os.path.join(save_path, 'frames'), exist_ok=True)
    for n in range(n_frames):
        w0 = volume_t[1, n].expand(1, -1, -1, -1)

        with torch.autocast("cuda"), torch.inference_mode():
            # 0.18215 is the Stable Diffusion VAE latent scaling factor.
            x0_dec = model.vae.decode(1 / 0.18215 * w0).sample
        decoded_frames.append(x0_dec)
        img = image_grid(x0_dec)

        img = img.resize((orig_frame_size[1], orig_frame_size[0]), resample=Image.LANCZOS)
        image_name_png = f'frames/cfg_d_{cfg_scale_tar}_skip_{skip}_{n}.png'
        save_full_path = os.path.join(save_path, image_name_png)
        img.save(save_full_path)

    create_video_from_frames(os.path.join(save_path, 'frames'))

    decoded_frames = torch.stack(decoded_frames)
    # orig_frame_size is assumed to carry a trailing channel entry (H, W, C);
    # drop it for save_video.
    save_video(raw_frames=decoded_frames, save_path=os.path.join(save_path, 'slicedit_video_compressed.mp4'), fps=25, orig_size=orig_frame_size[:-1])

def pred_noise_xy_func(xts,
                       t,
                       idx,
                       uncond_embedding,
                       neg_target_embedding,
                       text_embeddings,
                       model,
                       cfg_scale,
                       frame_batch_size,
                       tar_emb=None,
                       cfg_scale_tar=1,
                       skip=0,
                       n_keyframes=3):
    """Per-frame (x-y plane) noise prediction with classifier-free guidance.

    Source and target batches are denoised in one UNet call; each batch is
    concatenated with `n_keyframes` reference frames for extended attention.
    """
    noise_pred_xy = torch.zeros(xts.shape[1], xts.shape[2], 4, 64, 64).to(model.device)
    n_frames = xts.shape[2]

    for n in range(ceil(n_frames / frame_batch_size)):
        batch_upper_idx = min((n + 1) * frame_batch_size, n_frames)
        cur_batch_size = batch_upper_idx - n * frame_batch_size
        if n_keyframes == 3:
            # One global keyframe at the middle of the video...
            idx1 = n_frames // 2
            if cur_batch_size == frame_batch_size:
                # ...plus two local keyframes from the current batch
                # (assumes frame_batch_size >= 5).
                ref_frame_indices = [idx1, n * frame_batch_size + 1, n * frame_batch_size + 4]
            else:
                # Short final batch: duplicate its first frame instead.
                ref_frame_indices = [idx1, n * frame_batch_size, n * frame_batch_size]
        else:
            raise NotImplementedError
        xt_src = xts[skip + idx, 0, n * frame_batch_size:batch_upper_idx]
        xt_tar = xts[skip + idx, 1, n * frame_batch_size:batch_upper_idx]

        # Embedding order must match the latent order below:
        # src-uncond, src-cond, tar-negative, tar-cond.
        text_embed_input = torch.cat([uncond_embedding.repeat(cur_batch_size + n_keyframes, 1, 1),
                                      text_embeddings.repeat(cur_batch_size + n_keyframes, 1, 1),
                                      neg_target_embedding.repeat(cur_batch_size + n_keyframes, 1, 1),
                                      tar_emb.repeat(cur_batch_size + n_keyframes, 1, 1)])

        with torch.no_grad():
            ref_frames_src = torch.cat([xts[skip + idx, 0, ref_frame_indices[i]].unsqueeze(0) for i in range(n_keyframes)], dim=0)
            ref_frames_tar = torch.cat([xts[skip + idx, 1, ref_frame_indices[i]].unsqueeze(0) for i in range(n_keyframes)], dim=0)

            register_time(model, t.item())
            register_denoiser_xy(model, is_xy=True)

            noise_pred = model.unet(torch.cat(([xt_src] + [ref_frames_src]) * 2 + ([xt_tar] + [ref_frames_tar]) * 2), timestep=t, encoder_hidden_states=text_embed_input)

            register_denoiser_xy(model, is_xy=False)

        noise_pred_src_uncond, noise_pred_src_cond, noise_pred_tar_uncond, noise_pred_tar_cond = noise_pred.sample.chunk(4)
        noise_pred_src = noise_pred_src_uncond + cfg_scale * (noise_pred_src_cond - noise_pred_src_uncond)
        noise_pred_tar = noise_pred_tar_uncond + cfg_scale_tar * (noise_pred_tar_cond - noise_pred_tar_uncond)

        # Keep only the batch frames; discard predictions for the keyframes.
        noise_pred_xy[0, n * frame_batch_size:batch_upper_idx] = noise_pred_src[:cur_batch_size]
        noise_pred_xy[1, n * frame_batch_size:batch_upper_idx] = noise_pred_tar[:cur_batch_size]

    return noise_pred_xy

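# Batch layout of the single UNet call in pred_noise_xy_func (sketch), for
# cur_batch_size = B and n_keyframes = K:
#
#     [ B src frames | K src keyframes ]  x2  (uncond, cond)
#     [ B tar frames | K tar keyframes ]  x2  (negative, cond)
#
# .chunk(4) then splits the prediction back into the four guidance branches,
# and the first B entries of each branch are the actual batch frames.
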
def temporal_volume_denoise(model, n_frames, volume_t, t, negative_time_embedding, text_embeddings_time, st_batch_size, cfg_scale_time, x_and_y=False):
    """Denoises temporal (time x space) slices of the latent volume.

    Each (time, x) plane is fed to the UNet as if it were an image. Videos
    longer than 64 frames are split into overlapping 64-frame windows whose
    predictions are averaged with variance-preserving weights.
    """
    noise_pred_t = torch.zeros(2, n_frames, 4, 64, 64).to(model.device)

    # Number of 64-frame windows needed to cover the video.
    n_slices = ceil((n_frames - 1) / 63)

    if n_slices > 1:
        # Spread the surplus frames (n_slices * 64 - n_frames) as overlap
        # between consecutive windows; the last window absorbs the remainder.
        # E.g. n_frames = 100: n_slices = 2, overlap_list = [28],
        # sl_idxs = [0, 36], i.e. windows [0:64] and [36:100].
        overlap = (n_slices * 64 - n_frames) // (n_slices - 1)
        overlap_last = overlap + (n_slices * 64 - n_frames) % (n_slices - 1)
        overlap_list = [overlap] * (n_slices - 2) + [overlap_last]

        cumsum_overlap = np.cumsum(overlap_list)
        sl_idxs = [0] + [(i + 1) * 64 - cumsum_overlap[i] for i in range(n_slices - 1)]
    else:
        overlap = 0
        overlap_last = 0
        overlap_list = [0]
        sl_idxs = [0]
    for idx, sl_i in enumerate(sl_idxs):
        for i in range(64 // st_batch_size):
            # Slice along y: each (time, x) plane becomes one "image",
            # batched over st_batch_size values of y.
            xt = rearrange(volume_t[0, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size], 'n c x y -> y c n x')
            noise_pred = temporal_denoise(model, xt, t, negative_time_embedding, text_embeddings_time, st_batch_size, cfg_scale_time)
            noise_pred_t[0, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size] += rearrange(noise_pred, 'y c n x -> n c x y')

            xt = rearrange(volume_t[1, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size], 'n c x y -> y c n x')
            noise_pred = temporal_denoise(model, xt, t, negative_time_embedding, text_embeddings_time, st_batch_size, cfg_scale_time)
            noise_pred_t[1, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size] += rearrange(noise_pred, 'y c n x -> n c x y')

        if sl_i > 0:
            # Overlapping frames received two independent predictions; scale
            # their sum by sqrt(1/2) to preserve unit noise variance.
            noise_pred_t[:, sl_i:sl_i + overlap_list[idx - 1]] = noise_pred_t[:, sl_i:sl_i + overlap_list[idx - 1]] * np.sqrt(1 / 2)
    if x_and_y:
        # Repeat along the other spatial axis: swapping x and y makes the
        # slicing below run along x. (The original additionally applied
        # .permute(0, 1, 3, 2, 4), which scrambled the layout that the
        # indexing below expects; the axis swap alone is sufficient.)
        volume_t_p = rearrange(volume_t, 's n c x y -> s n c y x')
        noise_pred_t_x = torch.zeros_like(volume_t_p)

        for idx, sl_i in enumerate(sl_idxs):
            for i in range(64 // st_batch_size):
                xt = rearrange(volume_t_p[0, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size], 'n c x y -> y c n x')
                noise_pred = temporal_denoise(model, xt, t, negative_time_embedding, text_embeddings_time, st_batch_size, cfg_scale_time)
                noise_pred_t_x[0, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size] += rearrange(noise_pred, 'y c n x -> n c x y')

                xt = rearrange(volume_t_p[1, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size], 'n c x y -> y c n x')
                noise_pred = temporal_denoise(model, xt, t, negative_time_embedding, text_embeddings_time, st_batch_size, cfg_scale_time)
                noise_pred_t_x[1, sl_i:sl_i + 64, :, :, i * st_batch_size:(i + 1) * st_batch_size] += rearrange(noise_pred, 'y c n x -> n c x y')

            if sl_i > 0:
                noise_pred_t_x[:, sl_i:sl_i + overlap_list[idx - 1]] = noise_pred_t_x[:, sl_i:sl_i + overlap_list[idx - 1]] * np.sqrt(1 / 2)

        noise_pred_t_x = rearrange(noise_pred_t_x, 's n c y x -> s n c x y')

        # Variance-preserving average of the y-slice and x-slice predictions.
        noise_pred_t = (noise_pred_t + noise_pred_t_x) * np.sqrt(1 / 2)

    return noise_pred_t

def temporal_denoise(model, xt, t, neg_embedding, text_embeddings_time, st_batch_size, cfg_scale):
    """Runs the UNet on a batch of temporal slices with classifier-free guidance."""
    if cfg_scale == 1:
        # Guidance scale 1: the unconditional term cancels out, so skip it.
        with torch.no_grad():
            cond_out = model.unet(xt, timestep=t, encoder_hidden_states=text_embeddings_time.repeat(st_batch_size, 1, 1))
        return cond_out.sample

    with torch.no_grad():
        uncond_out = model.unet(xt, timestep=t, encoder_hidden_states=neg_embedding.repeat(st_batch_size, 1, 1))
        cond_out = model.unet(xt, timestep=t, encoder_hidden_states=text_embeddings_time.repeat(st_batch_size, 1, 1))

    noise_pred = uncond_out.sample + cfg_scale * (cond_out.sample - uncond_out.sample)

    return noise_pred

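# Example driver (a minimal sketch, not part of the original code). Assumes a
# Stable Diffusion v1-x pipeline with a DDIM scheduler, plus a hypothetical
# helper `encode_video_frames` that VAE-encodes each frame into a
# (1, 4, 64, 64) latent; adapt names and paths to your setup.
#
#     from diffusers import DDIMScheduler, StableDiffusionPipeline
#
#     pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#     pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
#     pipe.scheduler.set_timesteps(50)
#
#     x0_video = encode_video_frames(pipe, "input.mp4", n_frames=64)  # hypothetical helper
#     slicedit_ddpm_inversion_loop(pipe, x0_video,
#                                  etas=1.0,
#                                  prog_bar=True,
#                                  src_prompt="a man surfing in the sea",
#                                  tar_prompt="a robot surfing in the sea",
#                                  n_frames=64,
#                                  save_path="results")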