Delete modeling_latent_diffusion.py
modeling_latent_diffusion.py (DELETED, +0 -94)

```python
import tqdm
import torch

from diffusers import DiffusionPipeline

# add these relative imports here, so we can load from the Hub
from .modeling_vae import AutoencoderKL  # NOQA
from .configuration_ldmbert import LDMBertConfig  # NOQA
from .modeling_ldmbert import LDMBertModel  # NOQA


class LatentDiffusion(DiffusionPipeline):
    def __init__(self, vqvae, bert, tokenizer, unet, noise_scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, noise_scheduler=noise_scheduler)
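        # register_modules makes each component an attribute of the pipeline and
        # records its class in the pipeline config, so from_pretrained can
        # reassemble the full pipeline from a saved checkpoint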

    @torch.no_grad()
    def __call__(self, prompt, batch_size=1, generator=None, torch_device=None, eta=0.0, guidance_scale=1.0, num_inference_steps=50):
        # eta corresponds to η in the paper and should be in [0, 1]

        if torch_device is None:
            torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        self.unet.to(torch_device)
        self.vqvae.to(torch_device)
        self.bert.to(torch_device)

        # get unconditional embeddings for classifier-free guidance
        if guidance_scale != 1.0:
            # batch the empty prompts so they line up with the conditional batch
            uncond_input = self.tokenizer([""] * batch_size, padding="max_length", max_length=77, return_tensors="pt").to(torch_device)
            uncond_embeddings = self.bert(uncond_input.input_ids)[0]

        # get text embedding
        text_input = self.tokenizer(prompt, padding="max_length", max_length=77, return_tensors="pt").to(torch_device)
        text_embedding = self.bert(text_input.input_ids)[0]

        num_trained_timesteps = self.noise_scheduler.num_timesteps
        inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps)
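        # e.g. with 1000 trained timesteps and num_inference_steps=50 this
        # visits t = 0, 20, 40, ..., 980 (the loop below walks it in reverse)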

        image = self.noise_scheduler.sample_noise(
            (batch_size, self.unet.in_channels, self.unet.image_size, self.unet.image_size),
            device=torch_device,
            generator=generator,
        )

        # See formulas (12) and (16) of the DDIM paper: https://arxiv.org/pdf/2010.02502.pdf
        # Ideally, read the DDIM paper in detail to understand the steps below.

        # Notation (<variable name> -> <name in paper>)
        # - pred_noise_t -> e_theta(x_t, t)
        # - pred_original_image -> f_theta(x_t, t) or x_0
        # - std_dev_t -> sigma_t
        # - eta -> η
        # - pred_image_direction -> "direction pointing to x_t"
        # - pred_prev_image -> "x_t-1"
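        # For reference, the DDIM update that compute_prev_image_step is assumed
        # to implement, in the notation above:
        #   std_dev_t = eta * sqrt((1 - alpha_{t-1}) / (1 - alpha_t)) * sqrt(1 - alpha_t / alpha_{t-1})   (eq. 16)
        #   pred_original_image = (x_t - sqrt(1 - alpha_t) * pred_noise_t) / sqrt(alpha_t)
        #   pred_image_direction = sqrt(1 - alpha_{t-1} - std_dev_t ** 2) * pred_noise_t
        #   pred_prev_image = sqrt(alpha_{t-1}) * pred_original_image + pred_image_direction   (eq. 12, before the sigma_t * noise term added in step 3)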
        for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
            # guidance_scale of 1 means no guidance
            if guidance_scale == 1.0:
                image_in = image
                context = text_embedding
                timesteps = torch.tensor([inference_step_times[t]] * image.shape[0], device=torch_device)
            else:
                # for classifier-free guidance, we need two forward passes;
                # here we concatenate the unconditional and text embeddings into
                # a single batch to avoid doing two forward passes
                image_in = torch.cat([image] * 2)
                context = torch.cat([uncond_embeddings, text_embedding])
                # the timestep batch has to match image_in (2 * batch_size)
                timesteps = torch.tensor([inference_step_times[t]] * image_in.shape[0], device=torch_device)

            # 1. predict noise residual
            pred_noise_t = self.unet(image_in, timesteps, context=context)

            # perform guidance
            if guidance_scale != 1.0:
                pred_noise_t_uncond, pred_noise_t = pred_noise_t.chunk(2)
                pred_noise_t = pred_noise_t_uncond + guidance_scale * (pred_noise_t - pred_noise_t_uncond)
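                # the line above is classifier-free guidance:
                #   e_hat = e_uncond + guidance_scale * (e_cond - e_uncond)
                # guidance_scale > 1 amplifies the text-conditional direction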

            # 2. predict previous mean of image x_t-1
            pred_prev_image = self.noise_scheduler.compute_prev_image_step(pred_noise_t, image, t, num_inference_steps, eta)

            # 3. optionally sample variance
            variance = 0
            if eta > 0:
                noise = self.noise_scheduler.sample_noise(image.shape, device=image.device, generator=generator)
                variance = self.noise_scheduler.get_variance(t, num_inference_steps).sqrt() * eta * noise

            # 4. set current image to prev_image: x_t -> x_t-1
            image = pred_prev_image + variance

        # scale and decode the latents with the VAE
        # (0.18215 is the latent scaling factor used by latent diffusion)
        image = 1 / 0.18215 * image
        image = self.vqvae.decode(image)
        image = torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)

        return image
```
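The relative imports at the top are what let `DiffusionPipeline` resolve `AutoencoderKL`, `LDMBertConfig`, and `LDMBertModel` when the pipeline is pulled from the Hub. A minimal usage sketch, assuming a checkpoint that ships these modeling files next to its weights and that `from_pretrained` resolves the custom `LatentDiffusion` class from them (the repo id below is a placeholder, not part of this commit):

```python
import torch
from diffusers import DiffusionPipeline

# placeholder repo id; any checkpoint hosting these modeling files alongside
# its components (vqvae, bert, tokenizer, unet, noise_scheduler) should work
pipe = DiffusionPipeline.from_pretrained("user/latent-diffusion-text2im")

generator = torch.manual_seed(0)
images = pipe(
    ["a painting of a squirrel eating a burger"],
    guidance_scale=5.0,
    num_inference_steps=50,
    generator=generator,
)  # tensor of shape (batch, channels, height, width), values in [0, 1]
```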