import gradio as gr
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging

torch.manual_seed(1)
logging.set_verbosity_error()
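# This Space loads each Stable Diffusion component separately (VAE, CLIP tokenizer
# and text encoder, UNet, scheduler) rather than using a packaged pipeline, so that
# the text-embedding stage can be modified by hand for the style injection and
# loss-guided generation implemented below.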
torch_device = "cpu"

# Load the autoencoder model which will be used to decode the latents into image space.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

# Load the tokenizer and text encoder to tokenize and encode the text.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

# The UNet model for generating the latents.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")

# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

# Move the models to the chosen device (CPU here, so no GPU is required).
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)
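# CLIP's input embeddings are just token embeddings plus learned position
# embeddings. Grabbing these layers separately lets us swap out individual token
# embeddings before the transformer runs, which is how the style injection in
# embed_style works.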
def pil_to_latent(input_im):
    # Single image -> single latent in a batch (so size 1, 4, 64, 64)
    with torch.no_grad():
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # Note scaling to (-1, 1)
    return 0.18215 * latent.latent_dist.sample()
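# 0.18215 is the latent scaling factor from Stable Diffusion v1 training; scaling
# by it here (and unscaling in latents_to_pil) keeps the latents at roughly unit
# variance for the UNet.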
def latents_to_pil(latents):
    # Batch of latents -> list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
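# The two helpers above are inverses up to VAE reconstruction error, e.g.:
#   img = Image.open("input.png").convert("RGB").resize((512, 512))  # hypothetical file
#   roundtrip = latents_to_pil(pil_to_latent(img))[0]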
def get_output_embeds(input_embeddings):
    # CLIP's text model uses a causal mask. Recent transformers releases removed the
    # private _build_causal_attention_mask helper, so build the additive mask directly:
    # shape (bsz, 1, seq_len, seq_len), -inf above the diagonal, 0 elsewhere.
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = torch.full(
        (bsz, 1, seq_len, seq_len), torch.finfo(input_embeddings.dtype).min, dtype=input_embeddings.dtype
    ).triu_(1)
    # Getting the output embeddings involves calling the encoder with output_hidden_states=True
    # so that it doesn't just return the pooled final predictions:
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,  # We aren't using a padding mask, so this can be None
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,  # We want the output embeddings, not the final pooled output
        return_dict=None,
    )
    # We're interested in the output hidden state only
    output = encoder_outputs[0]
    # There is a final layer norm we need to pass these through
    output = text_encoder.text_model.final_layer_norm(output)
    # And now they're ready!
    return output
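# Sketch of the intended round trip through get_output_embeds (this is what
# embed_style does below, minus the embedding swap):
#   text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
#   token_embeddings = token_emb_layer(text_input.input_ids.to(torch_device))
#   output_embeddings = get_output_embeds(token_embeddings + position_embeddings)  # (1, 77, 768)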
def generate_with_embs(text_embeddings, seed, max_length):
    height = 512  # default height of Stable Diffusion
    width = 512  # default width of Stable Diffusion
    num_inference_steps = 30  # Number of denoising steps
    guidance_scale = 7.5  # Scale for classifier-free guidance
    generator = torch.manual_seed(seed)  # Seed generator to create the initial latent noise
    batch_size = 1

    # Unconditional (empty-prompt) embeddings for classifier-free guidance
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep Scheduler
    set_timesteps(scheduler, num_inference_steps)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Sampling loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so one forward pass covers both guidance branches
        latent_model_input = torch.cat([latents] * 2)
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Compute the previous noisy sample x_t -> x_t-1
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
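# The guidance line above implements classifier-free guidance:
#   noise_pred = uncond + guidance_scale * (text - uncond)
# guidance_scale = 1 recovers the plain conditional prediction; values above 1
# push the denoising direction further toward the prompt.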
# Prep Scheduler
def set_timesteps(scheduler, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)  # minor fix to ensure MPS compatibility, fixed in diffusers PR 3925
def eos_pos(prompt):
    # Position of the first <eos> token: <bos> sits at index 0, then one token per word.
    return len(prompt.split()) + 1
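# eos_pos assumes every whitespace-separated word maps to exactly one CLIP token,
# which breaks for rarer words. A more robust variant (a sketch, not wired in)
# would ask the tokenizer directly:
#   def eos_pos_robust(prompt):
#       return len(tokenizer(prompt).input_ids) - 1  # index of the <eos> token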
def embed_style(prompt, style_embed, style_seed):
    # Tokenize
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)
    # Get token embeddings
    token_embeddings = token_emb_layer(input_ids)
    # The new embedding - our learned style "word"
    replacement_token_embedding = style_embed.to(torch_device)
    # Insert it into the token embeddings at the first <eos> position
    token_embeddings[0, eos_pos(prompt)] = replacement_token_embedding
    # Combine with position embeddings
    input_embeddings = token_embeddings + position_embeddings
    # Feed through to get the final output embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)
    # And generate an image with this:
    max_length = text_input.input_ids.shape[-1]
    return generate_with_embs(modified_output_embeddings, style_seed, max_length)
def custom_loss(image):
    # Colorfulness metric: standard deviation across the RGB channels at each pixel,
    # averaged over the image (0 for grayscale, larger for saturated colors).
    std_dev = torch.std(image, dim=1)
    loss = torch.mean(std_dev)
    return loss
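# The guidance loop below works with any differentiable image -> scalar function.
# As an illustrative alternative (defined here as a sketch, not used by the app),
# a loss that pushes the mean blue channel toward a target value:
def blue_loss(image, target=0.9):
    # image is (batch, 3, height, width) in [0, 1]; channel 2 is blue
    return torch.abs(image[:, 2] - target).mean()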
def generate_image_on_loss(prompt, seed):
    height = 64  # reduced from the usual 512 to keep CPU generation tractable
    width = 64
    num_inference_steps = 50  # Number of denoising steps
    guidance_scale = 8  # Scale for classifier-free guidance
    generator = torch.manual_seed(seed)  # Seed generator to create the initial latent noise
    batch_size = 1
    loss_scale = 200

    # Prep text
    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]

    # And the uncond. input as before:
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep Scheduler
    set_timesteps(scheduler, num_inference_steps + 1)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Sampling loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so one forward pass covers both guidance branches
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        ### ADDITIONAL GUIDANCE ###
        if i % 5 == 0 and i > 0:
            # Require grad on the latents
            latents = latents.detach().requires_grad_()
            # Get the predicted x0; rewind the scheduler's internal counter first so
            # this extra .step() call doesn't advance the schedule:
            scheduler._step_index -= 1
            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
            # Decode to image space
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5  # range (0, 1)
            # Calculate loss
            loss = custom_loss(denoised_images) * loss_scale
            # Print the running loss
            print(i, "loss:", loss.item())
            # Get gradient
            cond_grad = torch.autograd.grad(loss, latents)[0]
            # Nudge the latents down the gradient, scaled by sigma**2
            latents = latents.detach() - cond_grad * sigma**2

        # Now step with scheduler
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
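# generate_image_on_loss steers sampling with a gradient: every fifth step it
# predicts the clean image x0, decodes it with the VAE, scores it with custom_loss,
# and nudges the latents down the loss gradient (scaled by sigma**2) before the
# scheduler takes its normal step.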
def generate_image_from_prompt(prompt, style):
    # Each pretrained style ships as a .bin file holding one learned token embedding,
    # paired with a seed that shows the style off well.
    style_list = ['dreamy_painting.bin', 'egorey.bin', 'fairy_tale_painting.bin', 'matrix.bin', 'pjablonski_style.bin']
    style_seeds = [16, 64, 32, 128, 8]
    style_file = style + '.bin'
    style_seed = style_seeds[style_list.index(style_file)]
    style_dict = torch.load(style_file)
    style_embed = list(style_dict.values())[0]
    generated_image = embed_style(prompt, style_embed, style_seed)
    loss_generated_img = generate_image_on_loss(prompt, style_seed)
    return [generated_image, loss_generated_img]
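# The .bin files are expected to sit alongside this script. The loading code above
# assumes the textual-inversion learned_embeds.bin layout: a one-entry dict mapping
# the placeholder token string to its learned embedding tensor.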
demo = gr.Interface(
    fn=generate_image_from_prompt,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt"),
        gr.Dropdown(
            ["dreamy_painting", "egorey", "fairy_tale_painting", "matrix", "pjablonski_style"], value="dreamy_painting", label="Pretrained Styles"
        ),
    ],
    outputs=[
        gr.Gallery(label="Generated images", show_label=False, elem_id="gallery", columns=2, rows=1, object_fit="contain", height=512),
    ],
    examples=[
        ["a cat climbing a tree", "dreamy_painting"],
    ],
)
demo.launch(debug=True)