from base64 import b64encode
import os

import numpy as np
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from huggingface_hub import notebook_login
from matplotlib import pyplot as plt
from pathlib import Path
from PIL import Image
from torch import autocast
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging

torch.manual_seed(1)

# Suppress some unnecessary warnings when loading the CLIPTextModel
logging.set_verbosity_error()

# Set device
torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

# Load the autoencoder that decodes latents into image space
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

# Load the tokenizer and text encoder used to encode the prompt
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

# The UNet that predicts noise in latent space
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")

# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

# Move the models to the chosen device
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

# Grab the token and position embedding layers of the CLIP text model
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding

# Position embeddings for the 77-token context window
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)


def get_output_embeds(input_embeddings):
    # CLIP's text model uses causal masking, so build that mask here.
    # Note: _build_causal_attention_mask is a private helper present in older
    # transformers releases; newer versions may expose this differently.
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    # Run the embeddings through the encoder, asking for the hidden states
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,  # no padding mask needed here
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,  # we want per-token embeddings, not just the pooled output
        return_dict=None,
    )

    # The last hidden state of the encoder
    output = encoder_outputs[0]

    # Pass through the final layer norm, as the text encoder normally would
    output = text_encoder.text_model.final_layer_norm(output)

    return output


def set_timesteps(scheduler, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)  # cast to float32 for MPS compatibility


def pil_to_latent(input_im):
    # Single PIL image -> single latent in a batch (shape 1, 4, 64, 64)
    with torch.no_grad():
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # scale to [-1, 1]
    return 0.18215 * latent.latent_dist.sample()


def latents_to_pil(latents):
    # Batch of latents -> list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images

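# Minimal sanity check for the VAE helpers (illustrative only; 'sample.jpg' is a
# hypothetical filename): round-trip an image through latent space and save it.
# im = Image.open('sample.jpg').resize((512, 512))
# latents_to_pil(pil_to_latent(im))[0].save('roundtrip.jpg')
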

def generate_with_embs(text_embeddings, text_input, seed):
    # Generation settings
    height = 512                         # default height of Stable Diffusion
    width = 512                          # default width of Stable Diffusion
    num_inference_steps = 10             # number of denoising steps
    guidance_scale = 7.5                 # classifier-free guidance scale
    generator = torch.manual_seed(seed)  # seed the initial latent noise
    batch_size = 1

    # Unconditional (empty prompt) embeddings for classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prepare the scheduler
    set_timesteps(scheduler, num_inference_steps)

    # Prepare the initial latents
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so conditional and unconditional passes share one forward call
        latent_model_input = torch.cat([latents] * 2)
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Step to the previous (less noisy) latent sample
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]


def generate_with_prompt_style(prompt, style, seed=42):
    # Append the placeholder token 's' that will be swapped for the learned style embedding
    prompt = prompt + ' in style of s'

    # Load the learned (textual inversion) embedding for this style
    embed = torch.load(style)

    # Tokenize the prompt
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    # Get the token embeddings
    token_embeddings = token_emb_layer(input_ids)

    # The embedding to substitute in (first value in the loaded dict)
    replacement_token_embedding = embed[list(embed.keys())[0]].to(torch_device)

    # Replace the embedding of the placeholder token (id 338, produced by the trailing 's')
    token_embeddings[0, torch.where(input_ids[0] == 338)] = replacement_token_embedding

    # Combine with position embeddings
    input_embeddings = token_embeddings + position_embeddings

    # Feed the modified embeddings through the rest of the text encoder
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # And generate an image with them
    return generate_with_embs(modified_output_embeddings, text_input, seed)

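# Illustrative usage sketch (the prompt is just an example; the style path is one of
# the learned-embedding files referenced in dict_styles further down):
# img = generate_with_prompt_style('A man on the moon', 'styles/learned_embeds_gartic-phone.bin', seed=42)
# img.save('styled.png')
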

def contrast_loss(images):
    # Negative variance of the pixel values: minimizing this loss pushes the
    # decoded image toward higher variance, i.e. higher contrast
    variance = torch.var(images)
    return -variance


def generate_with_prompt_style_guidance(prompt, style, seed=42):
    # Append the placeholder token 's' and load the learned style embedding
    prompt = prompt + ' in style of s'
    embed = torch.load(style)

    # Generation settings
    height = 512
    width = 512
    num_inference_steps = 10
    guidance_scale = 8
    generator = torch.manual_seed(seed)
    batch_size = 1
    contrast_loss_scale = 200  # strength of the extra contrast guidance

    # Tokenize the prompt
    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    # Get the token embeddings
    token_embeddings = token_emb_layer(input_ids)

    # The embedding to substitute in (first value in the loaded dict)
    replacement_token_embedding = embed[list(embed.keys())[0]].to(torch_device)

    # Replace the embedding of the placeholder token (id 338, produced by the trailing 's')
    token_embeddings[0, torch.where(input_ids[0] == 338)] = replacement_token_embedding

    # Combine with position embeddings and run through the rest of the text encoder
    input_embeddings = token_embeddings + position_embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # Unconditional (empty prompt) embeddings for classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]

    text_embeddings = torch.cat([uncond_embeddings, modified_output_embeddings])

    # Prepare the scheduler
    scheduler.set_timesteps(num_inference_steps)

    # Prepare the initial latents
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so conditional and unconditional passes share one forward call
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Every 5 steps, nudge the latents with the gradient of the contrast loss
        if i % 5 == 0:
            # Require gradients on the latents
            latents = latents.detach().requires_grad_()

            # Get the predicted x0 (the fully denoised estimate at this step)
            latents_x0 = latents - sigma * noise_pred

            # Decode to image space
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5

            # Calculate the loss
            loss = contrast_loss(denoised_images) * contrast_loss_scale

            # Gradient of the loss with respect to the latents
            cond_grad = torch.autograd.grad(loss, latents)[0]

            # Modify the latents based on this gradient
            latents = latents.detach() - cond_grad * sigma**2

        # Step to the previous (less noisy) latent sample
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]

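# Illustrative usage sketch (example prompt; the style path must point at one of the
# learned-embedding files listed in dict_styles below):
# img = generate_with_prompt_style_guidance('A man on the moon', 'styles/learned_embeds_hawaiian-shirt.bin')
# img.save('styled_guided.png')
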

import gradio as gr

# Map dropdown labels to the learned-embedding files
dict_styles = {'<gartic-phone>': 'styles/learned_embeds_gartic-phone.bin',
               '<hawaiian shirt>': 'styles/learned_embeds_hawaiian-shirt.bin',
               '<gp>': 'styles/learned_embeds_phone01.bin',
               '<style-spdmn>': 'styles/learned_embeds_style-spdmn.bin',
               '<yvmqznrm>': 'styles/learned_embedssd_yvmqznrm.bin'}


def inference(prompt, style):
    if prompt is not None and style is not None:
        style = dict_styles[style]
        result = generate_with_prompt_style_guidance(prompt, style)
        return np.array(result)
    else:
        return None


title = "Stable Diffusion and Textual Inversion"
description = "A simple Gradio interface to stylize Stable Diffusion outputs"
examples = [['A man sipping wine wearing a spacesuit on the moon', '<gartic-phone>']]

demo = gr.Interface(inference,
                    inputs=[gr.Textbox(label='Prompt'),
                            # Use the dict keys directly so the lookup in inference() cannot fail
                            gr.Dropdown(list(dict_styles.keys()), label='Style'),
                            ],
                    outputs=[
                        gr.Image(label="Stable Diffusion Output"),
                    ],
                    title=title,
                    description=description,
                    examples=examples,
                    )
demo.launch()