import os
import torch
import gradio as gr
from tqdm import tqdm
from PIL import Image
from torchvision import transforms as tfms
from transformers import CLIPTextModel, CLIPTokenizer, logging
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel

# Pick the best available device: CUDA, then Apple Silicon (MPS), then CPU
torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
if torch_device == "mps":
    os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = "1"

# Load the autoencoder (VAE) used to decode latents into image space
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

# Load the CLIP tokenizer and text encoder used to condition on prompts
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

# Load the UNet that predicts the noise residual in latent space
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")

# The LMS scheduler, configured to match Stable Diffusion's training setup
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

# Map the UI style names to the placeholder token of each learned style
style_token_dict = {'Concept': '<concept-art>', 'Realistic': '<doose-realistic>', 'Line': '<line-art>',
                    'Ricky': '<RickyArt>', 'Plane Scape': '<tony-diterlizzi-planescape>'}

# Move the models to the chosen device
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

# Grab the token and position embedding layers from the CLIP text model
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)

# Load the learned textual-inversion embeddings (one .bin file per style)
concept_art_embed = torch.load('concept-art.bin')
doose_s_realistic_art_style_embed = torch.load('doose-s-realistic-art-style.bin')
line_art_embed = torch.load('line-art.bin')
rickyart_embed = torch.load('rickyart.bin')
tony_diterlizzi_s_planescape_art_embed = torch.load('tony-diterlizzi-s-planescape-art.bin')
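
# The .bin files above use the format published by the Hugging Face Stable
# Diffusion Concepts Library. If you need to fetch one, something like the
# sketch below should work (the repo id shown is an assumption, not taken
# from this script):
#
# from huggingface_hub import hf_hub_download
# path = hf_hub_download(repo_id="sd-concepts-library/line-art", filename="learned_embeds.bin")
# line_art_embed = torch.load(path)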

# Register the placeholder tokens with the tokenizer
tokenizer.add_tokens(['<concept-art>', '<doose-realistic>', '<line-art>', '<RickyArt>', '<tony-diterlizzi-planescape>'])

# Build an extended embedding table: CLIP's original 49408-token vocabulary
# plus the five style vectors appended at the end (49408 + 5 = 49413)
token_emb_layer_with_art = torch.nn.Embedding(49413, 768)
token_emb_layer_with_art.load_state_dict({'weight': torch.cat((token_emb_layer.state_dict()['weight'],
                                          concept_art_embed['<concept-art>'].unsqueeze(0).to(torch_device),
                                          doose_s_realistic_art_style_embed['<doose-realistic>'].unsqueeze(0).to(torch_device),
                                          line_art_embed['<line-art>'].unsqueeze(0).to(torch_device),
                                          rickyart_embed['<RickyArt>'].unsqueeze(0).to(torch_device),
                                          tony_diterlizzi_s_planescape_art_embed['<tony-diterlizzi-planescape>'].unsqueeze(0).to(torch_device)))})
token_emb_layer_with_art = token_emb_layer_with_art.to(torch_device)
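
# Optional sanity check (an addition, not part of the original script): since
# add_tokens appends ids in order, each placeholder token should resolve to
# one of the five rows appended after CLIP's original 49408-token vocabulary.
for tok in style_token_dict.values():
    assert tokenizer.convert_tokens_to_ids(tok) >= 49408, f"{tok} was not registered"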

def set_timesteps(scheduler, num_inference_steps):
    # Wrapper around scheduler.set_timesteps; casting to float32 works around
    # an MPS issue with float64 timesteps
    scheduler.set_timesteps(num_inference_steps)
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)

def pil_to_latent(input_im):
    # Encode a single PIL image (not a batch) into scaled latents
    with torch.no_grad():
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # map [0, 1] to [-1, 1]
    return 0.18215 * latent.latent_dist.sample()

def latents_to_pil(latents):
    # Decode a batch of latents into a list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(img) for img in images]
    return pil_images
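
# Example round trip through the VAE (illustrative; 'input.jpg' is assumed):
# img = Image.open('input.jpg').convert('RGB').resize((512, 512))
# recovered = latents_to_pil(pil_to_latent(img))[0]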

def build_causal_attention_mask(bsz, seq_len, dtype):
    # Causal mask: a large negative value above the diagonal so each token
    # can only attend to itself and earlier positions
    mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
    mask.fill_(torch.finfo(dtype).min)
    mask = mask.triu_(1)  # zero out the diagonal and below
    return mask.unsqueeze(1)  # add a head dimension: (bsz, 1, seq_len, seq_len)
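
# For instance, build_causal_attention_mask(1, 3, torch.float32)[0, 0] is a 3x3
# matrix with zeros on and below the diagonal and the float32 minimum above it.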

def get_output_embeds(input_embeddings):
    # Run precomputed input embeddings through the CLIP text transformer by hand
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    # Feed the embeddings through the transformer encoder with the causal mask
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,
        return_dict=None,
    )

    # The last hidden state is the first item in the output tuple
    output = encoder_outputs[0]

    # Apply the final layer norm, as CLIPTextModel's forward pass would
    output = text_encoder.text_model.final_layer_norm(output)

    # Return the per-token embeddings (the pooled output is not needed here)
    return output
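
# Note: for a prompt with no custom tokens, get_output_embeds(token_emb_layer(ids)
# + position_embeddings) should match text_encoder(ids)[0] up to numerical noise,
# since it retraces the same embedding, encoder, and layer-norm steps.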

def generate_with_embs(num_inference_steps, guidance_scale, seed, text_input, text_embeddings):
    height = 512   # default height for Stable Diffusion
    width = 512    # default width
    generator = torch.manual_seed(seed)   # seed the initial latent noise
    batch_size = 1

    # Build the unconditional (empty-prompt) embeddings for classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prepare the scheduler
    set_timesteps(scheduler, num_inference_steps)

    # Start from random latents, scaled by the scheduler's initial noise sigma
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Duplicate the latents so one UNet pass covers both guidance branches
        latent_model_input = torch.cat([latents] * 2)
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance: move the prediction toward the text branch
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Step to the previous, less noisy latents
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
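
# Example direct call (illustrative values, not from the original script):
# ti = tokenizer("A mountain lake", padding="max_length",
#                max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
# emb = text_encoder(ti.input_ids.to(torch_device))[0]
# img = generate_with_embs(30, 7.5, 42, ti, emb)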

def inference(text, style, inference_step, guidance_scale, seed):
    prompt = text + " in the style of " + style_token_dict[style]

    # Tokenize the prompt
    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)

    # Look up token embeddings in the extended table (includes the style tokens)
    token_embeddings = token_emb_layer_with_art(input_ids)

    # Add the position embeddings
    input_embeddings = token_embeddings + position_embeddings

    # Run the combined embeddings through the rest of the text encoder
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # Generate an image conditioned on the modified embeddings
    image = generate_with_embs(inference_step, guidance_scale, seed, text_input, modified_output_embeddings)

    return image

title = "Stable Diffusion with Textual Inversion"
description = "A simple Gradio interface for running Stable Diffusion and generating images in different art styles"
examples = [["A sweet potato farm", 'Concept', 20, 0.5, 1],
            ["Sky full of cotton candy", 'Realistic', 20, 1.5, 2],
            ["Coffin full of jello", 'Line', 20, 2.5, 3],
            ["Water skiing on a lake", 'Ricky', 20, 3.5, 4],
            ["Super slippery noodles", 'Plane Scape', 20, 4.5, 5],
            ["Beautiful sunset", 'Concept', 20, 5.5, 6],
            ["A glittering gem", 'Realistic', 20, 6.5, 7],
            ["River rafting", 'Line', 20, 7.5, 8],
            ["A green tea", 'Ricky', 20, 8.5, 9],
            ["Three sphered rocks", 'Plane Scape', 20, 9.5, 10]]

demo = gr.Interface(inference,
                    inputs=[gr.Textbox(label="Prompt", type="text"),
                            gr.Dropdown(label="Style", choices=['Concept', 'Realistic', 'Line',
                                                                'Ricky', 'Plane Scape'], value="Concept"),
                            gr.Slider(10, 50, 20, step=10, label="Inference steps"),
                            gr.Slider(1, 10, 7.5, step=0.1, label="Guidance scale"),
                            gr.Slider(0, 10000, 1, step=1, label="Seed")],
                    outputs=[gr.Image(width=320, height=320, label="Output")],
                    title=title,
                    description=description,
                    examples=examples)

demo.launch()