AYYasaswini committed on
Commit dcaa118 · verified · 1 Parent(s): 6ca72ff

Update app.py

Files changed (1)
  1. app.py +140 -1016
app.py CHANGED
@@ -1,72 +1,19 @@
- # -*- coding: utf-8 -*-
- """Stable_Diffusion_Deep_Dive_Styles.ipynb
-
- Automatically generated by Colab.
-
- Original file is located at
- https://colab.research.google.com/drive/1d9sD2YTAZWkZ7XBLQ6MDnyfg2V-uXdlT
-
- # Stable Diffusion Deep Dive
-
- Stable Diffusion is a powerful text-to-image model. There are various websites and tools to make using it as easy as possible. It is also [integrated into the Huggingface diffusers library](https://huggingface.co/blog/stable_diffusion), where generating images can be as simple as:
- ```python
- from diffusers import StableDiffusionPipeline
- pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", variant="fp16", torch_dtype=torch.float16, use_auth_token=True).to("cuda")
- image = pipe("An astronaut scuba diving").images[0]
- ```
-
- In this notebook we're going to dig into the code behind these easy-to-use interfaces, to see what is going on under the hood. We'll begin by re-creating the functionality above as a scary chunk of code, and then one by one we'll inspect the different components and figure out what they do. By the end of this notebook that same sampling loop should feel like something you can tweak and modify as you like.
-
- ## Setup & Imports
-
- You'll need to log into huggingface and accept the terms of the licence for this model - see the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. And when you first run this notebook you need to uncomment the following two cells to install the requirements and log in to huggingface with an access token.
- """
-
- #!pip install -q --upgrade transformers==4.25.1 diffusers ftfy accelerate
-
- import base64
-
from base64 import b64encode
-
import numpy
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
- from huggingface_hub import notebook_login
-
- # For video display:
- #from IPython.display import HTML
- from matplotlib import pyplot as plt
- from pathlib import Path
from PIL import Image
from torch import autocast
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging
- import os
- import numpy as np

torch.manual_seed(1)
- #if not (Path.home()/'.cache/huggingface'/'token').exists(): notebook_login()
-
- # Suppress some unnecessary warnings when loading the CLIPTextModel
logging.set_verbosity_error()
torch_device = "cpu"
- # Set device
- #if "mps" == torch_device: os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = "1"
-
- """## Loading the models
-
- This code (and that in the next section) comes from the [Huggingface example notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb).
-
- This will download and set up the relevant models and components we'll be using. Let's just run this for now and move on to the next section to check that it all works before diving deeper.
-
- If you've loaded a pipeline, you can also access these components using `pipe.unet`, `pipe.vae` and so on.
-
- In this notebook we aren't doing any memory-saving tricks - if you find yourself running out of GPU RAM, look at the pipeline code for inspiration with things like attention slicing, switching to half precision (fp16), keeping the VAE on the CPU and other modifications.
- """
-
- # Load the autoencoder model which will be used to decode the latents into image space.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

# Load the tokenizer and text encoder to tokenize and encode the text.
@@ -79,94 +26,14 @@ unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", sub
# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

- # To the GPU we go!
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device);

- """## A diffusion loop
-
- If all you want is to make a picture with some text, you could ignore this notebook and use one of the existing tools (such as [DreamStudio](https://beta.dreamstudio.ai/)) or use the simplified pipeline from huggingface, as documented [here](https://huggingface.co/blog/stable_diffusion).
-
- What we want to do in this notebook is dig a little deeper into how this works, so we'll start by checking that the example code runs. Again, this is adapted from the [HF notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb) and looks very similar to what you'll find if you inspect [the `__call__()` method of the stable diffusion pipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L200).
- """
- ##########################################################################################
- # Some settings
- prompt = ["A watercolor painting of an otter"]
- height = 512 # default height of Stable Diffusion
- width = 512 # default width of Stable Diffusion
- num_inference_steps = 30 # Number of denoising steps
- guidance_scale = 7.5 # Scale for classifier-free guidance
- generator = torch.manual_seed(32) # Seed generator to create the initial latent noise
- batch_size = 1
-
- # Prep text
- text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
- max_length = text_input.input_ids.shape[-1]
- uncond_input = tokenizer(
-     [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
- )
- with torch.no_grad():
-     uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # Prep Scheduler
- def set_timesteps(scheduler, num_inference_steps):
-     scheduler.set_timesteps(num_inference_steps)
-     scheduler.timesteps = scheduler.timesteps.to(torch.float32) # minor fix to ensure MPS compatibility, fixed in diffusers PR 3925
-
- set_timesteps(scheduler, num_inference_steps)
-
- # Prep latents
- latents = torch.randn(
-     (batch_size, unet.in_channels, height // 8, width // 8),
-     generator=generator,
- )
- latents = latents.to(torch_device)
- latents = latents * scheduler.init_noise_sigma # Scaling (previous versions did latents = latents * self.scheduler.sigmas[0])
-
- # Loop
- with autocast("cuda"): # will fall back to CPU if no CUDA; no autocast for MPS
-     for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
-         # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
-         latent_model_input = torch.cat([latents] * 2)
-         sigma = scheduler.sigmas[i]
-         # Scale the latents (preconditioning):
-         # latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5) # Diffusers 0.3 and below
-         latent_model_input = scheduler.scale_model_input(latent_model_input, t)
-
-         # predict the noise residual
-         with torch.no_grad():
-             noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
-         # perform guidance
-         noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-         # compute the previous noisy sample x_t -> x_t-1
-         # latents = scheduler.step(noise_pred, i, latents)["prev_sample"] # Diffusers 0.3 and below
-         latents = scheduler.step(noise_pred, t, latents).prev_sample
-
- # scale and decode the image latents with vae
- latents = 1 / 0.18215 * latents
- with torch.no_grad():
-     image = vae.decode(latents).sample
-
- # Display
- image = (image / 2 + 0.5).clamp(0, 1)
- image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
- images = (image * 255).round().astype("uint8")
- pil_images = [Image.fromarray(image) for image in images]
- pil_images[0]
-
- """It's working, but that's quite a bit of code! Let's look at the components one by one.
-
- ## The Autoencoder (AE)
-
- The AE can 'encode' an image into some sort of latent representation, and decode this back into an image. I've wrapped the code for this into a couple of functions here so we can see what this looks like in action:
- """

def pil_to_latent(input_im):
    # Single image -> single latent in a batch (so size 1, 4, 64, 64)
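The hunk boundary here elides the bodies of these two helpers. For reference, a minimal sketch of what they look like in the deep-dive notebook this file is based on (assuming SD v1's usual 0.18215 latent scaling convention):

```python
def pil_to_latent(input_im):
    # Single image -> single latent in a batch (so size 1, 4, 64, 64)
    with torch.no_grad():
        # Map the PIL image to [-1, 1] and encode with the VAE
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)
    return 0.18215 * latent.latent_dist.sample()

def latents_to_pil(latents):
    # Batch of latents -> list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
```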
@@ -185,281 +52,6 @@ def latents_to_pil(latents):
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images

- """We'll use a pic from the web here, but you can load your own instead by uploading it and editing the filename in the next cell."""
- import requests
- # Download a demo image
- #curl --output macaw.jpg 'https://lafeber.com/pet-birds/wp-content/uploads/2018/06/Scarlet-Macaw-2.jpg'
- url = 'https://lafeber.com/pet-birds/wp-content/uploads/2018/06/Scarlet-Macaw-2.jpg'
-
- try:
-     response = requests.get(url)
-     response.raise_for_status() # Raise an exception for HTTP errors
-     with open('macaw.jpg', 'wb') as file:
-         file.write(response.content)
-     print("Image downloaded successfully")
- except requests.exceptions.RequestException as e:
-     print(f"Failed to download image: {e}")
-
- # Load the image with PIL
- input_image = Image.open('macaw.jpg').resize((512, 512))
- input_image
-
- """Encoding this into the latent space of the AE with the function defined above looks like this:"""
-
- # Encode to the latent space
- encoded = pil_to_latent(input_image)
- encoded.shape
-
- # Let's visualize the four channels of this latent representation:
- fig, axs = plt.subplots(1, 4, figsize=(16, 4))
- for c in range(4):
-     axs[c].imshow(encoded[0][c].cpu(), cmap='Greys')
-
- """This 4x64x64 tensor captures lots of information about the image, hopefully enough that when we feed it through the decoder we get back something very close to our input image:"""
-
- # Decode this latent representation back into an image
- decoded = latents_to_pil(encoded)[0]
- decoded
-
- """You'll see some small differences if you squint! Focus on the eye if you can't see anything obvious. This is pretty impressive - that 4x64x64 latent seems to hold a lot more information than a 64px image...
-
- This autoencoder has been trained to squish down an image to a smaller representation and then re-create the image back from this compressed version again.
-
- In this particular case the compression factor is 48: we start with a 3x512x512 (ch x ht x wd) image and it gets compressed to a 4x64x64 latent. Each 3x8x8 pixel volume in the input image gets compressed down to just 4 numbers (4x1x1). You can find AEs with a higher compression ratio (e.g. f16 like some popular VQGAN models) but at some point they begin to introduce artifacts that we don't want.
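As a quick sanity check on that factor of 48:

```python
# 3x512x512 pixel values in, 4x64x64 latent values out
in_vals = 3 * 512 * 512
out_vals = 4 * 64 * 64
print(in_vals / out_vals)  # 48.0
```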
-
- Why do we even use an autoencoder? We can do diffusion in pixel space - where the model gets all the image data as inputs and produces an output prediction of the same shape. But this means processing a LOT of data, and makes high-resolution generation very computationally expensive. Some solutions to this involve doing diffusion at low resolution (64px, for example) and then training a separate model to upscale repeatedly (as with DALL·E 2/Imagen). But latent diffusion instead does the diffusion process in this 'latent space', using the compressed representations from our AE rather than raw images. These representations are information rich, and can be small enough to handle manageably on consumer hardware. Once we've generated a new 'image' as a latent representation, the autoencoder can take those final latent outputs and turn them into actual pixels.
-
- # The Scheduler
- Now we need to talk about adding noise...
-
- During training, we add some noise to an image and then have the model try to predict the noise. If we always added a ton of noise, the model might not have much to work with. If we only add a tiny amount, the model won't be able to do much with the random starting points we use for sampling. So during training the amount is varied, according to some distribution.
-
- During sampling, we want to 'denoise' over a number of steps. How many steps and how much noise we should aim for at each step are going to affect the final result.
-
- The scheduler is in charge of handling all of these details. For example: `scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)` sets up a scheduler that matches the one used to train this model. When we want to sample over a smaller number of steps, we set this up with `scheduler.set_timesteps`:
- """
-
- # Setting the number of sampling steps:
- set_timesteps(scheduler, 15)
-
- """You can see how our new set of steps corresponds to those used in training:"""
-
- # See these in terms of the original 1000 steps used for training:
- print(scheduler.timesteps)
-
- """And how much noise is present at each:"""
-
- # Look at the equivalent noise levels:
- print(scheduler.sigmas)
-
- """During sampling, we'll start at a high noise level (in fact, our input will be pure noise) and gradually 'denoise' down to an image, according to this schedule."""
-
- # Plotting this noise schedule:
- plt.plot(scheduler.sigmas)
- plt.title('Noise Schedule')
- plt.xlabel('Sampling step')
- plt.ylabel('sigma')
- plt.show()
-
- # TODO maybe show timestep as well
-
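Picking up that TODO, one quick way to see the timesteps and sigmas side by side (a small sketch using the scheduler state set up above; `sigmas` has one extra trailing zero entry, which `zip` drops):

```python
for t, s in zip(scheduler.timesteps, scheduler.sigmas):
    print(f"timestep {t.item():7.1f} -> sigma {s.item():.3f}")
```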
- """This 'sigma' is the amount of noise added to the latent representation. Let's visualize what this looks like by adding a bit of noise to our encoded image and then decoding this noised version:"""
-
- noise = torch.randn_like(encoded) # Random noise
- sampling_step = 10 # Equivalent to step 10 out of 15 in the schedule above
- # encoded_and_noised = scheduler.add_noise(encoded, noise, timestep) # Diffusers 0.3 and below
- encoded_and_noised = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[sampling_step]]))
- latents_to_pil(encoded_and_noised.float())[0] # Display
-
- """What does this look like at different timesteps? Experiment and see for yourself!
-
- If you uncomment the cell below you'll see that in this case the `scheduler.add_noise` function literally just adds noise scaled by sigma: `noisy_samples = original_samples + noise * sigmas`
- """
-
- # ??scheduler.add_noise
-
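You can also reproduce `add_noise` by hand (a sketch reusing `encoded`, `noise` and `sampling_step` from the cells above; for this scheduler, the sigma at index `sampling_step` is the one matched to `timesteps[sampling_step]`):

```python
manually_noised = encoded + noise * scheduler.sigmas[sampling_step]
# Should match the scheduler's output up to floating-point error:
# torch.allclose(manually_noised.float(), encoded_and_noised.float())
```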
- """Other diffusion models may be trained with different noising and scheduling approaches, some of which keep the variance fairly constant across noise levels ('variance preserving') with different scaling and mixing tricks, instead of having noisy latents with higher and higher variance as more noise is added ('variance exploding').
-
- If we want to start from random noise instead of a noised image, we need to scale it by the largest sigma value used during training, ~14 in this case. And before these noisy latents are fed to the model they are scaled again in the so-called pre-conditioning step:
- `latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)` (now handled by `latent_model_input = scheduler.scale_model_input(latent_model_input, t)`).
-
- Again, this scaling/pre-conditioning differs between papers and implementations, so keep an eye out for this if you work with a different type of diffusion model.
-
- ## Loop starting from noised version of input (AKA image2image)
-
- Let's see what happens when we use our image as a starting point, adding some noise and then doing the final few denoising steps in the loop with a new prompt.
-
- We'll use a similar loop to the first demo, but we'll skip the first `start_step` steps.
-
- To noise our image we'll use code like that shown above, using the scheduler to noise it to a level equivalent to step 10 (`start_step`).
- """
-
- # Settings (same as before except for the new prompt)
- prompt = ["A colorful dancer, nat geo photo"]
- height = 512 # default height of Stable Diffusion
- width = 512 # default width of Stable Diffusion
- num_inference_steps = 50 # Number of denoising steps
- guidance_scale = 8 # Scale for classifier-free guidance
- generator = torch.manual_seed(32) # Seed generator to create the initial latent noise
- batch_size = 1
-
- # Prep text (same as before)
- text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
- max_length = text_input.input_ids.shape[-1]
- uncond_input = tokenizer(
-     [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
- )
- with torch.no_grad():
-     uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # Prep Scheduler (setting the number of inference steps)
- set_timesteps(scheduler, num_inference_steps)
-
- # Prep latents (noising appropriately for start_step)
- start_step = 10
- start_sigma = scheduler.sigmas[start_step]
- noise = torch.randn_like(encoded)
- latents = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[start_step]]))
- latents = latents.to(torch_device).float()
-
- # Loop
- for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
-     if i >= start_step: # << This is the only modification to the loop we do
-
-         # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
-         latent_model_input = torch.cat([latents] * 2)
-         sigma = scheduler.sigmas[i]
-         latent_model_input = scheduler.scale_model_input(latent_model_input, t)
-
-         # predict the noise residual
-         with torch.no_grad():
-             noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
-
-         # perform guidance
-         noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-         # compute the previous noisy sample x_t -> x_t-1
-         latents = scheduler.step(noise_pred, t, latents).prev_sample
-
- latents_to_pil(latents)[0]
-
- """You can see that some colours and structure from the image are kept, but we now have a new picture! The more noise you add and the more steps you do, the further away it gets from the input image.
-
- This is how the popular img2img pipeline works. Again, if this is your end goal there are tools to make this easy!
-
- But you can see that under the hood this is the same as the generation loop, just skipping the first few steps and starting from a noised image rather than pure noise.
-
- Explore changing how many steps are skipped and see how this affects the amount the image changes from the input.
-
- ## Exploring the text -> embedding pipeline
-
- We use a text encoder model to turn our text into a set of 'embeddings' which are fed to the diffusion model as conditioning. Let's follow a piece of text through this process and see how it works.
- """
-
- # Our text prompt
- prompt = 'A picture of a puppy'
-
- """We begin with tokenization:"""
-
- # Turn the text into a sequence of tokens:
- text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- text_input['input_ids'][0] # View the tokens
-
- # See the individual tokens
- for t in text_input['input_ids'][0][:8]: # We'll just look at the first 8 to save you from a wall of '<|endoftext|>'
-     print(t, tokenizer.decoder.get(int(t)))
-
- # Note: 6829 is the token for 'puppy' - we'll use this ID again below
-
- """We can jump straight to the final (output) embeddings like so:"""
-
- # Grab the output embeddings
- output_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
- print('Shape:', output_embeddings.shape)
- output_embeddings
-
- """We pass our tokens through the text_encoder and we magically get some numbers we can feed to the model.
-
- How are these generated? The tokens are transformed into a set of input embeddings, which are then fed through the transformer model to get the final output embeddings.
-
- To get these input embeddings, there are actually two steps - as revealed by inspecting `text_encoder.text_model.embeddings`:
- """
-
- text_encoder.text_model.embeddings
-
- """### Token embeddings
-
- The token is fed to the `token_embedding` to transform it into a vector. The function name `get_input_embeddings` here is misleading, since these token embeddings need to be combined with the position embeddings before they are actually used as inputs to the model! Anyway, let's look at just the token embedding part first.
-
- We can look at the embedding layer:
- """
- #########################################################################################
- # Access the embedding layer
- token_emb_layer = text_encoder.text_model.embeddings.token_embedding
- token_emb_layer # Vocab size 49408, emb_dim 768
-
- """And embed a token like so:"""
-
- # Embed a token - in this case the one for 'puppy'
- embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
- embedding.shape # 768-dim representation
-
- """This single token has been mapped to a 768-dimensional vector - the token embedding.
-
- We can do the same with all of the tokens in the prompt to get all the token embeddings:
- """
-
- token_embeddings = token_emb_layer(text_input.input_ids.to(torch_device))
- print(token_embeddings.shape) # batch size 1, 77 tokens, 768 values for each
- token_embeddings
-
- """### Positional Embeddings
-
- Positional embeddings tell the model where in a sequence a token is. Much like the token embedding, this is a set of (optionally learnable) parameters. But now instead of dealing with ~50k tokens we just need one for each position (77 total):
- """
-
- pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
- pos_emb_layer
-
- """We can get the positional embedding for each position:"""
-
- position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
- position_embeddings = pos_emb_layer(position_ids)
- print(position_embeddings.shape)
- position_embeddings
-
- """### Combining token and position embeddings
-
- Time to combine the two. How do we do this? Just add them! Other approaches are possible, but for this model this is how it is done.
-
- Combining them in this way gives us the final input embeddings, ready to feed through the transformer model:
- """
-
- # And combining them we get the final input embeddings
- input_embeddings = token_embeddings + position_embeddings
- print(input_embeddings.shape)
- input_embeddings
-
- """We can check that these are the same as the result we'd get from `text_encoder.text_model.embeddings`:"""
-
- # The following combines all the above steps (but doesn't let us fiddle with them!)
- text_encoder.text_model.embeddings(text_input.input_ids.to(torch_device))
-
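To make that check explicit (a small sketch; in the `transformers` CLIP implementation the embeddings module computes exactly this sum):

```python
manual_input_embeddings = token_embeddings + position_embeddings
builtin = text_encoder.text_model.embeddings(text_input.input_ids.to(torch_device))
print(torch.allclose(manual_input_embeddings, builtin))  # expect True
```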
- """### Feeding these through the transformer model
-
- ![transformer diagram](https://github.com/johnowhitaker/tglcourse/raw/main/images/text_encoder_noborder.png)
-
- We want to mess with these input embeddings (specifically the token embeddings) before we send them through the rest of the model, but first we should check that we know how to do that. I read the code of the text_encoder's `forward` method, and based on that, the code for the `forward` method of the text_model that the text_encoder wraps. To inspect it yourself, type `??text_encoder.text_model.forward` and you'll get the function info and source code - a useful debugging trick!
-
- Anyway, based on that we can copy in the bits we need to get the so-called 'last hidden state' and thus generate our final embeddings:
- """
-
def get_output_embeds(input_embeddings):
    # CLIP's text model uses causal mask, so we prepare it here:
    bsz, seq_len = input_embeddings.shape[:2]
@@ -485,55 +77,15 @@ def get_output_embeds(input_embeddings):
    # And now they're ready!
    return output

- out_embs_test = get_output_embeds(input_embeddings) # Feed through the model with our new function
- print(out_embs_test.shape) # Check the output shape
- out_embs_test # Inspect the output
-
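A quick numerical check that these really do match the encoder's own output (a sketch; `output_embeddings` is the result from the earlier cell, computed for the same prompt):

```python
print(torch.allclose(out_embs_test, output_embeddings, atol=1e-4))  # expect True
```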
- """Note that these match the `output_embeddings` we saw near the start - we've figured out how to split up that one step ("get the text embeddings") into multiple sub-steps ready for us to modify.
-
- Now that we have this process in place, we can replace the input embedding of a token with a new one of our choice - which in our final use-case will be something we learn. To demonstrate the concept though, let's replace the input embedding for 'puppy' in the prompt we've been playing with with the embedding for token 2368, get a new set of output embeddings based on this, and use these to generate an image to see what we get:
- """
-
- prompt = 'A picture of a puppy'
-
- # Tokenize
- text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- input_ids = text_input.input_ids.to(torch_device)
-
- # Get token embeddings
- token_embeddings = token_emb_layer(input_ids)
-
- # The new embedding. In this case just the input embedding of token 2368...
- replacement_token_embedding = text_encoder.get_input_embeddings()(torch.tensor(2368, device=torch_device))
-
- # Insert this into the token embeddings
- token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
-
- # Combine with pos embs
- input_embeddings = token_embeddings + position_embeddings
-
- # Feed through to get final output embs
- modified_output_embeddings = get_output_embeds(input_embeddings)
-
- print(modified_output_embeddings.shape)
- modified_output_embeddings
-
- """The first few are the same, the last aren't. Everything at and after the position of the token we're replacing will be affected.
-
- If all went well, we should see something other than a puppy when we use these to generate an image. And sure enough, we do!
- """
-
- # Generating an image with these modified embeddings
-
- def generate_with_embs(text_embeddings):
    height = 512 # default height of Stable Diffusion
    width = 512 # default width of Stable Diffusion
-     num_inference_steps = 30 # Number of denoising steps
    guidance_scale = 7.5 # Scale for classifier-free guidance
-     generator = torch.manual_seed(32) # Seed generator to create the initial latent noise
    batch_size = 1

-     max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
@@ -572,570 +124,146 @@ def generate_with_embs(text_embeddings):

    return latents_to_pil(latents)[0]

- generate_with_embs(modified_output_embeddings)
-
- """Surprise! Now you know what token 2368 means ;)
-
- **What can we do with this?** Why did we go to all of this trouble? Well, we'll see a more compelling use-case shortly, but the tl;dr is that once we can access and modify the token embeddings we can do tricks like replacing them with something else. In the example we just did, that was just another token embedding from the model's vocabulary, equivalent to just editing the prompt. But we can also mix tokens - for example, here's a half-puppy-half-skunk:
- """
-
- # In case you're wondering how to get the token for a word, or the embedding for a token:
- prompt = 'skunk'
- print('tokenizer(prompt):', tokenizer(prompt))
- print('token_emb_layer([token_id]) shape:', token_emb_layer(torch.tensor([8797], device=torch_device)).shape)
-
- prompt = 'A picture of a puppy'
-
- # Tokenize
- text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- input_ids = text_input.input_ids.to(torch_device)
-
- # Get token embeddings
- token_embeddings = token_emb_layer(input_ids)
-
- # The new embedding, which is now a mixture of the token embeddings for 'puppy' and 'skunk'
- puppy_token_embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
- skunk_token_embedding = token_emb_layer(torch.tensor(42194, device=torch_device))
- replacement_token_embedding = 0.5*puppy_token_embedding + 0.5*skunk_token_embedding
-
- # Insert this into the token embeddings
- token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
-
- # Combine with pos embs
- input_embeddings = token_embeddings + position_embeddings
-
- # Feed through to get final output embs
- modified_output_embeddings = get_output_embeds(input_embeddings)
-
- # Generate an image with these
- generate_with_embs(modified_output_embeddings)
-
- """### Textual Inversion
-
- OK, so we can slip in a modified token embedding, and use this to generate an image. We used the token embedding for 'cat' in the above example, but what if instead we could 'learn' a new token embedding for a specific concept? This is the idea behind 'Textual Inversion', in which a few example images are used to create a new token embedding:
-
- ![Overview image from the blog post](https://textual-inversion.github.io/static/images/training/training.JPG)
- _Diagram from the [textual inversion blog post](https://textual-inversion.github.io/static/images/training/training.JPG) - note it doesn't show the positional embeddings step for simplicity_
-
- We won't cover how this training works, but we can try loading one of these new 'concepts' from the [community-created SD concepts library](https://huggingface.co/sd-concepts-library) and see how it fits in with our example above. I'll use https://huggingface.co/sd-concepts-library/birb-style since it was the first one I made :) Download the learned_embeds.bin file from there and upload the file to wherever this notebook is before running this next cell:
- """
-
- learned_embeds = [
-     'learned_embeds_gartic-phone.bin',
-     'learned_embeds_libraryhawaiian-shirt.bin',
-     'learned_embeds_phone01.bin',
-     'learned_embeds_style-spdmn.bin',
-     'learned_embedssd_yvmqznrm.bin'
- ]
- embed_values = ['<gartic-phone>', '<hawaiian shirt>', '<gp>', '<style-spdmn>', '<yvmqznrm>']
- birb_embed = torch.load(learned_embeds[4])
- birb_embed.keys()
-
- """We get a dictionary with a key (the special placeholder - here `<yvmqznrm>`) and the corresponding token embedding. As in the previous example, let's replace the 'puppy' token embedding with this and see what happens:"""
-
- prompt = 'A mouse in the style of puppy'
-
- # Tokenize
- text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- input_ids = text_input.input_ids.to(torch_device)
-
- # Get token embeddings
- token_embeddings = token_emb_layer(input_ids)
-
- # The new embedding - our learned style embedding
- replacement_token_embedding = birb_embed[embed_values[4]].to(torch_device)
- replacement_token_embedding = replacement_token_embedding[:768] # Match the 768-dim token embedding
- replacement_token_embedding = replacement_token_embedding.unsqueeze(0) # Make it [1, 768]
-
- # Insert this into the token embeddings
- token_embeddings[0, torch.where(input_ids[0] == 6829)] = replacement_token_embedding.to(torch_device)
-
- # Combine with pos embs
- input_embeddings = token_embeddings + position_embeddings
-
- # Feed through to get final output embs
- modified_output_embeddings = get_output_embeds(input_embeddings)
-
- # And generate an image with this:
- generate_with_embs(modified_output_embeddings)
-
- """The token for 'puppy' was replaced with one that captures a particular style of painting, but it could just as easily represent a specific object or class of objects.
-
- Again, there is [a nice inference notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) from HF to make it easy to use the different concepts, that properly handles using the names in prompts ("A \<cat-toy> in the style of \<birb-style>") without worrying about all this manual stuff. The goal of this notebook is to pull back the curtain a bit so you know what is going on behind the scenes :)
-
- ## Messing with Embeddings
-
- Besides just replacing the token embedding of a single word, there are various other tricks we can try. For example, what if we create a 'chimera' by averaging the embeddings of two different prompts?
- """
-
- # Embed two prompts
- text_input1 = tokenizer(["A mouse"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- text_input2 = tokenizer(["A leopard"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings1 = text_encoder(text_input1.input_ids.to(torch_device))[0]
-     text_embeddings2 = text_encoder(text_input2.input_ids.to(torch_device))[0]
-
- # Mix them together
- mix_factor = 0.35
- mixed_embeddings = (text_embeddings1*mix_factor + \
-                     text_embeddings2*(1-mix_factor))
-
- # Generate!
- generate_with_embs(mixed_embeddings)
-
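A natural follow-on experiment is to sweep the mix factor (a sketch reusing `text_embeddings1`, `text_embeddings2` and `generate_with_embs` from above):

```python
for mix_factor in (0.0, 0.25, 0.5, 0.75, 1.0):
    mixed = text_embeddings1 * mix_factor + text_embeddings2 * (1 - mix_factor)
    mixed_img = generate_with_embs(mixed)  # view or save each result as you like
```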
- """## The UNET and CFG
-
- Now it's time we looked at the actual diffusion model. This is typically a Unet that takes in the noisy latents (x) and predicts the noise. We use a conditional model that also takes in the timestep (t) and our text embedding (aka encoder_hidden_states) as conditioning. Feeding all of these into the model looks like this:
- `noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]`
-
- We can try it out and see what the output looks like:
- """
-
# Prep Scheduler
- set_timesteps(scheduler, num_inference_steps)
-
- # What is our timestep
- t = scheduler.timesteps[0]
- sigma = scheduler.sigmas[0]
-
- # A noisy latent
- latents = torch.randn(
-     (batch_size, unet.in_channels, height // 8, width // 8),
-     generator=generator,
- )
- latents = latents.to(torch_device)
- latents = latents * scheduler.init_noise_sigma
-
- # Text embedding
- text_input = tokenizer(['A macaw'], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
-
- # Run this through the unet to predict the noise residual
- with torch.no_grad():
-     noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]
-
- latents.shape, noise_pred.shape # We get preds in the same shape as the input
-
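We can also peek at a single-step denoise of this pure-noise latent (a quick sketch using the prediction above; at this noise level expect only a blurry suggestion of a macaw):

```python
latents_x0 = latents - sigma * noise_pred  # remove all the predicted noise in one go
latents_to_pil(latents_x0)[0]
```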
- """Given a set of noisy latents, the model predicts the noise component. We can remove this noise from the noisy latents to see what the output image looks like (`latents_x0 = latents - sigma * noise_pred`). And we can add most of the noise back to this predicted output to get the (hopefully slightly less noisy) input for the next diffusion step. To visualize this let's generate another image, saving both the predicted output (x0) and the next step (xt-1) after every step:"""
-
- prompt = 'Oil painting of an otter in a top hat'
- height = 512
- width = 512
- num_inference_steps = 50
- guidance_scale = 8
- generator = torch.manual_seed(32)
- batch_size = 1
-
- # Make a folder to store results
- #!rm -rf steps/
- #!mkdir -p steps/
-
- # Prep text
- text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
- max_length = text_input.input_ids.shape[-1]
- uncond_input = tokenizer(
-     [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
- )
- with torch.no_grad():
-     uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # Prep Scheduler
- set_timesteps(scheduler, num_inference_steps)
-
- # Prep latents
- latents = torch.randn(
-     (batch_size, unet.in_channels, height // 8, width // 8),
-     generator=generator,
- )
- latents = latents.to(torch_device)
- latents = latents * scheduler.init_noise_sigma
-
- # Loop
- for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
-     # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
-     latent_model_input = torch.cat([latents] * 2)
-     sigma = scheduler.sigmas[i]
-     latent_model_input = scheduler.scale_model_input(latent_model_input, t)
-
-     # predict the noise residual
-     with torch.no_grad():
-         noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
-
-     # perform guidance
-     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-     # Get the predicted x0:
-     # latents_x0 = latents - sigma * noise_pred # Calculating ourselves
-     scheduler_step = scheduler.step(noise_pred, t, latents)
-     latents_x0 = scheduler_step.pred_original_sample # Using the scheduler (Diffusers 0.4 and above)
-
-     # compute the previous noisy sample x_t -> x_t-1
-     latents = scheduler_step.prev_sample
-
-     # To PIL Images
-     im_t0 = latents_to_pil(latents_x0)[0]
-     im_next = latents_to_pil(latents)[0]
-
-     # Combine the two images and save for later viewing
-     im = Image.new('RGB', (1024, 512))
-     im.paste(im_next, (0, 0))
-     im.paste(im_t0, (512, 0))
-     im.save(f'steps/{i:04}.jpeg')
-
- # Make and show the progress video (change width to 1024 for full res)
- #!ffmpeg -v 1 -y -f image2 -framerate 12 -i steps/%04d.jpeg -c:v libx264 -preset slow -qp 18 -pix_fmt yuv420p out.mp4
- mp4 = open('out.mp4','rb').read()
- data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
- HTML("""
- <video width=600 controls>
- <source src="%s" type="video/mp4">
- </video>
- """ % data_url)
-
- """The version on the right shows the predicted 'final output' (x0) at each step, and this is what is usually used for progress videos etc. The version on the left is the 'next step'. I found it interesting to compare the two - watching the progress videos alone you'd think drastic changes are happening, especially at the early stages, but since the changes made per step are relatively small, the actual process is much more gradual.
-
- ### Classifier Free Guidance
-
- By default, the model doesn't often do what we ask. If we want it to follow the prompt better, we use a hack called CFG. There's a good explanation in this video (AI coffee break GLIDE).
-
- In the code, this comes down to us doing:
-
- `noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`
-
- This works surprisingly well :) Explore changing the guidance_scale in the code above and see how this affects the results. How high can you push it before the results get worse?
-
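An equivalent way to read that line, which makes the 'hack' more obvious (just a rearrangement, not different behaviour): start from the conditional prediction and push further along the direction in which it differs from the unconditional one:

```python
direction = noise_pred_text - noise_pred_uncond
noise_pred = noise_pred_text + (guidance_scale - 1) * direction  # same result as above
```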
- ## Sampling
-
- There is still some complexity hidden from us inside `latents = scheduler.step(noise_pred, i, latents)["prev_sample"]`. How exactly does the sampler go from the current noisy latents to a slightly less noisy version? Why don't we just use the model in a single step? Are there other ways to view this?
-
- The model tries to predict the noise in an image. For low noise values, we assume it does a pretty good job. For higher noise levels, it has a hard task! So instead of producing a perfect image, the results tend to look like a blurry mess - see the start of the video above for a visual! So samplers use the model predictions to move a small amount towards the model prediction (removing some of the noise), then get another prediction based on this marginally-less-rubbish input, in the hope that this iteratively improves the result.
-
- Different samplers do this in different ways. You can try to inspect the code for the default LMS sampler with:
- """
-
- # ??scheduler.step
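To make the idea concrete, here is the core of a single Euler-style update (a simplified sketch of one step, not the exact multi-step LMS update this scheduler performs):

```python
sigma, sigma_next = scheduler.sigmas[i], scheduler.sigmas[i + 1]
pred_x0 = latents - sigma * noise_pred        # the model's guess at the clean image
derivative = (latents - pred_x0) / sigma      # equal to noise_pred here
latents = latents + derivative * (sigma_next - sigma)  # move a little towards pred_x0
```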
- """**Time to draw some diagrams!** (Whiteboard/paper interlude)
-
- # Guidance
-
- OK, final trick! How can we add some extra control to this generation process?
-
- At each step, we're going to use our model as before to predict the noise component of x. Then we'll use this to produce a predicted output image, and apply some loss function to this image.
-
- This function can be anything, but let's demo with a super simple example. If we want images that have a lot of blue, we can craft a loss function that gives a high loss if pixels have a low blue component:
- """
-
- def blue_loss(images):
-     # How far the blue channel values are from 0.7:
-     error = torch.abs(images[:,2] - 0.7).mean() # [:,2] -> all images in batch, only the blue channel
-     return error
-
- def orange_loss(images):
-     """
-     Calculate the mean absolute error between the RGB values of the images and the target orange color.
-
-     Parameters:
-     - images (torch.Tensor): A batch of images with shape (batch_size, channels, height, width).
-       The images are assumed to be in RGB format.
-
-     Returns:
-     - torch.Tensor: The mean absolute error for the orange color.
-     """
-     # Define the target RGB values for the color orange
-     target_orange = torch.tensor([255/255, 200/255, 0/255]).view(1, 3, 1, 1).to(images.device) # (R, G, B)
-
-     # Normalize images to [0, 1] range if not already normalized
-     images = images / 255.0 if images.max() > 1.0 else images
-
-     # Calculate the mean absolute error between the RGB values and the target orange values
-     error = torch.abs(images - target_orange).mean()
    return error
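A quick way to sanity-check these losses before wiring them into the loop (a sketch; the decoded images below are in the same (batch, channels, height, width), range-[0, 1] format):

```python
dummy = torch.rand(1, 3, 64, 64)  # one fake RGB image in [0, 1]
print('blue:', blue_loss(dummy).item(), 'orange:', orange_loss(dummy).item())
```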
- """During each update step, we find the gradient of the loss with respect to the current noisy latents, and tweak them in the direction that reduces this loss as well as performing the normal update step:"""
875
-
876
- prompt = 'A campfire (oil on canvas)' #@param
877
- height = 512 # default height of Stable Diffusion
878
- width = 512 # default width of Stable Diffusion
879
- num_inference_steps = 50 #@param # Number of denoising steps
880
- guidance_scale = 8 #@param # Scale for classifier-free guidance
881
- generator = torch.manual_seed(0) # Seed generator to create the inital latent noise
882
- batch_size = 1
883
- blue_loss_scale = 200 #@param
884
-
885
- # Prep text
886
- text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
887
- with torch.no_grad():
888
- text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
889
-
890
- # And the uncond. input as before:
891
- max_length = text_input.input_ids.shape[-1]
892
- uncond_input = tokenizer(
893
- [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
894
- )
895
- with torch.no_grad():
896
- uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
897
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
898
-
899
- # Prep Scheduler
900
- set_timesteps(scheduler, num_inference_steps)
901
-
902
- # Prep latents
903
- latents = torch.randn(
904
- (batch_size, unet.in_channels, height // 8, width // 8),
905
- generator=generator,
906
- )
907
- latents = latents.to(torch_device)
908
- latents = latents * scheduler.init_noise_sigma
909
-
910
- # Loop
911
- for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
912
- # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
913
- latent_model_input = torch.cat([latents] * 2)
914
- sigma = scheduler.sigmas[i]
915
- latent_model_input = scheduler.scale_model_input(latent_model_input, t)
916
-
917
- # predict the noise residual
918
- with torch.no_grad():
919
- noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
920
-
921
- # perform CFG
922
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
923
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
924
-
925
- #### ADDITIONAL GUIDANCE ###
926
- if i%5 == 0:
927
- # Requires grad on the latents
928
- latents = latents.detach().requires_grad_()
929
-
930
- # Get the predicted x0:
931
- latents_x0 = latents - sigma * noise_pred
932
- # latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
933
-
934
- # Decode to image space
935
- denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
936
-
937
- # Calculate loss
938
- loss = orange_loss(denoised_images) * blue_loss_scale
939
-
940
- # Occasionally print it out
941
- if i%10==0:
942
- print(i, 'loss:', loss.item())
943
-
944
- # Get gradient
945
- cond_grad = torch.autograd.grad(loss, latents)[0]
946
-
947
- # Modify the latents based on this gradient
948
- latents = latents.detach() - cond_grad * sigma**2
949
-
950
- # Now step with scheduler
951
- latents = scheduler.step(noise_pred, t, latents).prev_sample
952
-
953
-
954
- latents_to_pil(latents)[0]
955
-
956
-
957
-
- prompt = 'A campfire (oil on canvas)' #@param
- height = 512 # default height of Stable Diffusion
- width = 512 # default width of Stable Diffusion
- num_inference_steps = 50 #@param # Number of denoising steps
- guidance_scale = 8 #@param # Scale for classifier-free guidance
- generator = torch.manual_seed(77) # Seed generator to create the initial latent noise
- batch_size = 1
- blue_loss_scale = 200 #@param
-
- # Prep text
- text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
-
- # And the uncond. input as before:
- max_length = text_input.input_ids.shape[-1]
- uncond_input = tokenizer(
-     [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
- )
- with torch.no_grad():
-     uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # Prep Scheduler
- set_timesteps(scheduler, num_inference_steps)
-
- # Prep latents
- latents = torch.randn(
-     (batch_size, unet.in_channels, height // 8, width // 8),
-     generator=generator,
- )
- latents = latents.to(torch_device)
- latents = latents * scheduler.init_noise_sigma
-
- # Loop
- for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
-     # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
-     latent_model_input = torch.cat([latents] * 2)
-     sigma = scheduler.sigmas[i]
-     latent_model_input = scheduler.scale_model_input(latent_model_input, t)
-
-     # predict the noise residual
-     with torch.no_grad():
-         noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
-
-     # perform CFG
-     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-     #### ADDITIONAL GUIDANCE ###
-     if i%5 == 0:
-         # Requires grad on the latents
-         latents = latents.detach().requires_grad_()
-
-         # Get the predicted x0:
-         latents_x0 = latents - sigma * noise_pred
-         # latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
-
-         # Decode to image space
-         denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
-
-         # Calculate loss
-         loss = orange_loss(denoised_images) * blue_loss_scale
-
-         # Occasionally print it out
-         if i%10==0:
-             print(i, 'loss:', loss.item())
-
-         # Get gradient
-         cond_grad = torch.autograd.grad(loss, latents)[0]
-
-         # Modify the latents based on this gradient
-         latents = latents.detach() - cond_grad * sigma**2
-
-     # Now step with scheduler
-     latents = scheduler.step(noise_pred, t, latents).prev_sample
-
- latents_to_pil(latents)[0]
-
- prompt = 'A campfire (oil on canvas)' #@param
- height = 512 # default height of Stable Diffusion
- width = 512 # default width of Stable Diffusion
- num_inference_steps = 50 #@param # Number of denoising steps
- guidance_scale = 8 #@param # Scale for classifier-free guidance
- generator = torch.manual_seed(42) # Seed generator to create the initial latent noise
- batch_size = 1
- blue_loss_scale = 200 #@param
-
- # Prep text
- text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
- with torch.no_grad():
-     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
-
- # And the uncond. input as before:
- max_length = text_input.input_ids.shape[-1]
- uncond_input = tokenizer(
-     [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
- )
- with torch.no_grad():
-     uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # Prep Scheduler
- set_timesteps(scheduler, num_inference_steps)
-
- # Prep latents
- latents = torch.randn(
-     (batch_size, unet.in_channels, height // 8, width // 8),
-     generator=generator,
- )
- latents = latents.to(torch_device)
- latents = latents * scheduler.init_noise_sigma
-
- # Loop
- for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
-     # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
-     latent_model_input = torch.cat([latents] * 2)
-     sigma = scheduler.sigmas[i]
-     latent_model_input = scheduler.scale_model_input(latent_model_input, t)
-
-     # predict the noise residual
-     with torch.no_grad():
-         noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
-
-     # perform CFG
-     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-     #### ADDITIONAL GUIDANCE ###
-     if i%5 == 0:
-         # Requires grad on the latents
-         latents = latents.detach().requires_grad_()
-
-         # Get the predicted x0:
-         latents_x0 = latents - sigma * noise_pred
-         # latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
-
-         # Decode to image space
-         denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
-
-         # Calculate loss
-         loss = orange_loss(denoised_images) * blue_loss_scale
-
-         # Occasionally print it out
-         if i%10==0:
-             print(i, 'loss:', loss.item())
-
-         # Get gradient
-         cond_grad = torch.autograd.grad(loss, latents)[0]
-
-         # Modify the latents based on this gradient
-         latents = latents.detach() - cond_grad * sigma**2
-
-     # Now step with scheduler
-     latents = scheduler.step(noise_pred, t, latents).prev_sample
-
- latents_to_pil(latents)[0]
-
1120
- """Tweak the scale (`blue_loss_scale`) - at low values, the image is mostly red and orange thanks to the prompt. At higher values, it is mostly bluish! Too high and we get a plain blue image.
 
1121
 
1122
- Since this is slow, you'll notice that I only apply this loss once every 5 iterations - this was a suggestion from Jeremy and we left it in because for this demo it saves time and still works. For your own tests you may want to explore using a lower scale for the loss and applying it every iteration instead :)
1123
 
1124
- NB: We should set latents requires_grad=True **before** we do the forward pass of the unet (removing `with torch.no_grad()`) if we want mode accurate gradients. BUT this requires a lot of extra memory. You'll see both approaches used depending on whose implementation you're looking at.
 
1125
 
1126
- Guiding with classifier models can give you images of a specific class. Guiding with a model like CLIP can help better match a text prompt. Guiding with a style loss can help add a particular style. Guiding with some sort of perceptual loss can force it towards the overall look af a target image. And so on.
 
 
-
-# Conclusions
-
-Hopefully now you have a slightly better idea of what is happening when you make an image with one of these models, and how you can modify the process in creative ways. I hope you're inspired to make something fun :)
-
-This notebook was written by Jonathan Whitaker, adapted from ['Grokking Stable Diffusion'](https://colab.research.google.com/drive/1dlgggNa5Mz8sEAGU0wFCHhGLFooW_pf1?usp=sharing), which was my early attempt to understand these components for myself. If you spot bugs or have questions, feel free to reach out to me @johnowhitaker :) Enjoy!
-"""
 
 
-
-import gradio as gr
-
 def generate_image_from_prompt(text_in, style_in):
-    STYLE_LIST = ['learned_embeds_gartic-phone.bin', 'learned_embeds_hawaiian-shirt.bin', 'learned_embeds_phone0.bin1', 'learned_embeds_style-spdmn.bin', 'learned_embedssd_yvmqznrm.bin']
     STYLE_SEEDS = [128, 64, 128, 64, 128]

     print(text_in)
@@ -1156,38 +284,34 @@ def generate_image_from_prompt(text_in, style_in):
     loss_generated_img = (loss_style(prompt, style_embed[0], style_seed))

     return [generated_image, loss_generated_img]
-dict_styles = {'<gartic-phone>': 'styles/learned_embeds_gartic-phone.bin',
-               '<hawaiian shirt>': 'styles/learned_embeds_hawaiian-shirt.bin',
-               '<gp>': 'styles/learned_embeds_phone01.bin',
-               '<style-spdmn>': 'styles/learned_embeds_style-spdmn.bin',
-               '<yvmqznrm>': 'styles/learned_embedssd_yvmqznrm.bin'}
-# dict_styles.keys()
-
-def inference(prompt, style):
-    if prompt is not None and style is not None:
-        style = dict_styles[style]
-        result = generate_image_from_prompt(prompt, style)
-        return np.array(result)
-    else:
-        return None
-
-title = "Stable Diffusion and Textual Inversion"
-description = "A simple Gradio interface to stylize Stable Diffusion outputs"
-examples = [['A man sipping wine wearing a spacesuit on the moon', 'Stripes']]
-
-demo = gr.Interface(inference,
-                    inputs=[gr.Textbox(label='Prompt'),
-                            gr.Dropdown(['<gartic-phone>', '<hawaiian shirt>', '<gp>', '<style-spdmn>', '<yvmqznrm>'], label='Style')],
-                    outputs=[gr.Image(label="Stable Diffusion Output")],
-                    title=title,
-                    description=description,
-                    # examples=examples,
-                    # cache_examples=True
-                    )
-
-demo.launch()
+import gradio as gr
 from base64 import b64encode
 import numpy
 import torch
 from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
 from PIL import Image
 from torch import autocast
 from torchvision import transforms as tfms
 from tqdm.auto import tqdm
 from transformers import CLIPTextModel, CLIPTokenizer, logging
+import torchvision.transforms as T
 

 torch.manual_seed(1)

 logging.set_verbosity_error()
 torch_device = "cpu"

 vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

 # Load the tokenizer and text encoder to tokenize and encode the text.
 ...  # (tokenizer, text_encoder and unet loading collapsed by the diff viewer)
 # The noise scheduler
 scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

 vae = vae.to(torch_device)
 text_encoder = text_encoder.to(torch_device)
 unet = unet.to(torch_device)

+token_emb_layer = text_encoder.text_model.embeddings.token_embedding
+pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
+position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
+position_embeddings = pos_emb_layer(position_ids)

 def pil_to_latent(input_im):
     # Single image -> single latent in a batch (so size 1, 4, 64, 64)
 ...  # (rest of pil_to_latent and start of latents_to_pil collapsed by the diff viewer)
     pil_images = [Image.fromarray(image) for image in images]
     return pil_images

 def get_output_embeds(input_embeddings):
     # CLIP's text model uses causal mask, so we prepare it here:
     bsz, seq_len = input_embeddings.shape[:2]
 ...  # (body of get_output_embeds collapsed by the diff viewer)
     # And now they're ready!
     return output

+def generate_with_embs(text_embeddings, seed, max_length):
     height = 512  # default height of Stable Diffusion
     width = 512  # default width of Stable Diffusion
+    num_inference_steps = 10  # Number of denoising steps
     guidance_scale = 7.5  # Scale for classifier-free guidance
+    generator = torch.manual_seed(seed)  # Seed generator to create the initial latent noise
     batch_size = 1

+    # max_length = text_input.input_ids.shape[-1]
     uncond_input = tokenizer(
         [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
     )
 
 ...  # (body of generate_with_embs collapsed by the diff viewer)

     return latents_to_pil(latents)[0]

 # Prep Scheduler
+def set_timesteps(scheduler, num_inference_steps):
+    scheduler.set_timesteps(num_inference_steps)
+    scheduler.timesteps = scheduler.timesteps.to(torch.float32)  # minor fix to ensure MPS compatibility, fixed in diffusers PR 3925

+def embed_style(prompt, style_embed, style_seed):
+    # Tokenize
+    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
+    input_ids = text_input.input_ids.to(torch_device)
+
+    # Get token embeddings
+    token_embeddings = token_emb_layer(input_ids)
+
+    replacement_token_embedding = style_embed.to(torch_device)
+
+    # Insert this into the token embeddings (6829 is the CLIP token id for the placeholder word "puppy")
+    token_embeddings[0, torch.where(input_ids[0] == 6829)] = replacement_token_embedding.to(torch_device)
+
+    # Combine with pos embs
+    input_embeddings = token_embeddings + position_embeddings
+
+    # Feed through to get final output embs
+    modified_output_embeddings = get_output_embeds(input_embeddings)
+
+    # And generate an image with this:
+    max_length = text_input.input_ids.shape[-1]
+    return generate_with_embs(modified_output_embeddings, style_seed, max_length)

+def loss_style(prompt, style_embed, style_seed):
+    # Tokenize
+    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
+    input_ids = text_input.input_ids.to(torch_device)
+
+    # Get token embeddings
+    token_embeddings = token_emb_layer(input_ids)
+
+    # The new embedding - the learned style vector
+    replacement_token_embedding = style_embed.to(torch_device)
+
+    # Insert this into the token embeddings
+    token_embeddings[0, torch.where(input_ids[0] == 6829)] = replacement_token_embedding.to(torch_device)
+
+    # Combine with pos embs
+    input_embeddings = token_embeddings + position_embeddings
+
+    # Feed through to get final output embs
+    modified_output_embeddings = get_output_embeds(input_embeddings)
+
+    # And generate an image with this:
+    max_length = text_input.input_ids.shape[-1]
+    return generate_loss_based_image(modified_output_embeddings, style_seed, max_length)
 
 
 

+def sepia_loss(images):
+    # Project RGB onto the sepia 'red' axis and penalise distance from mid-grey
+    sepia_tone = 0.393 * images[:, 0] + 0.769 * images[:, 1] + 0.189 * images[:, 2]
+    error = torch.abs(sepia_tone - 0.5).mean()
     return error

+def generate_loss_based_image(text_embeddings, seed, max_length):
+    height = 64
+    width = 64
+    num_inference_steps = 10
+    guidance_scale = 8
+    generator = torch.manual_seed(64)
+    batch_size = 1
+    loss_scale = 200
+
+    uncond_input = tokenizer(
+        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
+    )
     with torch.no_grad():
+        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
+    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+    # Prep Scheduler
+    set_timesteps(scheduler, num_inference_steps + 1)
+
+    # Prep latents
+    latents = torch.randn(
+        (batch_size, unet.in_channels, height // 8, width // 8),
+        generator=generator,
+    )
+    latents = latents.to(torch_device)
+    latents = latents * scheduler.init_noise_sigma
+
+    sched_out = None
+
+    # Loop
+    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
+        # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
+        latent_model_input = torch.cat([latents] * 2)
+        sigma = scheduler.sigmas[i]
+        latent_model_input = scheduler.scale_model_input(latent_model_input, t)
+
+        # predict the noise residual
+        with torch.no_grad():
+            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
+
+        # perform CFG
+        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+        ### ADDITIONAL GUIDANCE ###
+        if i % 5 == 0 and i > 0:
+            # Requires grad on the latents
+            latents = latents.detach().requires_grad_()
+
+            # Get the predicted x0 (rewind the scheduler's internal step index
+            # so this extra .step() call doesn't advance it)
+            scheduler._step_index -= 1
+            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
+
+            # Decode to image space
+            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5  # range (0, 1)
+
+            # Calculate loss
+            loss = sepia_loss(denoised_images) * loss_scale
+
+            # Occasionally print it out
+            # if i%10==0:
+            print(i, 'loss:', loss.item())
+
+            # Get gradient
+            cond_grad = torch.autograd.grad(loss, latents)[0]
+
+            # Modify the latents based on this gradient
+            latents = latents.detach() - cond_grad * sigma**2
+            # To PIL Images
+            im_t0 = latents_to_pil(latents_x0)[0]
+            im_next = latents_to_pil(latents)[0]
+
+        # Now step with scheduler
+        latents = scheduler.step(noise_pred, t, latents).prev_sample
+
+    return latents_to_pil(latents)[0]

 def generate_image_from_prompt(text_in, style_in):
+    STYLE_LIST = ['oil_style.bin', 'valorant_style.bin', 'cartoon_syle.bin', 'space_style.bin', 'terraria_syle.bin']
     STYLE_SEEDS = [128, 64, 128, 64, 128]

     print(text_in)
 
 ...  # (body of generate_image_from_prompt collapsed by the diff viewer)
     loss_generated_img = (loss_style(prompt, style_embed[0], style_seed))

     return [generated_image, loss_generated_img]

+# Define Interface
+title = 'Stable Diffusion Art Generator'
+
+# Add clear and concise labels and instructions
+prompt_label = "Enter a prompt (e.g., 'man in style of puppy'):"
+styles_label = "Select a Pretrained Style:"
+
+instructions = "Explore creative art generation using Stable Diffusion. Enter a prompt and choose a style to get started."
+
+# Note: Gradio components accept no `style=` kwarg, so sizing is left to the default layout
+demo = gr.Interface(generate_image_from_prompt,
+                    inputs=[
+                        gr.Textbox('man in style of puppy', label=prompt_label),
+                        gr.Dropdown(
+                            ['oil', 'valorant', 'cartoon', 'space', 'terraria'],
+                            value="valorant",
+                            label=styles_label,
+                        ),
+                    ],
+                    outputs=[
+                        gr.Gallery(label="Generated Images", show_label=False, elem_id="gallery",
+                                   columns=[2], rows=[2], object_fit="contain"),
+                    ],
+                    title=title,
+                    description=instructions,
+                    theme="compact"  # Apply a predefined theme
+                    )
+
+demo.launch(debug=True)