AlanB committed on
Commit a52dc91 · 1 Parent(s): fea435d

Upload pipeline.py

Files changed (1)
  1. pipeline.py +530 -0
pipeline.py ADDED
@@ -0,0 +1,530 @@
+ """
+     modeled after the textual_inversion.py / train_dreambooth.py and the work
+     of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
+ """
+ import inspect
+ import warnings
+ from typing import Callable, List, Optional, Union
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+
+ import PIL
+ from accelerate import Accelerator
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
+ from diffusers.pipeline_utils import DiffusionPipeline
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+ from diffusers.schedulers import (
+     DDIMScheduler,
+     DPMSolverMultistepScheduler,
+     EulerAncestralDiscreteScheduler,
+     EulerDiscreteScheduler,
+     LMSDiscreteScheduler,
+     PNDMScheduler,
+ )
+ from diffusers.utils import logging
+
+ # TODO: remove and import from diffusers.utils when the new version of diffusers is released
+ from packaging import version
+ from tqdm.auto import tqdm
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+
+
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
+     PIL_INTERPOLATION = {
+         "linear": PIL.Image.Resampling.BILINEAR,
+         "bilinear": PIL.Image.Resampling.BILINEAR,
+         "bicubic": PIL.Image.Resampling.BICUBIC,
+         "lanczos": PIL.Image.Resampling.LANCZOS,
+         "nearest": PIL.Image.Resampling.NEAREST,
+     }
+ else:
+     PIL_INTERPOLATION = {
+         "linear": PIL.Image.LINEAR,
+         "bilinear": PIL.Image.BILINEAR,
+         "bicubic": PIL.Image.BICUBIC,
+         "lanczos": PIL.Image.LANCZOS,
+         "nearest": PIL.Image.NEAREST,
+     }
+ # ------------------------------------------------------------------------------
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ def preprocess(image):
+     w, h = image.size
+     w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+     image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
+     image = np.array(image).astype(np.float32) / 255.0
+     image = image[None].transpose(0, 3, 1, 2)
+     image = torch.from_numpy(image)
+     return 2.0 * image - 1.0
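+ # Example (added note): a 640x480 RGB image passes through unchanged and becomes a
+ # float tensor of shape (1, 3, 480, 640) with values in [-1, 1]; a 513x769 image is
+ # first resized down to 512x768 before conversion.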
+
+
+ class ImagicStableDiffusionPipeline(DiffusionPipeline):
+     r"""
+     Pipeline for Imagic image editing.
+     See paper here: https://arxiv.org/pdf/2210.09276.pdf
+
+     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+     Args:
+         vae ([`AutoencoderKL`]):
+             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+         text_encoder ([`CLIPTextModel`]):
+             Frozen text-encoder. Stable Diffusion uses the text portion of
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+         tokenizer (`CLIPTokenizer`):
+             Tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+         scheduler ([`SchedulerMixin`]):
+             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+             [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`DPMSolverMultistepScheduler`],
+             [`EulerDiscreteScheduler`], or [`EulerAncestralDiscreteScheduler`].
+         safety_checker ([`StableDiffusionSafetyChecker`]):
+             Classification module that estimates whether generated images could be considered offensive or harmful.
+             Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
+         feature_extractor ([`CLIPFeatureExtractor`]):
+             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+     """
+
+     def __init__(
+         self,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         tokenizer: CLIPTokenizer,
+         unet: UNet2DConditionModel,
+         scheduler: Union[
+             DDIMScheduler,
+             PNDMScheduler,
+             LMSDiscreteScheduler,
+             DPMSolverMultistepScheduler,
+             EulerDiscreteScheduler,
+             EulerAncestralDiscreteScheduler,
+         ],
+         safety_checker: StableDiffusionSafetyChecker,
+         feature_extractor: CLIPFeatureExtractor,
+     ):
+         super().__init__()
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             unet=unet,
+             scheduler=scheduler,
+             safety_checker=safety_checker,
+             feature_extractor=feature_extractor,
+         )
+         # Populated by `train()`; `__call__` checks these before generating.
+         self.text_embeddings_orig = None
+         self.text_embeddings = None
+
+     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+         r"""
+         Enable sliced attention computation.
+         When this option is enabled, the attention module will split the input tensor in slices, to compute attention
+         in several steps. This is useful to save some memory in exchange for a small speed decrease.
+         Args:
+             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
+                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
+                 `attention_head_dim` must be a multiple of `slice_size`.
+         """
+         if slice_size == "auto":
+             if isinstance(self.unet.config.attention_head_dim, int):
+                 # half the attention head size is usually a good trade-off between
+                 # speed and memory
+                 slice_size = self.unet.config.attention_head_dim // 2
+             else:
+                 # if `attention_head_dim` is a list, take the smallest head size
+                 slice_size = min(self.unet.config.attention_head_dim)
+
+         self.unet.set_attention_slice(slice_size)
+
+     def disable_attention_slicing(self):
+         r"""
+         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
+         back to computing attention in one step.
+         """
+         # set slice_size = `None` to disable `attention slicing`
+         self.enable_attention_slicing(None)
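+     # Usage note (added): calling `pipe.enable_attention_slicing()` before `train()`
+     # or generation trades a small amount of speed for lower peak memory;
+     # `pipe.disable_attention_slicing()` restores single-step attention.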
+
+     def train(
+         self,
+         prompt: Union[str, List[str]],
+         init_image: Union[torch.FloatTensor, PIL.Image.Image],
+         height: Optional[int] = 512,
+         width: Optional[int] = 512,
+         generator: Optional[torch.Generator] = None,
+         embedding_learning_rate: float = 0.001,
+         diffusion_model_learning_rate: float = 2e-6,
+         text_embedding_optimization_steps: int = 500,
+         model_fine_tuning_optimization_steps: int = 1000,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         r"""
+         Function invoked to fit the pipeline to a single (prompt, image) pair before generation. Following the
+         Imagic procedure, it first optimizes the text embedding to reconstruct `init_image`, then fine-tunes the
+         unet around that embedding.
+         Args:
+             prompt (`str` or `List[str]`):
+                 The prompt or prompts describing the desired edit of `init_image`.
+             init_image (`torch.FloatTensor` or `PIL.Image.Image`):
+                 The image to be edited; it is encoded to latents and used as the reconstruction target.
+             height (`int`, *optional*, defaults to 512):
+                 The height in pixels of the generated image.
+             width (`int`, *optional*, defaults to 512):
+                 The width in pixels of the generated image.
+             generator (`torch.Generator`, *optional*):
+                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make training
+                 deterministic.
+             embedding_learning_rate (`float`, *optional*, defaults to 0.001):
+                 Learning rate for the text-embedding optimization stage.
+             diffusion_model_learning_rate (`float`, *optional*, defaults to 2e-6):
+                 Learning rate for the unet fine-tuning stage.
+             text_embedding_optimization_steps (`int`, *optional*, defaults to 500):
+                 Number of optimization steps for the text embedding.
+             model_fine_tuning_optimization_steps (`int`, *optional*, defaults to 1000):
+                 Number of fine-tuning steps for the unet.
+             callback (`Callable`, *optional*):
+                 A function that will be called every `callback_steps` steps during training. The function will be
+                 called with the following arguments: `callback(step: int, total_steps: int, latents: torch.FloatTensor)`.
+             callback_steps (`int`, *optional*, defaults to 1):
+                 The frequency at which the `callback` function will be called. If not specified, the callback will be
+                 called at every step.
+         Returns:
+             `None`. The original and optimized text embeddings are stored on the pipeline for use by `__call__`.
+         """
+         accelerator = Accelerator(
+             gradient_accumulation_steps=1,
+             mixed_precision="fp16",
+         )
+
+         if "torch_device" in kwargs:
+             device = kwargs.pop("torch_device")
+             warnings.warn(
+                 "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
+                 " Consider using `pipe.to(torch_device)` instead."
+             )
+
+             if device is None:
+                 device = "cuda" if torch.cuda.is_available() else "cpu"
+             self.to(device)
+
+         if height % 8 != 0 or width % 8 != 0:
+             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+         # Freeze vae and unet
+         self.vae.requires_grad_(False)
+         self.unet.requires_grad_(False)
+         self.text_encoder.requires_grad_(False)
+         self.unet.eval()
+         self.vae.eval()
+         self.text_encoder.eval()
+
+         if accelerator.is_main_process:
+             accelerator.init_trackers(
+                 "imagic",
+                 config={
+                     "embedding_learning_rate": embedding_learning_rate,
+                     "text_embedding_optimization_steps": text_embedding_optimization_steps,
+                 },
+             )
+
+         # get text embeddings for prompt
+         text_input = self.tokenizer(
+             prompt,
+             padding="max_length",
+             max_length=self.tokenizer.model_max_length,
+             truncation=True,
+             return_tensors="pt",
+         )
+         text_embeddings = torch.nn.Parameter(
+             self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
+         )
+         text_embeddings = text_embeddings.detach()
+         text_embeddings.requires_grad_()
+         text_embeddings_orig = text_embeddings.clone()
+
+         # Initialize the optimizer
+         optimizer = torch.optim.Adam(
+             [text_embeddings],  # only optimize the embeddings
+             lr=embedding_learning_rate,
+         )
+
+         if isinstance(init_image, PIL.Image.Image):
+             init_image = preprocess(init_image)
+
+         latents_dtype = text_embeddings.dtype
+         init_image = init_image.to(device=self.device, dtype=latents_dtype)
+         init_latent_image_dist = self.vae.encode(init_image).latent_dist
+         init_image_latents = init_latent_image_dist.sample(generator=generator)
+         init_image_latents = 0.18215 * init_image_latents
+
+         progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
+         progress_bar.set_description("Steps")
+
+         global_step = 0
+
+         logger.info("First optimizing the text embedding to better reconstruct the init image")
+         for _ in range(text_embedding_optimization_steps):
+             with accelerator.accumulate(text_embeddings):
+                 # Sample noise that we'll add to the latents
+                 noise = torch.randn(init_image_latents.shape).to(init_image_latents.device)
+                 timesteps = torch.randint(1000, (1,), device=init_image_latents.device)
+
+                 # Add noise to the latents according to the noise magnitude at each timestep
+                 # (this is the forward diffusion process)
+                 noisy_latents = self.scheduler.add_noise(init_image_latents, noise, timesteps)
+
+                 # Predict the noise residual
+                 noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
+
+                 loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
+                 accelerator.backward(loss)
+
+                 optimizer.step()
+                 optimizer.zero_grad()
+
+             # Checks if the accelerator has performed an optimization step behind the scenes
+             if accelerator.sync_gradients:
+                 progress_bar.update(1)
+                 global_step += 1
+
+                 # call the callback, if provided
+                 if callback is not None and global_step % callback_steps == 0:
+                     callback(global_step, text_embedding_optimization_steps, noisy_latents)
+
+             logs = {"loss": loss.detach().item()}  # , "lr": lr_scheduler.get_last_lr()[0]}
+             progress_bar.set_postfix(**logs)
+             accelerator.log(logs, step=global_step)
+
+         accelerator.wait_for_everyone()
+
+         text_embeddings.requires_grad_(False)
+
+         # Now we fine tune the unet to better reconstruct the image
+         self.unet.requires_grad_(True)
+         self.unet.train()
+         optimizer = torch.optim.Adam(
+             self.unet.parameters(),  # only optimize unet
+             lr=diffusion_model_learning_rate,
+         )
+         progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)
+
+         logger.info("Next fine tuning the entire model to better reconstruct the init image")
+         for _ in range(model_fine_tuning_optimization_steps):
+             with accelerator.accumulate(self.unet.parameters()):
+                 # Sample noise that we'll add to the latents
+                 noise = torch.randn(init_image_latents.shape).to(init_image_latents.device)
+                 timesteps = torch.randint(1000, (1,), device=init_image_latents.device)
+
+                 # Add noise to the latents according to the noise magnitude at each timestep
+                 # (this is the forward diffusion process)
+                 noisy_latents = self.scheduler.add_noise(init_image_latents, noise, timesteps)
+
+                 # Predict the noise residual
+                 noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
+
+                 loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
+                 accelerator.backward(loss)
+
+                 optimizer.step()
+                 optimizer.zero_grad()
+
+             # Checks if the accelerator has performed an optimization step behind the scenes
+             if accelerator.sync_gradients:
+                 progress_bar.update(1)
+                 global_step += 1
+
+                 # call the callback, if provided
+                 if callback is not None and global_step % callback_steps == 0:
+                     callback(global_step, model_fine_tuning_optimization_steps, noisy_latents)
+
+             logs = {"loss": loss.detach().item()}  # , "lr": lr_scheduler.get_last_lr()[0]}
+             progress_bar.set_postfix(**logs)
+             accelerator.log(logs, step=global_step)
+
+         accelerator.wait_for_everyone()
+         self.text_embeddings_orig = text_embeddings_orig
+         self.text_embeddings = text_embeddings
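+         # Note (added): both embeddings remain on the pipeline so that `__call__` can
+         # linearly mix them via `alpha`; `train()` itself returns None.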
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         alpha: float = 1.2,
+         height: Optional[int] = 512,
+         width: Optional[int] = 512,
+         num_inference_steps: Optional[int] = 50,
+         generator: Optional[torch.Generator] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         guidance_scale: float = 7.5,
+         eta: float = 0.0,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         r"""
+         Function invoked when calling the pipeline for generation.
+         Args:
+             alpha (`float`, *optional*, defaults to 1.2):
+                 Interpolation weight between the original and the optimized text embeddings. `alpha = 0` uses only
+                 the optimized embedding, `alpha = 1` only the original one, and values above 1 extrapolate past the
+                 original embedding.
+             height (`int`, *optional*, defaults to 512):
+                 The height in pixels of the generated image.
+             width (`int`, *optional*, defaults to 512):
+                 The width in pixels of the generated image.
+             num_inference_steps (`int`, *optional*, defaults to 50):
+                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                 expense of slower inference.
+             guidance_scale (`float`, *optional*, defaults to 7.5):
+                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                 1`. A higher guidance scale encourages images that are closely linked to the text prompt, usually at
+                 the expense of lower image quality.
+             eta (`float`, *optional*, defaults to 0.0):
+                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                 [`schedulers.DDIMScheduler`], will be ignored for others.
+             generator (`torch.Generator`, *optional*):
+                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
+                 deterministic.
+             output_type (`str`, *optional*, defaults to `"pil"`):
+                 The output format of the generated image. Choose between
+                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                 plain tuple.
+             callback (`Callable`, *optional*):
+                 A function that will be called every `callback_steps` steps during inference. The function will be
+                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+             callback_steps (`int`, *optional*, defaults to 1):
+                 The frequency at which the `callback` function will be called. If not specified, the callback will be
+                 called at every step.
+         Returns:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
+             `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
+             element is a list of `bool`s denoting whether the corresponding generated image likely represents
+             "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
+         """
+         if height % 8 != 0 or width % 8 != 0:
+             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+         if self.text_embeddings is None:
+             raise ValueError("Please run the pipe.train() before trying to generate an image.")
+         if self.text_embeddings_orig is None:
+             raise ValueError("Please run the pipe.train() before trying to generate an image.")
+
+         text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
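+         # Note (added): `alpha` weights the *original* prompt embedding, so the default
+         # alpha = 1.2 extrapolates slightly past it (in the spirit of the eta mixing in
+         # the Imagic paper), which tends to strengthen the edit; values in [0, 1]
+         # interpolate between the optimized and original embeddings.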
+
+         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+         # corresponds to doing no classifier free guidance.
+         do_classifier_free_guidance = guidance_scale > 1.0
+         # get unconditional embeddings for classifier free guidance
+         if do_classifier_free_guidance:
+             uncond_tokens = [""]
+             max_length = self.tokenizer.model_max_length
+             uncond_input = self.tokenizer(
+                 uncond_tokens,
+                 padding="max_length",
+                 max_length=max_length,
+                 truncation=True,
+                 return_tensors="pt",
+             )
+             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
+
+             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+             seq_len = uncond_embeddings.shape[1]
+             uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
+
+             # For classifier free guidance, we need to do two forward passes.
+             # Here we concatenate the unconditional and text embeddings into a single batch
+             # to avoid doing two forward passes
+             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+         # get the initial random noise unless the user supplied it
+
+         # Unlike in other pipelines, latents need to be generated in the target device
+         # for 1-to-1 results reproducibility with the CompVis implementation.
+         # However this currently doesn't work in `mps`.
+         latents_shape = (1, self.unet.in_channels, height // 8, width // 8)
+         latents_dtype = text_embeddings.dtype
+         if self.device.type == "mps":
+             # randn does not exist on mps
+             latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
+                 self.device
+             )
+         else:
+             latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
+
+         # set timesteps
+         self.scheduler.set_timesteps(num_inference_steps)
+
+         # Some schedulers like PNDM have timesteps as arrays
+         # It's more optimized to move all timesteps to correct device beforehand
+         timesteps_tensor = self.scheduler.timesteps.to(self.device)
+
+         # scale the initial noise by the standard deviation required by the scheduler
+         latents = latents * self.scheduler.init_noise_sigma
+
+         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+         # and should be between [0, 1]
+         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         extra_step_kwargs = {}
+         if accepts_eta:
+             extra_step_kwargs["eta"] = eta
+
+         for i, t in enumerate(self.progress_bar(timesteps_tensor)):
+             # expand the latents if we are doing classifier free guidance
+             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+             # predict the noise residual
+             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
+
+             # perform guidance
+             if do_classifier_free_guidance:
+                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+             # compute the previous noisy sample x_t -> x_t-1
+             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+             # call the callback, if provided
+             if callback is not None and i % callback_steps == 0:
+                 callback(i, t, latents)
+
+         latents = 1 / 0.18215 * latents
+         image = self.vae.decode(latents).sample
+
+         image = (image / 2 + 0.5).clamp(0, 1)
+
+         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+
+         if self.safety_checker is not None:
+             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
+                 self.device
+             )
+             image, has_nsfw_concept = self.safety_checker(
+                 images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
+             )
+         else:
+             has_nsfw_concept = None
+
+         if output_type == "pil":
+             image = self.numpy_to_pil(image)
+
+         if not return_dict:
+             return (image, has_nsfw_concept)
+
+         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
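
A minimal usage sketch (not part of the commit): the checkpoint id, image path, and the `custom_pipeline` target are placeholders. This file mirrors the diffusers community Imagic pipeline, so `custom_pipeline` can point at a local directory containing this pipeline.py (or at the hosting repo); a CUDA device is assumed since `train()` uses fp16 mixed precision.

import PIL.Image
from diffusers import DiffusionPipeline

# Load Stable Diffusion with this file as the custom pipeline class.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # assumed base checkpoint
    custom_pipeline=".",  # hypothetical: directory holding this pipeline.py
).to("cuda")

init_image = PIL.Image.open("bird.jpg").convert("RGB")  # hypothetical target image

# Stage 1 optimizes the text embedding, stage 2 fine-tunes the unet (see train()).
pipe.train("A photo of a bird spreading wings.", init_image=init_image)

# alpha mixes the original and optimized embeddings; alpha > 1 strengthens the edit.
image = pipe(alpha=1.2, guidance_scale=7.5, num_inference_steps=50).images[0]
image.save("imagic_bird.png")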