thisiswooyeol committed
Commit 7dcd7e8 · verified · 1 Parent(s): 9135c83

Delete pipeline_stable_diffusion_migc.py

Files changed (1)
  1. pipeline_stable_diffusion_migc.py +0 -1239
pipeline_stable_diffusion_migc.py DELETED
@@ -1,1239 +0,0 @@
import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import torch
import torch.nn.functional as F
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    StableDiffusionLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import Attention, AttnProcessor
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
from diffusers.utils.torch_utils import randn_tensor
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from core.diffusion.migc.migc_archs import MIGC, NaiveFuser

logger = logging.get_logger(__name__)

def get_sup_mask(mask_list):
    or_mask = np.zeros_like(mask_list[0])
    for mask in mask_list:
        or_mask += mask
    or_mask[or_mask >= 1] = 1
    sup_mask = 1 - or_mask
    return sup_mask
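
# Example (hypothetical 4x4 masks): with m1 covering the left half and m2 the
# top half, get_sup_mask([m1, m2]) is 1 only on the uncovered bottom-right
# 2x2 corner, i.e. the background region that no instance box claims:
#   m1 = np.zeros((4, 4)); m1[:, :2] = 1
#   m2 = np.zeros((4, 4)); m2[:2, :] = 1
#   get_sup_mask([m1, m2])  # -> 1 only in the bottom-right 2x2 block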
39
-
40
-
41
- class MIGCProcessor(AttnProcessor):
42
- def __init__(self, use_migc: bool):
43
- self.use_migc = use_migc
44
- self.naive_fuser = NaiveFuser()
45
-
46
- def __call__(
47
- self,
48
- attn: Attention,
49
- hidden_states: torch.Tensor,
50
- encoder_hidden_states: torch.Tensor | None = None,
51
- attention_mask: torch.Tensor | None = None,
52
- temb: torch.Tensor | None = None,
53
- encoder_hidden_states_phrases: torch.Tensor | None = None,
54
- bboxes: List[List[float]] = [],
55
- ith: int = 0,
56
- embeds_pooler: torch.Tensor | None = None,
57
- height: int = 512,
58
- width: int = 512,
59
- MIGCsteps: int = 25,
60
- NaiveFuserSteps: int = -1,
61
- ca_scale: float | None = None,
62
- ea_scale: float | None = None,
63
- sac_scale: float | None = None,
64
- guidance_masks: torch.Tensor | None = None,
65
- supplement_mask: torch.Tensor | None = None,
66
- in_box: torch.Tensor | None = None,
67
- ):
68
- batch_size, sequence_length, _ = hidden_states.shape
69
- assert batch_size == 1 or batch_size == 2, (
70
- "We currently only implement sampling with batch_size=1, and we will implement sampling with batch_size=N as soon as possible."
71
- )
72
- residual = hidden_states
73
- if attn.spatial_norm is not None:
74
- hidden_states = attn.spatial_norm(hidden_states, temb)
75
-
76
- input_ndim = hidden_states.ndim
77
-
78
- if input_ndim == 4:
79
- batch_size, channel, height, width = hidden_states.shape
80
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
81
-
82
- batch_size, sequence_length, _ = (
83
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
84
- )
85
-
86
- if attention_mask is not None:
87
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
88
- # scaled_dot_product_attention expects attention_mask shape to be
89
- # (batch, heads, source_length, target_length)
90
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
91
-
92
- if attn.group_norm is not None:
93
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
94
-
95
- ##########
96
- # Expand encoder_hidden_states with encoder_hidden_states_phrases
97
- instance_num = len(bboxes)
98
-
99
- if ith > MIGCsteps:
100
- use_migc = False
101
- else:
102
- use_migc = self.use_migc
103
- is_vanilla_cross = instance_num == 0 or (not use_migc and ith > NaiveFuserSteps)
104
-
105
- is_cross = encoder_hidden_states is not None
106
-
107
- # In this case, we need to use MIGC or naive_fuser, so
108
- # 1. We concat prompt embeds and phrases embeds
109
- # 2. we copy the hidden_states_cond (instance_num+1) times for QKV
110
- if is_cross and not is_vanilla_cross:
111
- batch_size_phrases = encoder_hidden_states_phrases.shape[0]
112
- encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_phrases])
113
- # print(encoder_hidden_states.shape)
114
- hidden_states_uncond = hidden_states[[0], ...]
115
- hidden_states_cond = hidden_states[[1], ...].repeat(instance_num + 1, 1, 1)
116
- hidden_states = torch.cat([hidden_states_uncond, hidden_states_cond])
117
- else:
118
- batch_size_phrases = 0
119
- ##########
120
-
121
- query = attn.to_q(hidden_states)
122
-
123
- if encoder_hidden_states is None:
124
- encoder_hidden_states = hidden_states
125
- elif attn.norm_cross:
126
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
127
-
128
- key = attn.to_k(encoder_hidden_states)
129
- value = attn.to_v(encoder_hidden_states)
130
-
131
- inner_dim = key.shape[-1]
132
- head_dim = inner_dim // attn.heads
133
-
134
- query = query.view(batch_size + batch_size_phrases, -1, attn.heads, head_dim).transpose(1, 2)
135
-
136
- key = key.view(batch_size + batch_size_phrases, -1, attn.heads, head_dim).transpose(1, 2)
137
- value = value.view(batch_size + batch_size_phrases, -1, attn.heads, head_dim).transpose(1, 2)
138
-
139
- if attn.norm_q is not None:
140
- query = attn.norm_q(query)
141
- if attn.norm_k is not None:
142
- key = attn.norm_k(key)
143
-
144
- hidden_states = F.scaled_dot_product_attention(
145
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
146
- )
147
-
148
- hidden_states = hidden_states.transpose(1, 2).reshape(
149
- batch_size + batch_size_phrases, -1, attn.heads * head_dim
150
- )
151
- hidden_states = hidden_states.to(query.dtype)
152
-
153
- # linear proj
154
- hidden_states = attn.to_out[0](hidden_states)
155
- # dropout
156
- hidden_states = attn.to_out[1](hidden_states)
157
-
158
- if input_ndim == 4:
159
- hidden_states = hidden_states.transpose(-1, -2).reshape(
160
- batch_size + batch_size_phrases, channel, height, width
161
- )
162
-
163
- if attn.residual_connection:
164
- hidden_states = hidden_states + residual
165
-
166
- hidden_states = hidden_states / attn.rescale_output_factor
167
-
168
- ###### Self-Attention Results ######
169
- if not is_cross:
170
- return hidden_states
171
-
172
- ###### Vanilla Cross-Attention Results ######
173
- if is_vanilla_cross:
174
- return hidden_states
175
-
176
- ###### Cross-Attention with MIGC ######
177
- # hidden_states: torch.Size([1+1+instance_num, HW, C]), the first 1 is the uncond ca output, the second 1 is the global ca output.
178
- hidden_states_uncond = hidden_states[[0], ...] # torch.Size([1, HW, C])
179
- cond_ca_output = hidden_states[1:, ...].unsqueeze(0) # torch.Size([1, 1+instance_num, 5, 64, 1280])
180
-
181
- other_info = {}
182
- other_info["image_token"] = hidden_states_cond[None, ...]
183
- other_info["box"] = in_box
184
- other_info["context_pooler"] = embeds_pooler[:, None, :] # (instance_num, 1, 768)
185
- other_info["supplement_mask"] = supplement_mask
186
- other_info["height"] = height
187
- other_info["width"] = width
188
- other_info["ca_scale"] = ca_scale
189
- other_info["ea_scale"] = ea_scale
190
- other_info["sac_scale"] = sac_scale
191
-
192
- if use_migc:
193
- assert hasattr(attn, "migc") and isinstance(attn.migc, MIGC)
194
- hidden_states_cond, _ = attn.migc(
195
- cond_ca_output, guidance_masks, other_info=other_info, return_fuser_info=True
196
- )
197
- else:
198
- hidden_states_cond, _ = self.naive_fuser(
199
- cond_ca_output, guidance_masks, other_info=other_info, return_fuser_info=True
200
- )
201
- hidden_states_cond = hidden_states_cond.squeeze(1)
202
-
203
- hidden_states = torch.cat([hidden_states_uncond, hidden_states_cond])
204
- return hidden_states
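
# Row layout in MIGCProcessor.__call__: row 0 of the attention output is the
# uncond branch, row 1 the global-prompt branch, and rows 2..instance_num+1
# the per-phrase branches; MIGC (or NaiveFuser) fuses rows 1 onward back into
# a single cond row, which is re-concatenated with the uncond row.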


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    r"""
    Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
    Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
    Flawed](https://huggingface.co/papers/2305.08891).

    Args:
        noise_cfg (`torch.Tensor`):
            The predicted noise tensor for the guided diffusion process.
        noise_pred_text (`torch.Tensor`):
            The predicted noise tensor for the text-guided diffusion process.
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            A rescale factor applied to the noise predictions.

    Returns:
        noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg
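
# Example: with guidance_rescale=0.7, the returned prediction equals
# 0.7 * noise_cfg * (std_text / std_cfg) + 0.3 * noise_cfg, i.e. the CFG
# output with its per-sample std partially matched to the text branch's std.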


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigma schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
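
# Example: retrieve_timesteps(scheduler, num_inference_steps=50, device="cuda")
# runs scheduler.set_timesteps(50, ...) and returns (scheduler.timesteps, 50);
# passing timesteps=[...] or sigmas=[...] instead overrides the scheduler's
# own spacing (the two are mutually exclusive).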


class StableDiffusionMIGCPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    TextualInversionLoaderMixin,
    StableDiffusionLoraLoaderMixin,
    FromSingleFileMixin,
):
    """
    Pipeline for layout-to-image generation using Stable Diffusion + MIGC (https://arxiv.org/abs/2402.05408).

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for
            more details about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
    _exclude_from_cpu_offload = ["safety_checker"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModelWithProjection | None = None,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        is_unet_version_less_0_9_0 = (
            unet is not None
            and hasattr(unet.config, "_diffusers_version")
            and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
        )
        self._is_unet_config_sample_size_int = unet is not None and isinstance(unet.config.sample_size, int)
        is_unet_sample_size_less_64 = (
            unet is not None
            and hasattr(unet.config, "sample_size")
            and self._is_unet_config_sample_size_int
            and unet.config.sample_size < 64
        )
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
                " \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self._register_migc_adapters(unet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

        self.default_sample_size = (
            self.unet.config.sample_size
            if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size")
            else 64
        )

    def _register_migc_adapters(self, unet: UNet2DConditionModel):
        for name, module in unet.named_modules():
            if isinstance(module, Attention):
                if "attn1" in name:
                    module.set_processor(MIGCProcessor(use_migc=False))
                elif "attn2" in name and "down" in name:
                    module.set_processor(MIGCProcessor(use_migc=False))
                elif "attn2" in name and ("up_blocks.2" in name or "up_blocks.3" in name):
                    module.set_processor(MIGCProcessor(use_migc=False))
                elif "attn2" in name and "up_blocks.1" in name:
                    module.migc = MIGC(C=1280)
                    module.register_module("migc", module.migc)
                    module.set_processor(MIGCProcessor(use_migc=True))
                elif "attn2" in name and "mid" in name:
                    module.migc = MIGC(C=1280)
                    module.register_module("migc", module.migc)
                    module.set_processor(MIGCProcessor(use_migc=True))
                else:
                    logger.warning(f"Unknown attention module: {name}")

    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt: str | List[str] | None = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
            pooled_prompt_embeds = prompt_embeds.pooler_output
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
        pooled_prompt_embeds = pooled_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds
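
    # Shape note (SD 1.5's CLIP ViT-L text encoder, hypothetical single prompt
    # with num_images_per_prompt=1): prompt_embeds and negative_prompt_embeds
    # are (1, 77, 768), pooled_prompt_embeds is (1, 768).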

    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        bboxes,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        bboxes_batch_size = -1
        if bboxes is not None:
            if isinstance(bboxes, list):
                if isinstance(bboxes[0], list):
                    if (
                        isinstance(bboxes[0][0], list)
                        and len(bboxes[0][0]) == 4
                        and all(isinstance(x, float) for x in bboxes[0][0])
                    ):
                        bboxes_batch_size = len(bboxes)
                    elif (
                        isinstance(bboxes[0], list)
                        and len(bboxes[0]) == 4
                        and all(isinstance(x, float) for x in bboxes[0])
                    ):
                        bboxes_batch_size = 1
                    else:
                        raise TypeError(
                            "`bboxes` must be a list of boxes (each a list of four floats) or a batched list of such"
                            " lists of boxes."
                        )
                else:
                    raise TypeError(
                        "`bboxes` must be a list of boxes (each a list of four floats) or a batched list of such"
                        " lists of boxes."
                    )
            else:
                raise TypeError(
                    "`bboxes` must be a list of boxes (each a list of four floats) or a batched list of such"
                    " lists of boxes."
                )

        if prompt is not None and isinstance(prompt, str):
            prompt_batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            prompt_batch_size = len(prompt)
        elif prompt_embeds is not None:
            prompt_batch_size = prompt_embeds.shape[0]
        else:
            raise ValueError("Cannot determine batch size from `prompt` or `prompt_embeds`.")

        if bboxes_batch_size != prompt_batch_size:
            raise ValueError(
                f"bbox batch size must be the same as the prompt batch size. bbox batch size: {bboxes_batch_size},"
                f" prompt batch size: {prompt_batch_size}"
            )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents
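
    # Shape note: for SD 1.5 (num_channels_latents=4, vae_scale_factor=8), a
    # 512x512 request yields latents of shape (batch_size, 4, 64, 64), scaled
    # by the scheduler's init_noise_sigma.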

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(
        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
    ) -> torch.Tensor:
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
            embedding_dim (`int`, *optional*, defaults to 512):
                Dimension of the embeddings to generate.
            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
                Data type of the generated embeddings.

        Returns:
            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb
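
    # Example: get_guidance_scale_embedding(torch.tensor([7.5]), embedding_dim=256)
    # returns a (1, 256) sinusoidal embedding of w * 1000, the same construction
    # as standard timestep embeddings.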

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    def __call__(
        self,
        prompt: str,
        phrases: List[str],
        bboxes: List[List[float]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        MIGCsteps=20,
        NaiveFuserSteps=-1,
        ca_scale: float | None = None,
        ea_scale: float | None = None,
        sac_scale: float | None = None,
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            phrases (`List[str]`):
                The text description of each instance to be placed in the image, one phrase per bounding box.
            bboxes (`List[List[float]]`):
                The normalized bounding boxes, one per phrase, in `[x_min, y_min, x_max, y_max]` format, that define
                the layout of the generated image.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only
                applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during the inference, with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """

        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. Default height and width to unet
        if not height or not width:
            height = (
                self.unet.config.sample_size
                if self._is_unet_config_sample_size_int
                else self.unet.config.sample_size[0]
            )
            width = (
                self.unet.config.sample_size
                if self._is_unet_config_sample_size_int
                else self.unet.config.sample_size[1]
            )
            height, width = height * self.vae_scale_factor, width * self.vae_scale_factor
        # to deal with lora scaling and other possible forward hooks

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            bboxes,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]
        if batch_size > 1:
            raise NotImplementedError("Batch processing is not supported yet.")

        device = self._execution_device

        # 3. Encode input prompt
        lora_scale = self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None

        prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
        )

        phrases_embeds, _, pooled_phrases_embeds = self.encode_prompt(
            phrases,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance=False,
            lora_scale=lora_scale,
        )

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if self.do_classifier_free_guidance:
            assert isinstance(negative_prompt_embeds, torch.Tensor)
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 5.1 Prepare guidance masks
        guidance_masks = []
        in_box = []
        # Construct Instance Guidance Mask
        for bbox in bboxes:
            guidance_mask = np.zeros((height, width))
            w_min = int(width * bbox[0])
            w_max = int(width * bbox[2])
            h_min = int(height * bbox[1])
            h_max = int(height * bbox[3])
            guidance_mask[h_min:h_max, w_min:w_max] = 1.0
            guidance_masks.append(guidance_mask[None, ...])
            in_box.append([bbox[0], bbox[2], bbox[1], bbox[3]])

        # Construct Background Guidance Mask
        sup_mask = get_sup_mask(guidance_masks)
        supplement_mask = torch.from_numpy(sup_mask[None, ...])
        supplement_mask = F.interpolate(supplement_mask, (height // 8, width // 8), mode="bilinear")
        supplement_mask = supplement_mask.to(device=device, dtype=self.unet.dtype)  # (1, 1, H, W)

        guidance_masks = np.concatenate(guidance_masks, axis=0)
        guidance_masks = guidance_masks[None, ...]
        guidance_masks = torch.from_numpy(guidance_masks).to(device=device, dtype=self.unet.dtype)
        guidance_masks = F.interpolate(
            guidance_masks, (height // 8, width // 8), mode="bilinear"
        )  # (1, instance_num, H, W)

        in_box = torch.from_numpy(np.array(in_box))[None, ...].to(
            device=device, dtype=self.unet.dtype
        )  # (1, instance_num, 4)
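
        # At this point guidance_masks is (1, instance_num, H/8, W/8) with ones
        # inside each box, supplement_mask is (1, 1, H/8, W/8) marking the
        # background no box covers, and in_box stores each box reordered as
        # (x_min, x_max, y_min, y_max).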

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                if hasattr(self.scheduler, "scale_model_input"):
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                cross_attention_kwargs = {
                    "bboxes": bboxes,
                    "ith": i,
                    "embeds_pooler": torch.cat([pooled_prompt_embeds, pooled_phrases_embeds]),
                    "encoder_hidden_states_phrases": phrases_embeds,
                    "height": height,
                    "width": width,
                    "MIGCsteps": MIGCsteps,
                    "NaiveFuserSteps": NaiveFuserSteps,
                    "ca_scale": ca_scale,
                    "ea_scale": ea_scale,
                    "sac_scale": sac_scale,
                    "guidance_masks": guidance_masks,
                    "supplement_mask": supplement_mask,
                    "in_box": in_box,
                }

                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    timestep_cond=timestep_cond,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://huggingface.co/papers/2305.08891
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
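
For context, a minimal sketch of how this pipeline was typically driven before its removal. The checkpoint id, box coordinates, and output path below are illustrative assumptions, the import assumes the file is still on the Python path, and the freshly registered MIGC modules would still need their trained weights loaded separately:

    import torch
    from pipeline_stable_diffusion_migc import StableDiffusionMIGCPipeline

    pipe = StableDiffusionMIGCPipeline.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    image = pipe(
        prompt="a cat and a dog playing on the grass",
        phrases=["a cat", "a dog"],
        # one normalized [x_min, y_min, x_max, y_max] box per phrase
        bboxes=[[0.05, 0.3, 0.45, 0.9], [0.55, 0.3, 0.95, 0.9]],
        num_inference_steps=50,
        MIGCsteps=20,
    ).images[0]
    image.save("migc_sample.png")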