ZTWHHH committed on
Commit
222ac2b
·
verified ·
1 Parent(s): 78c4e92

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/__init__.cpython-310.pyc +0 -0
  2. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused.cpython-310.pyc +0 -0
  3. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused_inpaint.cpython-310.pyc +0 -0
  4. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused.py +328 -0
  5. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
  6. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__init__.py +80 -0
  7. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/__init__.cpython-310.pyc +0 -0
  8. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/multicontrolnet.cpython-310.pyc +0 -0
  9. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet.cpython-310.pyc +0 -0
  10. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_blip_diffusion.cpython-310.pyc +0 -0
  11. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_img2img.cpython-310.pyc +0 -0
  12. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_inpaint.cpython-310.pyc +0 -0
  13. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_inpaint_sd_xl.cpython-310.pyc +0 -0
  14. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-310.pyc +0 -0
  15. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl_img2img.cpython-310.pyc +0 -0
  16. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_flax_controlnet.cpython-310.pyc +0 -0
  17. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/multicontrolnet.py +187 -0
  18. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet.py +1318 -0
  19. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +413 -0
  20. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +1310 -0
  21. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +1620 -0
  22. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +1818 -0
  23. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +1499 -0
  24. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +1626 -0
  25. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +532 -0
  26. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__init__.py +85 -0
  27. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/__init__.cpython-310.pyc +0 -0
  28. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if.cpython-310.pyc +0 -0
  29. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_img2img.cpython-310.pyc +0 -0
  30. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_img2img_superresolution.cpython-310.pyc +0 -0
  31. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_inpainting.cpython-310.pyc +0 -0
  32. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_inpainting_superresolution.cpython-310.pyc +0 -0
  33. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_superresolution.cpython-310.pyc +0 -0
  34. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_output.cpython-310.pyc +0 -0
  35. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/safety_checker.cpython-310.pyc +0 -0
  36. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/timesteps.cpython-310.pyc +0 -0
  37. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/watermark.cpython-310.pyc +0 -0
  38. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if.py +788 -0
  39. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +910 -0
  40. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +1029 -0
  41. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +1030 -0
  42. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +1137 -0
  43. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +885 -0
  44. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_output.py +28 -0
  45. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/safety_checker.py +59 -0
  46. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/timesteps.py +579 -0
  47. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/watermark.py +46 -0
  48. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/dit/__init__.py +19 -0
  49. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/dit/__pycache__/__init__.cpython-310.pyc +0 -0
  50. evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/dit/__pycache__/pipeline_dit.cpython-310.pyc +0 -0
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused_inpaint.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused.py ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ from transformers import CLIPTextModelWithProjection, CLIPTokenizer
19
+
20
+ from ...image_processor import VaeImageProcessor
21
+ from ...models import UVit2DModel, VQModel
22
+ from ...schedulers import AmusedScheduler
23
+ from ...utils import replace_example_docstring
24
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
25
+
26
+
27
# Usage example substituted into `AmusedPipeline.__call__`'s docstring via the
# `@replace_example_docstring` decorator below.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import AmusedPipeline

        >>> pipe = AmusedPipeline.from_pretrained(
        ...     "amused/amused-512", variant="fp16", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        ```
"""
42
+
43
+
44
+ class AmusedPipeline(DiffusionPipeline):
45
+ image_processor: VaeImageProcessor
46
+ vqvae: VQModel
47
+ tokenizer: CLIPTokenizer
48
+ text_encoder: CLIPTextModelWithProjection
49
+ transformer: UVit2DModel
50
+ scheduler: AmusedScheduler
51
+
52
+ model_cpu_offload_seq = "text_encoder->transformer->vqvae"
53
+
54
+ def __init__(
55
+ self,
56
+ vqvae: VQModel,
57
+ tokenizer: CLIPTokenizer,
58
+ text_encoder: CLIPTextModelWithProjection,
59
+ transformer: UVit2DModel,
60
+ scheduler: AmusedScheduler,
61
+ ):
62
+ super().__init__()
63
+
64
+ self.register_modules(
65
+ vqvae=vqvae,
66
+ tokenizer=tokenizer,
67
+ text_encoder=text_encoder,
68
+ transformer=transformer,
69
+ scheduler=scheduler,
70
+ )
71
+ self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
72
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False)
73
+
74
+ @torch.no_grad()
75
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
76
+ def __call__(
77
+ self,
78
+ prompt: Optional[Union[List[str], str]] = None,
79
+ height: Optional[int] = None,
80
+ width: Optional[int] = None,
81
+ num_inference_steps: int = 12,
82
+ guidance_scale: float = 10.0,
83
+ negative_prompt: Optional[Union[str, List[str]]] = None,
84
+ num_images_per_prompt: Optional[int] = 1,
85
+ generator: Optional[torch.Generator] = None,
86
+ latents: Optional[torch.IntTensor] = None,
87
+ prompt_embeds: Optional[torch.Tensor] = None,
88
+ encoder_hidden_states: Optional[torch.Tensor] = None,
89
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
90
+ negative_encoder_hidden_states: Optional[torch.Tensor] = None,
91
+ output_type="pil",
92
+ return_dict: bool = True,
93
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
94
+ callback_steps: int = 1,
95
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
96
+ micro_conditioning_aesthetic_score: int = 6,
97
+ micro_conditioning_crop_coord: Tuple[int, int] = (0, 0),
98
+ temperature: Union[int, Tuple[int, int], List[int]] = (2, 0),
99
+ ):
100
+ """
101
+ The call function to the pipeline for generation.
102
+
103
+ Args:
104
+ prompt (`str` or `List[str]`, *optional*):
105
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
106
+ height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
107
+ The height in pixels of the generated image.
108
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
109
+ The width in pixels of the generated image.
110
+ num_inference_steps (`int`, *optional*, defaults to 16):
111
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
112
+ expense of slower inference.
113
+ guidance_scale (`float`, *optional*, defaults to 10.0):
114
+ A higher guidance scale value encourages the model to generate images closely linked to the text
115
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
116
+ negative_prompt (`str` or `List[str]`, *optional*):
117
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
118
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
119
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
120
+ The number of images to generate per prompt.
121
+ generator (`torch.Generator`, *optional*):
122
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
123
+ generation deterministic.
124
+ latents (`torch.IntTensor`, *optional*):
125
+ Pre-generated tokens representing latent vectors in `self.vqvae`, to be used as inputs for image
126
+ gneration. If not provided, the starting latents will be completely masked.
127
+ prompt_embeds (`torch.FloatTensor`, *optional*):
128
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
129
+ provided, text embeddings are generated from the `prompt` input argument. A single vector from the
130
+ pooled and projected final hidden states.
131
+ encoder_hidden_states (`torch.FloatTensor`, *optional*):
132
+ Pre-generated penultimate hidden states from the text encoder providing additional text conditioning.
133
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
134
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
135
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
136
+ negative_encoder_hidden_states (`torch.FloatTensor`, *optional*):
137
+ Analogous to `encoder_hidden_states` for the positive prompt.
138
+ output_type (`str`, *optional*, defaults to `"pil"`):
139
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
140
+ return_dict (`bool`, *optional*, defaults to `True`):
141
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
142
+ plain tuple.
143
+ callback (`Callable`, *optional*):
144
+ A function that calls every `callback_steps` steps during inference. The function is called with the
145
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
146
+ callback_steps (`int`, *optional*, defaults to 1):
147
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
148
+ every step.
149
+ cross_attention_kwargs (`dict`, *optional*):
150
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
151
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
152
+ micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6):
153
+ The targeted aesthetic score according to the laion aesthetic classifier. See https://laion.ai/blog/laion-aesthetics/
154
+ and the micro-conditioning section of https://arxiv.org/abs/2307.01952.
155
+ micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)):
156
+ The targeted height, width crop coordinates. See the micro-conditioning section of https://arxiv.org/abs/2307.01952.
157
+ temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)):
158
+ Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`.
159
+
160
+ Examples:
161
+
162
+ Returns:
163
+ [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
164
+ If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a
165
+ `tuple` is returned where the first element is a list with the generated images.
166
+ """
167
+ if (prompt_embeds is not None and encoder_hidden_states is None) or (
168
+ prompt_embeds is None and encoder_hidden_states is not None
169
+ ):
170
+ raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither")
171
+
172
+ if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or (
173
+ negative_prompt_embeds is None and negative_encoder_hidden_states is not None
174
+ ):
175
+ raise ValueError(
176
+ "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither"
177
+ )
178
+
179
+ if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None):
180
+ raise ValueError("pass only one of `prompt` or `prompt_embeds`")
181
+
182
+ if isinstance(prompt, str):
183
+ prompt = [prompt]
184
+
185
+ if prompt is not None:
186
+ batch_size = len(prompt)
187
+ else:
188
+ batch_size = prompt_embeds.shape[0]
189
+
190
+ batch_size = batch_size * num_images_per_prompt
191
+
192
+ if height is None:
193
+ height = self.transformer.config.sample_size * self.vae_scale_factor
194
+
195
+ if width is None:
196
+ width = self.transformer.config.sample_size * self.vae_scale_factor
197
+
198
+ if prompt_embeds is None:
199
+ input_ids = self.tokenizer(
200
+ prompt,
201
+ return_tensors="pt",
202
+ padding="max_length",
203
+ truncation=True,
204
+ max_length=self.tokenizer.model_max_length,
205
+ ).input_ids.to(self._execution_device)
206
+
207
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
208
+ prompt_embeds = outputs.text_embeds
209
+ encoder_hidden_states = outputs.hidden_states[-2]
210
+
211
+ prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1)
212
+ encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
213
+
214
+ if guidance_scale > 1.0:
215
+ if negative_prompt_embeds is None:
216
+ if negative_prompt is None:
217
+ negative_prompt = [""] * len(prompt)
218
+
219
+ if isinstance(negative_prompt, str):
220
+ negative_prompt = [negative_prompt]
221
+
222
+ input_ids = self.tokenizer(
223
+ negative_prompt,
224
+ return_tensors="pt",
225
+ padding="max_length",
226
+ truncation=True,
227
+ max_length=self.tokenizer.model_max_length,
228
+ ).input_ids.to(self._execution_device)
229
+
230
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
231
+ negative_prompt_embeds = outputs.text_embeds
232
+ negative_encoder_hidden_states = outputs.hidden_states[-2]
233
+
234
+ negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1)
235
+ negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
236
+
237
+ prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds])
238
+ encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states])
239
+
240
+ # Note that the micro conditionings _do_ flip the order of width, height for the original size
241
+ # and the crop coordinates. This is how it was done in the original code base
242
+ micro_conds = torch.tensor(
243
+ [
244
+ width,
245
+ height,
246
+ micro_conditioning_crop_coord[0],
247
+ micro_conditioning_crop_coord[1],
248
+ micro_conditioning_aesthetic_score,
249
+ ],
250
+ device=self._execution_device,
251
+ dtype=encoder_hidden_states.dtype,
252
+ )
253
+ micro_conds = micro_conds.unsqueeze(0)
254
+ micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1)
255
+
256
+ shape = (batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor)
257
+
258
+ if latents is None:
259
+ latents = torch.full(
260
+ shape, self.scheduler.config.mask_token_id, dtype=torch.long, device=self._execution_device
261
+ )
262
+
263
+ self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device)
264
+
265
+ num_warmup_steps = len(self.scheduler.timesteps) - num_inference_steps * self.scheduler.order
266
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
267
+ for i, timestep in enumerate(self.scheduler.timesteps):
268
+ if guidance_scale > 1.0:
269
+ model_input = torch.cat([latents] * 2)
270
+ else:
271
+ model_input = latents
272
+
273
+ model_output = self.transformer(
274
+ model_input,
275
+ micro_conds=micro_conds,
276
+ pooled_text_emb=prompt_embeds,
277
+ encoder_hidden_states=encoder_hidden_states,
278
+ cross_attention_kwargs=cross_attention_kwargs,
279
+ )
280
+
281
+ if guidance_scale > 1.0:
282
+ uncond_logits, cond_logits = model_output.chunk(2)
283
+ model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits)
284
+
285
+ latents = self.scheduler.step(
286
+ model_output=model_output,
287
+ timestep=timestep,
288
+ sample=latents,
289
+ generator=generator,
290
+ ).prev_sample
291
+
292
+ if i == len(self.scheduler.timesteps) - 1 or (
293
+ (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
294
+ ):
295
+ progress_bar.update()
296
+ if callback is not None and i % callback_steps == 0:
297
+ step_idx = i // getattr(self.scheduler, "order", 1)
298
+ callback(step_idx, timestep, latents)
299
+
300
+ if output_type == "latent":
301
+ output = latents
302
+ else:
303
+ needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast
304
+
305
+ if needs_upcasting:
306
+ self.vqvae.float()
307
+
308
+ output = self.vqvae.decode(
309
+ latents,
310
+ force_not_quantize=True,
311
+ shape=(
312
+ batch_size,
313
+ height // self.vae_scale_factor,
314
+ width // self.vae_scale_factor,
315
+ self.vqvae.config.latent_channels,
316
+ ),
317
+ ).sample.clip(0, 1)
318
+ output = self.image_processor.postprocess(output, output_type)
319
+
320
+ if needs_upcasting:
321
+ self.vqvae.half()
322
+
323
+ self.maybe_free_model_hooks()
324
+
325
+ if not return_dict:
326
+ return (output,)
327
+
328
+ return ImagePipelineOutput(output)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused_inpaint.py ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from transformers import CLIPTextModelWithProjection, CLIPTokenizer
20
+
21
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
22
+ from ...models import UVit2DModel, VQModel
23
+ from ...schedulers import AmusedScheduler
24
+ from ...utils import replace_example_docstring
25
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
26
+
27
+
28
+ EXAMPLE_DOC_STRING = """
29
+ Examples:
30
+ ```py
31
+ >>> import torch
32
+ >>> from diffusers import AmusedInpaintPipeline
33
+ >>> from diffusers.utils import load_image
34
+
35
+ >>> pipe = AmusedInpaintPipeline.from_pretrained(
36
+ ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16
37
+ ... )
38
+ >>> pipe = pipe.to("cuda")
39
+
40
+ >>> prompt = "fall mountains"
41
+ >>> input_image = (
42
+ ... load_image(
43
+ ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg"
44
+ ... )
45
+ ... .resize((512, 512))
46
+ ... .convert("RGB")
47
+ ... )
48
+ >>> mask = (
49
+ ... load_image(
50
+ ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"
51
+ ... )
52
+ ... .resize((512, 512))
53
+ ... .convert("L")
54
+ ... )
55
+ >>> pipe(prompt, input_image, mask).images[0].save("out.png")
56
+ ```
57
+ """
58
+
59
+
60
+ class AmusedInpaintPipeline(DiffusionPipeline):
61
+ image_processor: VaeImageProcessor
62
+ vqvae: VQModel
63
+ tokenizer: CLIPTokenizer
64
+ text_encoder: CLIPTextModelWithProjection
65
+ transformer: UVit2DModel
66
+ scheduler: AmusedScheduler
67
+
68
+ model_cpu_offload_seq = "text_encoder->transformer->vqvae"
69
+
70
+ # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before
71
+ # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter
72
+ # off the meta device. There should be a way to fix this instead of just not offloading it
73
+ _exclude_from_cpu_offload = ["vqvae"]
74
+
75
+ def __init__(
76
+ self,
77
+ vqvae: VQModel,
78
+ tokenizer: CLIPTokenizer,
79
+ text_encoder: CLIPTextModelWithProjection,
80
+ transformer: UVit2DModel,
81
+ scheduler: AmusedScheduler,
82
+ ):
83
+ super().__init__()
84
+
85
+ self.register_modules(
86
+ vqvae=vqvae,
87
+ tokenizer=tokenizer,
88
+ text_encoder=text_encoder,
89
+ transformer=transformer,
90
+ scheduler=scheduler,
91
+ )
92
+ self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
93
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False)
94
+ self.mask_processor = VaeImageProcessor(
95
+ vae_scale_factor=self.vae_scale_factor,
96
+ do_normalize=False,
97
+ do_binarize=True,
98
+ do_convert_grayscale=True,
99
+ do_resize=True,
100
+ )
101
+ self.scheduler.register_to_config(masking_schedule="linear")
102
+
103
+ @torch.no_grad()
104
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
105
+ def __call__(
106
+ self,
107
+ prompt: Optional[Union[List[str], str]] = None,
108
+ image: PipelineImageInput = None,
109
+ mask_image: PipelineImageInput = None,
110
+ strength: float = 1.0,
111
+ num_inference_steps: int = 12,
112
+ guidance_scale: float = 10.0,
113
+ negative_prompt: Optional[Union[str, List[str]]] = None,
114
+ num_images_per_prompt: Optional[int] = 1,
115
+ generator: Optional[torch.Generator] = None,
116
+ prompt_embeds: Optional[torch.Tensor] = None,
117
+ encoder_hidden_states: Optional[torch.Tensor] = None,
118
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
119
+ negative_encoder_hidden_states: Optional[torch.Tensor] = None,
120
+ output_type="pil",
121
+ return_dict: bool = True,
122
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
123
+ callback_steps: int = 1,
124
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
125
+ micro_conditioning_aesthetic_score: int = 6,
126
+ micro_conditioning_crop_coord: Tuple[int, int] = (0, 0),
127
+ temperature: Union[int, Tuple[int, int], List[int]] = (2, 0),
128
+ ):
129
+ """
130
+ The call function to the pipeline for generation.
131
+
132
+ Args:
133
+ prompt (`str` or `List[str]`, *optional*):
134
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
135
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
136
+ `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
137
+ numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
138
+ or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
139
+ list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
140
+ latents as `image`, but if passing latents directly it is not encoded again.
141
+ mask_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
142
+ `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask
143
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
144
+ single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
145
+ color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B,
146
+ H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
147
+ 1)`, or `(H, W)`.
148
+ strength (`float`, *optional*, defaults to 1.0):
149
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
150
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
151
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
152
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
153
+ essentially ignores `image`.
154
+ num_inference_steps (`int`, *optional*, defaults to 16):
155
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
156
+ expense of slower inference.
157
+ guidance_scale (`float`, *optional*, defaults to 10.0):
158
+ A higher guidance scale value encourages the model to generate images closely linked to the text
159
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
160
+ negative_prompt (`str` or `List[str]`, *optional*):
161
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
162
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
163
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
164
+ The number of images to generate per prompt.
165
+ generator (`torch.Generator`, *optional*):
166
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
167
+ generation deterministic.
168
+ prompt_embeds (`torch.FloatTensor`, *optional*):
169
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
170
+ provided, text embeddings are generated from the `prompt` input argument. A single vector from the
171
+ pooled and projected final hidden states.
172
+ encoder_hidden_states (`torch.FloatTensor`, *optional*):
173
+ Pre-generated penultimate hidden states from the text encoder providing additional text conditioning.
174
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
175
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
176
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
177
+ negative_encoder_hidden_states (`torch.FloatTensor`, *optional*):
178
+ Analogous to `encoder_hidden_states` for the positive prompt.
179
+ output_type (`str`, *optional*, defaults to `"pil"`):
180
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
181
+ return_dict (`bool`, *optional*, defaults to `True`):
182
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
183
+ plain tuple.
184
+ callback (`Callable`, *optional*):
185
+ A function that calls every `callback_steps` steps during inference. The function is called with the
186
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
187
+ callback_steps (`int`, *optional*, defaults to 1):
188
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
189
+ every step.
190
+ cross_attention_kwargs (`dict`, *optional*):
191
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
192
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
193
+ micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6):
194
+ The targeted aesthetic score according to the laion aesthetic classifier. See https://laion.ai/blog/laion-aesthetics/
195
+ and the micro-conditioning section of https://arxiv.org/abs/2307.01952.
196
+ micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)):
197
+ The targeted height, width crop coordinates. See the micro-conditioning section of https://arxiv.org/abs/2307.01952.
198
+ temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)):
199
+ Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`.
200
+
201
+ Examples:
202
+
203
+ Returns:
204
+ [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
205
+ If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a
206
+ `tuple` is returned where the first element is a list with the generated images.
207
+ """
208
+
209
+ if (prompt_embeds is not None and encoder_hidden_states is None) or (
210
+ prompt_embeds is None and encoder_hidden_states is not None
211
+ ):
212
+ raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither")
213
+
214
+ if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or (
215
+ negative_prompt_embeds is None and negative_encoder_hidden_states is not None
216
+ ):
217
+ raise ValueError(
218
+ "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither"
219
+ )
220
+
221
+ if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None):
222
+ raise ValueError("pass only one of `prompt` or `prompt_embeds`")
223
+
224
+ if isinstance(prompt, str):
225
+ prompt = [prompt]
226
+
227
+ if prompt is not None:
228
+ batch_size = len(prompt)
229
+ else:
230
+ batch_size = prompt_embeds.shape[0]
231
+
232
+ batch_size = batch_size * num_images_per_prompt
233
+
234
+ if prompt_embeds is None:
235
+ input_ids = self.tokenizer(
236
+ prompt,
237
+ return_tensors="pt",
238
+ padding="max_length",
239
+ truncation=True,
240
+ max_length=self.tokenizer.model_max_length,
241
+ ).input_ids.to(self._execution_device)
242
+
243
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
244
+ prompt_embeds = outputs.text_embeds
245
+ encoder_hidden_states = outputs.hidden_states[-2]
246
+
247
+ prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1)
248
+ encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
249
+
250
+ if guidance_scale > 1.0:
251
+ if negative_prompt_embeds is None:
252
+ if negative_prompt is None:
253
+ negative_prompt = [""] * len(prompt)
254
+
255
+ if isinstance(negative_prompt, str):
256
+ negative_prompt = [negative_prompt]
257
+
258
+ input_ids = self.tokenizer(
259
+ negative_prompt,
260
+ return_tensors="pt",
261
+ padding="max_length",
262
+ truncation=True,
263
+ max_length=self.tokenizer.model_max_length,
264
+ ).input_ids.to(self._execution_device)
265
+
266
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
267
+ negative_prompt_embeds = outputs.text_embeds
268
+ negative_encoder_hidden_states = outputs.hidden_states[-2]
269
+
270
+ negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1)
271
+ negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
272
+
273
+ prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds])
274
+ encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states])
275
+
276
+ image = self.image_processor.preprocess(image)
277
+
278
+ height, width = image.shape[-2:]
279
+
280
+ # Note that the micro conditionings _do_ flip the order of width, height for the original size
281
+ # and the crop coordinates. This is how it was done in the original code base
282
+ micro_conds = torch.tensor(
283
+ [
284
+ width,
285
+ height,
286
+ micro_conditioning_crop_coord[0],
287
+ micro_conditioning_crop_coord[1],
288
+ micro_conditioning_aesthetic_score,
289
+ ],
290
+ device=self._execution_device,
291
+ dtype=encoder_hidden_states.dtype,
292
+ )
293
+
294
+ micro_conds = micro_conds.unsqueeze(0)
295
+ micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1)
296
+
297
+ self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device)
298
+ num_inference_steps = int(len(self.scheduler.timesteps) * strength)
299
+ start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps
300
+
301
+ needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast
302
+
303
+ if needs_upcasting:
304
+ self.vqvae.float()
305
+
306
+ latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents
307
+ latents_bsz, channels, latents_height, latents_width = latents.shape
308
+ latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width)
309
+
310
+ mask = self.mask_processor.preprocess(
311
+ mask_image, height // self.vae_scale_factor, width // self.vae_scale_factor
312
+ )
313
+ mask = mask.reshape(mask.shape[0], latents_height, latents_width).bool().to(latents.device)
314
+ latents[mask] = self.scheduler.config.mask_token_id
315
+
316
+ starting_mask_ratio = mask.sum() / latents.numel()
317
+
318
+ latents = latents.repeat(num_images_per_prompt, 1, 1)
319
+
320
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
321
+ for i in range(start_timestep_idx, len(self.scheduler.timesteps)):
322
+ timestep = self.scheduler.timesteps[i]
323
+
324
+ if guidance_scale > 1.0:
325
+ model_input = torch.cat([latents] * 2)
326
+ else:
327
+ model_input = latents
328
+
329
+ model_output = self.transformer(
330
+ model_input,
331
+ micro_conds=micro_conds,
332
+ pooled_text_emb=prompt_embeds,
333
+ encoder_hidden_states=encoder_hidden_states,
334
+ cross_attention_kwargs=cross_attention_kwargs,
335
+ )
336
+
337
+ if guidance_scale > 1.0:
338
+ uncond_logits, cond_logits = model_output.chunk(2)
339
+ model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits)
340
+
341
+ latents = self.scheduler.step(
342
+ model_output=model_output,
343
+ timestep=timestep,
344
+ sample=latents,
345
+ generator=generator,
346
+ starting_mask_ratio=starting_mask_ratio,
347
+ ).prev_sample
348
+
349
+ if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
350
+ progress_bar.update()
351
+ if callback is not None and i % callback_steps == 0:
352
+ step_idx = i // getattr(self.scheduler, "order", 1)
353
+ callback(step_idx, timestep, latents)
354
+
355
+ if output_type == "latent":
356
+ output = latents
357
+ else:
358
+ output = self.vqvae.decode(
359
+ latents,
360
+ force_not_quantize=True,
361
+ shape=(
362
+ batch_size,
363
+ height // self.vae_scale_factor,
364
+ width // self.vae_scale_factor,
365
+ self.vqvae.config.latent_channels,
366
+ ),
367
+ ).sample.clip(0, 1)
368
+ output = self.image_processor.postprocess(output, output_type)
369
+
370
+ if needs_upcasting:
371
+ self.vqvae.half()
372
+
373
+ self.maybe_free_model_hooks()
374
+
375
+ if not return_dict:
376
+ return (output,)
377
+
378
+ return ImagePipelineOutput(output)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__init__.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


# Public names whose optional dependencies are missing, mapped to "dummy"
# placeholder objects that raise an informative error when instantiated.
_dummy_objects = {}
# Submodule name -> list of public names; consumed by _LazyModule below so
# submodules are only imported on first attribute access.
_import_structure = {}

# PyTorch + transformers pipelines.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Keep the names importable even without torch/transformers installed;
    # using them then raises a clear missing-dependency error.
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["multicontrolnet"] = ["MultiControlNetModel"]
    _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"]
    _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"]
    _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"]
    _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"]
    _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"]
    _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"]
    _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"]
# Flax + transformers pipeline.
try:
    if not (is_transformers_available() and is_flax_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_flax_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects))
else:
    _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager-import path: used by static type checkers, or when the user
    # disables lazy imports via DIFFUSERS_SLOW_IMPORT.
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()

    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .multicontrolnet import MultiControlNetModel
        from .pipeline_controlnet import StableDiffusionControlNetPipeline
        from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline
        from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
        from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
        from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline
        from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
        from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline

    try:
        if not (is_transformers_available() and is_flax_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_flax_and_transformers_objects import *  # noqa F403
    else:
        from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline


else:
    import sys

    # Lazy-import path: replace this module object with a _LazyModule that
    # resolves the entries of _import_structure on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )
    # Attach the dummy placeholders for any names whose deps were missing.
    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/multicontrolnet.cpython-310.pyc ADDED
Binary file (8.67 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet.cpython-310.pyc ADDED
Binary file (42.1 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_blip_diffusion.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_img2img.cpython-310.pyc ADDED
Binary file (41.3 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_inpaint.cpython-310.pyc ADDED
Binary file (50.4 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_inpaint_sd_xl.cpython-310.pyc ADDED
Binary file (54.5 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-310.pyc ADDED
Binary file (47.9 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl_img2img.cpython-310.pyc ADDED
Binary file (52.8 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__pycache__/pipeline_flax_controlnet.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/multicontrolnet.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+ from ...models.controlnet import ControlNetModel, ControlNetOutput
8
+ from ...models.modeling_utils import ModelMixin
9
+ from ...utils import logging
10
+
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+
15
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.

    This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be
    compatible with `ControlNetModel`.

    Args:
        controlnets (`List[ControlNetModel]`):
            Provides additional conditioning to the unet during the denoising process. You must set multiple
            `ControlNetModel` as a list.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        # nn.ModuleList registers each controlnet so parameters, dtype casts
        # and device moves propagate through this wrapper.
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run every wrapped controlnet and sum their residuals.

        Each controlnet receives its own conditioning image and scale
        (`controlnet_cond[i]`, `conditioning_scale[i]`); all other arguments
        are shared. The per-net down-block and mid-block residuals are added
        element-wise across nets, and the summed `(down_block_res_samples,
        mid_block_res_sample)` tuple is returned.

        NOTE(review): the result of each controlnet call is unpacked as a
        2-tuple, which matches the `return_dict=False` layout — callers appear
        expected to pass `return_dict=False`; confirm behavior for
        `return_dict=True`. Also assumes `self.nets` is non-empty, otherwise
        the merge variables are never assigned — TODO confirm upstream callers
        guarantee this.
        """
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                added_cond_kwargs=added_cond_kwargs,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                # Element-wise sum of the per-net residuals.
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = True,
        variant: Optional[str] = None,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        [`~pipelines.controlnet.MultiControlNetModel.from_pretrained`] class method.

        The first controlnet is saved under `save_directory`, the following ones under
        `save_directory_1`, `save_directory_2`, ... — the layout `from_pretrained` expects.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful when in distributed training like
                TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
                the main process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
                need to replace `torch.save` by another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
            variant (`str`, *optional*):
                If specified, weights are saved in the format pytorch_model.<variant>.bin.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            # Next controlnet goes to `<save_directory>_<idx>`.
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you should first set it back in training mode with `model.train()`.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_path (`os.PathLike`):
                A path to a *directory* containing model weights saved using
                [`~diffusers.pipelines.controlnet.MultiControlNetModel.save_pretrained`], e.g.,
                `./my_model_directory/controlnet`.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
                will be automatically derived from the model's weights.
            output_loading_info(`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device.

                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
                GPU and the available CPU RAM if unset.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
                setting this argument to `True` will raise an error.
            variant (`str`, *optional*):
                If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
                ignored when using `from_flax`.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
                `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
                `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
        """
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet.py ADDED
@@ -0,0 +1,1318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
24
+
25
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
26
+ from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
27
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
28
+ from ...models.lora import adjust_lora_scale_text_encoder
29
+ from ...schedulers import KarrasDiffusionSchedulers
30
+ from ...utils import (
31
+ USE_PEFT_BACKEND,
32
+ deprecate,
33
+ logging,
34
+ replace_example_docstring,
35
+ scale_lora_layers,
36
+ unscale_lora_layers,
37
+ )
38
+ from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
39
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
40
+ from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
41
+ from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
42
+ from .multicontrolnet import MultiControlNetModel
43
+
44
+
45
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
46
+
47
+
48
+ EXAMPLE_DOC_STRING = """
49
+ Examples:
50
+ ```py
51
+ >>> # !pip install opencv-python transformers accelerate
52
+ >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
53
+ >>> from diffusers.utils import load_image
54
+ >>> import numpy as np
55
+ >>> import torch
56
+
57
+ >>> import cv2
58
+ >>> from PIL import Image
59
+
60
+ >>> # download an image
61
+ >>> image = load_image(
62
+ ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
63
+ ... )
64
+ >>> image = np.array(image)
65
+
66
+ >>> # get canny image
67
+ >>> image = cv2.Canny(image, 100, 200)
68
+ >>> image = image[:, :, None]
69
+ >>> image = np.concatenate([image, image, image], axis=2)
70
+ >>> canny_image = Image.fromarray(image)
71
+
72
+ >>> # load control net and stable diffusion v1-5
73
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
74
+ >>> pipe = StableDiffusionControlNetPipeline.from_pretrained(
75
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
76
+ ... )
77
+
78
+ >>> # speed up diffusion process with faster scheduler and memory optimization
79
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
80
+ >>> # remove following line if xformers is not installed
81
+ >>> pipe.enable_xformers_memory_efficient_attention()
82
+
83
+ >>> pipe.enable_model_cpu_offload()
84
+
85
+ >>> # generate image
86
+ >>> generator = torch.manual_seed(0)
87
+ >>> image = pipe(
88
+ ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image
89
+ ... ).images[0]
90
+ ```
91
+ """
92
+
93
+
94
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used,
            `timesteps` must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
            must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is None:
        # Default path: let the scheduler build its own schedule of the requested length.
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        return scheduler.timesteps, num_inference_steps

    # Custom schedule: only valid when the scheduler's `set_timesteps` accepts a
    # `timesteps` keyword — detected via its signature.
    if "timesteps" not in inspect.signature(scheduler.set_timesteps).parameters:
        raise ValueError(
            f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
            f" timestep schedules. Please check whether you are using the correct scheduler."
        )
    scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
    schedule = scheduler.timesteps
    return schedule, len(schedule)
137
+
138
+
139
+ class StableDiffusionControlNetPipeline(
140
+ DiffusionPipeline,
141
+ StableDiffusionMixin,
142
+ TextualInversionLoaderMixin,
143
+ LoraLoaderMixin,
144
+ IPAdapterMixin,
145
+ FromSingleFileMixin,
146
+ ):
147
+ r"""
148
+ Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.
149
+
150
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
151
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
152
+
153
+ The pipeline also inherits the following loading methods:
154
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
155
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
156
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
157
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
158
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
159
+
160
+ Args:
161
+ vae ([`AutoencoderKL`]):
162
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
163
+ text_encoder ([`~transformers.CLIPTextModel`]):
164
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
165
+ tokenizer ([`~transformers.CLIPTokenizer`]):
166
+ A `CLIPTokenizer` to tokenize text.
167
+ unet ([`UNet2DConditionModel`]):
168
+ A `UNet2DConditionModel` to denoise the encoded image latents.
169
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
170
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
171
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
172
+ additional conditioning.
173
+ scheduler ([`SchedulerMixin`]):
174
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
175
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
176
+ safety_checker ([`StableDiffusionSafetyChecker`]):
177
+ Classification module that estimates whether generated images could be considered offensive or harmful.
178
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
179
+ about a model's potential harms.
180
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
181
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
182
+ """
183
+
184
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
185
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
186
+ _exclude_from_cpu_offload = ["safety_checker"]
187
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
188
+
189
+ def __init__(
190
+ self,
191
+ vae: AutoencoderKL,
192
+ text_encoder: CLIPTextModel,
193
+ tokenizer: CLIPTokenizer,
194
+ unet: UNet2DConditionModel,
195
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
196
+ scheduler: KarrasDiffusionSchedulers,
197
+ safety_checker: StableDiffusionSafetyChecker,
198
+ feature_extractor: CLIPImageProcessor,
199
+ image_encoder: CLIPVisionModelWithProjection = None,
200
+ requires_safety_checker: bool = True,
201
+ ):
202
+ super().__init__()
203
+
204
+ if safety_checker is None and requires_safety_checker:
205
+ logger.warning(
206
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
207
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
208
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
209
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
210
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
211
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
212
+ )
213
+
214
+ if safety_checker is not None and feature_extractor is None:
215
+ raise ValueError(
216
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
217
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
218
+ )
219
+
220
+ if isinstance(controlnet, (list, tuple)):
221
+ controlnet = MultiControlNetModel(controlnet)
222
+
223
+ self.register_modules(
224
+ vae=vae,
225
+ text_encoder=text_encoder,
226
+ tokenizer=tokenizer,
227
+ unet=unet,
228
+ controlnet=controlnet,
229
+ scheduler=scheduler,
230
+ safety_checker=safety_checker,
231
+ feature_extractor=feature_extractor,
232
+ image_encoder=image_encoder,
233
+ )
234
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
235
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
236
+ self.control_image_processor = VaeImageProcessor(
237
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
238
+ )
239
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
240
+
241
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
242
+ def _encode_prompt(
243
+ self,
244
+ prompt,
245
+ device,
246
+ num_images_per_prompt,
247
+ do_classifier_free_guidance,
248
+ negative_prompt=None,
249
+ prompt_embeds: Optional[torch.FloatTensor] = None,
250
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
251
+ lora_scale: Optional[float] = None,
252
+ **kwargs,
253
+ ):
254
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
255
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
256
+
257
+ prompt_embeds_tuple = self.encode_prompt(
258
+ prompt=prompt,
259
+ device=device,
260
+ num_images_per_prompt=num_images_per_prompt,
261
+ do_classifier_free_guidance=do_classifier_free_guidance,
262
+ negative_prompt=negative_prompt,
263
+ prompt_embeds=prompt_embeds,
264
+ negative_prompt_embeds=negative_prompt_embeds,
265
+ lora_scale=lora_scale,
266
+ **kwargs,
267
+ )
268
+
269
+ # concatenate for backwards comp
270
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
271
+
272
+ return prompt_embeds
273
+
274
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.

        Returns:
            `Tuple[torch.FloatTensor, torch.FloatTensor]`: the prompt embeddings and the negative prompt
            embeddings, each duplicated `num_images_per_prompt` times along the batch dimension.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        # Derive the batch size from whichever prompt representation was provided.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            # Re-tokenize without truncation to detect (and warn about) clipped text.
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        # Pick the dtype to cast embeddings to, falling back through the available modules.
        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            # Pad the unconditional branch to the same sequence length as the prompt.
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds
455
+
456
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
457
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
458
+ dtype = next(self.image_encoder.parameters()).dtype
459
+
460
+ if not isinstance(image, torch.Tensor):
461
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
462
+
463
+ image = image.to(device=device, dtype=dtype)
464
+ if output_hidden_states:
465
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
466
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
467
+ uncond_image_enc_hidden_states = self.image_encoder(
468
+ torch.zeros_like(image), output_hidden_states=True
469
+ ).hidden_states[-2]
470
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
471
+ num_images_per_prompt, dim=0
472
+ )
473
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
474
+ else:
475
+ image_embeds = self.image_encoder(image).image_embeds
476
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
477
+ uncond_image_embeds = torch.zeros_like(image_embeds)
478
+
479
+ return image_embeds, uncond_image_embeds
480
+
481
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        """Build the per-IP-Adapter image embedding list consumed by the UNet.

        Either encodes `ip_adapter_image` (one entry per loaded IP Adapter) via
        `encode_image`, or re-shapes user-supplied `ip_adapter_image_embeds`. With
        classifier-free guidance the negative embeddings are concatenated in front of
        the positive ones along the batch dimension. Returns a list of tensors, one
        per IP Adapter.
        """
        if ip_adapter_image_embeds is None:
            # Encode raw images, one per IP-Adapter projection layer.
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # Plain `ImageProjection` layers consume the pooled embedding; every
                # other projection type consumes hidden states.
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if do_classifier_free_guidance:
                    # CFG layout: negative first, then positive.
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            # Reuse precomputed embeddings; with CFG each entry is expected to already
            # hold [negative; positive] stacked along dim 0 (hence the chunk(2)).
            repeat_dims = [1]
            image_embeds = []
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                    single_negative_image_embeds = single_negative_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
                    )
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                else:
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                image_embeds.append(single_image_embeds)

        return image_embeds
532
+
533
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
534
+ def run_safety_checker(self, image, device, dtype):
535
+ if self.safety_checker is None:
536
+ has_nsfw_concept = None
537
+ else:
538
+ if torch.is_tensor(image):
539
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
540
+ else:
541
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
542
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
543
+ image, has_nsfw_concept = self.safety_checker(
544
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
545
+ )
546
+ return image, has_nsfw_concept
547
+
548
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
549
+ def decode_latents(self, latents):
550
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
551
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
552
+
553
+ latents = 1 / self.vae.config.scaling_factor * latents
554
+ image = self.vae.decode(latents, return_dict=False)[0]
555
+ image = (image / 2 + 0.5).clamp(0, 1)
556
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
557
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
558
+ return image
559
+
560
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
561
+ def prepare_extra_step_kwargs(self, generator, eta):
562
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
563
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
564
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
565
+ # and should be between [0, 1]
566
+
567
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
568
+ extra_step_kwargs = {}
569
+ if accepts_eta:
570
+ extra_step_kwargs["eta"] = eta
571
+
572
+ # check if the scheduler accepts generator
573
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
574
+ if accepts_generator:
575
+ extra_step_kwargs["generator"] = generator
576
+ return extra_step_kwargs
577
+
578
+ def check_inputs(
579
+ self,
580
+ prompt,
581
+ image,
582
+ callback_steps,
583
+ negative_prompt=None,
584
+ prompt_embeds=None,
585
+ negative_prompt_embeds=None,
586
+ ip_adapter_image=None,
587
+ ip_adapter_image_embeds=None,
588
+ controlnet_conditioning_scale=1.0,
589
+ control_guidance_start=0.0,
590
+ control_guidance_end=1.0,
591
+ callback_on_step_end_tensor_inputs=None,
592
+ ):
593
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
594
+ raise ValueError(
595
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
596
+ f" {type(callback_steps)}."
597
+ )
598
+
599
+ if callback_on_step_end_tensor_inputs is not None and not all(
600
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
601
+ ):
602
+ raise ValueError(
603
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
604
+ )
605
+
606
+ if prompt is not None and prompt_embeds is not None:
607
+ raise ValueError(
608
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
609
+ " only forward one of the two."
610
+ )
611
+ elif prompt is None and prompt_embeds is None:
612
+ raise ValueError(
613
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
614
+ )
615
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
616
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
617
+
618
+ if negative_prompt is not None and negative_prompt_embeds is not None:
619
+ raise ValueError(
620
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
621
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
622
+ )
623
+
624
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
625
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
626
+ raise ValueError(
627
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
628
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
629
+ f" {negative_prompt_embeds.shape}."
630
+ )
631
+
632
+ # Check `image`
633
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
634
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
635
+ )
636
+ if (
637
+ isinstance(self.controlnet, ControlNetModel)
638
+ or is_compiled
639
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
640
+ ):
641
+ self.check_image(image, prompt, prompt_embeds)
642
+ elif (
643
+ isinstance(self.controlnet, MultiControlNetModel)
644
+ or is_compiled
645
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
646
+ ):
647
+ if not isinstance(image, list):
648
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
649
+
650
+ # When `image` is a nested list:
651
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
652
+ elif any(isinstance(i, list) for i in image):
653
+ transposed_image = [list(t) for t in zip(*image)]
654
+ if len(transposed_image) != len(self.controlnet.nets):
655
+ raise ValueError(
656
+ f"For multiple controlnets: if you pass`image` as a list of list, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets."
657
+ )
658
+ for image_ in transposed_image:
659
+ self.check_image(image_, prompt, prompt_embeds)
660
+ elif len(image) != len(self.controlnet.nets):
661
+ raise ValueError(
662
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
663
+ )
664
+
665
+ for image_ in image:
666
+ self.check_image(image_, prompt, prompt_embeds)
667
+ else:
668
+ assert False
669
+
670
+ # Check `controlnet_conditioning_scale`
671
+ if (
672
+ isinstance(self.controlnet, ControlNetModel)
673
+ or is_compiled
674
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
675
+ ):
676
+ if not isinstance(controlnet_conditioning_scale, float):
677
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
678
+ elif (
679
+ isinstance(self.controlnet, MultiControlNetModel)
680
+ or is_compiled
681
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
682
+ ):
683
+ if isinstance(controlnet_conditioning_scale, list):
684
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
685
+ raise ValueError(
686
+ "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. "
687
+ "The conditioning scale must be fixed across the batch."
688
+ )
689
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
690
+ self.controlnet.nets
691
+ ):
692
+ raise ValueError(
693
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
694
+ " the same length as the number of controlnets"
695
+ )
696
+ else:
697
+ assert False
698
+
699
+ if not isinstance(control_guidance_start, (tuple, list)):
700
+ control_guidance_start = [control_guidance_start]
701
+
702
+ if not isinstance(control_guidance_end, (tuple, list)):
703
+ control_guidance_end = [control_guidance_end]
704
+
705
+ if len(control_guidance_start) != len(control_guidance_end):
706
+ raise ValueError(
707
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
708
+ )
709
+
710
+ if isinstance(self.controlnet, MultiControlNetModel):
711
+ if len(control_guidance_start) != len(self.controlnet.nets):
712
+ raise ValueError(
713
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
714
+ )
715
+
716
+ for start, end in zip(control_guidance_start, control_guidance_end):
717
+ if start >= end:
718
+ raise ValueError(
719
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
720
+ )
721
+ if start < 0.0:
722
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
723
+ if end > 1.0:
724
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
725
+
726
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
727
+ raise ValueError(
728
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
729
+ )
730
+
731
+ if ip_adapter_image_embeds is not None:
732
+ if not isinstance(ip_adapter_image_embeds, list):
733
+ raise ValueError(
734
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
735
+ )
736
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
737
+ raise ValueError(
738
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
739
+ )
740
+
741
+ def check_image(self, image, prompt, prompt_embeds):
742
+ image_is_pil = isinstance(image, PIL.Image.Image)
743
+ image_is_tensor = isinstance(image, torch.Tensor)
744
+ image_is_np = isinstance(image, np.ndarray)
745
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
746
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
747
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
748
+
749
+ if (
750
+ not image_is_pil
751
+ and not image_is_tensor
752
+ and not image_is_np
753
+ and not image_is_pil_list
754
+ and not image_is_tensor_list
755
+ and not image_is_np_list
756
+ ):
757
+ raise TypeError(
758
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
759
+ )
760
+
761
+ if image_is_pil:
762
+ image_batch_size = 1
763
+ else:
764
+ image_batch_size = len(image)
765
+
766
+ if prompt is not None and isinstance(prompt, str):
767
+ prompt_batch_size = 1
768
+ elif prompt is not None and isinstance(prompt, list):
769
+ prompt_batch_size = len(prompt)
770
+ elif prompt_embeds is not None:
771
+ prompt_batch_size = prompt_embeds.shape[0]
772
+
773
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
774
+ raise ValueError(
775
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
776
+ )
777
+
778
+ def prepare_image(
779
+ self,
780
+ image,
781
+ width,
782
+ height,
783
+ batch_size,
784
+ num_images_per_prompt,
785
+ device,
786
+ dtype,
787
+ do_classifier_free_guidance=False,
788
+ guess_mode=False,
789
+ ):
790
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
791
+ image_batch_size = image.shape[0]
792
+
793
+ if image_batch_size == 1:
794
+ repeat_by = batch_size
795
+ else:
796
+ # image batch size is the same as prompt batch size
797
+ repeat_by = num_images_per_prompt
798
+
799
+ image = image.repeat_interleave(repeat_by, dim=0)
800
+
801
+ image = image.to(device=device, dtype=dtype)
802
+
803
+ if do_classifier_free_guidance and not guess_mode:
804
+ image = torch.cat([image] * 2)
805
+
806
+ return image
807
+
808
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
809
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
810
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
811
+ if isinstance(generator, list) and len(generator) != batch_size:
812
+ raise ValueError(
813
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
814
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
815
+ )
816
+
817
+ if latents is None:
818
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
819
+ else:
820
+ latents = latents.to(device)
821
+
822
+ # scale the initial noise by the standard deviation required by the scheduler
823
+ latents = latents * self.scheduler.init_noise_sigma
824
+ return latents
825
+
826
    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        Sinusoidal embedding of the guidance weight(s) `w`. See
        https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                1-D tensor of guidance-scale values, one per batch element.
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0  # scaling used by the reference implementation linked above

        half_dim = embedding_dim // 2
        # Geometric frequency ladder spanning periods up to 10000, as in transformer
        # positional encodings.
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb
854
+
855
+ @property
856
+ def guidance_scale(self):
857
+ return self._guidance_scale
858
+
859
+ @property
860
+ def clip_skip(self):
861
+ return self._clip_skip
862
+
863
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
864
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
865
+ # corresponds to doing no classifier free guidance.
866
+ @property
867
+ def do_classifier_free_guidance(self):
868
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
869
+
870
+ @property
871
+ def cross_attention_kwargs(self):
872
+ return self._cross_attention_kwargs
873
+
874
+ @property
875
+ def num_timesteps(self):
876
+ return self._num_timesteps
877
+
878
+ @torch.no_grad()
879
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
880
+ def __call__(
881
+ self,
882
+ prompt: Union[str, List[str]] = None,
883
+ image: PipelineImageInput = None,
884
+ height: Optional[int] = None,
885
+ width: Optional[int] = None,
886
+ num_inference_steps: int = 50,
887
+ timesteps: List[int] = None,
888
+ guidance_scale: float = 7.5,
889
+ negative_prompt: Optional[Union[str, List[str]]] = None,
890
+ num_images_per_prompt: Optional[int] = 1,
891
+ eta: float = 0.0,
892
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
893
+ latents: Optional[torch.FloatTensor] = None,
894
+ prompt_embeds: Optional[torch.FloatTensor] = None,
895
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
896
+ ip_adapter_image: Optional[PipelineImageInput] = None,
897
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
898
+ output_type: Optional[str] = "pil",
899
+ return_dict: bool = True,
900
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
901
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
902
+ guess_mode: bool = False,
903
+ control_guidance_start: Union[float, List[float]] = 0.0,
904
+ control_guidance_end: Union[float, List[float]] = 1.0,
905
+ clip_skip: Optional[int] = None,
906
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
907
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
908
+ **kwargs,
909
+ ):
910
+ r"""
911
+ The call function to the pipeline for generation.
912
+
913
+ Args:
914
+ prompt (`str` or `List[str]`, *optional*):
915
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
916
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
917
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
918
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
919
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
920
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
921
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
922
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
923
+ input to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single ControlNet,
924
+ each will be paired with each prompt in the `prompt` list. This also applies to multiple ControlNets,
925
+ where a list of image lists can be passed to batch for each prompt and each ControlNet.
926
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
927
+ The height in pixels of the generated image.
928
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
929
+ The width in pixels of the generated image.
930
+ num_inference_steps (`int`, *optional*, defaults to 50):
931
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
932
+ expense of slower inference.
933
+ timesteps (`List[int]`, *optional*):
934
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
935
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
936
+ passed will be used. Must be in descending order.
937
+ guidance_scale (`float`, *optional*, defaults to 7.5):
938
+ A higher guidance scale value encourages the model to generate images closely linked to the text
939
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
940
+ negative_prompt (`str` or `List[str]`, *optional*):
941
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
942
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
943
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
944
+ The number of images to generate per prompt.
945
+ eta (`float`, *optional*, defaults to 0.0):
946
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
947
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
948
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
949
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
950
+ generation deterministic.
951
+ latents (`torch.FloatTensor`, *optional*):
952
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
953
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
954
+ tensor is generated by sampling using the supplied random `generator`.
955
+ prompt_embeds (`torch.FloatTensor`, *optional*):
956
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
957
+ provided, text embeddings are generated from the `prompt` input argument.
958
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
959
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
960
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
961
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
962
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
963
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
964
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
965
+ if `do_classifier_free_guidance` is set to `True`.
966
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
967
+ output_type (`str`, *optional*, defaults to `"pil"`):
968
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
969
+ return_dict (`bool`, *optional*, defaults to `True`):
970
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
971
+ plain tuple.
972
+ callback (`Callable`, *optional*):
973
+ A function that calls every `callback_steps` steps during inference. The function is called with the
974
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
975
+ callback_steps (`int`, *optional*, defaults to 1):
976
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
977
+ every step.
978
+ cross_attention_kwargs (`dict`, *optional*):
979
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
980
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
981
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
982
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
983
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
984
+ the corresponding scale as a list.
985
+ guess_mode (`bool`, *optional*, defaults to `False`):
986
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
987
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
988
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
989
+ The percentage of total steps at which the ControlNet starts applying.
990
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
991
+ The percentage of total steps at which the ControlNet stops applying.
992
+ clip_skip (`int`, *optional*):
993
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
994
+ the output of the pre-final layer will be used for computing the prompt embeddings.
995
+ callback_on_step_end (`Callable`, *optional*):
996
+ A function that calls at the end of each denoising steps during the inference. The function is called
997
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
998
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
999
+ `callback_on_step_end_tensor_inputs`.
1000
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1001
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1002
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1003
+ `._callback_tensor_inputs` attribute of your pipeine class.
1004
+
1005
+ Examples:
1006
+
1007
+ Returns:
1008
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1009
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1010
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
1011
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
1012
+ "not-safe-for-work" (nsfw) content.
1013
+ """
1014
+
1015
+ callback = kwargs.pop("callback", None)
1016
+ callback_steps = kwargs.pop("callback_steps", None)
1017
+
1018
+ if callback is not None:
1019
+ deprecate(
1020
+ "callback",
1021
+ "1.0.0",
1022
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1023
+ )
1024
+ if callback_steps is not None:
1025
+ deprecate(
1026
+ "callback_steps",
1027
+ "1.0.0",
1028
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1029
+ )
1030
+
1031
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1032
+
1033
+ # align format for control guidance
1034
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1035
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1036
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1037
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1038
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1039
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1040
+ control_guidance_start, control_guidance_end = (
1041
+ mult * [control_guidance_start],
1042
+ mult * [control_guidance_end],
1043
+ )
1044
+
1045
+ # 1. Check inputs. Raise error if not correct
1046
+ self.check_inputs(
1047
+ prompt,
1048
+ image,
1049
+ callback_steps,
1050
+ negative_prompt,
1051
+ prompt_embeds,
1052
+ negative_prompt_embeds,
1053
+ ip_adapter_image,
1054
+ ip_adapter_image_embeds,
1055
+ controlnet_conditioning_scale,
1056
+ control_guidance_start,
1057
+ control_guidance_end,
1058
+ callback_on_step_end_tensor_inputs,
1059
+ )
1060
+
1061
+ self._guidance_scale = guidance_scale
1062
+ self._clip_skip = clip_skip
1063
+ self._cross_attention_kwargs = cross_attention_kwargs
1064
+
1065
+ # 2. Define call parameters
1066
+ if prompt is not None and isinstance(prompt, str):
1067
+ batch_size = 1
1068
+ elif prompt is not None and isinstance(prompt, list):
1069
+ batch_size = len(prompt)
1070
+ else:
1071
+ batch_size = prompt_embeds.shape[0]
1072
+
1073
+ device = self._execution_device
1074
+
1075
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1076
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1077
+
1078
+ global_pool_conditions = (
1079
+ controlnet.config.global_pool_conditions
1080
+ if isinstance(controlnet, ControlNetModel)
1081
+ else controlnet.nets[0].config.global_pool_conditions
1082
+ )
1083
+ guess_mode = guess_mode or global_pool_conditions
1084
+
1085
+ # 3. Encode input prompt
1086
+ text_encoder_lora_scale = (
1087
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1088
+ )
1089
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1090
+ prompt,
1091
+ device,
1092
+ num_images_per_prompt,
1093
+ self.do_classifier_free_guidance,
1094
+ negative_prompt,
1095
+ prompt_embeds=prompt_embeds,
1096
+ negative_prompt_embeds=negative_prompt_embeds,
1097
+ lora_scale=text_encoder_lora_scale,
1098
+ clip_skip=self.clip_skip,
1099
+ )
1100
+ # For classifier free guidance, we need to do two forward passes.
1101
+ # Here we concatenate the unconditional and text embeddings into a single batch
1102
+ # to avoid doing two forward passes
1103
+ if self.do_classifier_free_guidance:
1104
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1105
+
1106
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1107
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1108
+ ip_adapter_image,
1109
+ ip_adapter_image_embeds,
1110
+ device,
1111
+ batch_size * num_images_per_prompt,
1112
+ self.do_classifier_free_guidance,
1113
+ )
1114
+
1115
+ # 4. Prepare image
1116
+ if isinstance(controlnet, ControlNetModel):
1117
+ image = self.prepare_image(
1118
+ image=image,
1119
+ width=width,
1120
+ height=height,
1121
+ batch_size=batch_size * num_images_per_prompt,
1122
+ num_images_per_prompt=num_images_per_prompt,
1123
+ device=device,
1124
+ dtype=controlnet.dtype,
1125
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1126
+ guess_mode=guess_mode,
1127
+ )
1128
+ height, width = image.shape[-2:]
1129
+ elif isinstance(controlnet, MultiControlNetModel):
1130
+ images = []
1131
+
1132
+ # Nested lists as ControlNet condition
1133
+ if isinstance(image[0], list):
1134
+ # Transpose the nested image list
1135
+ image = [list(t) for t in zip(*image)]
1136
+
1137
+ for image_ in image:
1138
+ image_ = self.prepare_image(
1139
+ image=image_,
1140
+ width=width,
1141
+ height=height,
1142
+ batch_size=batch_size * num_images_per_prompt,
1143
+ num_images_per_prompt=num_images_per_prompt,
1144
+ device=device,
1145
+ dtype=controlnet.dtype,
1146
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1147
+ guess_mode=guess_mode,
1148
+ )
1149
+
1150
+ images.append(image_)
1151
+
1152
+ image = images
1153
+ height, width = image[0].shape[-2:]
1154
+ else:
1155
+ assert False
1156
+
1157
+ # 5. Prepare timesteps
1158
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1159
+ self._num_timesteps = len(timesteps)
1160
+
1161
+ # 6. Prepare latent variables
1162
+ num_channels_latents = self.unet.config.in_channels
1163
+ latents = self.prepare_latents(
1164
+ batch_size * num_images_per_prompt,
1165
+ num_channels_latents,
1166
+ height,
1167
+ width,
1168
+ prompt_embeds.dtype,
1169
+ device,
1170
+ generator,
1171
+ latents,
1172
+ )
1173
+
1174
+ # 6.5 Optionally get Guidance Scale Embedding
1175
+ timestep_cond = None
1176
+ if self.unet.config.time_cond_proj_dim is not None:
1177
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1178
+ timestep_cond = self.get_guidance_scale_embedding(
1179
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1180
+ ).to(device=device, dtype=latents.dtype)
1181
+
1182
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1183
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1184
+
1185
+ # 7.1 Add image embeds for IP-Adapter
1186
+ added_cond_kwargs = (
1187
+ {"image_embeds": image_embeds}
1188
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
1189
+ else None
1190
+ )
1191
+
1192
+ # 7.2 Create tensor stating which controlnets to keep
1193
+ controlnet_keep = []
1194
+ for i in range(len(timesteps)):
1195
+ keeps = [
1196
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1197
+ for s, e in zip(control_guidance_start, control_guidance_end)
1198
+ ]
1199
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1200
+
1201
+ # 8. Denoising loop
1202
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1203
+ is_unet_compiled = is_compiled_module(self.unet)
1204
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
1205
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
1206
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1207
+ for i, t in enumerate(timesteps):
1208
+ # Relevant thread:
1209
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
1210
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
1211
+ torch._inductor.cudagraph_mark_step_begin()
1212
+ # expand the latents if we are doing classifier free guidance
1213
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1214
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1215
+
1216
+ # controlnet(s) inference
1217
+ if guess_mode and self.do_classifier_free_guidance:
1218
+ # Infer ControlNet only for the conditional batch.
1219
+ control_model_input = latents
1220
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1221
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1222
+ else:
1223
+ control_model_input = latent_model_input
1224
+ controlnet_prompt_embeds = prompt_embeds
1225
+
1226
+ if isinstance(controlnet_keep[i], list):
1227
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1228
+ else:
1229
+ controlnet_cond_scale = controlnet_conditioning_scale
1230
+ if isinstance(controlnet_cond_scale, list):
1231
+ controlnet_cond_scale = controlnet_cond_scale[0]
1232
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1233
+
1234
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1235
+ control_model_input,
1236
+ t,
1237
+ encoder_hidden_states=controlnet_prompt_embeds,
1238
+ controlnet_cond=image,
1239
+ conditioning_scale=cond_scale,
1240
+ guess_mode=guess_mode,
1241
+ return_dict=False,
1242
+ )
1243
+
1244
+ if guess_mode and self.do_classifier_free_guidance:
1245
+ # Infered ControlNet only for the conditional batch.
1246
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1247
+ # add 0 to the unconditional batch to keep it unchanged.
1248
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1249
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1250
+
1251
+ # predict the noise residual
1252
+ noise_pred = self.unet(
1253
+ latent_model_input,
1254
+ t,
1255
+ encoder_hidden_states=prompt_embeds,
1256
+ timestep_cond=timestep_cond,
1257
+ cross_attention_kwargs=self.cross_attention_kwargs,
1258
+ down_block_additional_residuals=down_block_res_samples,
1259
+ mid_block_additional_residual=mid_block_res_sample,
1260
+ added_cond_kwargs=added_cond_kwargs,
1261
+ return_dict=False,
1262
+ )[0]
1263
+
1264
+ # perform guidance
1265
+ if self.do_classifier_free_guidance:
1266
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1267
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1268
+
1269
+ # compute the previous noisy sample x_t -> x_t-1
1270
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1271
+
1272
+ if callback_on_step_end is not None:
1273
+ callback_kwargs = {}
1274
+ for k in callback_on_step_end_tensor_inputs:
1275
+ callback_kwargs[k] = locals()[k]
1276
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1277
+
1278
+ latents = callback_outputs.pop("latents", latents)
1279
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1280
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1281
+
1282
+ # call the callback, if provided
1283
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1284
+ progress_bar.update()
1285
+ if callback is not None and i % callback_steps == 0:
1286
+ step_idx = i // getattr(self.scheduler, "order", 1)
1287
+ callback(step_idx, t, latents)
1288
+
1289
+ # If we do sequential model offloading, let's offload unet and controlnet
1290
+ # manually for max memory savings
1291
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1292
+ self.unet.to("cpu")
1293
+ self.controlnet.to("cpu")
1294
+ torch.cuda.empty_cache()
1295
+
1296
+ if not output_type == "latent":
1297
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1298
+ 0
1299
+ ]
1300
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1301
+ else:
1302
+ image = latents
1303
+ has_nsfw_concept = None
1304
+
1305
+ if has_nsfw_concept is None:
1306
+ do_denormalize = [True] * image.shape[0]
1307
+ else:
1308
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1309
+
1310
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1311
+
1312
+ # Offload all models
1313
+ self.maybe_free_model_hooks()
1314
+
1315
+ if not return_dict:
1316
+ return (image, has_nsfw_concept)
1317
+
1318
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Salesforce.com, inc.
2
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import List, Optional, Union
16
+
17
+ import PIL.Image
18
+ import torch
19
+ from transformers import CLIPTokenizer
20
+
21
+ from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
22
+ from ...schedulers import PNDMScheduler
23
+ from ...utils import (
24
+ logging,
25
+ replace_example_docstring,
26
+ )
27
+ from ...utils.torch_utils import randn_tensor
28
+ from ..blip_diffusion.blip_image_processing import BlipImageProcessor
29
+ from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel
30
+ from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel
31
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+ EXAMPLE_DOC_STRING = """
37
+ Examples:
38
+ ```py
39
+ >>> from diffusers.pipelines import BlipDiffusionControlNetPipeline
40
+ >>> from diffusers.utils import load_image
41
+ >>> from controlnet_aux import CannyDetector
42
+ >>> import torch
43
+
44
+ >>> blip_diffusion_pipe = BlipDiffusionControlNetPipeline.from_pretrained(
45
+ ... "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16
46
+ ... ).to("cuda")
47
+
48
+ >>> style_subject = "flower"
49
+ >>> tgt_subject = "teapot"
50
+ >>> text_prompt = "on a marble table"
51
+
52
+ >>> cldm_cond_image = load_image(
53
+ ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg"
54
+ ... ).resize((512, 512))
55
+ >>> canny = CannyDetector()
56
+ >>> cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type="pil")
57
+ >>> style_image = load_image(
58
+ ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg"
59
+ ... )
60
+ >>> guidance_scale = 7.5
61
+ >>> num_inference_steps = 50
62
+ >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"
63
+
64
+
65
+ >>> output = blip_diffusion_pipe(
66
+ ... text_prompt,
67
+ ... style_image,
68
+ ... cldm_cond_image,
69
+ ... style_subject,
70
+ ... tgt_subject,
71
+ ... guidance_scale=guidance_scale,
72
+ ... num_inference_steps=num_inference_steps,
73
+ ... neg_prompt=negative_prompt,
74
+ ... height=512,
75
+ ... width=512,
76
+ ... ).images
77
+ >>> output[0].save("image.png")
78
+ ```
79
+ """
80
+
81
+
82
+ class BlipDiffusionControlNetPipeline(DiffusionPipeline):
83
+ """
84
+ Pipeline for Canny Edge based Controlled subject-driven generation using Blip Diffusion.
85
+
86
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
87
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
88
+
89
+ Args:
90
+ tokenizer ([`CLIPTokenizer`]):
91
+ Tokenizer for the text encoder
92
+ text_encoder ([`ContextCLIPTextModel`]):
93
+ Text encoder to encode the text prompt
94
+ vae ([`AutoencoderKL`]):
95
+ VAE model to map the latents to the image
96
+ unet ([`UNet2DConditionModel`]):
97
+ Conditional U-Net architecture to denoise the image embedding.
98
+ scheduler ([`PNDMScheduler`]):
99
+ A scheduler to be used in combination with `unet` to generate image latents.
100
+ qformer ([`Blip2QFormerModel`]):
101
+ QFormer model to get multi-modal embeddings from the text and image.
102
+ controlnet ([`ControlNetModel`]):
103
+ ControlNet model to get the conditioning image embedding.
104
+ image_processor ([`BlipImageProcessor`]):
105
+ Image Processor to preprocess and postprocess the image.
106
+ ctx_begin_pos (int, `optional`, defaults to 2):
107
+ Position of the context token in the text encoder.
108
+ """
109
+
110
+ model_cpu_offload_seq = "qformer->text_encoder->unet->vae"
111
+
112
+ def __init__(
113
+ self,
114
+ tokenizer: CLIPTokenizer,
115
+ text_encoder: ContextCLIPTextModel,
116
+ vae: AutoencoderKL,
117
+ unet: UNet2DConditionModel,
118
+ scheduler: PNDMScheduler,
119
+ qformer: Blip2QFormerModel,
120
+ controlnet: ControlNetModel,
121
+ image_processor: BlipImageProcessor,
122
+ ctx_begin_pos: int = 2,
123
+ mean: List[float] = None,
124
+ std: List[float] = None,
125
+ ):
126
+ super().__init__()
127
+
128
+ self.register_modules(
129
+ tokenizer=tokenizer,
130
+ text_encoder=text_encoder,
131
+ vae=vae,
132
+ unet=unet,
133
+ scheduler=scheduler,
134
+ qformer=qformer,
135
+ controlnet=controlnet,
136
+ image_processor=image_processor,
137
+ )
138
+ self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std)
139
+
140
+ def get_query_embeddings(self, input_image, src_subject):
141
+ return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False)
142
+
143
+ # from the original Blip Diffusion code, speciefies the target subject and augments the prompt by repeating it
144
+ def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20):
145
+ rv = []
146
+ for prompt, tgt_subject in zip(prompts, tgt_subjects):
147
+ prompt = f"a {tgt_subject} {prompt.strip()}"
148
+ # a trick to amplify the prompt
149
+ rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps)))
150
+
151
+ return rv
152
+
153
+ # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents
154
+ def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None):
155
+ shape = (batch_size, num_channels, height, width)
156
+ if isinstance(generator, list) and len(generator) != batch_size:
157
+ raise ValueError(
158
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
159
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
160
+ )
161
+
162
+ if latents is None:
163
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
164
+ else:
165
+ latents = latents.to(device=device, dtype=dtype)
166
+
167
+ # scale the initial noise by the standard deviation required by the scheduler
168
+ latents = latents * self.scheduler.init_noise_sigma
169
+ return latents
170
+
171
+ def encode_prompt(self, query_embeds, prompt, device=None):
172
+ device = device or self._execution_device
173
+
174
+ # embeddings for prompt, with query_embeds as context
175
+ max_len = self.text_encoder.text_model.config.max_position_embeddings
176
+ max_len -= self.qformer.config.num_query_tokens
177
+
178
+ tokenized_prompt = self.tokenizer(
179
+ prompt,
180
+ padding="max_length",
181
+ truncation=True,
182
+ max_length=max_len,
183
+ return_tensors="pt",
184
+ ).to(device)
185
+
186
+ batch_size = query_embeds.shape[0]
187
+ ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size
188
+
189
+ text_embeddings = self.text_encoder(
190
+ input_ids=tokenized_prompt.input_ids,
191
+ ctx_embeddings=query_embeds,
192
+ ctx_begin_pos=ctx_begin_pos,
193
+ )[0]
194
+
195
+ return text_embeddings
196
+
197
+ # Adapted from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
198
+ def prepare_control_image(
199
+ self,
200
+ image,
201
+ width,
202
+ height,
203
+ batch_size,
204
+ num_images_per_prompt,
205
+ device,
206
+ dtype,
207
+ do_classifier_free_guidance=False,
208
+ ):
209
+ image = self.image_processor.preprocess(
210
+ image,
211
+ size={"width": width, "height": height},
212
+ do_rescale=True,
213
+ do_center_crop=False,
214
+ do_normalize=False,
215
+ return_tensors="pt",
216
+ )["pixel_values"].to(device)
217
+ image_batch_size = image.shape[0]
218
+
219
+ if image_batch_size == 1:
220
+ repeat_by = batch_size
221
+ else:
222
+ # image batch size is the same as prompt batch size
223
+ repeat_by = num_images_per_prompt
224
+
225
+ image = image.repeat_interleave(repeat_by, dim=0)
226
+
227
+ image = image.to(device=device, dtype=dtype)
228
+
229
+ if do_classifier_free_guidance:
230
+ image = torch.cat([image] * 2)
231
+
232
+ return image
233
+
234
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: List[str],
        reference_image: PIL.Image.Image,
        condtioning_image: PIL.Image.Image,  # NOTE(review): name typo ("condtioning") kept — renaming would break callers
        source_subject_category: List[str],
        target_subject_category: List[str],
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 7.5,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        neg_prompt: Optional[str] = "",
        prompt_strength: float = 1.0,
        prompt_reps: int = 20,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`List[str]`):
                The prompt or prompts to guide the image generation.
            reference_image (`PIL.Image.Image`):
                The reference image to condition the generation on.
            condtioning_image (`PIL.Image.Image`):
                The conditioning canny edge image to condition the generation on.
            source_subject_category (`List[str]`):
                The source subject category.
            target_subject_category (`List[str]`):
                The target subject category.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by random sampling.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            height (`int`, *optional*, defaults to 512):
                The height of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            neg_prompt (`str`, *optional*, defaults to ""):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            prompt_strength (`float`, *optional*, defaults to 1.0):
                The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps
                to amplify the prompt.
            prompt_reps (`int`, *optional*, defaults to 20):
                The number of times the prompt is repeated along with prompt_strength to amplify the prompt.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image, forwarded to the image processor's `postprocess`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return an [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: the generated image(s); a `(images,)` tuple when
            `return_dict=False`.
        """
        device = self._execution_device

        # 1. Normalize the reference image and move it to the execution device.
        reference_image = self.image_processor.preprocess(
            reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt"
        )["pixel_values"]
        reference_image = reference_image.to(device)

        # Accept bare strings as a convenience and promote them to one-element lists.
        if isinstance(prompt, str):
            prompt = [prompt]
        if isinstance(source_subject_category, str):
            source_subject_category = [source_subject_category]
        if isinstance(target_subject_category, str):
            target_subject_category = [target_subject_category]

        batch_size = len(prompt)

        # 2. Build the amplified, subject-prefixed prompts and encode them with the
        # Q-Former query embeddings as context.
        prompt = self._build_prompt(
            prompts=prompt,
            tgt_subjects=target_subject_category,
            prompt_strength=prompt_strength,
            prompt_reps=prompt_reps,
        )
        query_embeds = self.get_query_embeddings(reference_image, source_subject_category)
        text_embeddings = self.encode_prompt(query_embeds, prompt, device)
        # 3. unconditional embedding
        do_classifier_free_guidance = guidance_scale > 1.0
        if do_classifier_free_guidance:
            max_length = self.text_encoder.text_model.config.max_position_embeddings

            uncond_input = self.tokenizer(
                [neg_prompt] * batch_size,
                padding="max_length",
                max_length=max_length,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(
                input_ids=uncond_input.input_ids.to(device),
                ctx_embeddings=None,
            )[0]
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # Each UNet resolution level halves the spatial size, so latents are smaller than the image.
        scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1)
        latents = self.prepare_latents(
            batch_size=batch_size,
            num_channels=self.unet.config.in_channels,
            height=height // scale_down_factor,
            width=width // scale_down_factor,
            generator=generator,
            latents=latents,
            dtype=self.unet.dtype,
            device=device,
        )
        # set timesteps
        extra_set_kwargs = {}
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # 4. Prepare the ControlNet conditioning image (doubled when CFG is active).
        cond_image = self.prepare_control_image(
            image=condtioning_image,
            width=width,
            height=height,
            batch_size=batch_size,
            num_images_per_prompt=1,
            device=device,
            dtype=self.controlnet.dtype,
            do_classifier_free_guidance=do_classifier_free_guidance,
        )

        # 5. Denoising loop: ControlNet residuals feed the UNet each step.
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier free guidance
            do_classifier_free_guidance = guidance_scale > 1.0  # NOTE(review): redundant — already computed before the loop

            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            down_block_res_samples, mid_block_res_sample = self.controlnet(
                latent_model_input,
                t,
                encoder_hidden_states=text_embeddings,
                controlnet_cond=cond_image,
                return_dict=False,
            )

            noise_pred = self.unet(
                latent_model_input,
                timestep=t,
                encoder_hidden_states=text_embeddings,
                down_block_additional_residuals=down_block_res_samples,
                mid_block_additional_residual=mid_block_res_sample,
            )["sample"]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
            )["prev_sample"]
        # 6. Decode the final latents and convert to the requested output format.
        image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
        image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py ADDED
@@ -0,0 +1,1310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
23
+
24
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
25
+ from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
27
+ from ...models.lora import adjust_lora_scale_text_encoder
28
+ from ...schedulers import KarrasDiffusionSchedulers
29
+ from ...utils import (
30
+ USE_PEFT_BACKEND,
31
+ deprecate,
32
+ logging,
33
+ replace_example_docstring,
34
+ scale_lora_layers,
35
+ unscale_lora_layers,
36
+ )
37
+ from ...utils.torch_utils import is_compiled_module, randn_tensor
38
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
39
+ from ..stable_diffusion import StableDiffusionPipelineOutput
40
+ from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
41
+ from .multicontrolnet import MultiControlNetModel
42
+
43
+
44
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Usage example injected into `__call__`'s docstring via `@replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # !pip install opencv-python transformers accelerate
        >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import torch

        >>> import cv2
        >>> from PIL import Image

        >>> # download an image
        >>> image = load_image(
        ...     "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
        ... )
        >>> np_image = np.array(image)

        >>> # get canny image
        >>> np_image = cv2.Canny(np_image, 100, 200)
        >>> np_image = np_image[:, :, None]
        >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
        >>> canny_image = Image.fromarray(np_image)

        >>> # load control net and stable diffusion v1-5
        >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
        >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
        ... )

        >>> # speed up diffusion process with faster scheduler and memory optimization
        >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        >>> pipe.enable_model_cpu_offload()

        >>> # generate image
        >>> generator = torch.manual_seed(0)
        >>> image = pipe(
        ...     "futuristic-looking woman",
        ...     num_inference_steps=20,
        ...     generator=generator,
        ...     image=image,
        ...     control_image=canny_image,
        ... ).images[0]
        ```
"""
92
+
93
+
94
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    """Extract a latent tensor from a VAE encoder output.

    Outputs exposing a `latent_dist` are sampled (`sample_mode="sample"`) or reduced
    to their distribution mode (`sample_mode="argmax"`); outputs exposing a plain
    `latents` attribute are returned as-is. Anything else raises `AttributeError`.
    """
    has_dist = hasattr(encoder_output, "latent_dist")
    if has_dist and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    if has_dist and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")
106
+
107
+
108
def prepare_image(image):
    """Convert an input image (tensor, PIL image, ndarray, or list thereof) into a
    batched float32 NCHW tensor.

    Tensors are only batched and cast (assumed already scaled); PIL/numpy inputs are
    stacked and rescaled from [0, 255] to [-1, 1].
    """
    if isinstance(image, torch.Tensor):
        # Promote a single CHW image to a batch of one.
        batched = image.unsqueeze(0) if image.ndim == 3 else image
        return batched.to(dtype=torch.float32)

    # Normalize a single image to a one-element list.
    if isinstance(image, (PIL.Image.Image, np.ndarray)):
        image = [image]

    if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
        stacked = np.concatenate([np.array(img.convert("RGB"))[None, :] for img in image], axis=0)
    elif isinstance(image, list) and isinstance(image[0], np.ndarray):
        stacked = np.concatenate([arr[None, :] for arr in image], axis=0)
    else:
        stacked = image

    # NHWC -> NCHW, then rescale [0, 255] -> [-1, 1].
    stacked = stacked.transpose(0, 3, 1, 2)
    return torch.from_numpy(stacked).to(dtype=torch.float32) / 127.5 - 1.0
130
+
131
+
132
+ class StableDiffusionControlNetImg2ImgPipeline(
133
+ DiffusionPipeline,
134
+ StableDiffusionMixin,
135
+ TextualInversionLoaderMixin,
136
+ LoraLoaderMixin,
137
+ IPAdapterMixin,
138
+ FromSingleFileMixin,
139
+ ):
140
+ r"""
141
+ Pipeline for image-to-image generation using Stable Diffusion with ControlNet guidance.
142
+
143
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
144
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
145
+
146
+ The pipeline also inherits the following loading methods:
147
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
148
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
149
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
150
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
151
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
152
+
153
+ Args:
154
+ vae ([`AutoencoderKL`]):
155
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
156
+ text_encoder ([`~transformers.CLIPTextModel`]):
157
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
158
+ tokenizer ([`~transformers.CLIPTokenizer`]):
159
+ A `CLIPTokenizer` to tokenize text.
160
+ unet ([`UNet2DConditionModel`]):
161
+ A `UNet2DConditionModel` to denoise the encoded image latents.
162
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
163
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
164
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
165
+ additional conditioning.
166
+ scheduler ([`SchedulerMixin`]):
167
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
168
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
169
+ safety_checker ([`StableDiffusionSafetyChecker`]):
170
+ Classification module that estimates whether generated images could be considered offensive or harmful.
171
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
172
+ about a model's potential harms.
173
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
174
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
175
+ """
176
+
177
    # Order in which sub-models are moved to the accelerator under model CPU offload.
    model_cpu_offload_seq = "text_encoder->unet->vae"
    # Components the pipeline may be constructed without.
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
    # Sub-modules the CPU-offload hooks must leave in place.
    _exclude_from_cpu_offload = ["safety_checker"]
    # Tensor names that step-end callbacks are allowed to read/modify.
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
181
+
182
+ def __init__(
183
+ self,
184
+ vae: AutoencoderKL,
185
+ text_encoder: CLIPTextModel,
186
+ tokenizer: CLIPTokenizer,
187
+ unet: UNet2DConditionModel,
188
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
189
+ scheduler: KarrasDiffusionSchedulers,
190
+ safety_checker: StableDiffusionSafetyChecker,
191
+ feature_extractor: CLIPImageProcessor,
192
+ image_encoder: CLIPVisionModelWithProjection = None,
193
+ requires_safety_checker: bool = True,
194
+ ):
195
+ super().__init__()
196
+
197
+ if safety_checker is None and requires_safety_checker:
198
+ logger.warning(
199
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
200
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
201
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
202
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
203
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
204
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
205
+ )
206
+
207
+ if safety_checker is not None and feature_extractor is None:
208
+ raise ValueError(
209
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
210
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
211
+ )
212
+
213
+ if isinstance(controlnet, (list, tuple)):
214
+ controlnet = MultiControlNetModel(controlnet)
215
+
216
+ self.register_modules(
217
+ vae=vae,
218
+ text_encoder=text_encoder,
219
+ tokenizer=tokenizer,
220
+ unet=unet,
221
+ controlnet=controlnet,
222
+ scheduler=scheduler,
223
+ safety_checker=safety_checker,
224
+ feature_extractor=feature_extractor,
225
+ image_encoder=image_encoder,
226
+ )
227
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
228
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
229
+ self.control_image_processor = VaeImageProcessor(
230
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
231
+ )
232
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
233
+
234
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        **kwargs,
    ):
        """Deprecated wrapper around `encode_prompt`.

        Kept for backward compatibility: returns the negative and positive prompt
        embeddings concatenated into a single tensor (uncond first), matching the
        pre-tuple output format.
        """
        deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
        deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)

        prompt_embeds_tuple = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            **kwargs,
        )

        # concatenate for backwards comp
        # (tuple order is (positive, negative); the legacy format puts negative first)
        prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])

        return prompt_embeds
266
+
267
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.

        Returns:
            A `(prompt_embeds, negative_prompt_embeds)` tuple; the negative embeddings are
            `None` when classifier-free guidance is disabled and none were supplied.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        # Infer the batch size from whichever prompt representation was provided.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            # Warn (rather than fail) when the prompt exceeded the tokenizer's limit.
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            # Pad the negative prompt to the same sequence length as the positive one.
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds
448
+
449
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        """Encode an image with the CLIP image encoder for IP-Adapter conditioning.

        Returns a `(conditional, unconditional)` pair, repeated `num_images_per_prompt`
        times along the batch dimension. With `output_hidden_states` the penultimate
        hidden states are returned (unconditional from a zero image); otherwise pooled
        image embeddings are returned (unconditional as zeros).
        """
        dtype = next(self.image_encoder.parameters()).dtype

        # Raw (non-tensor) images are converted to pixel values first.
        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional branch: encode an all-zero image of the same shape.
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional branch: a zero embedding (no extra encoder pass needed).
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds
473
+
474
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        """Resolve IP-Adapter image embeddings for the denoising loop.

        Either encodes `ip_adapter_image` (one image per installed IP-Adapter) via
        `self.encode_image`, or repeats pre-computed `ip_adapter_image_embeds` to
        the effective batch size. Returns a list with one embedding tensor per
        adapter; under classifier-free guidance each tensor is the concatenation
        `[negative, positive]` along dim 0.
        """
        if ip_adapter_image_embeds is None:
            # Normalize to a list so a single image works with a single adapter.
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # Plain `ImageProjection` layers consume pooled embeds; every other
                # projection layer consumes penultimate hidden states.
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                # Stack adds a leading batch axis repeated per generated image.
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if do_classifier_free_guidance:
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            # Pre-computed path: tile each provided tensor to the requested batch.
            repeat_dims = [1]
            image_embeds = []
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    # Provided embeds are expected to already hold [negative, positive]
                    # halves along dim 0 (see `__call__` docstring).
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                    single_negative_image_embeds = single_negative_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
                    )
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                else:
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                image_embeds.append(single_image_embeds)

        return image_embeds
525
+
526
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
527
+ def run_safety_checker(self, image, device, dtype):
528
+ if self.safety_checker is None:
529
+ has_nsfw_concept = None
530
+ else:
531
+ if torch.is_tensor(image):
532
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
533
+ else:
534
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
535
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
536
+ image, has_nsfw_concept = self.safety_checker(
537
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
538
+ )
539
+ return image, has_nsfw_concept
540
+
541
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
542
+ def decode_latents(self, latents):
543
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
544
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
545
+
546
+ latents = 1 / self.vae.config.scaling_factor * latents
547
+ image = self.vae.decode(latents, return_dict=False)[0]
548
+ image = (image / 2 + 0.5).clamp(0, 1)
549
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
550
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
551
+ return image
552
+
553
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
554
+ def prepare_extra_step_kwargs(self, generator, eta):
555
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
556
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
557
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
558
+ # and should be between [0, 1]
559
+
560
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
561
+ extra_step_kwargs = {}
562
+ if accepts_eta:
563
+ extra_step_kwargs["eta"] = eta
564
+
565
+ # check if the scheduler accepts generator
566
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
567
+ if accepts_generator:
568
+ extra_step_kwargs["generator"] = generator
569
+ return extra_step_kwargs
570
+
571
+ def check_inputs(
572
+ self,
573
+ prompt,
574
+ image,
575
+ callback_steps,
576
+ negative_prompt=None,
577
+ prompt_embeds=None,
578
+ negative_prompt_embeds=None,
579
+ ip_adapter_image=None,
580
+ ip_adapter_image_embeds=None,
581
+ controlnet_conditioning_scale=1.0,
582
+ control_guidance_start=0.0,
583
+ control_guidance_end=1.0,
584
+ callback_on_step_end_tensor_inputs=None,
585
+ ):
586
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
587
+ raise ValueError(
588
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
589
+ f" {type(callback_steps)}."
590
+ )
591
+
592
+ if callback_on_step_end_tensor_inputs is not None and not all(
593
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
594
+ ):
595
+ raise ValueError(
596
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
597
+ )
598
+
599
+ if prompt is not None and prompt_embeds is not None:
600
+ raise ValueError(
601
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
602
+ " only forward one of the two."
603
+ )
604
+ elif prompt is None and prompt_embeds is None:
605
+ raise ValueError(
606
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
607
+ )
608
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
609
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
610
+
611
+ if negative_prompt is not None and negative_prompt_embeds is not None:
612
+ raise ValueError(
613
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
614
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
615
+ )
616
+
617
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
618
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
619
+ raise ValueError(
620
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
621
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
622
+ f" {negative_prompt_embeds.shape}."
623
+ )
624
+
625
+ # `prompt` needs more sophisticated handling when there are multiple
626
+ # conditionings.
627
+ if isinstance(self.controlnet, MultiControlNetModel):
628
+ if isinstance(prompt, list):
629
+ logger.warning(
630
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
631
+ " prompts. The conditionings will be fixed across the prompts."
632
+ )
633
+
634
+ # Check `image`
635
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
636
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
637
+ )
638
+ if (
639
+ isinstance(self.controlnet, ControlNetModel)
640
+ or is_compiled
641
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
642
+ ):
643
+ self.check_image(image, prompt, prompt_embeds)
644
+ elif (
645
+ isinstance(self.controlnet, MultiControlNetModel)
646
+ or is_compiled
647
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
648
+ ):
649
+ if not isinstance(image, list):
650
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
651
+
652
+ # When `image` is a nested list:
653
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
654
+ elif any(isinstance(i, list) for i in image):
655
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
656
+ elif len(image) != len(self.controlnet.nets):
657
+ raise ValueError(
658
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
659
+ )
660
+
661
+ for image_ in image:
662
+ self.check_image(image_, prompt, prompt_embeds)
663
+ else:
664
+ assert False
665
+
666
+ # Check `controlnet_conditioning_scale`
667
+ if (
668
+ isinstance(self.controlnet, ControlNetModel)
669
+ or is_compiled
670
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
671
+ ):
672
+ if not isinstance(controlnet_conditioning_scale, float):
673
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
674
+ elif (
675
+ isinstance(self.controlnet, MultiControlNetModel)
676
+ or is_compiled
677
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
678
+ ):
679
+ if isinstance(controlnet_conditioning_scale, list):
680
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
681
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
682
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
683
+ self.controlnet.nets
684
+ ):
685
+ raise ValueError(
686
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
687
+ " the same length as the number of controlnets"
688
+ )
689
+ else:
690
+ assert False
691
+
692
+ if len(control_guidance_start) != len(control_guidance_end):
693
+ raise ValueError(
694
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
695
+ )
696
+
697
+ if isinstance(self.controlnet, MultiControlNetModel):
698
+ if len(control_guidance_start) != len(self.controlnet.nets):
699
+ raise ValueError(
700
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
701
+ )
702
+
703
+ for start, end in zip(control_guidance_start, control_guidance_end):
704
+ if start >= end:
705
+ raise ValueError(
706
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
707
+ )
708
+ if start < 0.0:
709
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
710
+ if end > 1.0:
711
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
712
+
713
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
714
+ raise ValueError(
715
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
716
+ )
717
+
718
+ if ip_adapter_image_embeds is not None:
719
+ if not isinstance(ip_adapter_image_embeds, list):
720
+ raise ValueError(
721
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
722
+ )
723
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
724
+ raise ValueError(
725
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
726
+ )
727
+
728
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
729
+ def check_image(self, image, prompt, prompt_embeds):
730
+ image_is_pil = isinstance(image, PIL.Image.Image)
731
+ image_is_tensor = isinstance(image, torch.Tensor)
732
+ image_is_np = isinstance(image, np.ndarray)
733
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
734
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
735
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
736
+
737
+ if (
738
+ not image_is_pil
739
+ and not image_is_tensor
740
+ and not image_is_np
741
+ and not image_is_pil_list
742
+ and not image_is_tensor_list
743
+ and not image_is_np_list
744
+ ):
745
+ raise TypeError(
746
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
747
+ )
748
+
749
+ if image_is_pil:
750
+ image_batch_size = 1
751
+ else:
752
+ image_batch_size = len(image)
753
+
754
+ if prompt is not None and isinstance(prompt, str):
755
+ prompt_batch_size = 1
756
+ elif prompt is not None and isinstance(prompt, list):
757
+ prompt_batch_size = len(prompt)
758
+ elif prompt_embeds is not None:
759
+ prompt_batch_size = prompt_embeds.shape[0]
760
+
761
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
762
+ raise ValueError(
763
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
764
+ )
765
+
766
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
767
+ def prepare_control_image(
768
+ self,
769
+ image,
770
+ width,
771
+ height,
772
+ batch_size,
773
+ num_images_per_prompt,
774
+ device,
775
+ dtype,
776
+ do_classifier_free_guidance=False,
777
+ guess_mode=False,
778
+ ):
779
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
780
+ image_batch_size = image.shape[0]
781
+
782
+ if image_batch_size == 1:
783
+ repeat_by = batch_size
784
+ else:
785
+ # image batch size is the same as prompt batch size
786
+ repeat_by = num_images_per_prompt
787
+
788
+ image = image.repeat_interleave(repeat_by, dim=0)
789
+
790
+ image = image.to(device=device, dtype=dtype)
791
+
792
+ if do_classifier_free_guidance and not guess_mode:
793
+ image = torch.cat([image] * 2)
794
+
795
+ return image
796
+
797
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
798
+ def get_timesteps(self, num_inference_steps, strength, device):
799
+ # get the original timestep using init_timestep
800
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
801
+
802
+ t_start = max(num_inference_steps - init_timestep, 0)
803
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
804
+ if hasattr(self.scheduler, "set_begin_index"):
805
+ self.scheduler.set_begin_index(t_start * self.scheduler.order)
806
+
807
+ return timesteps, num_inference_steps - t_start
808
+
809
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        """Encode the init image to latents and noise them to `timestep`.

        `image` may already be latents (detected by a 4-channel dim 1 — assumes the
        VAE latent space has 4 channels; TODO confirm for non-SD VAEs), in which
        case encoding is skipped. The latent batch is expanded (with a deprecation
        warning) when more prompts than images were passed, then noised with
        `self.scheduler.add_noise`.
        """
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        # Effective batch: one latent per prompt per requested image.
        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            # Input is already in latent space; no VAE encode needed.
            init_latents = image

        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            elif isinstance(generator, list):
                # One generator per batch element: encode image slices individually
                # so each sample is drawn from its own generator.
                init_latents = [
                    retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                    for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

            # Scale into the space the UNet was trained on.
            init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents for batch_size
            deprecation_message = (
                f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
                " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
                " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
                " your script to pass as many initial images as text prompts to suppress this warning."
            )
            deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
867
+
868
    @property
    def guidance_scale(self):
        # Classifier-free guidance scale recorded by `__call__` in `_guidance_scale`.
        return self._guidance_scale
871
+
872
    @property
    def clip_skip(self):
        # Number of CLIP layers to skip, recorded by `__call__` in `_clip_skip`.
        return self._clip_skip
875
+
876
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        # CFG is active whenever the configured guidance scale exceeds 1.
        return self._guidance_scale > 1
882
+
883
    @property
    def cross_attention_kwargs(self):
        # Kwargs forwarded to attention processors, recorded by `__call__`.
        return self._cross_attention_kwargs
886
+
887
    @property
    def num_timesteps(self):
        # Length of the timestep schedule actually used by the last/current run.
        return self._num_timesteps
890
+
891
+ @torch.no_grad()
892
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
893
+ def __call__(
894
+ self,
895
+ prompt: Union[str, List[str]] = None,
896
+ image: PipelineImageInput = None,
897
+ control_image: PipelineImageInput = None,
898
+ height: Optional[int] = None,
899
+ width: Optional[int] = None,
900
+ strength: float = 0.8,
901
+ num_inference_steps: int = 50,
902
+ guidance_scale: float = 7.5,
903
+ negative_prompt: Optional[Union[str, List[str]]] = None,
904
+ num_images_per_prompt: Optional[int] = 1,
905
+ eta: float = 0.0,
906
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
907
+ latents: Optional[torch.FloatTensor] = None,
908
+ prompt_embeds: Optional[torch.FloatTensor] = None,
909
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
910
+ ip_adapter_image: Optional[PipelineImageInput] = None,
911
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
912
+ output_type: Optional[str] = "pil",
913
+ return_dict: bool = True,
914
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
915
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
916
+ guess_mode: bool = False,
917
+ control_guidance_start: Union[float, List[float]] = 0.0,
918
+ control_guidance_end: Union[float, List[float]] = 1.0,
919
+ clip_skip: Optional[int] = None,
920
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
921
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
922
+ **kwargs,
923
+ ):
924
+ r"""
925
+ The call function to the pipeline for generation.
926
+
927
+ Args:
928
+ prompt (`str` or `List[str]`, *optional*):
929
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
930
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
931
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
932
+ The initial image to be used as the starting point for the image generation process. Can also accept
933
+ image latents as `image`, and if passing latents directly they are not encoded again.
934
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
935
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
936
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
937
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
938
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
939
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
940
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
941
+ input to a single ControlNet.
942
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
943
+ The height in pixels of the generated image.
944
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
945
+ The width in pixels of the generated image.
946
+ strength (`float`, *optional*, defaults to 0.8):
947
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
948
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
949
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
950
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
951
+ essentially ignores `image`.
952
+ num_inference_steps (`int`, *optional*, defaults to 50):
953
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
954
+ expense of slower inference.
955
+ guidance_scale (`float`, *optional*, defaults to 7.5):
956
+ A higher guidance scale value encourages the model to generate images closely linked to the text
957
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
958
+ negative_prompt (`str` or `List[str]`, *optional*):
959
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
960
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
961
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
962
+ The number of images to generate per prompt.
963
+ eta (`float`, *optional*, defaults to 0.0):
964
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
965
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
966
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
967
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
968
+ generation deterministic.
969
+ latents (`torch.FloatTensor`, *optional*):
970
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
971
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
972
+ tensor is generated by sampling using the supplied random `generator`.
973
+ prompt_embeds (`torch.FloatTensor`, *optional*):
974
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
975
+ provided, text embeddings are generated from the `prompt` input argument.
976
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
977
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
978
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
979
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
980
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
981
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
982
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
983
+ if `do_classifier_free_guidance` is set to `True`.
984
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
985
+ output_type (`str`, *optional*, defaults to `"pil"`):
986
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
987
+ return_dict (`bool`, *optional*, defaults to `True`):
988
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
989
+ plain tuple.
990
+ cross_attention_kwargs (`dict`, *optional*):
991
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
992
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
993
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
994
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
995
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
996
+ the corresponding scale as a list.
997
+ guess_mode (`bool`, *optional*, defaults to `False`):
998
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
999
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
1000
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1001
+ The percentage of total steps at which the ControlNet starts applying.
1002
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1003
+ The percentage of total steps at which the ControlNet stops applying.
1004
+ clip_skip (`int`, *optional*):
1005
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1006
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1007
+ callback_on_step_end (`Callable`, *optional*):
1008
+ A function that calls at the end of each denoising steps during the inference. The function is called
1009
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1010
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1011
+ `callback_on_step_end_tensor_inputs`.
1012
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1013
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1014
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1015
+ `._callback_tensor_inputs` attribute of your pipeine class.
1016
+
1017
+ Examples:
1018
+
1019
+ Returns:
1020
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1021
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1022
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
1023
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
1024
+ "not-safe-for-work" (nsfw) content.
1025
+ """
1026
+
1027
+ callback = kwargs.pop("callback", None)
1028
+ callback_steps = kwargs.pop("callback_steps", None)
1029
+
1030
+ if callback is not None:
1031
+ deprecate(
1032
+ "callback",
1033
+ "1.0.0",
1034
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1035
+ )
1036
+ if callback_steps is not None:
1037
+ deprecate(
1038
+ "callback_steps",
1039
+ "1.0.0",
1040
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1041
+ )
1042
+
1043
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1044
+
1045
+ # align format for control guidance
1046
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1047
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1048
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1049
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1050
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1051
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1052
+ control_guidance_start, control_guidance_end = (
1053
+ mult * [control_guidance_start],
1054
+ mult * [control_guidance_end],
1055
+ )
1056
+
1057
+ # 1. Check inputs. Raise error if not correct
1058
+ self.check_inputs(
1059
+ prompt,
1060
+ control_image,
1061
+ callback_steps,
1062
+ negative_prompt,
1063
+ prompt_embeds,
1064
+ negative_prompt_embeds,
1065
+ ip_adapter_image,
1066
+ ip_adapter_image_embeds,
1067
+ controlnet_conditioning_scale,
1068
+ control_guidance_start,
1069
+ control_guidance_end,
1070
+ callback_on_step_end_tensor_inputs,
1071
+ )
1072
+
1073
+ self._guidance_scale = guidance_scale
1074
+ self._clip_skip = clip_skip
1075
+ self._cross_attention_kwargs = cross_attention_kwargs
1076
+
1077
+ # 2. Define call parameters
1078
+ if prompt is not None and isinstance(prompt, str):
1079
+ batch_size = 1
1080
+ elif prompt is not None and isinstance(prompt, list):
1081
+ batch_size = len(prompt)
1082
+ else:
1083
+ batch_size = prompt_embeds.shape[0]
1084
+
1085
+ device = self._execution_device
1086
+
1087
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1088
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1089
+
1090
+ global_pool_conditions = (
1091
+ controlnet.config.global_pool_conditions
1092
+ if isinstance(controlnet, ControlNetModel)
1093
+ else controlnet.nets[0].config.global_pool_conditions
1094
+ )
1095
+ guess_mode = guess_mode or global_pool_conditions
1096
+
1097
+ # 3. Encode input prompt
1098
+ text_encoder_lora_scale = (
1099
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1100
+ )
1101
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1102
+ prompt,
1103
+ device,
1104
+ num_images_per_prompt,
1105
+ self.do_classifier_free_guidance,
1106
+ negative_prompt,
1107
+ prompt_embeds=prompt_embeds,
1108
+ negative_prompt_embeds=negative_prompt_embeds,
1109
+ lora_scale=text_encoder_lora_scale,
1110
+ clip_skip=self.clip_skip,
1111
+ )
1112
+ # For classifier free guidance, we need to do two forward passes.
1113
+ # Here we concatenate the unconditional and text embeddings into a single batch
1114
+ # to avoid doing two forward passes
1115
+ if self.do_classifier_free_guidance:
1116
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1117
+
1118
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1119
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1120
+ ip_adapter_image,
1121
+ ip_adapter_image_embeds,
1122
+ device,
1123
+ batch_size * num_images_per_prompt,
1124
+ self.do_classifier_free_guidance,
1125
+ )
1126
+
1127
+ # 4. Prepare image
1128
+ image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
1129
+
1130
+ # 5. Prepare controlnet_conditioning_image
1131
+ if isinstance(controlnet, ControlNetModel):
1132
+ control_image = self.prepare_control_image(
1133
+ image=control_image,
1134
+ width=width,
1135
+ height=height,
1136
+ batch_size=batch_size * num_images_per_prompt,
1137
+ num_images_per_prompt=num_images_per_prompt,
1138
+ device=device,
1139
+ dtype=controlnet.dtype,
1140
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1141
+ guess_mode=guess_mode,
1142
+ )
1143
+ elif isinstance(controlnet, MultiControlNetModel):
1144
+ control_images = []
1145
+
1146
+ for control_image_ in control_image:
1147
+ control_image_ = self.prepare_control_image(
1148
+ image=control_image_,
1149
+ width=width,
1150
+ height=height,
1151
+ batch_size=batch_size * num_images_per_prompt,
1152
+ num_images_per_prompt=num_images_per_prompt,
1153
+ device=device,
1154
+ dtype=controlnet.dtype,
1155
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1156
+ guess_mode=guess_mode,
1157
+ )
1158
+
1159
+ control_images.append(control_image_)
1160
+
1161
+ control_image = control_images
1162
+ else:
1163
+ assert False
1164
+
1165
+ # 5. Prepare timesteps
1166
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1167
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
1168
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1169
+ self._num_timesteps = len(timesteps)
1170
+
1171
+ # 6. Prepare latent variables
1172
+ latents = self.prepare_latents(
1173
+ image,
1174
+ latent_timestep,
1175
+ batch_size,
1176
+ num_images_per_prompt,
1177
+ prompt_embeds.dtype,
1178
+ device,
1179
+ generator,
1180
+ )
1181
+
1182
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1183
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1184
+
1185
+ # 7.1 Add image embeds for IP-Adapter
1186
+ added_cond_kwargs = (
1187
+ {"image_embeds": image_embeds}
1188
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
1189
+ else None
1190
+ )
1191
+
1192
+ # 7.2 Create tensor stating which controlnets to keep
1193
+ controlnet_keep = []
1194
+ for i in range(len(timesteps)):
1195
+ keeps = [
1196
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1197
+ for s, e in zip(control_guidance_start, control_guidance_end)
1198
+ ]
1199
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1200
+
1201
+ # 8. Denoising loop
1202
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1203
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1204
+ for i, t in enumerate(timesteps):
1205
+ # expand the latents if we are doing classifier free guidance
1206
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1207
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1208
+
1209
+ # controlnet(s) inference
1210
+ if guess_mode and self.do_classifier_free_guidance:
1211
+ # Infer ControlNet only for the conditional batch.
1212
+ control_model_input = latents
1213
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1214
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1215
+ else:
1216
+ control_model_input = latent_model_input
1217
+ controlnet_prompt_embeds = prompt_embeds
1218
+
1219
+ if isinstance(controlnet_keep[i], list):
1220
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1221
+ else:
1222
+ controlnet_cond_scale = controlnet_conditioning_scale
1223
+ if isinstance(controlnet_cond_scale, list):
1224
+ controlnet_cond_scale = controlnet_cond_scale[0]
1225
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1226
+
1227
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1228
+ control_model_input,
1229
+ t,
1230
+ encoder_hidden_states=controlnet_prompt_embeds,
1231
+ controlnet_cond=control_image,
1232
+ conditioning_scale=cond_scale,
1233
+ guess_mode=guess_mode,
1234
+ return_dict=False,
1235
+ )
1236
+
1237
+ if guess_mode and self.do_classifier_free_guidance:
1238
+                     # Inferred ControlNet only for the conditional batch.
1239
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1240
+ # add 0 to the unconditional batch to keep it unchanged.
1241
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1242
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1243
+
1244
+ # predict the noise residual
1245
+ noise_pred = self.unet(
1246
+ latent_model_input,
1247
+ t,
1248
+ encoder_hidden_states=prompt_embeds,
1249
+ cross_attention_kwargs=self.cross_attention_kwargs,
1250
+ down_block_additional_residuals=down_block_res_samples,
1251
+ mid_block_additional_residual=mid_block_res_sample,
1252
+ added_cond_kwargs=added_cond_kwargs,
1253
+ return_dict=False,
1254
+ )[0]
1255
+
1256
+ # perform guidance
1257
+ if self.do_classifier_free_guidance:
1258
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1259
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1260
+
1261
+ # compute the previous noisy sample x_t -> x_t-1
1262
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1263
+
1264
+ if callback_on_step_end is not None:
1265
+ callback_kwargs = {}
1266
+ for k in callback_on_step_end_tensor_inputs:
1267
+ callback_kwargs[k] = locals()[k]
1268
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1269
+
1270
+ latents = callback_outputs.pop("latents", latents)
1271
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1272
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1273
+
1274
+ # call the callback, if provided
1275
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1276
+ progress_bar.update()
1277
+ if callback is not None and i % callback_steps == 0:
1278
+ step_idx = i // getattr(self.scheduler, "order", 1)
1279
+ callback(step_idx, t, latents)
1280
+
1281
+ # If we do sequential model offloading, let's offload unet and controlnet
1282
+ # manually for max memory savings
1283
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1284
+ self.unet.to("cpu")
1285
+ self.controlnet.to("cpu")
1286
+ torch.cuda.empty_cache()
1287
+
1288
+ if not output_type == "latent":
1289
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1290
+ 0
1291
+ ]
1292
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1293
+ else:
1294
+ image = latents
1295
+ has_nsfw_concept = None
1296
+
1297
+ if has_nsfw_concept is None:
1298
+ do_denormalize = [True] * image.shape[0]
1299
+ else:
1300
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1301
+
1302
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1303
+
1304
+ # Offload all models
1305
+ self.maybe_free_model_hooks()
1306
+
1307
+ if not return_dict:
1308
+ return (image, has_nsfw_concept)
1309
+
1310
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py ADDED
@@ -0,0 +1,1620 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
16
+
17
+ import inspect
18
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import PIL.Image
22
+ import torch
23
+ import torch.nn.functional as F
24
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
25
+
26
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
27
+ from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
28
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
29
+ from ...models.lora import adjust_lora_scale_text_encoder
30
+ from ...schedulers import KarrasDiffusionSchedulers
31
+ from ...utils import (
32
+ USE_PEFT_BACKEND,
33
+ deprecate,
34
+ logging,
35
+ replace_example_docstring,
36
+ scale_lora_layers,
37
+ unscale_lora_layers,
38
+ )
39
+ from ...utils.torch_utils import is_compiled_module, randn_tensor
40
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
41
+ from ..stable_diffusion import StableDiffusionPipelineOutput
42
+ from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
43
+ from .multicontrolnet import MultiControlNetModel
44
+
45
+
46
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
+
48
+
49
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # !pip install opencv-python transformers accelerate
        >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import torch
        >>> import cv2
        >>> from PIL import Image

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
        ... )
        >>> init_image = init_image.resize((512, 512))

        >>> generator = torch.Generator(device="cpu").manual_seed(1)

        >>> mask_image = load_image(
        ...     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
        ... )
        >>> mask_image = mask_image.resize((512, 512))


        >>> def make_canny_condition(image):
        ...     image = np.array(image)
        ...     image = cv2.Canny(image, 100, 200)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     image = Image.fromarray(image)
        ...     return image


        >>> control_image = make_canny_condition(init_image)

        >>> controlnet = ControlNetModel.from_pretrained(
        ...     "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
        ... )
        >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
        ... )

        >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        >>> pipe.enable_model_cpu_offload()

        >>> # generate image
        >>> image = pipe(
        ...     "a handsome man with ray-ban sunglasses",
        ...     num_inference_steps=20,
        ...     generator=generator,
        ...     eta=1.0,
        ...     image=init_image,
        ...     mask_image=mask_image,
        ...     control_image=control_image,
        ... ).images[0]
        ```
"""
104
+
105
+
106
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    """Pull a latent tensor out of a VAE encoder output.

    Handles outputs that expose a ``latent_dist`` (drawing a sample, or taking the
    distribution mode when ``sample_mode == "argmax"``) as well as outputs that carry
    precomputed ``latents`` directly. Raises ``AttributeError`` when neither is found.
    """
    has_dist = hasattr(encoder_output, "latent_dist")
    if has_dist and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    if has_dist and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")
118
+
119
+
120
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image
def prepare_mask_and_masked_image(image, mask, height, width, return_image=False):
    """
    Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
    converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
    ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
            It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
            ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
        mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.


    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
        should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
        (or the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
        dimensions: ``batch x channels x height x width``.
    """
    # Deprecated legacy helper; emits a deprecation warning and delegates nothing —
    # the whole conversion is still done inline below for backwards compatibility.
    deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
    deprecate(
        "prepare_mask_and_masked_image",
        "0.30.0",
        deprecation_message,
    )
    if image is None:
        raise ValueError("`image` input cannot be undefined.")

    if mask is None:
        raise ValueError("`mask_image` input cannot be undefined.")

    # Tensor path: `image` and `mask` must be tensors together; mixed types are a TypeError.
    if isinstance(image, torch.Tensor):
        if not isinstance(mask, torch.Tensor):
            raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")

        # Batch single image
        if image.ndim == 3:
            assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
            image = image.unsqueeze(0)

        # Batch and add channel dim for single mask
        if mask.ndim == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)

        # Batch single mask or add channel dim
        if mask.ndim == 3:
            # Single batched mask, no channel dim or single mask not batched but channel dim
            # NOTE(review): a (1, H, W) mask is ambiguous; leading dim 1 is treated as the
            # batch dim here, so it becomes (1, 1, H, W) either way.
            if mask.shape[0] == 1:
                mask = mask.unsqueeze(0)

            # Batched masks no channel dim
            else:
                mask = mask.unsqueeze(1)

        assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
        assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
        assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"

        # Check image is in [-1, 1]
        if image.min() < -1 or image.max() > 1:
            raise ValueError("Image should be in [-1, 1] range")

        # Check mask is in [0, 1]
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError("Mask should be in [0, 1] range")

        # Binarize mask (in place): values >= 0.5 mark the region to inpaint
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Image as float32
        image = image.to(dtype=torch.float32)
    elif isinstance(mask, torch.Tensor):
        raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
    else:
        # PIL / numpy path: resize, stack into a batch, and normalize to [-1, 1].
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]
        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            # resize all images w.r.t passed height an width
            image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)

        # HWC -> CHW, then map uint8 [0, 255] to float [-1, 1]
        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

        # preprocess mask: grayscale, batched, scaled to [0, 1]
        if isinstance(mask, (PIL.Image.Image, np.ndarray)):
            mask = [mask]

        if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
            mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
            mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
            mask = mask.astype(np.float32) / 255.0
        elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
            mask = np.concatenate([m[None, None, :] for m in mask], axis=0)

        # Binarize after scaling so the 0.5 threshold applies uniformly
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)

    # Zero out the region to inpaint: keep pixels where the (binarized) mask is 0
    masked_image = image * (mask < 0.5)

    # n.b. ensure backwards compatibility as old function does not return image
    if return_image:
        return mask, masked_image, image

    return mask, masked_image
241
+
242
+
243
+ class StableDiffusionControlNetInpaintPipeline(
244
+ DiffusionPipeline,
245
+ StableDiffusionMixin,
246
+ TextualInversionLoaderMixin,
247
+ LoraLoaderMixin,
248
+ IPAdapterMixin,
249
+ FromSingleFileMixin,
250
+ ):
251
+ r"""
252
+ Pipeline for image inpainting using Stable Diffusion with ControlNet guidance.
253
+
254
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
255
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
256
+
257
+ The pipeline also inherits the following loading methods:
258
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
259
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
260
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
261
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
262
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
263
+
264
+ <Tip>
265
+
266
+ This pipeline can be used with checkpoints that have been specifically fine-tuned for inpainting
267
+ ([runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)) as well as
268
+ default text-to-image Stable Diffusion checkpoints
269
+ ([runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)). Default text-to-image
270
+ Stable Diffusion checkpoints might be preferable for ControlNets that have been fine-tuned on those, such as
271
+ [lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint).
272
+
273
+ </Tip>
274
+
275
+ Args:
276
+ vae ([`AutoencoderKL`]):
277
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
278
+ text_encoder ([`~transformers.CLIPTextModel`]):
279
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
280
+ tokenizer ([`~transformers.CLIPTokenizer`]):
281
+ A `CLIPTokenizer` to tokenize text.
282
+ unet ([`UNet2DConditionModel`]):
283
+ A `UNet2DConditionModel` to denoise the encoded image latents.
284
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
285
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
286
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
287
+ additional conditioning.
288
+ scheduler ([`SchedulerMixin`]):
289
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
290
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
291
+ safety_checker ([`StableDiffusionSafetyChecker`]):
292
+ Classification module that estimates whether generated images could be considered offensive or harmful.
293
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
294
+ about a model's potential harms.
295
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
296
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
297
+ """
298
+
299
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
300
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
301
+ _exclude_from_cpu_offload = ["safety_checker"]
302
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
303
+
304
+ def __init__(
305
+ self,
306
+ vae: AutoencoderKL,
307
+ text_encoder: CLIPTextModel,
308
+ tokenizer: CLIPTokenizer,
309
+ unet: UNet2DConditionModel,
310
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
311
+ scheduler: KarrasDiffusionSchedulers,
312
+ safety_checker: StableDiffusionSafetyChecker,
313
+ feature_extractor: CLIPImageProcessor,
314
+ image_encoder: CLIPVisionModelWithProjection = None,
315
+ requires_safety_checker: bool = True,
316
+ ):
317
+ super().__init__()
318
+
319
+ if safety_checker is None and requires_safety_checker:
320
+ logger.warning(
321
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
322
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
323
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
324
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
325
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
326
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
327
+ )
328
+
329
+ if safety_checker is not None and feature_extractor is None:
330
+ raise ValueError(
331
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
332
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
333
+ )
334
+
335
+ if isinstance(controlnet, (list, tuple)):
336
+ controlnet = MultiControlNetModel(controlnet)
337
+
338
+ self.register_modules(
339
+ vae=vae,
340
+ text_encoder=text_encoder,
341
+ tokenizer=tokenizer,
342
+ unet=unet,
343
+ controlnet=controlnet,
344
+ scheduler=scheduler,
345
+ safety_checker=safety_checker,
346
+ feature_extractor=feature_extractor,
347
+ image_encoder=image_encoder,
348
+ )
349
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
350
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
351
+ self.mask_processor = VaeImageProcessor(
352
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
353
+ )
354
+ self.control_image_processor = VaeImageProcessor(
355
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
356
+ )
357
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
358
+
359
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
360
+ def _encode_prompt(
361
+ self,
362
+ prompt,
363
+ device,
364
+ num_images_per_prompt,
365
+ do_classifier_free_guidance,
366
+ negative_prompt=None,
367
+ prompt_embeds: Optional[torch.FloatTensor] = None,
368
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
369
+ lora_scale: Optional[float] = None,
370
+ **kwargs,
371
+ ):
372
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
373
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
374
+
375
+ prompt_embeds_tuple = self.encode_prompt(
376
+ prompt=prompt,
377
+ device=device,
378
+ num_images_per_prompt=num_images_per_prompt,
379
+ do_classifier_free_guidance=do_classifier_free_guidance,
380
+ negative_prompt=negative_prompt,
381
+ prompt_embeds=prompt_embeds,
382
+ negative_prompt_embeds=negative_prompt_embeds,
383
+ lora_scale=lora_scale,
384
+ **kwargs,
385
+ )
386
+
387
+ # concatenate for backwards comp
388
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
389
+
390
+ return prompt_embeds
391
+
392
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
393
+ def encode_prompt(
394
+ self,
395
+ prompt,
396
+ device,
397
+ num_images_per_prompt,
398
+ do_classifier_free_guidance,
399
+ negative_prompt=None,
400
+ prompt_embeds: Optional[torch.FloatTensor] = None,
401
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
402
+ lora_scale: Optional[float] = None,
403
+ clip_skip: Optional[int] = None,
404
+ ):
405
+ r"""
406
+ Encodes the prompt into text encoder hidden states.
407
+
408
+ Args:
409
+ prompt (`str` or `List[str]`, *optional*):
410
+ prompt to be encoded
411
+ device: (`torch.device`):
412
+ torch device
413
+ num_images_per_prompt (`int`):
414
+ number of images that should be generated per prompt
415
+ do_classifier_free_guidance (`bool`):
416
+ whether to use classifier free guidance or not
417
+ negative_prompt (`str` or `List[str]`, *optional*):
418
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
419
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
420
+ less than `1`).
421
+ prompt_embeds (`torch.FloatTensor`, *optional*):
422
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
423
+ provided, text embeddings will be generated from `prompt` input argument.
424
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
425
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
426
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
427
+ argument.
428
+ lora_scale (`float`, *optional*):
429
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
430
+ clip_skip (`int`, *optional*):
431
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
432
+ the output of the pre-final layer will be used for computing the prompt embeddings.
433
+ """
434
+ # set lora scale so that monkey patched LoRA
435
+ # function of text encoder can correctly access it
436
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
437
+ self._lora_scale = lora_scale
438
+
439
+ # dynamically adjust the LoRA scale
440
+ if not USE_PEFT_BACKEND:
441
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
442
+ else:
443
+ scale_lora_layers(self.text_encoder, lora_scale)
444
+
445
+ if prompt is not None and isinstance(prompt, str):
446
+ batch_size = 1
447
+ elif prompt is not None and isinstance(prompt, list):
448
+ batch_size = len(prompt)
449
+ else:
450
+ batch_size = prompt_embeds.shape[0]
451
+
452
+ if prompt_embeds is None:
453
+ # textual inversion: process multi-vector tokens if necessary
454
+ if isinstance(self, TextualInversionLoaderMixin):
455
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
456
+
457
+ text_inputs = self.tokenizer(
458
+ prompt,
459
+ padding="max_length",
460
+ max_length=self.tokenizer.model_max_length,
461
+ truncation=True,
462
+ return_tensors="pt",
463
+ )
464
+ text_input_ids = text_inputs.input_ids
465
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
466
+
467
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
468
+ text_input_ids, untruncated_ids
469
+ ):
470
+ removed_text = self.tokenizer.batch_decode(
471
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
472
+ )
473
+ logger.warning(
474
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
475
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
476
+ )
477
+
478
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
479
+ attention_mask = text_inputs.attention_mask.to(device)
480
+ else:
481
+ attention_mask = None
482
+
483
+ if clip_skip is None:
484
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
485
+ prompt_embeds = prompt_embeds[0]
486
+ else:
487
+ prompt_embeds = self.text_encoder(
488
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
489
+ )
490
+ # Access the `hidden_states` first, that contains a tuple of
491
+ # all the hidden states from the encoder layers. Then index into
492
+ # the tuple to access the hidden states from the desired layer.
493
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
494
+ # We also need to apply the final LayerNorm here to not mess with the
495
+ # representations. The `last_hidden_states` that we typically use for
496
+ # obtaining the final prompt representations passes through the LayerNorm
497
+ # layer.
498
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
499
+
500
+ if self.text_encoder is not None:
501
+ prompt_embeds_dtype = self.text_encoder.dtype
502
+ elif self.unet is not None:
503
+ prompt_embeds_dtype = self.unet.dtype
504
+ else:
505
+ prompt_embeds_dtype = prompt_embeds.dtype
506
+
507
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
508
+
509
+ bs_embed, seq_len, _ = prompt_embeds.shape
510
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
511
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
512
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
513
+
514
+ # get unconditional embeddings for classifier free guidance
515
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
516
+ uncond_tokens: List[str]
517
+ if negative_prompt is None:
518
+ uncond_tokens = [""] * batch_size
519
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
520
+ raise TypeError(
521
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
522
+ f" {type(prompt)}."
523
+ )
524
+ elif isinstance(negative_prompt, str):
525
+ uncond_tokens = [negative_prompt]
526
+ elif batch_size != len(negative_prompt):
527
+ raise ValueError(
528
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
529
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
530
+ " the batch size of `prompt`."
531
+ )
532
+ else:
533
+ uncond_tokens = negative_prompt
534
+
535
+ # textual inversion: process multi-vector tokens if necessary
536
+ if isinstance(self, TextualInversionLoaderMixin):
537
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
538
+
539
+ max_length = prompt_embeds.shape[1]
540
+ uncond_input = self.tokenizer(
541
+ uncond_tokens,
542
+ padding="max_length",
543
+ max_length=max_length,
544
+ truncation=True,
545
+ return_tensors="pt",
546
+ )
547
+
548
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
549
+ attention_mask = uncond_input.attention_mask.to(device)
550
+ else:
551
+ attention_mask = None
552
+
553
+ negative_prompt_embeds = self.text_encoder(
554
+ uncond_input.input_ids.to(device),
555
+ attention_mask=attention_mask,
556
+ )
557
+ negative_prompt_embeds = negative_prompt_embeds[0]
558
+
559
+ if do_classifier_free_guidance:
560
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
561
+ seq_len = negative_prompt_embeds.shape[1]
562
+
563
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
564
+
565
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
566
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
567
+
568
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
569
+ # Retrieve the original scale by scaling back the LoRA layers
570
+ unscale_lora_layers(self.text_encoder, lora_scale)
571
+
572
+ return prompt_embeds, negative_prompt_embeds
573
+
574
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
    """Encode an IP-Adapter image into (conditional, unconditional) embeddings.

    Args:
        image: A `torch.Tensor` of pixel values, or any input the feature
            extractor accepts (e.g. PIL image / ndarray).
        device: Target torch device for the embeddings.
        num_images_per_prompt: How many times each embedding is tiled along dim 0.
        output_hidden_states: When truthy, return the encoder's penultimate
            hidden states instead of the pooled `image_embeds`.

    Returns:
        Tuple `(cond, uncond)` of tensors tiled `num_images_per_prompt` times.
    """
    encoder_dtype = next(self.image_encoder.parameters()).dtype

    # Non-tensor inputs are converted to pixel values by the feature extractor.
    if not isinstance(image, torch.Tensor):
        image = self.feature_extractor(image, return_tensors="pt").pixel_values
    image = image.to(device=device, dtype=encoder_dtype)

    if output_hidden_states:
        # Penultimate hidden states; the unconditional branch encodes a black image.
        cond_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
        uncond_states = self.image_encoder(
            torch.zeros_like(image), output_hidden_states=True
        ).hidden_states[-2]
        return (
            cond_states.repeat_interleave(num_images_per_prompt, dim=0),
            uncond_states.repeat_interleave(num_images_per_prompt, dim=0),
        )

    # Pooled image embeddings; the unconditional branch is simply all zeros.
    cond_embeds = self.image_encoder(image).image_embeds
    cond_embeds = cond_embeds.repeat_interleave(num_images_per_prompt, dim=0)
    return cond_embeds, torch.zeros_like(cond_embeds)
598
+
599
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
def prepare_ip_adapter_image_embeds(
    self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
):
    """Build per-adapter IP-Adapter image embeddings for the denoising loop.

    Either encodes raw `ip_adapter_image` inputs (one per loaded IP-Adapter) or
    tiles user-supplied `ip_adapter_image_embeds`. Under classifier-free
    guidance each entry is the concatenation [negative, positive] along dim 0.

    Returns:
        A list of tensors, one per IP-Adapter.
    """
    if ip_adapter_image_embeds is not None:
        # Precomputed embeddings: tile per prompt; under CFG split the halves,
        # tile each, and rebuild the [negative, positive] concatenation.
        tiled = []
        for embeds in ip_adapter_image_embeds:
            if do_classifier_free_guidance:
                negative, positive = embeds.chunk(2)
                trailing_ones = [1] * (positive.ndim - 1)
                positive = positive.repeat(num_images_per_prompt, *trailing_ones)
                negative = negative.repeat(num_images_per_prompt, *trailing_ones)
                tiled.append(torch.cat([negative, positive]))
            else:
                trailing_ones = [1] * (embeds.ndim - 1)
                tiled.append(embeds.repeat(num_images_per_prompt, *trailing_ones))
        return tiled

    if not isinstance(ip_adapter_image, list):
        ip_adapter_image = [ip_adapter_image]

    if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
        raise ValueError(
            f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
        )

    image_embeds = []
    for adapter_image, projection_layer in zip(
        ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
    ):
        # ImageProjection consumes pooled embeds; every other projection type
        # consumes the encoder's penultimate hidden states.
        wants_hidden_states = not isinstance(projection_layer, ImageProjection)
        embeds, negative_embeds = self.encode_image(adapter_image, device, 1, wants_hidden_states)
        embeds = torch.stack([embeds] * num_images_per_prompt, dim=0)
        negative_embeds = torch.stack([negative_embeds] * num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            embeds = torch.cat([negative_embeds, embeds])
            embeds = embeds.to(device)

        image_embeds.append(embeds)
    return image_embeds
650
+
651
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
def run_safety_checker(self, image, device, dtype):
    """Run the NSFW safety checker over a decoded image batch.

    Returns:
        Tuple `(image, has_nsfw_concept)`; `has_nsfw_concept` is `None` when no
        safety checker is loaded, otherwise the checker's per-image flags (and
        the image possibly blacked out by the checker).
    """
    if self.safety_checker is None:
        # No checker loaded: pass the image through unmodified.
        return image, None

    # The CLIP feature extractor needs PIL images regardless of input type.
    if torch.is_tensor(image):
        checker_pil_input = self.image_processor.postprocess(image, output_type="pil")
    else:
        checker_pil_input = self.image_processor.numpy_to_pil(image)
    checker_features = self.feature_extractor(checker_pil_input, return_tensors="pt").to(device)
    image, has_nsfw_concept = self.safety_checker(
        images=image, clip_input=checker_features.pixel_values.to(dtype)
    )
    return image, has_nsfw_concept
665
+
666
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
    """Deprecated: decode VAE latents into a float32 NumPy image batch (NHWC, [0, 1]).

    Use `VaeImageProcessor.postprocess(...)` instead.
    """
    deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
    deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

    # Undo the VAE scaling factor applied at encode time, then decode.
    scaled = 1 / self.vae.config.scaling_factor * latents
    decoded = self.vae.decode(scaled, return_dict=False)[0]
    # Map from [-1, 1] to [0, 1] and move channels last.
    decoded = (decoded / 2 + 0.5).clamp(0, 1)
    # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
    return decoded.cpu().permute(0, 2, 3, 1).float().numpy()
677
+
678
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
    """Build the extra kwargs accepted by `self.scheduler.step`.

    eta (η) is only used with the DDIMScheduler; it corresponds to η in the DDIM
    paper (https://arxiv.org/abs/2010.02502) and should be in [0, 1]. Schedulers
    whose `step` signature lacks `eta`/`generator` receive neither.
    """
    step_params = inspect.signature(self.scheduler.step).parameters

    extra_step_kwargs = {}
    if "eta" in step_params:
        extra_step_kwargs["eta"] = eta
    if "generator" in step_params:
        extra_step_kwargs["generator"] = generator
    return extra_step_kwargs
695
+
696
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device):
    """Trim the scheduler's timesteps for img2img-style denoising.

    `strength` controls how much of the schedule actually runs: strength == 1
    keeps every step, smaller values skip the earliest (noisiest) steps.
    (`device` is unused here; kept for signature compatibility with callers.)

    Returns:
        Tuple `(timesteps, effective_num_inference_steps)`.
    """
    kept_steps = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - kept_steps, 0)
    first_index = t_start * self.scheduler.order

    timesteps = self.scheduler.timesteps[first_index:]
    # Schedulers that track an explicit begin index must be kept in sync.
    if hasattr(self.scheduler, "set_begin_index"):
        self.scheduler.set_begin_index(first_index)

    return timesteps, num_inference_steps - t_start
707
+
708
def check_inputs(
    self,
    prompt,
    image,
    mask_image,
    height,
    width,
    callback_steps,
    output_type,
    negative_prompt=None,
    prompt_embeds=None,
    negative_prompt_embeds=None,
    ip_adapter_image=None,
    ip_adapter_image_embeds=None,
    controlnet_conditioning_scale=1.0,
    control_guidance_start=0.0,
    control_guidance_end=1.0,
    callback_on_step_end_tensor_inputs=None,
    padding_mask_crop=None,
):
    """Validate the full argument set of `__call__` before any heavy work runs.

    Raises `ValueError`/`TypeError` describing the first inconsistency found:
    dimension divisibility, prompt/embedding exclusivity, mask-crop typing,
    ControlNet image/scale shape agreement, guidance-interval sanity, and
    IP-Adapter input exclusivity.

    Fixes vs. the previous revision:
    - The two `assert False` fall-through branches are now explicit exceptions
      (`assert` is stripped under `python -O`, silently skipping the guard).
    - The multi-ControlNet `controlnet_conditioning_scale` length check was
      unreachable (dead `elif` after an `if` matching the same condition); it
      now actually runs when a list of scales is supplied.
    """
    if height is not None and height % 8 != 0 or width is not None and width % 8 != 0:
        raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

    if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
        raise ValueError(
            f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
            f" {type(callback_steps)}."
        )

    if callback_on_step_end_tensor_inputs is not None and not all(
        k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
    ):
        raise ValueError(
            f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
        )

    # Exactly one of `prompt` / `prompt_embeds` must be supplied.
    if prompt is not None and prompt_embeds is not None:
        raise ValueError(
            f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
            " only forward one of the two."
        )
    elif prompt is None and prompt_embeds is None:
        raise ValueError(
            "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
        )
    elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
        raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

    if negative_prompt is not None and negative_prompt_embeds is not None:
        raise ValueError(
            f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
            f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
        )

    if prompt_embeds is not None and negative_prompt_embeds is not None:
        if prompt_embeds.shape != negative_prompt_embeds.shape:
            raise ValueError(
                "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                f" {negative_prompt_embeds.shape}."
            )

    # Mask-crop mode only works with PIL inputs and PIL output.
    if padding_mask_crop is not None:
        if not isinstance(image, PIL.Image.Image):
            raise ValueError(
                f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
            )
        if not isinstance(mask_image, PIL.Image.Image):
            raise ValueError(
                f"The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}."
            )
        if output_type != "pil":
            raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")

    # `prompt` needs more sophisticated handling when there are multiple
    # conditionings.
    if isinstance(self.controlnet, MultiControlNetModel):
        if isinstance(prompt, list):
            logger.warning(
                f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                " prompts. The conditionings will be fixed across the prompts."
            )

    # Check `image` against the (possibly torch.compile-wrapped) controlnet type.
    is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
        self.controlnet, torch._dynamo.eval_frame.OptimizedModule
    )
    if (
        isinstance(self.controlnet, ControlNetModel)
        or is_compiled
        and isinstance(self.controlnet._orig_mod, ControlNetModel)
    ):
        self.check_image(image, prompt, prompt_embeds)
    elif (
        isinstance(self.controlnet, MultiControlNetModel)
        or is_compiled
        and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
    ):
        if not isinstance(image, list):
            raise TypeError("For multiple controlnets: `image` must be type `list`")

        # When `image` is a nested list:
        # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
        elif any(isinstance(i, list) for i in image):
            raise ValueError("A single batch of multiple conditionings are supported at the moment.")
        elif len(image) != len(self.controlnet.nets):
            raise ValueError(
                f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
            )

        for image_ in image:
            self.check_image(image_, prompt, prompt_embeds)
    else:
        # Unreachable for supported configurations: `self.controlnet` must be a
        # ControlNetModel or MultiControlNetModel (possibly compiled).
        raise ValueError(f"Unsupported `controlnet` type: {type(self.controlnet)}.")

    # Check `controlnet_conditioning_scale`
    if (
        isinstance(self.controlnet, ControlNetModel)
        or is_compiled
        and isinstance(self.controlnet._orig_mod, ControlNetModel)
    ):
        if not isinstance(controlnet_conditioning_scale, float):
            raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
    elif (
        isinstance(self.controlnet, MultiControlNetModel)
        or is_compiled
        and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
    ):
        if isinstance(controlnet_conditioning_scale, list):
            if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            # Previously dead code: this length check lived in an unreachable
            # `elif` branch and never ran. It now validates list inputs.
            if len(controlnet_conditioning_scale) != len(self.controlnet.nets):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
    else:
        raise ValueError(f"Unsupported `controlnet` type: {type(self.controlnet)}.")

    if len(control_guidance_start) != len(control_guidance_end):
        raise ValueError(
            f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
        )

    if isinstance(self.controlnet, MultiControlNetModel):
        if len(control_guidance_start) != len(self.controlnet.nets):
            raise ValueError(
                f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
            )

    for start, end in zip(control_guidance_start, control_guidance_end):
        if start >= end:
            raise ValueError(
                f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
            )
        if start < 0.0:
            raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
        if end > 1.0:
            raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")

    if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
        raise ValueError(
            "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
        )

    if ip_adapter_image_embeds is not None:
        if not isinstance(ip_adapter_image_embeds, list):
            raise ValueError(
                f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
            )
        elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
            raise ValueError(
                f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
            )
885
+
886
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
def check_image(self, image, prompt, prompt_embeds):
    """Validate a single ControlNet conditioning image's type and batch size.

    Accepts a PIL image, NumPy array, torch tensor, or a homogeneous list of
    any of those; the image batch must be 1 or match the prompt batch size.
    """
    is_pil = isinstance(image, PIL.Image.Image)
    is_tensor = isinstance(image, torch.Tensor)
    is_np = isinstance(image, np.ndarray)
    is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
    is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
    is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)

    if not any((is_pil, is_tensor, is_np, is_pil_list, is_tensor_list, is_np_list)):
        raise TypeError(
            f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
        )

    # A lone PIL image counts as batch size 1; tensors/lists use their length.
    image_batch_size = 1 if is_pil else len(image)

    if prompt is not None and isinstance(prompt, str):
        prompt_batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        prompt_batch_size = len(prompt)
    elif prompt_embeds is not None:
        prompt_batch_size = prompt_embeds.shape[0]
    # NOTE(review): if both `prompt` and `prompt_embeds` are None,
    # `prompt_batch_size` stays unbound and the comparison below raises
    # NameError; callers are expected to have validated that already.

    if image_batch_size != 1 and image_batch_size != prompt_batch_size:
        raise ValueError(
            f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
        )
923
+
924
def prepare_control_image(
    self,
    image,
    width,
    height,
    batch_size,
    num_images_per_prompt,
    device,
    dtype,
    crops_coords,
    resize_mode,
    do_classifier_free_guidance=False,
    guess_mode=False,
):
    """Preprocess and batch a ControlNet conditioning image.

    Resizes/crops via the control image processor, tiles the result up to the
    effective batch size, moves it to `device`/`dtype`, and doubles it for the
    unconditional CFG pass (unless `guess_mode` is active).
    """
    image = self.control_image_processor.preprocess(
        image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
    ).to(dtype=torch.float32)

    # A single conditioning image is shared across the whole batch; otherwise
    # it is assumed to already match the prompt batch and is tiled per image.
    repeat_by = batch_size if image.shape[0] == 1 else num_images_per_prompt
    image = image.repeat_interleave(repeat_by, dim=0)
    image = image.to(device=device, dtype=dtype)

    if do_classifier_free_guidance and not guess_mode:
        # Duplicate so the unconditional half of the CFG batch sees the same conditioning.
        image = torch.cat([image] * 2)

    return image
957
+
958
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_latents
def prepare_latents(
    self,
    batch_size,
    num_channels_latents,
    height,
    width,
    dtype,
    device,
    generator,
    latents=None,
    image=None,
    timestep=None,
    is_strength_max=True,
    return_noise=False,
    return_image_latents=False,
):
    """Create the initial latents for inpainting.

    strength == 1 (`is_strength_max`) starts from pure scaled noise; otherwise
    the image is encoded and noised to `timestep`. Caller-supplied `latents`
    are treated as noise and scaled by the scheduler's initial sigma.

    Returns:
        Tuple of `(latents[, noise][, image_latents])` depending on the
        `return_noise` / `return_image_latents` flags.
    """
    shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )

    if (image is None or timestep is None) and not is_strength_max:
        raise ValueError(
            "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
            "However, either the image or the noise timestep has not been provided."
        )

    needs_image_latents = return_image_latents or (latents is None and not is_strength_max)
    if needs_image_latents:
        image = image.to(device=device, dtype=dtype)
        # A 4-channel input is assumed to already be VAE latents.
        if image.shape[1] == 4:
            image_latents = image
        else:
            image_latents = self._encode_vae_image(image=image, generator=generator)
        image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)

    if latents is None:
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # if strength is 1. then initialise the latents to noise, else initial to image + noise
        latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
        # if pure noise then scale the initial latents by the Scheduler's init sigma
        latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
    else:
        # Caller-provided latents are interpreted as noise and scaled.
        noise = latents.to(device)
        latents = noise * self.scheduler.init_noise_sigma

    outputs = (latents,)
    if return_noise:
        outputs += (noise,)
    if return_image_latents:
        outputs += (image_latents,)
    return outputs
1016
+
1017
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents
def prepare_mask_latents(
    self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
):
    """Prepare the latent-resolution mask and masked-image latents.

    Returns:
        Tuple `(mask, masked_image_latents)`, each tiled to `batch_size` and
        duplicated along dim 0 when classifier-free guidance is active.
    """
    # Downsample the mask to latent resolution *before* the dtype cast so
    # cpu-offload / half precision setups don't break inside interpolate.
    mask = torch.nn.functional.interpolate(
        mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
    )
    mask = mask.to(device=device, dtype=dtype)

    masked_image = masked_image.to(device=device, dtype=dtype)

    # Four channels means the caller already passed VAE latents.
    if masked_image.shape[1] == 4:
        masked_image_latents = masked_image
    else:
        masked_image_latents = self._encode_vae_image(masked_image, generator=generator)

    # Tile mask and latents up to the requested batch size (mps-friendly repeat).
    if mask.shape[0] < batch_size:
        if batch_size % mask.shape[0] != 0:
            raise ValueError(
                "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                " of masks that you pass is divisible by the total requested batch size."
            )
        mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
    if masked_image_latents.shape[0] < batch_size:
        if batch_size % masked_image_latents.shape[0] != 0:
            raise ValueError(
                "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                " Make sure the number of images that you pass is divisible by the total requested batch size."
            )
        masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)

    if do_classifier_free_guidance:
        mask = torch.cat([mask] * 2)
        masked_image_latents = torch.cat([masked_image_latents] * 2)

    # aligning device to prevent device errors when concating it with the latent model input
    masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
    return mask, masked_image_latents
1062
+
1063
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
    """Encode an image batch into VAE latents scaled by the VAE scaling factor.

    A list of generators means one RNG per batch element, so each sample is
    encoded separately to keep per-sample determinism.
    """
    if isinstance(generator, list):
        per_sample = [
            retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
            for i in range(image.shape[0])
        ]
        image_latents = torch.cat(per_sample, dim=0)
    else:
        image_latents = retrieve_latents(self.vae.encode(image), generator=generator)

    return self.vae.config.scaling_factor * image_latents
1077
+
1078
@property
def guidance_scale(self):
    """The classifier-free guidance weight currently in use (set by `__call__`)."""
    return self._guidance_scale

@property
def clip_skip(self):
    """Number of CLIP layers skipped when encoding the prompt, or None."""
    return self._clip_skip

# `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
    """Whether classifier-free guidance is active (guidance_scale > 1)."""
    return self._guidance_scale > 1

@property
def cross_attention_kwargs(self):
    """Extra kwargs forwarded to the attention processors."""
    return self._cross_attention_kwargs

@property
def num_timesteps(self):
    """Number of denoising timesteps recorded for the current run."""
    return self._num_timesteps
1100
+
1101
+ @torch.no_grad()
1102
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1103
+ def __call__(
1104
+ self,
1105
+ prompt: Union[str, List[str]] = None,
1106
+ image: PipelineImageInput = None,
1107
+ mask_image: PipelineImageInput = None,
1108
+ control_image: PipelineImageInput = None,
1109
+ height: Optional[int] = None,
1110
+ width: Optional[int] = None,
1111
+ padding_mask_crop: Optional[int] = None,
1112
+ strength: float = 1.0,
1113
+ num_inference_steps: int = 50,
1114
+ guidance_scale: float = 7.5,
1115
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1116
+ num_images_per_prompt: Optional[int] = 1,
1117
+ eta: float = 0.0,
1118
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1119
+ latents: Optional[torch.FloatTensor] = None,
1120
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1121
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1122
+ ip_adapter_image: Optional[PipelineImageInput] = None,
1123
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
1124
+ output_type: Optional[str] = "pil",
1125
+ return_dict: bool = True,
1126
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1127
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.5,
1128
+ guess_mode: bool = False,
1129
+ control_guidance_start: Union[float, List[float]] = 0.0,
1130
+ control_guidance_end: Union[float, List[float]] = 1.0,
1131
+ clip_skip: Optional[int] = None,
1132
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1133
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1134
+ **kwargs,
1135
+ ):
1136
+ r"""
1137
+ The call function to the pipeline for generation.
1138
+
1139
+ Args:
1140
+ prompt (`str` or `List[str]`, *optional*):
1141
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
1142
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
1143
+ `List[PIL.Image.Image]`, or `List[np.ndarray]`):
1144
+ `Image`, NumPy array or tensor representing an image batch to be used as the starting point. For both
1145
+ NumPy array and PyTorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
1146
+ list or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a NumPy array or
1147
+ a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image
1148
+ latents as `image`, but if passing latents directly it is not encoded again.
1149
+ mask_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`,
1150
+ `List[PIL.Image.Image]`, or `List[np.ndarray]`):
1151
+ `Image`, NumPy array or tensor representing an image batch to mask `image`. White pixels in the mask
1152
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
1153
+ single channel (luminance) before use. If it's a NumPy array or PyTorch tensor, it should contain one
1154
+ color channel (L) instead of 3, so the expected shape for PyTorch tensor would be `(B, 1, H, W)`, `(B,
1155
+ H, W)`, `(1, H, W)`, `(H, W)`. And for NumPy array, it would be for `(B, H, W, 1)`, `(B, H, W)`, `(H,
1156
+ W, 1)`, or `(H, W)`.
1157
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
1158
+ `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):
1159
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
1160
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
1161
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
1162
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
1163
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
1164
+ input to a single ControlNet.
1165
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1166
+ The height in pixels of the generated image.
1167
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1168
+ The width in pixels of the generated image.
1169
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
1170
+ The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If
1171
+ `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ration of the image and
1172
+ contains all masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on
1173
+ the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large
1174
+ and contain information inreleant for inpainging, such as background.
1175
+ strength (`float`, *optional*, defaults to 1.0):
1176
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
1177
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
1178
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
1179
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
1180
+ essentially ignores `image`.
1181
+ num_inference_steps (`int`, *optional*, defaults to 50):
1182
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1183
+ expense of slower inference.
1184
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1185
+ A higher guidance scale value encourages the model to generate images closely linked to the text
1186
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1187
+ negative_prompt (`str` or `List[str]`, *optional*):
1188
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1189
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
1190
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1191
+ The number of images to generate per prompt.
1192
+ eta (`float`, *optional*, defaults to 0.0):
1193
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1194
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1195
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1196
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1197
+ generation deterministic.
1198
+ latents (`torch.FloatTensor`, *optional*):
1199
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1200
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1201
+ tensor is generated by sampling using the supplied random `generator`.
1202
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1203
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1204
+ provided, text embeddings are generated from the `prompt` input argument.
1205
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1206
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1207
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
1208
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1209
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
1210
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
1211
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
1212
+ if `do_classifier_free_guidance` is set to `True`.
1213
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
1214
+ output_type (`str`, *optional*, defaults to `"pil"`):
1215
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1216
+ return_dict (`bool`, *optional*, defaults to `True`):
1217
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1218
+ plain tuple.
1219
+ cross_attention_kwargs (`dict`, *optional*):
1220
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
1221
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1222
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.5):
1223
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
1224
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
1225
+ the corresponding scale as a list.
1226
+ guess_mode (`bool`, *optional*, defaults to `False`):
1227
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
1228
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
1229
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1230
+ The percentage of total steps at which the ControlNet starts applying.
1231
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1232
+ The percentage of total steps at which the ControlNet stops applying.
1233
+ clip_skip (`int`, *optional*):
1234
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1235
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1236
+ callback_on_step_end (`Callable`, *optional*):
1237
+ A function that calls at the end of each denoising steps during the inference. The function is called
1238
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1239
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1240
+ `callback_on_step_end_tensor_inputs`.
1241
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1242
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1243
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1244
+ `._callback_tensor_inputs` attribute of your pipeine class.
1245
+
1246
+ Examples:
1247
+
1248
+ Returns:
1249
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1250
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1251
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
1252
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
1253
+ "not-safe-for-work" (nsfw) content.
1254
+ """
1255
+
1256
+ callback = kwargs.pop("callback", None)
1257
+ callback_steps = kwargs.pop("callback_steps", None)
1258
+
1259
+ if callback is not None:
1260
+ deprecate(
1261
+ "callback",
1262
+ "1.0.0",
1263
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1264
+ )
1265
+ if callback_steps is not None:
1266
+ deprecate(
1267
+ "callback_steps",
1268
+ "1.0.0",
1269
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1270
+ )
1271
+
1272
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1273
+
1274
+ # align format for control guidance
1275
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1276
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1277
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1278
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1279
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1280
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1281
+ control_guidance_start, control_guidance_end = (
1282
+ mult * [control_guidance_start],
1283
+ mult * [control_guidance_end],
1284
+ )
1285
+
1286
+ # 1. Check inputs. Raise error if not correct
1287
+ self.check_inputs(
1288
+ prompt,
1289
+ control_image,
1290
+ mask_image,
1291
+ height,
1292
+ width,
1293
+ callback_steps,
1294
+ output_type,
1295
+ negative_prompt,
1296
+ prompt_embeds,
1297
+ negative_prompt_embeds,
1298
+ ip_adapter_image,
1299
+ ip_adapter_image_embeds,
1300
+ controlnet_conditioning_scale,
1301
+ control_guidance_start,
1302
+ control_guidance_end,
1303
+ callback_on_step_end_tensor_inputs,
1304
+ padding_mask_crop,
1305
+ )
1306
+
1307
+ self._guidance_scale = guidance_scale
1308
+ self._clip_skip = clip_skip
1309
+ self._cross_attention_kwargs = cross_attention_kwargs
1310
+
1311
+ # 2. Define call parameters
1312
+ if prompt is not None and isinstance(prompt, str):
1313
+ batch_size = 1
1314
+ elif prompt is not None and isinstance(prompt, list):
1315
+ batch_size = len(prompt)
1316
+ else:
1317
+ batch_size = prompt_embeds.shape[0]
1318
+
1319
+ if padding_mask_crop is not None:
1320
+ height, width = self.image_processor.get_default_height_width(image, height, width)
1321
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
1322
+ resize_mode = "fill"
1323
+ else:
1324
+ crops_coords = None
1325
+ resize_mode = "default"
1326
+
1327
+ device = self._execution_device
1328
+
1329
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1330
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1331
+
1332
+ global_pool_conditions = (
1333
+ controlnet.config.global_pool_conditions
1334
+ if isinstance(controlnet, ControlNetModel)
1335
+ else controlnet.nets[0].config.global_pool_conditions
1336
+ )
1337
+ guess_mode = guess_mode or global_pool_conditions
1338
+
1339
+ # 3. Encode input prompt
1340
+ text_encoder_lora_scale = (
1341
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1342
+ )
1343
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1344
+ prompt,
1345
+ device,
1346
+ num_images_per_prompt,
1347
+ self.do_classifier_free_guidance,
1348
+ negative_prompt,
1349
+ prompt_embeds=prompt_embeds,
1350
+ negative_prompt_embeds=negative_prompt_embeds,
1351
+ lora_scale=text_encoder_lora_scale,
1352
+ clip_skip=self.clip_skip,
1353
+ )
1354
+ # For classifier free guidance, we need to do two forward passes.
1355
+ # Here we concatenate the unconditional and text embeddings into a single batch
1356
+ # to avoid doing two forward passes
1357
+ if self.do_classifier_free_guidance:
1358
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1359
+
1360
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1361
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1362
+ ip_adapter_image,
1363
+ ip_adapter_image_embeds,
1364
+ device,
1365
+ batch_size * num_images_per_prompt,
1366
+ self.do_classifier_free_guidance,
1367
+ )
1368
+
1369
+ # 4. Prepare image
1370
+ if isinstance(controlnet, ControlNetModel):
1371
+ control_image = self.prepare_control_image(
1372
+ image=control_image,
1373
+ width=width,
1374
+ height=height,
1375
+ batch_size=batch_size * num_images_per_prompt,
1376
+ num_images_per_prompt=num_images_per_prompt,
1377
+ device=device,
1378
+ dtype=controlnet.dtype,
1379
+ crops_coords=crops_coords,
1380
+ resize_mode=resize_mode,
1381
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1382
+ guess_mode=guess_mode,
1383
+ )
1384
+ elif isinstance(controlnet, MultiControlNetModel):
1385
+ control_images = []
1386
+
1387
+ for control_image_ in control_image:
1388
+ control_image_ = self.prepare_control_image(
1389
+ image=control_image_,
1390
+ width=width,
1391
+ height=height,
1392
+ batch_size=batch_size * num_images_per_prompt,
1393
+ num_images_per_prompt=num_images_per_prompt,
1394
+ device=device,
1395
+ dtype=controlnet.dtype,
1396
+ crops_coords=crops_coords,
1397
+ resize_mode=resize_mode,
1398
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1399
+ guess_mode=guess_mode,
1400
+ )
1401
+
1402
+ control_images.append(control_image_)
1403
+
1404
+ control_image = control_images
1405
+ else:
1406
+ assert False
1407
+
1408
+ # 4.1 Preprocess mask and image - resizes image and mask w.r.t height and width
1409
+ original_image = image
1410
+ init_image = self.image_processor.preprocess(
1411
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
1412
+ )
1413
+ init_image = init_image.to(dtype=torch.float32)
1414
+
1415
+ mask = self.mask_processor.preprocess(
1416
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
1417
+ )
1418
+
1419
+ masked_image = init_image * (mask < 0.5)
1420
+ _, _, height, width = init_image.shape
1421
+
1422
+ # 5. Prepare timesteps
1423
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1424
+ timesteps, num_inference_steps = self.get_timesteps(
1425
+ num_inference_steps=num_inference_steps, strength=strength, device=device
1426
+ )
1427
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1428
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1429
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
1430
+ is_strength_max = strength == 1.0
1431
+ self._num_timesteps = len(timesteps)
1432
+
1433
+ # 6. Prepare latent variables
1434
+ num_channels_latents = self.vae.config.latent_channels
1435
+ num_channels_unet = self.unet.config.in_channels
1436
+ return_image_latents = num_channels_unet == 4
1437
+ latents_outputs = self.prepare_latents(
1438
+ batch_size * num_images_per_prompt,
1439
+ num_channels_latents,
1440
+ height,
1441
+ width,
1442
+ prompt_embeds.dtype,
1443
+ device,
1444
+ generator,
1445
+ latents,
1446
+ image=init_image,
1447
+ timestep=latent_timestep,
1448
+ is_strength_max=is_strength_max,
1449
+ return_noise=True,
1450
+ return_image_latents=return_image_latents,
1451
+ )
1452
+
1453
+ if return_image_latents:
1454
+ latents, noise, image_latents = latents_outputs
1455
+ else:
1456
+ latents, noise = latents_outputs
1457
+
1458
+ # 7. Prepare mask latent variables
1459
+ mask, masked_image_latents = self.prepare_mask_latents(
1460
+ mask,
1461
+ masked_image,
1462
+ batch_size * num_images_per_prompt,
1463
+ height,
1464
+ width,
1465
+ prompt_embeds.dtype,
1466
+ device,
1467
+ generator,
1468
+ self.do_classifier_free_guidance,
1469
+ )
1470
+
1471
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1472
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1473
+
1474
+ # 7.1 Add image embeds for IP-Adapter
1475
+ added_cond_kwargs = (
1476
+ {"image_embeds": image_embeds}
1477
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
1478
+ else None
1479
+ )
1480
+
1481
+ # 7.2 Create tensor stating which controlnets to keep
1482
+ controlnet_keep = []
1483
+ for i in range(len(timesteps)):
1484
+ keeps = [
1485
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1486
+ for s, e in zip(control_guidance_start, control_guidance_end)
1487
+ ]
1488
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1489
+
1490
+ # 8. Denoising loop
1491
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1492
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1493
+ for i, t in enumerate(timesteps):
1494
+ # expand the latents if we are doing classifier free guidance
1495
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1496
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1497
+
1498
+ # controlnet(s) inference
1499
+ if guess_mode and self.do_classifier_free_guidance:
1500
+ # Infer ControlNet only for the conditional batch.
1501
+ control_model_input = latents
1502
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1503
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1504
+ else:
1505
+ control_model_input = latent_model_input
1506
+ controlnet_prompt_embeds = prompt_embeds
1507
+
1508
+ if isinstance(controlnet_keep[i], list):
1509
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1510
+ else:
1511
+ controlnet_cond_scale = controlnet_conditioning_scale
1512
+ if isinstance(controlnet_cond_scale, list):
1513
+ controlnet_cond_scale = controlnet_cond_scale[0]
1514
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1515
+
1516
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1517
+ control_model_input,
1518
+ t,
1519
+ encoder_hidden_states=controlnet_prompt_embeds,
1520
+ controlnet_cond=control_image,
1521
+ conditioning_scale=cond_scale,
1522
+ guess_mode=guess_mode,
1523
+ return_dict=False,
1524
+ )
1525
+
1526
+ if guess_mode and self.do_classifier_free_guidance:
1527
+ # Infered ControlNet only for the conditional batch.
1528
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1529
+ # add 0 to the unconditional batch to keep it unchanged.
1530
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1531
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1532
+
1533
+ # predict the noise residual
1534
+ if num_channels_unet == 9:
1535
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1536
+
1537
+ noise_pred = self.unet(
1538
+ latent_model_input,
1539
+ t,
1540
+ encoder_hidden_states=prompt_embeds,
1541
+ cross_attention_kwargs=self.cross_attention_kwargs,
1542
+ down_block_additional_residuals=down_block_res_samples,
1543
+ mid_block_additional_residual=mid_block_res_sample,
1544
+ added_cond_kwargs=added_cond_kwargs,
1545
+ return_dict=False,
1546
+ )[0]
1547
+
1548
+ # perform guidance
1549
+ if self.do_classifier_free_guidance:
1550
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1551
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1552
+
1553
+ # compute the previous noisy sample x_t -> x_t-1
1554
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1555
+
1556
+ if num_channels_unet == 4:
1557
+ init_latents_proper = image_latents
1558
+ if self.do_classifier_free_guidance:
1559
+ init_mask, _ = mask.chunk(2)
1560
+ else:
1561
+ init_mask = mask
1562
+
1563
+ if i < len(timesteps) - 1:
1564
+ noise_timestep = timesteps[i + 1]
1565
+ init_latents_proper = self.scheduler.add_noise(
1566
+ init_latents_proper, noise, torch.tensor([noise_timestep])
1567
+ )
1568
+
1569
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1570
+
1571
+ if callback_on_step_end is not None:
1572
+ callback_kwargs = {}
1573
+ for k in callback_on_step_end_tensor_inputs:
1574
+ callback_kwargs[k] = locals()[k]
1575
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1576
+
1577
+ latents = callback_outputs.pop("latents", latents)
1578
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1579
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1580
+
1581
+ # call the callback, if provided
1582
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1583
+ progress_bar.update()
1584
+ if callback is not None and i % callback_steps == 0:
1585
+ step_idx = i // getattr(self.scheduler, "order", 1)
1586
+ callback(step_idx, t, latents)
1587
+
1588
+ # If we do sequential model offloading, let's offload unet and controlnet
1589
+ # manually for max memory savings
1590
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1591
+ self.unet.to("cpu")
1592
+ self.controlnet.to("cpu")
1593
+ torch.cuda.empty_cache()
1594
+
1595
+ if not output_type == "latent":
1596
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1597
+ 0
1598
+ ]
1599
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1600
+ else:
1601
+ image = latents
1602
+ has_nsfw_concept = None
1603
+
1604
+ if has_nsfw_concept is None:
1605
+ do_denormalize = [True] * image.shape[0]
1606
+ else:
1607
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1608
+
1609
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1610
+
1611
+ if padding_mask_crop is not None:
1612
+ image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
1613
+
1614
+ # Offload all models
1615
+ self.maybe_free_model_hooks()
1616
+
1617
+ if not return_dict:
1618
+ return (image, has_nsfw_concept)
1619
+
1620
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py ADDED
@@ -0,0 +1,1818 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Harutatsu Akiyama, Jinbin Bai, and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from transformers import (
23
+ CLIPImageProcessor,
24
+ CLIPTextModel,
25
+ CLIPTextModelWithProjection,
26
+ CLIPTokenizer,
27
+ CLIPVisionModelWithProjection,
28
+ )
29
+
30
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
31
+ from ...loaders import (
32
+ FromSingleFileMixin,
33
+ IPAdapterMixin,
34
+ StableDiffusionXLLoraLoaderMixin,
35
+ TextualInversionLoaderMixin,
36
+ )
37
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
38
+ from ...models.attention_processor import (
39
+ AttnProcessor2_0,
40
+ LoRAAttnProcessor2_0,
41
+ LoRAXFormersAttnProcessor,
42
+ XFormersAttnProcessor,
43
+ )
44
+ from ...models.lora import adjust_lora_scale_text_encoder
45
+ from ...schedulers import KarrasDiffusionSchedulers
46
+ from ...utils import (
47
+ USE_PEFT_BACKEND,
48
+ deprecate,
49
+ is_invisible_watermark_available,
50
+ logging,
51
+ replace_example_docstring,
52
+ scale_lora_layers,
53
+ unscale_lora_layers,
54
+ )
55
+ from ...utils.torch_utils import is_compiled_module, randn_tensor
56
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
57
+ from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
58
+ from .multicontrolnet import MultiControlNetModel
59
+
60
+
61
+ if is_invisible_watermark_available():
62
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
63
+
64
+
65
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
66
+
67
+
68
# Adapted from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    """Pull a latent tensor out of a VAE encoder output.

    Outputs exposing a ``latent_dist`` distribution are sampled (``sample_mode="sample"``,
    optionally seeded by `generator`) or evaluated at the distribution mode
    (``sample_mode="argmax"``); outputs carrying precomputed ``latents`` are returned
    as-is. Raises ``AttributeError`` when neither attribute is available.
    """
    dist = getattr(encoder_output, "latent_dist", None)
    if dist is not None and sample_mode == "sample":
        return dist.sample(generator)
    if dist is not None and sample_mode == "argmax":
        return dist.mode()
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")
80
+
81
+
82
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # !pip install transformers accelerate opencv-python
        >>> from diffusers import StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
        >>> from diffusers.utils import load_image
        >>> import cv2
        >>> import numpy as np
        >>> import torch
        >>> from PIL import Image

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
        ... )
        >>> init_image = init_image.resize((1024, 1024))

        >>> generator = torch.Generator(device="cpu").manual_seed(1)

        >>> mask_image = load_image(
        ...     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
        ... )
        >>> mask_image = mask_image.resize((1024, 1024))


        >>> def make_canny_condition(image):
        ...     image = np.array(image)
        ...     image = cv2.Canny(image, 100, 200)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     image = Image.fromarray(image)
        ...     return image


        >>> control_image = make_canny_condition(init_image)

        >>> controlnet = ControlNetModel.from_pretrained(
        ...     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
        ... )
        >>> pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
        ... )

        >>> pipe.enable_model_cpu_offload()

        >>> # generate image
        >>> image = pipe(
        ...     "a handsome man with ray-ban sunglasses",
        ...     num_inference_steps=20,
        ...     generator=generator,
        ...     eta=1.0,
        ...     image=init_image,
        ...     mask_image=mask_image,
        ...     control_image=control_image,
        ... ).images[0]
        ```
"""
136
+
137
+
138
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale classifier-free-guidance output `noise_cfg` toward the per-sample standard deviation of the
    text-conditioned prediction, blending by `guidance_rescale`. Fixes overexposure per [Common Diffusion Noise
    Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf), Section 3.4.
    """
    # Per-sample std over all non-batch dimensions.
    text_std = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    cfg_std = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # Normalize the guided noise to the text prediction's std (fixes overexposure).
    rescaled = noise_cfg * (text_std / cfg_std)
    # Blend with the unrescaled result to avoid "plain looking" images.
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg
151
+
152
+
153
+ class StableDiffusionXLControlNetInpaintPipeline(
154
+ DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin
155
+ ):
156
+ r"""
157
+     Pipeline for inpainting using Stable Diffusion XL with ControlNet guidance.
158
+
159
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
160
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
161
+
162
+ The pipeline also inherits the following loading methods:
163
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
164
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
165
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
166
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
167
+
168
+ Args:
169
+ vae ([`AutoencoderKL`]):
170
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
171
+ text_encoder ([`CLIPTextModel`]):
172
+ Frozen text-encoder. Stable Diffusion XL uses the text portion of
173
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
174
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
175
+ text_encoder_2 ([` CLIPTextModelWithProjection`]):
176
+ Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
177
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
178
+ specifically the
179
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
180
+ variant.
181
+ tokenizer (`CLIPTokenizer`):
182
+ Tokenizer of class
183
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
184
+ tokenizer_2 (`CLIPTokenizer`):
185
+ Second Tokenizer of class
186
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
187
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
188
+ scheduler ([`SchedulerMixin`]):
189
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
190
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
191
+ """
192
+
193
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
194
+ _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
195
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
196
+
197
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        controlnet: ControlNetModel,
        scheduler: KarrasDiffusionSchedulers,
        requires_aesthetics_score: bool = False,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
        feature_extractor: Optional[CLIPImageProcessor] = None,
        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
    ):
        """Register all sub-models and build the VAE-scale-aware image/mask/control processors."""
        super().__init__()

        # A list/tuple of controlnets is wrapped so the rest of the pipeline can
        # treat single- and multi-controlnet setups uniformly.
        if isinstance(controlnet, (list, tuple)):
            controlnet = MultiControlNetModel(controlnet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
        # Spatial downsampling factor of the VAE (e.g. 8 for the standard SDXL VAE).
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        # Masks are binarized grayscale and intentionally NOT normalized to [-1, 1].
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
        )
        # ControlNet conditioning images stay in [0, 1] (no normalization), forced to RGB.
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )

        # Default watermarking to "on" only when the optional dependency is installed.
        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None
247
+
248
    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder, lora_scale)

            if self.text_encoder_2 is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder_2, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt is not None:
            batch_size = len(prompt)
        else:
            # check_inputs guarantees prompt_embeds is set when prompt is None.
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        # SDXL uses two text encoders; the first pair is optional (refiner has only the second).
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

                # Warn (rather than fail) when the prompt exceeds CLIP's context window.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )

                prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)

                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                if clip_skip is None:
                    prompt_embeds = prompt_embeds.hidden_states[-2]
                else:
                    # "2" because SDXL always indexes from the penultimate layer.
                    prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]

                prompt_embeds_list.append(prompt_embeds)

            # Per-encoder embeddings are concatenated along the feature dimension.
            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            # Cheap path: SDXL's configured behavior for an empty negative prompt is all-zeros.
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            # normalize str to list
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
            negative_prompt_2 = (
                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
            )

            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                # Pad the negative prompt to the positive prompt's sequence length so they can be stacked.
                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        if self.text_encoder_2 is not None:
            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        else:
            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            if self.text_encoder_2 is not None:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            else:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder_2, lora_scale)

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
482
+
483
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
484
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
485
+ dtype = next(self.image_encoder.parameters()).dtype
486
+
487
+ if not isinstance(image, torch.Tensor):
488
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
489
+
490
+ image = image.to(device=device, dtype=dtype)
491
+ if output_hidden_states:
492
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
493
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
494
+ uncond_image_enc_hidden_states = self.image_encoder(
495
+ torch.zeros_like(image), output_hidden_states=True
496
+ ).hidden_states[-2]
497
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
498
+ num_images_per_prompt, dim=0
499
+ )
500
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
501
+ else:
502
+ image_embeds = self.image_encoder(image).image_embeds
503
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
504
+ uncond_image_embeds = torch.zeros_like(image_embeds)
505
+
506
+ return image_embeds, uncond_image_embeds
507
+
508
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        """Produce one embedding tensor per loaded IP Adapter.

        Either encodes `ip_adapter_image` from scratch, or re-batches
        user-supplied `ip_adapter_image_embeds` (which, under CFG, are expected
        to hold the negative and positive halves concatenated along dim 0 —
        they are split with `.chunk(2)` below). Under CFG each returned tensor
        is `[negative; positive]` stacked along the batch dimension.
        """
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            # One conditioning image is required per IP-Adapter projection layer.
            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # Plain `ImageProjection` layers consume pooled embeddings; all
                # other projection types consume hidden states.
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                # Duplicate once per generated image (new leading dim of size num_images_per_prompt).
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if do_classifier_free_guidance:
                    # Negative half first, matching the prompt-embedding convention.
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            repeat_dims = [1]
            image_embeds = []
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    # Pre-computed embeds carry [negative; positive] along dim 0.
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    # Repeat only along the batch dim; trailing dims stay untouched.
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                    single_negative_image_embeds = single_negative_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
                    )
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                else:
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                image_embeds.append(single_image_embeds)

        return image_embeds
559
+
560
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
561
+ def prepare_extra_step_kwargs(self, generator, eta):
562
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
563
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
564
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
565
+ # and should be between [0, 1]
566
+
567
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
568
+ extra_step_kwargs = {}
569
+ if accepts_eta:
570
+ extra_step_kwargs["eta"] = eta
571
+
572
+ # check if the scheduler accepts generator
573
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
574
+ if accepts_generator:
575
+ extra_step_kwargs["generator"] = generator
576
+ return extra_step_kwargs
577
+
578
+ def check_image(self, image, prompt, prompt_embeds):
579
+ image_is_pil = isinstance(image, PIL.Image.Image)
580
+ image_is_tensor = isinstance(image, torch.Tensor)
581
+ image_is_np = isinstance(image, np.ndarray)
582
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
583
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
584
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
585
+
586
+ if (
587
+ not image_is_pil
588
+ and not image_is_tensor
589
+ and not image_is_np
590
+ and not image_is_pil_list
591
+ and not image_is_tensor_list
592
+ and not image_is_np_list
593
+ ):
594
+ raise TypeError(
595
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
596
+ )
597
+
598
+ if image_is_pil:
599
+ image_batch_size = 1
600
+ else:
601
+ image_batch_size = len(image)
602
+
603
+ if prompt is not None and isinstance(prompt, str):
604
+ prompt_batch_size = 1
605
+ elif prompt is not None and isinstance(prompt, list):
606
+ prompt_batch_size = len(prompt)
607
+ elif prompt_embeds is not None:
608
+ prompt_batch_size = prompt_embeds.shape[0]
609
+
610
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
611
+ raise ValueError(
612
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
613
+ )
614
+
615
    def check_inputs(
        self,
        prompt,
        prompt_2,
        image,
        mask_image,
        strength,
        num_inference_steps,
        callback_steps,
        output_type,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
        controlnet_conditioning_scale=1.0,
        control_guidance_start=0.0,
        control_guidance_end=1.0,
        callback_on_step_end_tensor_inputs=None,
        padding_mask_crop=None,
    ):
        """Validate all user-facing `__call__` arguments before any heavy work.

        Raises ValueError/TypeError on the first inconsistency found; performs
        no computation and returns nothing.
        """
        # Denoising strength must be a fraction of the schedule.
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
        if num_inference_steps is None:
            raise ValueError("`num_inference_steps` cannot be None.")
        elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
            raise ValueError(
                f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
                f" {type(num_inference_steps)}."
            )

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # Only tensors declared in _callback_tensor_inputs may be requested by callbacks.
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Prompts and prompt embeddings are mutually exclusive; exactly one form is required.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        # padding_mask_crop only works on PIL inputs with PIL output.
        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")

        # SDXL needs pooled embeddings alongside the token-level ones.
        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

        # `prompt` needs more sophisticated handling when there are multiple
        # conditionings.
        if isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(prompt, list):
                logger.warning(
                    f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                    " prompts. The conditionings will be fixed across the prompts."
                )

        # Check `image`
        # torch.compile wraps the controlnet; look through the wrapper to its original module.
        is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
            self.controlnet, torch._dynamo.eval_frame.OptimizedModule
        )
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            self.check_image(image, prompt, prompt_embeds)
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if not isinstance(image, list):
                raise TypeError("For multiple controlnets: `image` must be type `list`")

            # When `image` is a nested list:
            # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
            elif any(isinstance(i, list) for i in image):
                raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif len(image) != len(self.controlnet.nets):
                raise ValueError(
                    f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
                )

            for image_ in image:
                self.check_image(image_, prompt, prompt_embeds)
        else:
            # NOTE(review): unreachable by construction, but `assert` is stripped
            # under `python -O`; consider an explicit raise instead.
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            # NOTE(review): see the assert above — same stripped-under-`-O` caveat.
            assert False

        # Normalize guidance windows to lists so single- and multi-controlnet cases share the checks below.
        if not isinstance(control_guidance_start, (tuple, list)):
            control_guidance_start = [control_guidance_start]

        if not isinstance(control_guidance_end, (tuple, list)):
            control_guidance_end = [control_guidance_end]

        if len(control_guidance_start) != len(control_guidance_end):
            raise ValueError(
                f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
            )

        if isinstance(self.controlnet, MultiControlNetModel):
            if len(control_guidance_start) != len(self.controlnet.nets):
                raise ValueError(
                    f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
                )

        # Each (start, end) window must be a non-empty sub-interval of [0, 1].
        for start, end in zip(control_guidance_start, control_guidance_end):
            if start >= end:
                raise ValueError(
                    f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
                )
            if start < 0.0:
                raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
            if end > 1.0:
                raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )
832
def prepare_control_image(
    self,
    image,
    width,
    height,
    batch_size,
    num_images_per_prompt,
    device,
    dtype,
    crops_coords,
    resize_mode,
    do_classifier_free_guidance=False,
    guess_mode=False,
):
    """Preprocess a ControlNet conditioning image and expand it to the batch size."""
    processed = self.control_image_processor.preprocess(
        image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
    )
    processed = processed.to(dtype=torch.float32)

    # A single conditioning image is tiled across the whole prompt batch;
    # otherwise the batch already matches the prompt count and is only
    # expanded per generated image.
    n_copies = batch_size if processed.shape[0] == 1 else num_images_per_prompt
    processed = processed.repeat_interleave(n_copies, dim=0)
    processed = processed.to(device=device, dtype=dtype)

    # Classifier-free guidance doubles the batch (negative + positive halves),
    # except in guess mode where only the conditional half goes through ControlNet.
    if do_classifier_free_guidance and not guess_mode:
        processed = torch.cat([processed, processed])

    return processed
865
+
866
def prepare_latents(
    self,
    batch_size,
    num_channels_latents,
    height,
    width,
    dtype,
    device,
    generator,
    latents=None,
    image=None,
    timestep=None,
    is_strength_max=True,
    add_noise=True,
    return_noise=False,
    return_image_latents=False,
):
    """Create the initial latents for the inpainting denoising loop.

    Returns a tuple ``(latents,)``, optionally extended with the sampled
    ``noise`` and/or the VAE-encoded ``image_latents`` depending on the
    ``return_noise`` / ``return_image_latents`` flags.
    """
    # Latent resolution is the pixel resolution divided by the VAE downscale factor.
    shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )

    # strength < 1 means the start point mixes image latents with noise,
    # so both the image and the noise timestep must be available.
    if (image is None or timestep is None) and not is_strength_max:
        raise ValueError(
            "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
            "However, either the image or the noise timestep has not been provided."
        )

    if return_image_latents or (latents is None and not is_strength_max):
        image = image.to(device=device, dtype=dtype)

        # A 4-channel input is assumed to already be in latent space — TODO confirm against callers.
        if image.shape[1] == 4:
            image_latents = image
        else:
            image_latents = self._encode_vae_image(image=image, generator=generator)
        # Tile encoded latents so they match the requested batch size.
        image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)

    if latents is None and add_noise:
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # if strength is 1. then initialise the latents to noise, else initial to image + noise
        latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
        # if pure noise then scale the initial latents by the Scheduler's init sigma
        latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
    elif add_noise:
        # Caller-provided latents are treated as the noise itself and scaled by init sigma.
        noise = latents.to(device)
        latents = noise * self.scheduler.init_noise_sigma
    else:
        # No noise is mixed into the result, but it is still sampled so it can
        # be handed back via `return_noise`.
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        latents = image_latents.to(device)

    outputs = (latents,)

    if return_noise:
        outputs += (noise,)

    if return_image_latents:
        outputs += (image_latents,)

    return outputs
927
+
928
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
    """Encode a pixel-space image into scaled VAE latents.

    Supports per-sample generators (list) and temporarily upcasts the VAE to
    float32 when its config requests it (fp16 VAEs can overflow otherwise).
    """
    dtype = image.dtype
    if self.vae.config.force_upcast:
        # Encode in float32 for numerical stability; dtype is restored below.
        image = image.float()
        self.vae.to(dtype=torch.float32)

    if isinstance(generator, list):
        # One generator per sample: encode each slice with its own generator
        # so results are reproducible per image.
        image_latents = [
            retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
            for i in range(image.shape[0])
        ]
        image_latents = torch.cat(image_latents, dim=0)
    else:
        image_latents = retrieve_latents(self.vae.encode(image), generator=generator)

    if self.vae.config.force_upcast:
        # Restore the VAE to the caller's dtype after the upcast round-trip.
        self.vae.to(dtype)

    image_latents = image_latents.to(dtype)
    # Scale into the UNet's latent-space convention.
    image_latents = self.vae.config.scaling_factor * image_latents

    return image_latents
950
+
951
def prepare_mask_latents(
    self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
):
    """Downscale the inpainting mask to latent resolution and (optionally)
    encode the masked image, expanding both to the requested batch size.

    Returns ``(mask, masked_image_latents)``; the latter is ``None`` when no
    ``masked_image`` is given.
    """
    latent_size = (height // self.vae_scale_factor, width // self.vae_scale_factor)
    # Resize before casting dtype so cpu-offload / half-precision setups do not break.
    mask = torch.nn.functional.interpolate(mask, size=latent_size)
    mask = mask.to(device=device, dtype=dtype)

    # Duplicate the mask for each generation per prompt (mps-friendly repeat).
    n_masks = mask.shape[0]
    if n_masks < batch_size:
        if batch_size % n_masks != 0:
            raise ValueError(
                "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                " of masks that you pass is divisible by the total requested batch size."
            )
        mask = mask.repeat(batch_size // n_masks, 1, 1, 1)

    if do_classifier_free_guidance:
        mask = torch.cat([mask, mask])

    masked_image_latents = None
    if masked_image is not None:
        masked_image = masked_image.to(device=device, dtype=dtype)
        masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
        n_latents = masked_image_latents.shape[0]
        if n_latents < batch_size:
            if batch_size % n_latents != 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(batch_size // n_latents, 1, 1, 1)

        if do_classifier_free_guidance:
            masked_image_latents = torch.cat([masked_image_latents, masked_image_latents])

        # aligning device to prevent device errors when concating it with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)

    return mask, masked_image_latents
997
+
998
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
    """Slice the scheduler's timesteps according to `strength` or `denoising_start`.

    Returns ``(timesteps, num_inference_steps)`` where the second element is
    the number of denoising steps that will actually run.
    """
    # get the original timestep using init_timestep
    if denoising_start is None:
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
    else:
        # strength is ignored entirely when denoising_start is provided.
        t_start = 0

    timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

    # Strength is irrelevant if we directly request a timestep to start at;
    # that is, strength is determined by the denoising_start instead.
    if denoising_start is not None:
        discrete_timestep_cutoff = int(
            round(
                self.scheduler.config.num_train_timesteps
                - (denoising_start * self.scheduler.config.num_train_timesteps)
            )
        )

        num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
        if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
            # if the scheduler is a 2nd order scheduler we might have to do +1
            # because `num_inference_steps` might be even given that every timestep
            # (except the highest one) is duplicated. If `num_inference_steps` is even it would
            # mean that we cut the timesteps in the middle of the denoising step
            # (between 1st and 2nd devirative) which leads to incorrect results. By adding 1
            # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler
            num_inference_steps = num_inference_steps + 1

        # because t_n+1 >= t_n, we slice the timesteps starting from the end
        timesteps = timesteps[-num_inference_steps:]
        # Early return: the denoising_start path computes its own step count.
        return timesteps, num_inference_steps

    return timesteps, num_inference_steps - t_start
1034
+
1035
+ def _get_add_time_ids(
1036
+ self,
1037
+ original_size,
1038
+ crops_coords_top_left,
1039
+ target_size,
1040
+ aesthetic_score,
1041
+ negative_aesthetic_score,
1042
+ dtype,
1043
+ text_encoder_projection_dim=None,
1044
+ ):
1045
+ if self.config.requires_aesthetics_score:
1046
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
1047
+ add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
1048
+ else:
1049
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1050
+ add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
1051
+
1052
+ passed_add_embed_dim = (
1053
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
1054
+ )
1055
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
1056
+
1057
+ if (
1058
+ expected_add_embed_dim > passed_add_embed_dim
1059
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
1060
+ ):
1061
+ raise ValueError(
1062
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
1063
+ )
1064
+ elif (
1065
+ expected_add_embed_dim < passed_add_embed_dim
1066
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
1067
+ ):
1068
+ raise ValueError(
1069
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
1070
+ )
1071
+ elif expected_add_embed_dim != passed_add_embed_dim:
1072
+ raise ValueError(
1073
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1074
+ )
1075
+
1076
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1077
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1078
+
1079
+ return add_time_ids, add_neg_time_ids
1080
+
1081
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
    """Run the VAE in float32 while keeping memory-friendly attention modules
    in their original dtype when a fused attention processor is in use."""
    original_dtype = self.vae.dtype
    self.vae.to(dtype=torch.float32)

    processor = self.vae.decoder.mid_block.attentions[0].processor
    has_fused_attention = isinstance(
        processor,
        (
            AttnProcessor2_0,
            XFormersAttnProcessor,
            LoRAXFormersAttnProcessor,
            LoRAAttnProcessor2_0,
        ),
    )
    # xformers / torch 2.0 attention does not need float32, which can save
    # lots of memory — leave those submodules in the original dtype.
    if has_fused_attention:
        self.vae.post_quant_conv.to(original_dtype)
        self.vae.decoder.conv_in.to(original_dtype)
        self.vae.decoder.mid_block.to(original_dtype)
1100
+
1101
@property
def guidance_scale(self):
    # Classifier-free guidance weight captured from `__call__`; read-only.
    return self._guidance_scale
1104
+
1105
@property
def clip_skip(self):
    # Number of final CLIP layers skipped when encoding prompts (set in `__call__`).
    return self._clip_skip
1108
+
1109
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
    # CFG is active only for guidance scales strictly above 1.
    return self._guidance_scale > 1
1115
+
1116
@property
def cross_attention_kwargs(self):
    # Extra kwargs forwarded to the attention processors (set in `__call__`).
    return self._cross_attention_kwargs
1119
+
1120
@property
def num_timesteps(self):
    # Number of denoising steps of the current/most recent run (set in `__call__`).
    return self._num_timesteps
1123
+
1124
+ @torch.no_grad()
1125
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1126
+ def __call__(
1127
+ self,
1128
+ prompt: Union[str, List[str]] = None,
1129
+ prompt_2: Optional[Union[str, List[str]]] = None,
1130
+ image: PipelineImageInput = None,
1131
+ mask_image: PipelineImageInput = None,
1132
+ control_image: Union[
1133
+ PipelineImageInput,
1134
+ List[PipelineImageInput],
1135
+ ] = None,
1136
+ height: Optional[int] = None,
1137
+ width: Optional[int] = None,
1138
+ padding_mask_crop: Optional[int] = None,
1139
+ strength: float = 0.9999,
1140
+ num_inference_steps: int = 50,
1141
+ denoising_start: Optional[float] = None,
1142
+ denoising_end: Optional[float] = None,
1143
+ guidance_scale: float = 5.0,
1144
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1145
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
1146
+ num_images_per_prompt: Optional[int] = 1,
1147
+ eta: float = 0.0,
1148
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1149
+ latents: Optional[torch.FloatTensor] = None,
1150
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1151
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1152
+ ip_adapter_image: Optional[PipelineImageInput] = None,
1153
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
1154
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1155
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1156
+ output_type: Optional[str] = "pil",
1157
+ return_dict: bool = True,
1158
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1159
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
1160
+ guess_mode: bool = False,
1161
+ control_guidance_start: Union[float, List[float]] = 0.0,
1162
+ control_guidance_end: Union[float, List[float]] = 1.0,
1163
+ guidance_rescale: float = 0.0,
1164
+ original_size: Tuple[int, int] = None,
1165
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
1166
+ target_size: Tuple[int, int] = None,
1167
+ aesthetic_score: float = 6.0,
1168
+ negative_aesthetic_score: float = 2.5,
1169
+ clip_skip: Optional[int] = None,
1170
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1171
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1172
+ **kwargs,
1173
+ ):
1174
+ r"""
1175
+ Function invoked when calling the pipeline for generation.
1176
+
1177
+ Args:
1178
+ prompt (`str` or `List[str]`, *optional*):
1179
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
1180
+ instead.
1181
+ prompt_2 (`str` or `List[str]`, *optional*):
1182
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1183
+ used in both text-encoders
1184
+ image (`PIL.Image.Image`):
1185
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
1186
+ be masked out with `mask_image` and repainted according to `prompt`.
1187
+ mask_image (`PIL.Image.Image`):
1188
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1189
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
1190
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
1191
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
1192
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1193
+ The height in pixels of the generated image.
1194
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1195
+ The width in pixels of the generated image.
1196
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
1197
+ The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If
1198
+ `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ration of the image and
1199
+ contains all masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on
1200
+ the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large
1201
+ and contain information inreleant for inpainging, such as background.
1202
+ strength (`float`, *optional*, defaults to 0.9999):
1203
+ Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
1204
+ between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
1205
+ `strength`. The number of denoising steps depends on the amount of noise initially added. When
1206
+ `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
1207
+ iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
1208
+ portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
1209
+ integer, the value of `strength` will be ignored.
1210
+ num_inference_steps (`int`, *optional*, defaults to 50):
1211
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1212
+ expense of slower inference.
1213
+ denoising_start (`float`, *optional*):
1214
+ When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1215
+ bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1216
+ it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1217
+ strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1218
+ is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1219
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1220
+ denoising_end (`float`, *optional*):
1221
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1222
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1223
+ still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
1224
+ denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
1225
+ final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
1226
+ forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1227
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1228
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1229
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1230
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1231
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1232
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1233
+ usually at the expense of lower image quality.
1234
+ negative_prompt (`str` or `List[str]`, *optional*):
1235
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1236
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1237
+ less than `1`).
1238
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1239
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1240
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1241
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1242
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1243
+ provided, text embeddings will be generated from `prompt` input argument.
1244
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1245
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1246
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1247
+ argument.
1248
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1249
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
1250
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
1251
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
1252
+ if `do_classifier_free_guidance` is set to `True`.
1253
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
1254
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1255
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1256
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1257
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1258
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1259
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1260
+ input argument.
1261
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1262
+ The number of images to generate per prompt.
1263
+ eta (`float`, *optional*, defaults to 0.0):
1264
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1265
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1266
+ generator (`torch.Generator`, *optional*):
1267
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1268
+ to make generation deterministic.
1269
+ latents (`torch.FloatTensor`, *optional*):
1270
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1271
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1272
+ tensor will ge generated by sampling using the supplied random `generator`.
1273
+ output_type (`str`, *optional*, defaults to `"pil"`):
1274
+ The output format of the generate image. Choose between
1275
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1276
+ return_dict (`bool`, *optional*, defaults to `True`):
1277
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1278
+ plain tuple.
1279
+ cross_attention_kwargs (`dict`, *optional*):
1280
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1281
+ `self.processor` in
1282
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1283
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1284
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1285
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
1286
+ explained in section 2.2 of
1287
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1288
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1289
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1290
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1291
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1292
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1293
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1294
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1295
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
1296
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1297
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1298
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1299
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1300
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1301
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1302
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1303
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1304
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1305
+ clip_skip (`int`, *optional*):
1306
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1307
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1308
+ callback_on_step_end (`Callable`, *optional*):
1309
+ A function that calls at the end of each denoising steps during the inference. The function is called
1310
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1311
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1312
+ `callback_on_step_end_tensor_inputs`.
1313
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1314
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1315
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1316
+ `._callback_tensor_inputs` attribute of your pipeine class.
1317
+
1318
+ Examples:
1319
+
1320
+ Returns:
1321
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
1322
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1323
+ `tuple. `tuple. When returning a tuple, the first element is a list with the generated images.
1324
+ """
1325
+
1326
+ callback = kwargs.pop("callback", None)
1327
+ callback_steps = kwargs.pop("callback_steps", None)
1328
+
1329
+ if callback is not None:
1330
+ deprecate(
1331
+ "callback",
1332
+ "1.0.0",
1333
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1334
+ )
1335
+ if callback_steps is not None:
1336
+ deprecate(
1337
+ "callback_steps",
1338
+ "1.0.0",
1339
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1340
+ )
1341
+
1342
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1343
+
1344
+ # align format for control guidance
1345
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1346
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1347
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1348
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1349
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1350
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1351
+ control_guidance_start, control_guidance_end = (
1352
+ mult * [control_guidance_start],
1353
+ mult * [control_guidance_end],
1354
+ )
1355
+
1356
+ # # 0.0 Default height and width to unet
1357
+ # height = height or self.unet.config.sample_size * self.vae_scale_factor
1358
+ # width = width or self.unet.config.sample_size * self.vae_scale_factor
1359
+
1360
+ # 0.1 align format for control guidance
1361
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1362
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1363
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1364
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1365
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1366
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1367
+ control_guidance_start, control_guidance_end = (
1368
+ mult * [control_guidance_start],
1369
+ mult * [control_guidance_end],
1370
+ )
1371
+
1372
+ # 1. Check inputs
1373
+ self.check_inputs(
1374
+ prompt,
1375
+ prompt_2,
1376
+ control_image,
1377
+ mask_image,
1378
+ strength,
1379
+ num_inference_steps,
1380
+ callback_steps,
1381
+ output_type,
1382
+ negative_prompt,
1383
+ negative_prompt_2,
1384
+ prompt_embeds,
1385
+ negative_prompt_embeds,
1386
+ ip_adapter_image,
1387
+ ip_adapter_image_embeds,
1388
+ pooled_prompt_embeds,
1389
+ negative_pooled_prompt_embeds,
1390
+ controlnet_conditioning_scale,
1391
+ control_guidance_start,
1392
+ control_guidance_end,
1393
+ callback_on_step_end_tensor_inputs,
1394
+ padding_mask_crop,
1395
+ )
1396
+
1397
+ self._guidance_scale = guidance_scale
1398
+ self._clip_skip = clip_skip
1399
+ self._cross_attention_kwargs = cross_attention_kwargs
1400
+
1401
+ # 2. Define call parameters
1402
+ if prompt is not None and isinstance(prompt, str):
1403
+ batch_size = 1
1404
+ elif prompt is not None and isinstance(prompt, list):
1405
+ batch_size = len(prompt)
1406
+ else:
1407
+ batch_size = prompt_embeds.shape[0]
1408
+
1409
+ device = self._execution_device
1410
+
1411
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1412
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1413
+
1414
+ # 3. Encode input prompt
1415
+ text_encoder_lora_scale = (
1416
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1417
+ )
1418
+
1419
+ (
1420
+ prompt_embeds,
1421
+ negative_prompt_embeds,
1422
+ pooled_prompt_embeds,
1423
+ negative_pooled_prompt_embeds,
1424
+ ) = self.encode_prompt(
1425
+ prompt=prompt,
1426
+ prompt_2=prompt_2,
1427
+ device=device,
1428
+ num_images_per_prompt=num_images_per_prompt,
1429
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1430
+ negative_prompt=negative_prompt,
1431
+ negative_prompt_2=negative_prompt_2,
1432
+ prompt_embeds=prompt_embeds,
1433
+ negative_prompt_embeds=negative_prompt_embeds,
1434
+ pooled_prompt_embeds=pooled_prompt_embeds,
1435
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1436
+ lora_scale=text_encoder_lora_scale,
1437
+ clip_skip=self.clip_skip,
1438
+ )
1439
+
1440
+ # 3.1 Encode ip_adapter_image
1441
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1442
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1443
+ ip_adapter_image,
1444
+ ip_adapter_image_embeds,
1445
+ device,
1446
+ batch_size * num_images_per_prompt,
1447
+ self.do_classifier_free_guidance,
1448
+ )
1449
+
1450
+ # 4. set timesteps
1451
+ def denoising_value_valid(dnv):
1452
+ return isinstance(dnv, float) and 0 < dnv < 1
1453
+
1454
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1455
+ timesteps, num_inference_steps = self.get_timesteps(
1456
+ num_inference_steps,
1457
+ strength,
1458
+ device,
1459
+ denoising_start=denoising_start if denoising_value_valid(denoising_start) else None,
1460
+ )
1461
+ # check that number of inference steps is not < 1 - as this doesn't make sense
1462
+ if num_inference_steps < 1:
1463
+ raise ValueError(
1464
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1465
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1466
+ )
1467
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1468
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1469
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
1470
+ is_strength_max = strength == 1.0
1471
+ self._num_timesteps = len(timesteps)
1472
+
1473
+ # 5. Preprocess mask and image - resizes image and mask w.r.t height and width
1474
+ # 5.1 Prepare init image
1475
+ if padding_mask_crop is not None:
1476
+ height, width = self.image_processor.get_default_height_width(image, height, width)
1477
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
1478
+ resize_mode = "fill"
1479
+ else:
1480
+ crops_coords = None
1481
+ resize_mode = "default"
1482
+
1483
+ original_image = image
1484
+ init_image = self.image_processor.preprocess(
1485
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
1486
+ )
1487
+ init_image = init_image.to(dtype=torch.float32)
1488
+
1489
+ # 5.2 Prepare control images
1490
+ if isinstance(controlnet, ControlNetModel):
1491
+ control_image = self.prepare_control_image(
1492
+ image=control_image,
1493
+ width=width,
1494
+ height=height,
1495
+ batch_size=batch_size * num_images_per_prompt,
1496
+ num_images_per_prompt=num_images_per_prompt,
1497
+ device=device,
1498
+ dtype=controlnet.dtype,
1499
+ crops_coords=crops_coords,
1500
+ resize_mode=resize_mode,
1501
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1502
+ guess_mode=guess_mode,
1503
+ )
1504
+ elif isinstance(controlnet, MultiControlNetModel):
1505
+ control_images = []
1506
+
1507
+ for control_image_ in control_image:
1508
+ control_image_ = self.prepare_control_image(
1509
+ image=control_image_,
1510
+ width=width,
1511
+ height=height,
1512
+ batch_size=batch_size * num_images_per_prompt,
1513
+ num_images_per_prompt=num_images_per_prompt,
1514
+ device=device,
1515
+ dtype=controlnet.dtype,
1516
+ crops_coords=crops_coords,
1517
+ resize_mode=resize_mode,
1518
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1519
+ guess_mode=guess_mode,
1520
+ )
1521
+
1522
+ control_images.append(control_image_)
1523
+
1524
+ control_image = control_images
1525
+ else:
1526
+ raise ValueError(f"{controlnet.__class__} is not supported.")
1527
+
1528
+ # 5.3 Prepare mask
1529
+ mask = self.mask_processor.preprocess(
1530
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
1531
+ )
1532
+
1533
+ masked_image = init_image * (mask < 0.5)
1534
+ _, _, height, width = init_image.shape
1535
+
1536
+ # 6. Prepare latent variables
1537
+ num_channels_latents = self.vae.config.latent_channels
1538
+ num_channels_unet = self.unet.config.in_channels
1539
+ return_image_latents = num_channels_unet == 4
1540
+
1541
+ add_noise = True if denoising_start is None else False
1542
+ latents_outputs = self.prepare_latents(
1543
+ batch_size * num_images_per_prompt,
1544
+ num_channels_latents,
1545
+ height,
1546
+ width,
1547
+ prompt_embeds.dtype,
1548
+ device,
1549
+ generator,
1550
+ latents,
1551
+ image=init_image,
1552
+ timestep=latent_timestep,
1553
+ is_strength_max=is_strength_max,
1554
+ add_noise=add_noise,
1555
+ return_noise=True,
1556
+ return_image_latents=return_image_latents,
1557
+ )
1558
+
1559
+ if return_image_latents:
1560
+ latents, noise, image_latents = latents_outputs
1561
+ else:
1562
+ latents, noise = latents_outputs
1563
+
1564
+ # 7. Prepare mask latent variables
1565
+ mask, masked_image_latents = self.prepare_mask_latents(
1566
+ mask,
1567
+ masked_image,
1568
+ batch_size * num_images_per_prompt,
1569
+ height,
1570
+ width,
1571
+ prompt_embeds.dtype,
1572
+ device,
1573
+ generator,
1574
+ self.do_classifier_free_guidance,
1575
+ )
1576
+
1577
+ # 8. Check that sizes of mask, masked image and latents match
1578
+ if num_channels_unet == 9:
1579
+ # default case for runwayml/stable-diffusion-inpainting
1580
+ num_channels_mask = mask.shape[1]
1581
+ num_channels_masked_image = masked_image_latents.shape[1]
1582
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1583
+ raise ValueError(
1584
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1585
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1586
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1587
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1588
+ " `pipeline.unet` or your `mask_image` or `image` input."
1589
+ )
1590
+ elif num_channels_unet != 4:
1591
+ raise ValueError(
1592
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1593
+ )
1594
+ # 8.1 Prepare extra step kwargs.
1595
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1596
+
1597
+ # 8.2 Create tensor stating which controlnets to keep
1598
+ controlnet_keep = []
1599
+ for i in range(len(timesteps)):
1600
+ keeps = [
1601
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1602
+ for s, e in zip(control_guidance_start, control_guidance_end)
1603
+ ]
1604
+ if isinstance(self.controlnet, MultiControlNetModel):
1605
+ controlnet_keep.append(keeps)
1606
+ else:
1607
+ controlnet_keep.append(keeps[0])
1608
+
1609
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1610
+ height, width = latents.shape[-2:]
1611
+ height = height * self.vae_scale_factor
1612
+ width = width * self.vae_scale_factor
1613
+
1614
+ original_size = original_size or (height, width)
1615
+ target_size = target_size or (height, width)
1616
+
1617
+ # 10. Prepare added time ids & embeddings
1618
+ add_text_embeds = pooled_prompt_embeds
1619
+ if self.text_encoder_2 is None:
1620
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1621
+ else:
1622
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1623
+
1624
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1625
+ original_size,
1626
+ crops_coords_top_left,
1627
+ target_size,
1628
+ aesthetic_score,
1629
+ negative_aesthetic_score,
1630
+ dtype=prompt_embeds.dtype,
1631
+ text_encoder_projection_dim=text_encoder_projection_dim,
1632
+ )
1633
+ add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1634
+
1635
+ if self.do_classifier_free_guidance:
1636
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1637
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1638
+ add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1639
+ add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1640
+
1641
+ prompt_embeds = prompt_embeds.to(device)
1642
+ add_text_embeds = add_text_embeds.to(device)
1643
+ add_time_ids = add_time_ids.to(device)
1644
+
1645
+ # 11. Denoising loop
1646
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1647
+
1648
+ if (
1649
+ denoising_end is not None
1650
+ and denoising_start is not None
1651
+ and denoising_value_valid(denoising_end)
1652
+ and denoising_value_valid(denoising_start)
1653
+ and denoising_start >= denoising_end
1654
+ ):
1655
+ raise ValueError(
1656
+ f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
1657
+ + f" {denoising_end} when using type float."
1658
+ )
1659
+ elif denoising_end is not None and denoising_value_valid(denoising_end):
1660
+ discrete_timestep_cutoff = int(
1661
+ round(
1662
+ self.scheduler.config.num_train_timesteps
1663
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
1664
+ )
1665
+ )
1666
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1667
+ timesteps = timesteps[:num_inference_steps]
1668
+
1669
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1670
+ for i, t in enumerate(timesteps):
1671
+ # expand the latents if we are doing classifier free guidance
1672
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1673
+
1674
+ # concat latents, mask, masked_image_latents in the channel dimension
1675
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1676
+
1677
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1678
+
1679
+ # controlnet(s) inference
1680
+ if guess_mode and self.do_classifier_free_guidance:
1681
+ # Infer ControlNet only for the conditional batch.
1682
+ control_model_input = latents
1683
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1684
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1685
+ controlnet_added_cond_kwargs = {
1686
+ "text_embeds": add_text_embeds.chunk(2)[1],
1687
+ "time_ids": add_time_ids.chunk(2)[1],
1688
+ }
1689
+ else:
1690
+ control_model_input = latent_model_input
1691
+ controlnet_prompt_embeds = prompt_embeds
1692
+ controlnet_added_cond_kwargs = added_cond_kwargs
1693
+
1694
+ if isinstance(controlnet_keep[i], list):
1695
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1696
+ else:
1697
+ controlnet_cond_scale = controlnet_conditioning_scale
1698
+ if isinstance(controlnet_cond_scale, list):
1699
+ controlnet_cond_scale = controlnet_cond_scale[0]
1700
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1701
+
1702
+ # # Resize control_image to match the size of the input to the controlnet
1703
+ # if control_image.shape[-2:] != control_model_input.shape[-2:]:
1704
+ # control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False)
1705
+
1706
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1707
+ control_model_input,
1708
+ t,
1709
+ encoder_hidden_states=controlnet_prompt_embeds,
1710
+ controlnet_cond=control_image,
1711
+ conditioning_scale=cond_scale,
1712
+ guess_mode=guess_mode,
1713
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1714
+ return_dict=False,
1715
+ )
1716
+
1717
+ if guess_mode and self.do_classifier_free_guidance:
1718
+ # Inferred ControlNet only for the conditional batch.
1719
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1720
+ # add 0 to the unconditional batch to keep it unchanged.
1721
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1722
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1723
+
1724
+ if ip_adapter_image is not None:
1725
+ added_cond_kwargs["image_embeds"] = image_embeds
1726
+
1727
+ if num_channels_unet == 9:
1728
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1729
+
1730
+ # predict the noise residual
1731
+ noise_pred = self.unet(
1732
+ latent_model_input,
1733
+ t,
1734
+ encoder_hidden_states=prompt_embeds,
1735
+ cross_attention_kwargs=self.cross_attention_kwargs,
1736
+ down_block_additional_residuals=down_block_res_samples,
1737
+ mid_block_additional_residual=mid_block_res_sample,
1738
+ added_cond_kwargs=added_cond_kwargs,
1739
+ return_dict=False,
1740
+ )[0]
1741
+
1742
+ # perform guidance
1743
+ if self.do_classifier_free_guidance:
1744
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1745
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1746
+
1747
+ if self.do_classifier_free_guidance and guidance_rescale > 0.0:
1748
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1749
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1750
+
1751
+ # compute the previous noisy sample x_t -> x_t-1
1752
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1753
+
1754
+ if num_channels_unet == 4:
1755
+ init_latents_proper = image_latents
1756
+ if self.do_classifier_free_guidance:
1757
+ init_mask, _ = mask.chunk(2)
1758
+ else:
1759
+ init_mask = mask
1760
+
1761
+ if i < len(timesteps) - 1:
1762
+ noise_timestep = timesteps[i + 1]
1763
+ init_latents_proper = self.scheduler.add_noise(
1764
+ init_latents_proper, noise, torch.tensor([noise_timestep])
1765
+ )
1766
+
1767
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1768
+
1769
+ if callback_on_step_end is not None:
1770
+ callback_kwargs = {}
1771
+ for k in callback_on_step_end_tensor_inputs:
1772
+ callback_kwargs[k] = locals()[k]
1773
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1774
+
1775
+ latents = callback_outputs.pop("latents", latents)
1776
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1777
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1778
+
1779
+ # call the callback, if provided
1780
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1781
+ progress_bar.update()
1782
+ if callback is not None and i % callback_steps == 0:
1783
+ step_idx = i // getattr(self.scheduler, "order", 1)
1784
+ callback(step_idx, t, latents)
1785
+
1786
+ # make sure the VAE is in float32 mode, as it overflows in float16
1787
+ if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1788
+ self.upcast_vae()
1789
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1790
+
1791
+ # If we do sequential model offloading, let's offload unet and controlnet
1792
+ # manually for max memory savings
1793
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1794
+ self.unet.to("cpu")
1795
+ self.controlnet.to("cpu")
1796
+ torch.cuda.empty_cache()
1797
+
1798
+ if not output_type == "latent":
1799
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1800
+ else:
1801
+ return StableDiffusionXLPipelineOutput(images=latents)
1802
+
1803
+ # apply watermark if available
1804
+ if self.watermark is not None:
1805
+ image = self.watermark.apply_watermark(image)
1806
+
1807
+ image = self.image_processor.postprocess(image, output_type=output_type)
1808
+
1809
+ if padding_mask_crop is not None:
1810
+ image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
1811
+
1812
+ # Offload all models
1813
+ self.maybe_free_model_hooks()
1814
+
1815
+ if not return_dict:
1816
+ return (image,)
1817
+
1818
+ return StableDiffusionXLPipelineOutput(images=image)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py ADDED
@@ -0,0 +1,1499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import (
24
+ CLIPImageProcessor,
25
+ CLIPTextModel,
26
+ CLIPTextModelWithProjection,
27
+ CLIPTokenizer,
28
+ CLIPVisionModelWithProjection,
29
+ )
30
+
31
+ from diffusers.utils.import_utils import is_invisible_watermark_available
32
+
33
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
34
+ from ...loaders import (
35
+ FromSingleFileMixin,
36
+ IPAdapterMixin,
37
+ StableDiffusionXLLoraLoaderMixin,
38
+ TextualInversionLoaderMixin,
39
+ )
40
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
41
+ from ...models.attention_processor import (
42
+ AttnProcessor2_0,
43
+ LoRAAttnProcessor2_0,
44
+ LoRAXFormersAttnProcessor,
45
+ XFormersAttnProcessor,
46
+ )
47
+ from ...models.lora import adjust_lora_scale_text_encoder
48
+ from ...schedulers import KarrasDiffusionSchedulers
49
+ from ...utils import (
50
+ USE_PEFT_BACKEND,
51
+ deprecate,
52
+ logging,
53
+ replace_example_docstring,
54
+ scale_lora_layers,
55
+ unscale_lora_layers,
56
+ )
57
+ from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
58
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
59
+ from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
60
+
61
+
62
+ if is_invisible_watermark_available():
63
+ from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
64
+
65
+ from .multicontrolnet import MultiControlNetModel
66
+
67
+
68
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
69
+
70
+
71
+ EXAMPLE_DOC_STRING = """
72
+ Examples:
73
+ ```py
74
+ >>> # !pip install opencv-python transformers accelerate
75
+ >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
76
+ >>> from diffusers.utils import load_image
77
+ >>> import numpy as np
78
+ >>> import torch
79
+
80
+ >>> import cv2
81
+ >>> from PIL import Image
82
+
83
+ >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
84
+ >>> negative_prompt = "low quality, bad quality, sketches"
85
+
86
+ >>> # download an image
87
+ >>> image = load_image(
88
+ ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
89
+ ... )
90
+
91
+ >>> # initialize the models and pipeline
92
+ >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
93
+ >>> controlnet = ControlNetModel.from_pretrained(
94
+ ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
95
+ ... )
96
+ >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
97
+ >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
98
+ ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
99
+ ... )
100
+ >>> pipe.enable_model_cpu_offload()
101
+
102
+ >>> # get canny image
103
+ >>> image = np.array(image)
104
+ >>> image = cv2.Canny(image, 100, 200)
105
+ >>> image = image[:, :, None]
106
+ >>> image = np.concatenate([image, image, image], axis=2)
107
+ >>> canny_image = Image.fromarray(image)
108
+
109
+ >>> # generate image
110
+ >>> image = pipe(
111
+ ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
112
+ ... ).images[0]
113
+ ```
114
+ """
115
+
116
+
117
+ class StableDiffusionXLControlNetPipeline(
118
+ DiffusionPipeline,
119
+ StableDiffusionMixin,
120
+ TextualInversionLoaderMixin,
121
+ StableDiffusionXLLoraLoaderMixin,
122
+ IPAdapterMixin,
123
+ FromSingleFileMixin,
124
+ ):
125
+ r"""
126
+ Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.
127
+
128
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
129
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
130
+
131
+ The pipeline also inherits the following loading methods:
132
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
133
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
134
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
135
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
136
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
137
+
138
+ Args:
139
+ vae ([`AutoencoderKL`]):
140
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
141
+ text_encoder ([`~transformers.CLIPTextModel`]):
142
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
143
+ text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
144
+ Second frozen text-encoder
145
+ ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
146
+ tokenizer ([`~transformers.CLIPTokenizer`]):
147
+ A `CLIPTokenizer` to tokenize text.
148
+ tokenizer_2 ([`~transformers.CLIPTokenizer`]):
149
+ A `CLIPTokenizer` to tokenize text.
150
+ unet ([`UNet2DConditionModel`]):
151
+ A `UNet2DConditionModel` to denoise the encoded image latents.
152
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
153
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
154
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
155
+ additional conditioning.
156
+ scheduler ([`SchedulerMixin`]):
157
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
158
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
159
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
160
+ Whether the negative prompt embeddings should always be set to 0. Also see the config of
161
+ `stabilityai/stable-diffusion-xl-base-1-0`.
162
+ add_watermarker (`bool`, *optional*):
163
+ Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
164
+ watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
165
+ watermarker is used.
166
+ """
167
+
168
+ # leave controlnet out on purpose because it iterates with unet
169
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
170
+ _optional_components = [
171
+ "tokenizer",
172
+ "tokenizer_2",
173
+ "text_encoder",
174
+ "text_encoder_2",
175
+ "feature_extractor",
176
+ "image_encoder",
177
+ ]
178
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
179
+
180
+ def __init__(
181
+ self,
182
+ vae: AutoencoderKL,
183
+ text_encoder: CLIPTextModel,
184
+ text_encoder_2: CLIPTextModelWithProjection,
185
+ tokenizer: CLIPTokenizer,
186
+ tokenizer_2: CLIPTokenizer,
187
+ unet: UNet2DConditionModel,
188
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
189
+ scheduler: KarrasDiffusionSchedulers,
190
+ force_zeros_for_empty_prompt: bool = True,
191
+ add_watermarker: Optional[bool] = None,
192
+ feature_extractor: CLIPImageProcessor = None,
193
+ image_encoder: CLIPVisionModelWithProjection = None,
194
+ ):
195
+ super().__init__()
196
+
197
+ if isinstance(controlnet, (list, tuple)):
198
+ controlnet = MultiControlNetModel(controlnet)
199
+
200
+ self.register_modules(
201
+ vae=vae,
202
+ text_encoder=text_encoder,
203
+ text_encoder_2=text_encoder_2,
204
+ tokenizer=tokenizer,
205
+ tokenizer_2=tokenizer_2,
206
+ unet=unet,
207
+ controlnet=controlnet,
208
+ scheduler=scheduler,
209
+ feature_extractor=feature_extractor,
210
+ image_encoder=image_encoder,
211
+ )
212
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
213
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
214
+ self.control_image_processor = VaeImageProcessor(
215
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
216
+ )
217
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
218
+
219
+ if add_watermarker:
220
+ self.watermark = StableDiffusionXLWatermarker()
221
+ else:
222
+ self.watermark = None
223
+
224
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
225
+
226
    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

        # dynamically adjust the LoRA scale
        if self.text_encoder is not None:
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
            else:
                scale_lora_layers(self.text_encoder_2, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt is not None:
            batch_size = len(prompt)
        else:
            # `prompt` may be None when pre-computed `prompt_embeds` are supplied.
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

            # textual inversion: process multi-vector tokens if necessary
            # NOTE: the loop below deliberately rebinds `prompt` to each element of
            # `prompts`; the original argument is no longer needed at this point.
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

                # Warn (but do not fail) when the prompt exceeds CLIP's token budget.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )

                prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)

                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                if clip_skip is None:
                    prompt_embeds = prompt_embeds.hidden_states[-2]
                else:
                    # "2" because SDXL always indexes from the penultimate layer.
                    prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]

                prompt_embeds_list.append(prompt_embeds)

            # Both encoders' hidden states are concatenated along the feature axis.
            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            # normalize str to list
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
            negative_prompt_2 = (
                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
            )

            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                # NOTE: `clip_skip` is not applied to the negative branch — it always
                # takes the penultimate layer, mirroring the positive default.
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        if self.text_encoder_2 is not None:
            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        else:
            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            if self.text_encoder_2 is not None:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            else:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder_2, lora_scale)

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
460
+
461
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
462
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
463
+ dtype = next(self.image_encoder.parameters()).dtype
464
+
465
+ if not isinstance(image, torch.Tensor):
466
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
467
+
468
+ image = image.to(device=device, dtype=dtype)
469
+ if output_hidden_states:
470
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
471
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
472
+ uncond_image_enc_hidden_states = self.image_encoder(
473
+ torch.zeros_like(image), output_hidden_states=True
474
+ ).hidden_states[-2]
475
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
476
+ num_images_per_prompt, dim=0
477
+ )
478
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
479
+ else:
480
+ image_embeds = self.image_encoder(image).image_embeds
481
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
482
+ uncond_image_embeds = torch.zeros_like(image_embeds)
483
+
484
+ return image_embeds, uncond_image_embeds
485
+
486
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        """Return one image-embedding tensor per loaded IP-Adapter.

        Either encodes raw `ip_adapter_image` inputs (one per adapter) or repeats
        user-supplied `ip_adapter_image_embeds` for the requested batch size. Under
        classifier-free guidance the unconditional embedding is concatenated in front
        of the conditional one along the batch dimension.
        """
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            # One conditioning image is required per IP-Adapter projection layer.
            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # Plain `ImageProjection` layers consume pooled embeddings; every
                # other projection type consumes hidden states (see `encode_image`).
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                # Stack adds a leading repeat dimension of size num_images_per_prompt.
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if do_classifier_free_guidance:
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            # Pre-computed embeds: with CFG they are expected to already contain the
            # negative half in the first chunk along dim 0 (see `check_inputs` for the
            # accepted 3D/4D layouts — confirm against callers).
            repeat_dims = [1]
            image_embeds = []
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                    single_negative_image_embeds = single_negative_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
                    )
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                else:
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                image_embeds.append(single_image_embeds)

        return image_embeds
537
+
538
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
539
+ def prepare_extra_step_kwargs(self, generator, eta):
540
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
541
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
542
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
543
+ # and should be between [0, 1]
544
+
545
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
546
+ extra_step_kwargs = {}
547
+ if accepts_eta:
548
+ extra_step_kwargs["eta"] = eta
549
+
550
+ # check if the scheduler accepts generator
551
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
552
+ if accepts_generator:
553
+ extra_step_kwargs["generator"] = generator
554
+ return extra_step_kwargs
555
+
556
    def check_inputs(
        self,
        prompt,
        prompt_2,
        image,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        negative_pooled_prompt_embeds=None,
        controlnet_conditioning_scale=1.0,
        control_guidance_start=0.0,
        control_guidance_end=1.0,
        callback_on_step_end_tensor_inputs=None,
    ):
        """Validate the argument combination passed to `__call__`.

        Raises `ValueError` or `TypeError` on inconsistent inputs; returns nothing.
        The list-normalization of `control_guidance_start`/`end` below is local to
        this method and does not affect the caller's values.
        """
        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Prompt arguments: exactly one of `prompt` / `prompt_embeds` must be given,
        # and `prompt_2` may not be combined with pre-computed embeddings.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        # SDXL additionally needs the pooled embeddings alongside the sequence ones.
        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

        # `prompt` needs more sophisticated handling when there are multiple
        # conditionings.
        if isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(prompt, list):
                logger.warning(
                    f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                    " prompts. The conditionings will be fixed across the prompts."
                )

        # Check `image`
        # `torch.compile` wraps the controlnet in an OptimizedModule; the real module
        # is then reachable via `_orig_mod`.
        is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
            self.controlnet, torch._dynamo.eval_frame.OptimizedModule
        )
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            self.check_image(image, prompt, prompt_embeds)
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if not isinstance(image, list):
                raise TypeError("For multiple controlnets: `image` must be type `list`")

            # When `image` is a nested list:
            # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
            elif any(isinstance(i, list) for i in image):
                raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif len(image) != len(self.controlnet.nets):
                raise ValueError(
                    f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
                )

            for image_ in image:
                self.check_image(image_, prompt, prompt_embeds)
        else:
            # Unreachable for the controlnet types constructed by this pipeline;
            # guards against unexpected new controlnet classes.
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            # Same unreachable guard as above.
            assert False

        # Normalize scalars to one-element lists so the zip below works uniformly.
        if not isinstance(control_guidance_start, (tuple, list)):
            control_guidance_start = [control_guidance_start]

        if not isinstance(control_guidance_end, (tuple, list)):
            control_guidance_end = [control_guidance_end]

        if len(control_guidance_start) != len(control_guidance_end):
            raise ValueError(
                f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
            )

        if isinstance(self.controlnet, MultiControlNetModel):
            if len(control_guidance_start) != len(self.controlnet.nets):
                raise ValueError(
                    f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
                )

        # Each (start, end) pair must describe a non-empty sub-interval of [0, 1].
        for start, end in zip(control_guidance_start, control_guidance_end):
            if start >= end:
                raise ValueError(
                    f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
                )
            if start < 0.0:
                raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
            if end > 1.0:
                raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )
744
+
745
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
746
+ def check_image(self, image, prompt, prompt_embeds):
747
+ image_is_pil = isinstance(image, PIL.Image.Image)
748
+ image_is_tensor = isinstance(image, torch.Tensor)
749
+ image_is_np = isinstance(image, np.ndarray)
750
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
751
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
752
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
753
+
754
+ if (
755
+ not image_is_pil
756
+ and not image_is_tensor
757
+ and not image_is_np
758
+ and not image_is_pil_list
759
+ and not image_is_tensor_list
760
+ and not image_is_np_list
761
+ ):
762
+ raise TypeError(
763
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
764
+ )
765
+
766
+ if image_is_pil:
767
+ image_batch_size = 1
768
+ else:
769
+ image_batch_size = len(image)
770
+
771
+ if prompt is not None and isinstance(prompt, str):
772
+ prompt_batch_size = 1
773
+ elif prompt is not None and isinstance(prompt, list):
774
+ prompt_batch_size = len(prompt)
775
+ elif prompt_embeds is not None:
776
+ prompt_batch_size = prompt_embeds.shape[0]
777
+
778
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
779
+ raise ValueError(
780
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
781
+ )
782
+
783
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
784
+ def prepare_image(
785
+ self,
786
+ image,
787
+ width,
788
+ height,
789
+ batch_size,
790
+ num_images_per_prompt,
791
+ device,
792
+ dtype,
793
+ do_classifier_free_guidance=False,
794
+ guess_mode=False,
795
+ ):
796
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
797
+ image_batch_size = image.shape[0]
798
+
799
+ if image_batch_size == 1:
800
+ repeat_by = batch_size
801
+ else:
802
+ # image batch size is the same as prompt batch size
803
+ repeat_by = num_images_per_prompt
804
+
805
+ image = image.repeat_interleave(repeat_by, dim=0)
806
+
807
+ image = image.to(device=device, dtype=dtype)
808
+
809
+ if do_classifier_free_guidance and not guess_mode:
810
+ image = torch.cat([image] * 2)
811
+
812
+ return image
813
+
814
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
815
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
816
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
817
+ if isinstance(generator, list) and len(generator) != batch_size:
818
+ raise ValueError(
819
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
820
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
821
+ )
822
+
823
+ if latents is None:
824
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
825
+ else:
826
+ latents = latents.to(device)
827
+
828
+ # scale the initial noise by the standard deviation required by the scheduler
829
+ latents = latents * self.scheduler.init_noise_sigma
830
+ return latents
831
+
832
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
833
+ def _get_add_time_ids(
834
+ self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
835
+ ):
836
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
837
+
838
+ passed_add_embed_dim = (
839
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
840
+ )
841
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
842
+
843
+ if expected_add_embed_dim != passed_add_embed_dim:
844
+ raise ValueError(
845
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
846
+ )
847
+
848
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
849
+ return add_time_ids
850
+
851
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
852
+ def upcast_vae(self):
853
+ dtype = self.vae.dtype
854
+ self.vae.to(dtype=torch.float32)
855
+ use_torch_2_0_or_xformers = isinstance(
856
+ self.vae.decoder.mid_block.attentions[0].processor,
857
+ (
858
+ AttnProcessor2_0,
859
+ XFormersAttnProcessor,
860
+ LoRAXFormersAttnProcessor,
861
+ LoRAAttnProcessor2_0,
862
+ ),
863
+ )
864
+ # if xformers or torch_2_0 is used attention block does not need
865
+ # to be in float32 which can save lots of memory
866
+ if use_torch_2_0_or_xformers:
867
+ self.vae.post_quant_conv.to(dtype)
868
+ self.vae.decoder.conv_in.to(dtype)
869
+ self.vae.decoder.mid_block.to(dtype)
870
+
871
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
872
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
873
+ """
874
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
875
+
876
+ Args:
877
+ timesteps (`torch.Tensor`):
878
+ generate embedding vectors at these timesteps
879
+ embedding_dim (`int`, *optional*, defaults to 512):
880
+ dimension of the embeddings to generate
881
+ dtype:
882
+ data type of the generated embeddings
883
+
884
+ Returns:
885
+ `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
886
+ """
887
+ assert len(w.shape) == 1
888
+ w = w * 1000.0
889
+
890
+ half_dim = embedding_dim // 2
891
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
892
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
893
+ emb = w.to(dtype)[:, None] * emb[None, :]
894
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
895
+ if embedding_dim % 2 == 1: # zero pad
896
+ emb = torch.nn.functional.pad(emb, (0, 1))
897
+ assert emb.shape == (w.shape[0], embedding_dim)
898
+ return emb
899
+
900
    @property
    def guidance_scale(self):
        # Classifier-free guidance weight for the current call — presumably assigned
        # in `__call__` (setter not visible in this chunk; confirm there).
        return self._guidance_scale
903
+
904
    @property
    def clip_skip(self):
        # Number of CLIP layers to skip when encoding prompts (see `encode_prompt`);
        # presumably stored per-call in `__call__` — confirm there.
        return self._clip_skip
907
+
908
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        # CFG is disabled when the UNet carries a timestep-conditioning projection
        # (`time_cond_proj_dim` set) — presumably a guidance-embedded/distilled model;
        # confirm against the UNet config.
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
914
+
915
    @property
    def cross_attention_kwargs(self):
        # Extra kwargs forwarded to the attention processors — presumably stored
        # per-call in `__call__` (setter not visible in this chunk).
        return self._cross_attention_kwargs
918
+
919
    @property
    def denoising_end(self):
        # Fraction of the schedule after which denoising stops early (None means run
        # the full schedule) — TODO confirm in `__call__`, which is outside this view.
        return self._denoising_end
922
+
923
    @property
    def num_timesteps(self):
        # Number of denoising timesteps used by the most recent call — presumably
        # recorded inside `__call__`; confirm there.
        return self._num_timesteps
926
+
927
+ @torch.no_grad()
928
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
929
+ def __call__(
930
+ self,
931
+ prompt: Union[str, List[str]] = None,
932
+ prompt_2: Optional[Union[str, List[str]]] = None,
933
+ image: PipelineImageInput = None,
934
+ height: Optional[int] = None,
935
+ width: Optional[int] = None,
936
+ num_inference_steps: int = 50,
937
+ denoising_end: Optional[float] = None,
938
+ guidance_scale: float = 5.0,
939
+ negative_prompt: Optional[Union[str, List[str]]] = None,
940
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
941
+ num_images_per_prompt: Optional[int] = 1,
942
+ eta: float = 0.0,
943
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
944
+ latents: Optional[torch.FloatTensor] = None,
945
+ prompt_embeds: Optional[torch.FloatTensor] = None,
946
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
947
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
948
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
949
+ ip_adapter_image: Optional[PipelineImageInput] = None,
950
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
951
+ output_type: Optional[str] = "pil",
952
+ return_dict: bool = True,
953
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
954
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
955
+ guess_mode: bool = False,
956
+ control_guidance_start: Union[float, List[float]] = 0.0,
957
+ control_guidance_end: Union[float, List[float]] = 1.0,
958
+ original_size: Tuple[int, int] = None,
959
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
960
+ target_size: Tuple[int, int] = None,
961
+ negative_original_size: Optional[Tuple[int, int]] = None,
962
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
963
+ negative_target_size: Optional[Tuple[int, int]] = None,
964
+ clip_skip: Optional[int] = None,
965
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
966
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
967
+ **kwargs,
968
+ ):
969
+ r"""
970
+ The call function to the pipeline for generation.
971
+
972
+ Args:
973
+ prompt (`str` or `List[str]`, *optional*):
974
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
975
+ prompt_2 (`str` or `List[str]`, *optional*):
976
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
977
+ used in both text-encoders.
978
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
979
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
980
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
981
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
982
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
983
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
984
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
985
+ input to a single ControlNet.
986
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
987
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
988
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
989
+ and checkpoints that are not specifically fine-tuned on low resolutions.
990
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
991
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
992
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
993
+ and checkpoints that are not specifically fine-tuned on low resolutions.
994
+ num_inference_steps (`int`, *optional*, defaults to 50):
995
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
996
+ expense of slower inference.
997
+ denoising_end (`float`, *optional*):
998
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
999
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1000
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1001
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1002
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1003
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1004
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1005
+ A higher guidance scale value encourages the model to generate images closely linked to the text
1006
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1007
+ negative_prompt (`str` or `List[str]`, *optional*):
1008
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1009
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
1010
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1011
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
1012
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
1013
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1014
+ The number of images to generate per prompt.
1015
+ eta (`float`, *optional*, defaults to 0.0):
1016
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1017
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1018
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1019
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1020
+ generation deterministic.
1021
+ latents (`torch.FloatTensor`, *optional*):
1022
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1023
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1024
+ tensor is generated by sampling using the supplied random `generator`.
1025
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1026
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1027
+ provided, text embeddings are generated from the `prompt` input argument.
1028
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1029
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1030
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
1031
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1032
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1033
+ not provided, pooled text embeddings are generated from `prompt` input argument.
1034
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1035
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
1036
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
1037
+ argument.
1038
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1039
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
1040
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
1041
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
1042
+ if `do_classifier_free_guidance` is set to `True`.
1043
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
1044
+ output_type (`str`, *optional*, defaults to `"pil"`):
1045
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1046
+ return_dict (`bool`, *optional*, defaults to `True`):
1047
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1048
+ plain tuple.
1049
+ cross_attention_kwargs (`dict`, *optional*):
1050
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
1051
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1052
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1053
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
1054
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
1055
+ the corresponding scale as a list.
1056
+ guess_mode (`bool`, *optional*, defaults to `False`):
1057
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
1058
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
1059
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1060
+ The percentage of total steps at which the ControlNet starts applying.
1061
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1062
+ The percentage of total steps at which the ControlNet stops applying.
1063
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1064
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1065
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1066
+ explained in section 2.2 of
1067
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1068
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1069
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1070
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1071
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1072
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1073
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1074
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1075
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1076
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1077
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1078
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1079
+ micro-conditioning as explained in section 2.2 of
1080
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1081
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1082
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1083
+ To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
1084
+ micro-conditioning as explained in section 2.2 of
1085
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1086
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1087
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1088
+ To negatively condition the generation process based on a target image resolution. It should be as same
1089
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1090
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1091
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1092
+ clip_skip (`int`, *optional*):
1093
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1094
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1095
+ callback_on_step_end (`Callable`, *optional*):
1096
+ A function that calls at the end of each denoising steps during the inference. The function is called
1097
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1098
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1099
+ `callback_on_step_end_tensor_inputs`.
1100
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1101
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1102
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1103
+ `._callback_tensor_inputs` attribute of your pipeine class.
1104
+
1105
+ Examples:
1106
+
1107
+ Returns:
1108
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1109
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1110
+ otherwise a `tuple` is returned containing the output images.
1111
+ """
1112
+
1113
+ callback = kwargs.pop("callback", None)
1114
+ callback_steps = kwargs.pop("callback_steps", None)
1115
+
1116
+ if callback is not None:
1117
+ deprecate(
1118
+ "callback",
1119
+ "1.0.0",
1120
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1121
+ )
1122
+ if callback_steps is not None:
1123
+ deprecate(
1124
+ "callback_steps",
1125
+ "1.0.0",
1126
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1127
+ )
1128
+
1129
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1130
+
1131
+ # align format for control guidance
1132
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1133
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1134
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1135
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1136
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1137
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1138
+ control_guidance_start, control_guidance_end = (
1139
+ mult * [control_guidance_start],
1140
+ mult * [control_guidance_end],
1141
+ )
1142
+
1143
+ # 1. Check inputs. Raise error if not correct
1144
+ self.check_inputs(
1145
+ prompt,
1146
+ prompt_2,
1147
+ image,
1148
+ callback_steps,
1149
+ negative_prompt,
1150
+ negative_prompt_2,
1151
+ prompt_embeds,
1152
+ negative_prompt_embeds,
1153
+ pooled_prompt_embeds,
1154
+ ip_adapter_image,
1155
+ ip_adapter_image_embeds,
1156
+ negative_pooled_prompt_embeds,
1157
+ controlnet_conditioning_scale,
1158
+ control_guidance_start,
1159
+ control_guidance_end,
1160
+ callback_on_step_end_tensor_inputs,
1161
+ )
1162
+
1163
+ self._guidance_scale = guidance_scale
1164
+ self._clip_skip = clip_skip
1165
+ self._cross_attention_kwargs = cross_attention_kwargs
1166
+ self._denoising_end = denoising_end
1167
+
1168
+ # 2. Define call parameters
1169
+ if prompt is not None and isinstance(prompt, str):
1170
+ batch_size = 1
1171
+ elif prompt is not None and isinstance(prompt, list):
1172
+ batch_size = len(prompt)
1173
+ else:
1174
+ batch_size = prompt_embeds.shape[0]
1175
+
1176
+ device = self._execution_device
1177
+
1178
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1179
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1180
+
1181
+ global_pool_conditions = (
1182
+ controlnet.config.global_pool_conditions
1183
+ if isinstance(controlnet, ControlNetModel)
1184
+ else controlnet.nets[0].config.global_pool_conditions
1185
+ )
1186
+ guess_mode = guess_mode or global_pool_conditions
1187
+
1188
+ # 3.1 Encode input prompt
1189
+ text_encoder_lora_scale = (
1190
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1191
+ )
1192
+ (
1193
+ prompt_embeds,
1194
+ negative_prompt_embeds,
1195
+ pooled_prompt_embeds,
1196
+ negative_pooled_prompt_embeds,
1197
+ ) = self.encode_prompt(
1198
+ prompt,
1199
+ prompt_2,
1200
+ device,
1201
+ num_images_per_prompt,
1202
+ self.do_classifier_free_guidance,
1203
+ negative_prompt,
1204
+ negative_prompt_2,
1205
+ prompt_embeds=prompt_embeds,
1206
+ negative_prompt_embeds=negative_prompt_embeds,
1207
+ pooled_prompt_embeds=pooled_prompt_embeds,
1208
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1209
+ lora_scale=text_encoder_lora_scale,
1210
+ clip_skip=self.clip_skip,
1211
+ )
1212
+
1213
+ # 3.2 Encode ip_adapter_image
1214
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1215
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1216
+ ip_adapter_image,
1217
+ ip_adapter_image_embeds,
1218
+ device,
1219
+ batch_size * num_images_per_prompt,
1220
+ self.do_classifier_free_guidance,
1221
+ )
1222
+
1223
+ # 4. Prepare image
1224
+ if isinstance(controlnet, ControlNetModel):
1225
+ image = self.prepare_image(
1226
+ image=image,
1227
+ width=width,
1228
+ height=height,
1229
+ batch_size=batch_size * num_images_per_prompt,
1230
+ num_images_per_prompt=num_images_per_prompt,
1231
+ device=device,
1232
+ dtype=controlnet.dtype,
1233
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1234
+ guess_mode=guess_mode,
1235
+ )
1236
+ height, width = image.shape[-2:]
1237
+ elif isinstance(controlnet, MultiControlNetModel):
1238
+ images = []
1239
+
1240
+ for image_ in image:
1241
+ image_ = self.prepare_image(
1242
+ image=image_,
1243
+ width=width,
1244
+ height=height,
1245
+ batch_size=batch_size * num_images_per_prompt,
1246
+ num_images_per_prompt=num_images_per_prompt,
1247
+ device=device,
1248
+ dtype=controlnet.dtype,
1249
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1250
+ guess_mode=guess_mode,
1251
+ )
1252
+
1253
+ images.append(image_)
1254
+
1255
+ image = images
1256
+ height, width = image[0].shape[-2:]
1257
+ else:
1258
+ assert False
1259
+
1260
+ # 5. Prepare timesteps
1261
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1262
+ timesteps = self.scheduler.timesteps
1263
+ self._num_timesteps = len(timesteps)
1264
+
1265
+ # 6. Prepare latent variables
1266
+ num_channels_latents = self.unet.config.in_channels
1267
+ latents = self.prepare_latents(
1268
+ batch_size * num_images_per_prompt,
1269
+ num_channels_latents,
1270
+ height,
1271
+ width,
1272
+ prompt_embeds.dtype,
1273
+ device,
1274
+ generator,
1275
+ latents,
1276
+ )
1277
+
1278
+ # 6.5 Optionally get Guidance Scale Embedding
1279
+ timestep_cond = None
1280
+ if self.unet.config.time_cond_proj_dim is not None:
1281
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1282
+ timestep_cond = self.get_guidance_scale_embedding(
1283
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1284
+ ).to(device=device, dtype=latents.dtype)
1285
+
1286
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1287
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1288
+
1289
+ # 7.1 Create tensor stating which controlnets to keep
1290
+ controlnet_keep = []
1291
+ for i in range(len(timesteps)):
1292
+ keeps = [
1293
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1294
+ for s, e in zip(control_guidance_start, control_guidance_end)
1295
+ ]
1296
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1297
+
1298
+ # 7.2 Prepare added time ids & embeddings
1299
+ if isinstance(image, list):
1300
+ original_size = original_size or image[0].shape[-2:]
1301
+ else:
1302
+ original_size = original_size or image.shape[-2:]
1303
+ target_size = target_size or (height, width)
1304
+
1305
+ add_text_embeds = pooled_prompt_embeds
1306
+ if self.text_encoder_2 is None:
1307
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1308
+ else:
1309
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1310
+
1311
+ add_time_ids = self._get_add_time_ids(
1312
+ original_size,
1313
+ crops_coords_top_left,
1314
+ target_size,
1315
+ dtype=prompt_embeds.dtype,
1316
+ text_encoder_projection_dim=text_encoder_projection_dim,
1317
+ )
1318
+
1319
+ if negative_original_size is not None and negative_target_size is not None:
1320
+ negative_add_time_ids = self._get_add_time_ids(
1321
+ negative_original_size,
1322
+ negative_crops_coords_top_left,
1323
+ negative_target_size,
1324
+ dtype=prompt_embeds.dtype,
1325
+ text_encoder_projection_dim=text_encoder_projection_dim,
1326
+ )
1327
+ else:
1328
+ negative_add_time_ids = add_time_ids
1329
+
1330
+ if self.do_classifier_free_guidance:
1331
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1332
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1333
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1334
+
1335
+ prompt_embeds = prompt_embeds.to(device)
1336
+ add_text_embeds = add_text_embeds.to(device)
1337
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1338
+
1339
+ # 8. Denoising loop
1340
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1341
+
1342
+ # 8.1 Apply denoising_end
1343
+ if (
1344
+ self.denoising_end is not None
1345
+ and isinstance(self.denoising_end, float)
1346
+ and self.denoising_end > 0
1347
+ and self.denoising_end < 1
1348
+ ):
1349
+ discrete_timestep_cutoff = int(
1350
+ round(
1351
+ self.scheduler.config.num_train_timesteps
1352
+ - (self.denoising_end * self.scheduler.config.num_train_timesteps)
1353
+ )
1354
+ )
1355
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1356
+ timesteps = timesteps[:num_inference_steps]
1357
+
1358
+ is_unet_compiled = is_compiled_module(self.unet)
1359
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
1360
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
1361
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1362
+ for i, t in enumerate(timesteps):
1363
+ # Relevant thread:
1364
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
1365
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
1366
+ torch._inductor.cudagraph_mark_step_begin()
1367
+ # expand the latents if we are doing classifier free guidance
1368
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1369
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1370
+
1371
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1372
+
1373
+ # controlnet(s) inference
1374
+ if guess_mode and self.do_classifier_free_guidance:
1375
+ # Infer ControlNet only for the conditional batch.
1376
+ control_model_input = latents
1377
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1378
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1379
+ controlnet_added_cond_kwargs = {
1380
+ "text_embeds": add_text_embeds.chunk(2)[1],
1381
+ "time_ids": add_time_ids.chunk(2)[1],
1382
+ }
1383
+ else:
1384
+ control_model_input = latent_model_input
1385
+ controlnet_prompt_embeds = prompt_embeds
1386
+ controlnet_added_cond_kwargs = added_cond_kwargs
1387
+
1388
+ if isinstance(controlnet_keep[i], list):
1389
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1390
+ else:
1391
+ controlnet_cond_scale = controlnet_conditioning_scale
1392
+ if isinstance(controlnet_cond_scale, list):
1393
+ controlnet_cond_scale = controlnet_cond_scale[0]
1394
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1395
+
1396
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1397
+ control_model_input,
1398
+ t,
1399
+ encoder_hidden_states=controlnet_prompt_embeds,
1400
+ controlnet_cond=image,
1401
+ conditioning_scale=cond_scale,
1402
+ guess_mode=guess_mode,
1403
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1404
+ return_dict=False,
1405
+ )
1406
+
1407
+ if guess_mode and self.do_classifier_free_guidance:
1408
+ # Infered ControlNet only for the conditional batch.
1409
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1410
+ # add 0 to the unconditional batch to keep it unchanged.
1411
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1412
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1413
+
1414
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1415
+ added_cond_kwargs["image_embeds"] = image_embeds
1416
+
1417
+ # predict the noise residual
1418
+ noise_pred = self.unet(
1419
+ latent_model_input,
1420
+ t,
1421
+ encoder_hidden_states=prompt_embeds,
1422
+ timestep_cond=timestep_cond,
1423
+ cross_attention_kwargs=self.cross_attention_kwargs,
1424
+ down_block_additional_residuals=down_block_res_samples,
1425
+ mid_block_additional_residual=mid_block_res_sample,
1426
+ added_cond_kwargs=added_cond_kwargs,
1427
+ return_dict=False,
1428
+ )[0]
1429
+
1430
+ # perform guidance
1431
+ if self.do_classifier_free_guidance:
1432
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1433
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1434
+
1435
+ # compute the previous noisy sample x_t -> x_t-1
1436
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1437
+
1438
+ if callback_on_step_end is not None:
1439
+ callback_kwargs = {}
1440
+ for k in callback_on_step_end_tensor_inputs:
1441
+ callback_kwargs[k] = locals()[k]
1442
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1443
+
1444
+ latents = callback_outputs.pop("latents", latents)
1445
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1446
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1447
+
1448
+ # call the callback, if provided
1449
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1450
+ progress_bar.update()
1451
+ if callback is not None and i % callback_steps == 0:
1452
+ step_idx = i // getattr(self.scheduler, "order", 1)
1453
+ callback(step_idx, t, latents)
1454
+
1455
+ if not output_type == "latent":
1456
+ # make sure the VAE is in float32 mode, as it overflows in float16
1457
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1458
+
1459
+ if needs_upcasting:
1460
+ self.upcast_vae()
1461
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1462
+
1463
+ # unscale/denormalize the latents
1464
+ # denormalize with the mean and std if available and not None
1465
+ has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
1466
+ has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
1467
+ if has_latents_mean and has_latents_std:
1468
+ latents_mean = (
1469
+ torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1470
+ )
1471
+ latents_std = (
1472
+ torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1473
+ )
1474
+ latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
1475
+ else:
1476
+ latents = latents / self.vae.config.scaling_factor
1477
+
1478
+ image = self.vae.decode(latents, return_dict=False)[0]
1479
+
1480
+ # cast back to fp16 if needed
1481
+ if needs_upcasting:
1482
+ self.vae.to(dtype=torch.float16)
1483
+ else:
1484
+ image = latents
1485
+
1486
+ if not output_type == "latent":
1487
+ # apply watermark if available
1488
+ if self.watermark is not None:
1489
+ image = self.watermark.apply_watermark(image)
1490
+
1491
+ image = self.image_processor.postprocess(image, output_type=output_type)
1492
+
1493
+ # Offload all models
1494
+ self.maybe_free_model_hooks()
1495
+
1496
+ if not return_dict:
1497
+ return (image,)
1498
+
1499
+ return StableDiffusionXLPipelineOutput(images=image)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py ADDED
@@ -0,0 +1,1626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import (
24
+ CLIPImageProcessor,
25
+ CLIPTextModel,
26
+ CLIPTextModelWithProjection,
27
+ CLIPTokenizer,
28
+ CLIPVisionModelWithProjection,
29
+ )
30
+
31
+ from diffusers.utils.import_utils import is_invisible_watermark_available
32
+
33
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
34
+ from ...loaders import (
35
+ IPAdapterMixin,
36
+ StableDiffusionXLLoraLoaderMixin,
37
+ TextualInversionLoaderMixin,
38
+ )
39
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
40
+ from ...models.attention_processor import (
41
+ AttnProcessor2_0,
42
+ LoRAAttnProcessor2_0,
43
+ LoRAXFormersAttnProcessor,
44
+ XFormersAttnProcessor,
45
+ )
46
+ from ...models.lora import adjust_lora_scale_text_encoder
47
+ from ...schedulers import KarrasDiffusionSchedulers
48
+ from ...utils import (
49
+ USE_PEFT_BACKEND,
50
+ deprecate,
51
+ logging,
52
+ replace_example_docstring,
53
+ scale_lora_layers,
54
+ unscale_lora_layers,
55
+ )
56
+ from ...utils.torch_utils import is_compiled_module, randn_tensor
57
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
58
+ from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
59
+
60
+
61
+ if is_invisible_watermark_available():
62
+ from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
63
+
64
+ from .multicontrolnet import MultiControlNetModel
65
+
66
+
67
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
68
+
69
+
70
+ EXAMPLE_DOC_STRING = """
71
+ Examples:
72
+ ```py
73
+ >>> # pip install accelerate transformers safetensors diffusers
74
+
75
+ >>> import torch
76
+ >>> import numpy as np
77
+ >>> from PIL import Image
78
+
79
+ >>> from transformers import DPTFeatureExtractor, DPTForDepthEstimation
80
+ >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL
81
+ >>> from diffusers.utils import load_image
82
+
83
+
84
+ >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")
85
+ >>> feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas")
86
+ >>> controlnet = ControlNetModel.from_pretrained(
87
+ ... "diffusers/controlnet-depth-sdxl-1.0-small",
88
+ ... variant="fp16",
89
+ ... use_safetensors=True,
90
+ ... torch_dtype=torch.float16,
91
+ ... ).to("cuda")
92
+ >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda")
93
+ >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
94
+ ... "stabilityai/stable-diffusion-xl-base-1.0",
95
+ ... controlnet=controlnet,
96
+ ... vae=vae,
97
+ ... variant="fp16",
98
+ ... use_safetensors=True,
99
+ ... torch_dtype=torch.float16,
100
+ ... ).to("cuda")
101
+ >>> pipe.enable_model_cpu_offload()
102
+
103
+
104
+ >>> def get_depth_map(image):
105
+ ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")
106
+ ... with torch.no_grad(), torch.autocast("cuda"):
107
+ ... depth_map = depth_estimator(image).predicted_depth
108
+
109
+ ... depth_map = torch.nn.functional.interpolate(
110
+ ... depth_map.unsqueeze(1),
111
+ ... size=(1024, 1024),
112
+ ... mode="bicubic",
113
+ ... align_corners=False,
114
+ ... )
115
+ ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
116
+ ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
117
+ ... depth_map = (depth_map - depth_min) / (depth_max - depth_min)
118
+ ... image = torch.cat([depth_map] * 3, dim=1)
119
+ ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
120
+ ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
121
+ ... return image
122
+
123
+
124
+ >>> prompt = "A robot, 4k photo"
125
+ >>> image = load_image(
126
+ ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
127
+ ... "/kandinsky/cat.png"
128
+ ... ).resize((1024, 1024))
129
+ >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
130
+ >>> depth_image = get_depth_map(image)
131
+
132
+ >>> images = pipe(
133
+ ... prompt,
134
+ ... image=image,
135
+ ... control_image=depth_image,
136
+ ... strength=0.99,
137
+ ... num_inference_steps=50,
138
+ ... controlnet_conditioning_scale=controlnet_conditioning_scale,
139
+ ... ).images
140
+ >>> images[0].save(f"robot_cat.png")
141
+ ```
142
+ """
143
+
144
+
145
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    """Extract a latent tensor from a VAE encoder output.

    When the output exposes a `latent_dist`, latents are either drawn
    stochastically from it (`sample_mode="sample"`) or taken as its mode
    (`sample_mode="argmax"`). Outputs carrying a plain `latents` attribute are
    returned as-is; anything else is rejected.
    """
    if hasattr(encoder_output, "latent_dist"):
        dist = encoder_output.latent_dist
        if sample_mode == "sample":
            return dist.sample(generator)
        if sample_mode == "argmax":
            return dist.mode()
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")
157
+
158
+
159
+ class StableDiffusionXLControlNetImg2ImgPipeline(
160
+ DiffusionPipeline,
161
+ StableDiffusionMixin,
162
+ TextualInversionLoaderMixin,
163
+ StableDiffusionXLLoraLoaderMixin,
164
+ IPAdapterMixin,
165
+ ):
166
+ r"""
167
+ Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance.
168
+
169
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
170
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
171
+
172
+ The pipeline also inherits the following loading methods:
173
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
174
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
175
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
176
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
177
+
178
+ Args:
179
+ vae ([`AutoencoderKL`]):
180
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
181
+ text_encoder ([`CLIPTextModel`]):
182
+ Frozen text-encoder. Stable Diffusion uses the text portion of
183
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
184
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
185
+ text_encoder_2 ([`CLIPTextModelWithProjection`]):
186
+ Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
187
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
188
+ specifically the
189
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
190
+ variant.
191
+ tokenizer (`CLIPTokenizer`):
192
+ Tokenizer of class
193
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
194
+ tokenizer_2 (`CLIPTokenizer`):
195
+ Second Tokenizer of class
196
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
197
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
198
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
199
+ Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
200
+ as a list, the outputs from each ControlNet are added together to create one combined additional
201
+ conditioning.
202
+ scheduler ([`SchedulerMixin`]):
203
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
204
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
205
+ requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
206
+ Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
207
+ config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
208
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
209
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
210
+ `stabilityai/stable-diffusion-xl-base-1-0`.
211
+ add_watermarker (`bool`, *optional*):
212
+ Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
213
+ watermark output images. If not defined, it will default to True if the package is installed, otherwise no
214
+ watermarker will be used.
215
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
216
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
217
+ """
218
+
219
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
220
+ _optional_components = [
221
+ "tokenizer",
222
+ "tokenizer_2",
223
+ "text_encoder",
224
+ "text_encoder_2",
225
+ "feature_extractor",
226
+ "image_encoder",
227
+ ]
228
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
229
+
230
+ def __init__(
231
+ self,
232
+ vae: AutoencoderKL,
233
+ text_encoder: CLIPTextModel,
234
+ text_encoder_2: CLIPTextModelWithProjection,
235
+ tokenizer: CLIPTokenizer,
236
+ tokenizer_2: CLIPTokenizer,
237
+ unet: UNet2DConditionModel,
238
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
239
+ scheduler: KarrasDiffusionSchedulers,
240
+ requires_aesthetics_score: bool = False,
241
+ force_zeros_for_empty_prompt: bool = True,
242
+ add_watermarker: Optional[bool] = None,
243
+ feature_extractor: CLIPImageProcessor = None,
244
+ image_encoder: CLIPVisionModelWithProjection = None,
245
+ ):
246
+ super().__init__()
247
+
248
+ if isinstance(controlnet, (list, tuple)):
249
+ controlnet = MultiControlNetModel(controlnet)
250
+
251
+ self.register_modules(
252
+ vae=vae,
253
+ text_encoder=text_encoder,
254
+ text_encoder_2=text_encoder_2,
255
+ tokenizer=tokenizer,
256
+ tokenizer_2=tokenizer_2,
257
+ unet=unet,
258
+ controlnet=controlnet,
259
+ scheduler=scheduler,
260
+ feature_extractor=feature_extractor,
261
+ image_encoder=image_encoder,
262
+ )
263
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
264
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
265
+ self.control_image_processor = VaeImageProcessor(
266
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
267
+ )
268
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
269
+
270
+ if add_watermarker:
271
+ self.watermark = StableDiffusionXLWatermarker()
272
+ else:
273
+ self.watermark = None
274
+
275
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
276
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
277
+
278
    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.

        Returns:
            A 4-tuple `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
            negative_pooled_prompt_embeds)`; the negative entries are only meaningful when
            `do_classifier_free_guidance` is True.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder, lora_scale)

            if self.text_encoder_2 is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder_2, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt

        # Batch size comes from the prompt list when given, otherwise from the
        # precomputed embeddings.
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders. SDXL uses two; a refiner-style
        # checkpoint may ship only the second pair.
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            # NOTE: the loop deliberately rebinds `prompt` / `prompt_embeds`; the
            # per-encoder embeddings are concatenated along the feature dim below.
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

                # Warn (rather than fail) when the prompt exceeds CLIP's window.
                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )

                prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)

                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                if clip_skip is None:
                    prompt_embeds = prompt_embeds.hidden_states[-2]
                else:
                    # "2" because SDXL always indexes from the penultimate layer.
                    prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]

                prompt_embeds_list.append(prompt_embeds)

            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            # SDXL base convention: an empty negative prompt maps to all-zero embeddings.
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            # normalize str to list
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
            negative_prompt_2 = (
                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
            )

            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                # Pad negatives to the positive sequence length so shapes match for CFG.
                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        if self.text_encoder_2 is not None:
            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        else:
            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            if self.text_encoder_2 is not None:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            else:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder_2, lora_scale)

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
512
+
513
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
514
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
515
+ dtype = next(self.image_encoder.parameters()).dtype
516
+
517
+ if not isinstance(image, torch.Tensor):
518
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
519
+
520
+ image = image.to(device=device, dtype=dtype)
521
+ if output_hidden_states:
522
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
523
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
524
+ uncond_image_enc_hidden_states = self.image_encoder(
525
+ torch.zeros_like(image), output_hidden_states=True
526
+ ).hidden_states[-2]
527
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
528
+ num_images_per_prompt, dim=0
529
+ )
530
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
531
+ else:
532
+ image_embeds = self.image_encoder(image).image_embeds
533
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
534
+ uncond_image_embeds = torch.zeros_like(image_embeds)
535
+
536
+ return image_embeds, uncond_image_embeds
537
+
538
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        """Build per-adapter image embeddings for IP-Adapter conditioning.

        Either encodes raw `ip_adapter_image` inputs (one per loaded IP Adapter) or
        re-batches precomputed `ip_adapter_image_embeds`. Returns a list with one
        embedding tensor per adapter; under classifier-free guidance each tensor has
        the negative embeddings concatenated in front of the positive ones.
        """
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            # One conditioning image is required per loaded IP-Adapter projection layer.
            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # Plain `ImageProjection` layers consume pooled embeddings; all other
                # projection types consume penultimate hidden states.
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                # Repeat along a new leading dim for num_images_per_prompt copies.
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if do_classifier_free_guidance:
                    # Negative embeddings come first, matching the latent duplication order.
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            # Precomputed embeddings: only re-batch them for num_images_per_prompt.
            repeat_dims = [1]
            image_embeds = []
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    # Precomputed CFG embeds are stored as [negative; positive] along dim 0.
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                    single_negative_image_embeds = single_negative_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
                    )
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                else:
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                image_embeds.append(single_image_embeds)

        return image_embeds
589
+
590
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
591
+ def prepare_extra_step_kwargs(self, generator, eta):
592
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
593
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
594
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
595
+ # and should be between [0, 1]
596
+
597
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
598
+ extra_step_kwargs = {}
599
+ if accepts_eta:
600
+ extra_step_kwargs["eta"] = eta
601
+
602
+ # check if the scheduler accepts generator
603
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
604
+ if accepts_generator:
605
+ extra_step_kwargs["generator"] = generator
606
+ return extra_step_kwargs
607
+
608
+ def check_inputs(
609
+ self,
610
+ prompt,
611
+ prompt_2,
612
+ image,
613
+ strength,
614
+ num_inference_steps,
615
+ callback_steps,
616
+ negative_prompt=None,
617
+ negative_prompt_2=None,
618
+ prompt_embeds=None,
619
+ negative_prompt_embeds=None,
620
+ pooled_prompt_embeds=None,
621
+ negative_pooled_prompt_embeds=None,
622
+ ip_adapter_image=None,
623
+ ip_adapter_image_embeds=None,
624
+ controlnet_conditioning_scale=1.0,
625
+ control_guidance_start=0.0,
626
+ control_guidance_end=1.0,
627
+ callback_on_step_end_tensor_inputs=None,
628
+ ):
629
+ if strength < 0 or strength > 1:
630
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
631
+ if num_inference_steps is None:
632
+ raise ValueError("`num_inference_steps` cannot be None.")
633
+ elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
634
+ raise ValueError(
635
+ f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
636
+ f" {type(num_inference_steps)}."
637
+ )
638
+
639
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
640
+ raise ValueError(
641
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
642
+ f" {type(callback_steps)}."
643
+ )
644
+
645
+ if callback_on_step_end_tensor_inputs is not None and not all(
646
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
647
+ ):
648
+ raise ValueError(
649
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
650
+ )
651
+
652
+ if prompt is not None and prompt_embeds is not None:
653
+ raise ValueError(
654
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
655
+ " only forward one of the two."
656
+ )
657
+ elif prompt_2 is not None and prompt_embeds is not None:
658
+ raise ValueError(
659
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
660
+ " only forward one of the two."
661
+ )
662
+ elif prompt is None and prompt_embeds is None:
663
+ raise ValueError(
664
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
665
+ )
666
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
667
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
668
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
669
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
670
+
671
+ if negative_prompt is not None and negative_prompt_embeds is not None:
672
+ raise ValueError(
673
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
674
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
675
+ )
676
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
677
+ raise ValueError(
678
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
679
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
680
+ )
681
+
682
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
683
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
684
+ raise ValueError(
685
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
686
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
687
+ f" {negative_prompt_embeds.shape}."
688
+ )
689
+
690
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
691
+ raise ValueError(
692
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
693
+ )
694
+
695
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
696
+ raise ValueError(
697
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
698
+ )
699
+
700
+ # `prompt` needs more sophisticated handling when there are multiple
701
+ # conditionings.
702
+ if isinstance(self.controlnet, MultiControlNetModel):
703
+ if isinstance(prompt, list):
704
+ logger.warning(
705
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
706
+ " prompts. The conditionings will be fixed across the prompts."
707
+ )
708
+
709
+ # Check `image`
710
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
711
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
712
+ )
713
+ if (
714
+ isinstance(self.controlnet, ControlNetModel)
715
+ or is_compiled
716
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
717
+ ):
718
+ self.check_image(image, prompt, prompt_embeds)
719
+ elif (
720
+ isinstance(self.controlnet, MultiControlNetModel)
721
+ or is_compiled
722
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
723
+ ):
724
+ if not isinstance(image, list):
725
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
726
+
727
+ # When `image` is a nested list:
728
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
729
+ elif any(isinstance(i, list) for i in image):
730
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
731
+ elif len(image) != len(self.controlnet.nets):
732
+ raise ValueError(
733
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
734
+ )
735
+
736
+ for image_ in image:
737
+ self.check_image(image_, prompt, prompt_embeds)
738
+ else:
739
+ assert False
740
+
741
+ # Check `controlnet_conditioning_scale`
742
+ if (
743
+ isinstance(self.controlnet, ControlNetModel)
744
+ or is_compiled
745
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
746
+ ):
747
+ if not isinstance(controlnet_conditioning_scale, float):
748
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
749
+ elif (
750
+ isinstance(self.controlnet, MultiControlNetModel)
751
+ or is_compiled
752
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
753
+ ):
754
+ if isinstance(controlnet_conditioning_scale, list):
755
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
756
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
757
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
758
+ self.controlnet.nets
759
+ ):
760
+ raise ValueError(
761
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
762
+ " the same length as the number of controlnets"
763
+ )
764
+ else:
765
+ assert False
766
+
767
+ if not isinstance(control_guidance_start, (tuple, list)):
768
+ control_guidance_start = [control_guidance_start]
769
+
770
+ if not isinstance(control_guidance_end, (tuple, list)):
771
+ control_guidance_end = [control_guidance_end]
772
+
773
+ if len(control_guidance_start) != len(control_guidance_end):
774
+ raise ValueError(
775
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
776
+ )
777
+
778
+ if isinstance(self.controlnet, MultiControlNetModel):
779
+ if len(control_guidance_start) != len(self.controlnet.nets):
780
+ raise ValueError(
781
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
782
+ )
783
+
784
+ for start, end in zip(control_guidance_start, control_guidance_end):
785
+ if start >= end:
786
+ raise ValueError(
787
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
788
+ )
789
+ if start < 0.0:
790
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
791
+ if end > 1.0:
792
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
793
+
794
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
795
+ raise ValueError(
796
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
797
+ )
798
+
799
+ if ip_adapter_image_embeds is not None:
800
+ if not isinstance(ip_adapter_image_embeds, list):
801
+ raise ValueError(
802
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
803
+ )
804
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
805
+ raise ValueError(
806
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
807
+ )
808
+
809
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
810
def check_image(self, image, prompt, prompt_embeds):
    """Validate that `image` is an accepted conditioning-image type and that its
    batch size is compatible with the prompt batch size.

    Raises:
        TypeError: if `image` is not a PIL image, numpy array, torch tensor, or
            a list of one of those.
        ValueError: if the image batch size is neither 1 nor equal to the
            prompt batch size.
    """
    single_image_types = (PIL.Image.Image, torch.Tensor, np.ndarray)
    is_single_image = isinstance(image, single_image_types)
    # For lists, only the first element's type is inspected (mirrors upstream behavior).
    is_image_list = isinstance(image, list) and isinstance(image[0], single_image_types)

    if not (is_single_image or is_image_list):
        raise TypeError(
            f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
        )

    # A single PIL image is batch size 1; tensors/arrays/lists use their leading length.
    image_batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image)

    if isinstance(prompt, str):
        prompt_batch_size = 1
    elif isinstance(prompt, list):
        prompt_batch_size = len(prompt)
    elif prompt_embeds is not None:
        prompt_batch_size = prompt_embeds.shape[0]

    # An image batch of 1 broadcasts across any prompt batch; otherwise they must match.
    if image_batch_size != 1 and image_batch_size != prompt_batch_size:
        raise ValueError(
            f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
        )
846
+
847
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
848
def prepare_control_image(
    self,
    image,
    width,
    height,
    batch_size,
    num_images_per_prompt,
    device,
    dtype,
    do_classifier_free_guidance=False,
    guess_mode=False,
):
    """Preprocess the ControlNet conditioning image and expand it to the
    effective batch size.

    The image is resized/normalized by `self.control_image_processor`, repeated
    to `batch_size * num_images_per_prompt` samples, moved to `device`/`dtype`,
    and finally duplicated for classifier-free guidance unless `guess_mode` is
    active.
    """
    processed = self.control_image_processor.preprocess(image, height=height, width=width)
    processed = processed.to(dtype=torch.float32)

    # A single conditioning image is tiled across the whole prompt batch;
    # otherwise it is assumed to already match the prompt batch and is only
    # repeated once per generated image.
    repeats = batch_size if processed.shape[0] == 1 else num_images_per_prompt
    processed = processed.repeat_interleave(repeats, dim=0)
    processed = processed.to(device=device, dtype=dtype)

    if do_classifier_free_guidance and not guess_mode:
        # Duplicate for the unconditional/conditional halves of CFG.
        processed = torch.cat([processed] * 2)

    return processed
877
+
878
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
879
def get_timesteps(self, num_inference_steps, strength, device):
    """Trim the scheduler's timestep schedule according to img2img `strength`.

    `strength == 1.0` keeps the full schedule; smaller values skip the earliest
    (noisiest) steps so that only a fraction of the denoising is applied on top
    of the encoded input image. Returns the trimmed timesteps and the number of
    denoising steps that remain.
    """
    denoising_steps = min(int(num_inference_steps * strength), num_inference_steps)
    skipped_steps = max(num_inference_steps - denoising_steps, 0)

    # Higher-order schedulers store `order` entries per step, so index by order.
    first_index = skipped_steps * self.scheduler.order
    timesteps = self.scheduler.timesteps[first_index:]
    if hasattr(self.scheduler, "set_begin_index"):
        self.scheduler.set_begin_index(first_index)

    return timesteps, num_inference_steps - skipped_steps
889
+
890
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
891
def prepare_latents(
    self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
):
    """Build the initial img2img latents from `image`.

    A 4-channel `image` tensor is treated as latents directly; anything else is
    VAE-encoded. The latents are duplicated to `batch_size * num_images_per_prompt`
    and, when `add_noise` is True, noised to `timestep` via the scheduler.
    """
    if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
        raise ValueError(
            f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
        )

    # Offload text encoder if `enable_model_cpu_offload` was enabled
    if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
        self.text_encoder_2.to("cpu")
        torch.cuda.empty_cache()

    image = image.to(device=device, dtype=dtype)

    # Effective batch size for the rest of the method.
    batch_size = batch_size * num_images_per_prompt

    # A 4-channel input is assumed to already be latents; skip the VAE encode.
    if image.shape[1] == 4:
        init_latents = image

    else:
        # make sure the VAE is in float32 mode, as it overflows in float16
        if self.vae.config.force_upcast:
            image = image.float()
            self.vae.to(dtype=torch.float32)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        elif isinstance(generator, list):
            # One generator per sample: encode each image slice with its own generator
            # so results are reproducible per-sample.
            init_latents = [
                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

        if self.vae.config.force_upcast:
            # Restore the VAE to the pipeline dtype after the float32 encode.
            self.vae.to(dtype)

        init_latents = init_latents.to(dtype)
        # Scale to the latent space the UNet was trained on.
        init_latents = self.vae.config.scaling_factor * init_latents

    if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
        # expand init_latents for batch_size
        additional_image_per_prompt = batch_size // init_latents.shape[0]
        init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
    elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
        raise ValueError(
            f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
        )
    else:
        init_latents = torch.cat([init_latents], dim=0)

    if add_noise:
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

    latents = init_latents

    return latents
958
+
959
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
960
+ def _get_add_time_ids(
961
+ self,
962
+ original_size,
963
+ crops_coords_top_left,
964
+ target_size,
965
+ aesthetic_score,
966
+ negative_aesthetic_score,
967
+ negative_original_size,
968
+ negative_crops_coords_top_left,
969
+ negative_target_size,
970
+ dtype,
971
+ text_encoder_projection_dim=None,
972
+ ):
973
+ if self.config.requires_aesthetics_score:
974
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
975
+ add_neg_time_ids = list(
976
+ negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
977
+ )
978
+ else:
979
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
980
+ add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
981
+
982
+ passed_add_embed_dim = (
983
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
984
+ )
985
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
986
+
987
+ if (
988
+ expected_add_embed_dim > passed_add_embed_dim
989
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
990
+ ):
991
+ raise ValueError(
992
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
993
+ )
994
+ elif (
995
+ expected_add_embed_dim < passed_add_embed_dim
996
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
997
+ ):
998
+ raise ValueError(
999
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
1000
+ )
1001
+ elif expected_add_embed_dim != passed_add_embed_dim:
1002
+ raise ValueError(
1003
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1004
+ )
1005
+
1006
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1007
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1008
+
1009
+ return add_time_ids, add_neg_time_ids
1010
+
1011
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
1012
def upcast_vae(self):
    """Move the VAE to float32 to avoid float16 overflow during decoding.

    When a memory-efficient attention processor (xformers or torch 2.0 SDPA)
    is active, the attention-adjacent modules are moved back to the original
    dtype, since they do not need float32 in that case — saving memory.
    """
    original_dtype = self.vae.dtype
    self.vae.to(dtype=torch.float32)

    memory_efficient_processors = (
        AttnProcessor2_0,
        XFormersAttnProcessor,
        LoRAXFormersAttnProcessor,
        LoRAAttnProcessor2_0,
    )
    processor = self.vae.decoder.mid_block.attentions[0].processor
    # if xformers or torch_2_0 is used attention block does not need
    # to be in float32 which can save lots of memory
    if isinstance(processor, memory_efficient_processors):
        for module in (self.vae.post_quant_conv, self.vae.decoder.conv_in, self.vae.decoder.mid_block):
            module.to(original_dtype)
1030
+
1031
@property
def guidance_scale(self):
    """Classifier-free guidance scale currently in use (set by `__call__`)."""
    return self._guidance_scale
1034
+
1035
@property
def clip_skip(self):
    """Number of final CLIP layers skipped for prompt embeddings (set by `__call__`)."""
    return self._clip_skip
1038
+
1039
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
    """Whether classifier-free guidance is active, i.e. `guidance_scale > 1`."""
    return self._guidance_scale > 1
1045
+
1046
@property
def cross_attention_kwargs(self):
    """Extra kwargs forwarded to the attention processors (set by `__call__`)."""
    return self._cross_attention_kwargs
1049
+
1050
@property
def num_timesteps(self):
    # NOTE(review): presumably set to the length of the denoising schedule
    # during `__call__` — the assignment is outside this view; confirm.
    return self._num_timesteps
1053
+
1054
+ @torch.no_grad()
1055
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1056
+ def __call__(
1057
+ self,
1058
+ prompt: Union[str, List[str]] = None,
1059
+ prompt_2: Optional[Union[str, List[str]]] = None,
1060
+ image: PipelineImageInput = None,
1061
+ control_image: PipelineImageInput = None,
1062
+ height: Optional[int] = None,
1063
+ width: Optional[int] = None,
1064
+ strength: float = 0.8,
1065
+ num_inference_steps: int = 50,
1066
+ guidance_scale: float = 5.0,
1067
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1068
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
1069
+ num_images_per_prompt: Optional[int] = 1,
1070
+ eta: float = 0.0,
1071
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1072
+ latents: Optional[torch.FloatTensor] = None,
1073
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1074
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1075
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1076
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1077
+ ip_adapter_image: Optional[PipelineImageInput] = None,
1078
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
1079
+ output_type: Optional[str] = "pil",
1080
+ return_dict: bool = True,
1081
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1082
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
1083
+ guess_mode: bool = False,
1084
+ control_guidance_start: Union[float, List[float]] = 0.0,
1085
+ control_guidance_end: Union[float, List[float]] = 1.0,
1086
+ original_size: Tuple[int, int] = None,
1087
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
1088
+ target_size: Tuple[int, int] = None,
1089
+ negative_original_size: Optional[Tuple[int, int]] = None,
1090
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
1091
+ negative_target_size: Optional[Tuple[int, int]] = None,
1092
+ aesthetic_score: float = 6.0,
1093
+ negative_aesthetic_score: float = 2.5,
1094
+ clip_skip: Optional[int] = None,
1095
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1096
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1097
+ **kwargs,
1098
+ ):
1099
+ r"""
1100
+ Function invoked when calling the pipeline for generation.
1101
+
1102
+ Args:
1103
+ prompt (`str` or `List[str]`, *optional*):
1104
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
1105
+ instead.
1106
+ prompt_2 (`str` or `List[str]`, *optional*):
1107
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1108
+ used in both text-encoders
1109
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1110
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1111
+ The initial image will be used as the starting point for the image generation process. Can also accept
1112
+ image latents as `image`, if passing latents directly, it will not be encoded again.
1113
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1114
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1115
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
1116
+ the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
1117
+ also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
1118
+ height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
1119
+ specified in init, images must be passed as a list such that each element of the list can be correctly
1120
+ batched for input to a single controlnet.
1121
+ height (`int`, *optional*, defaults to the size of control_image):
1122
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
1123
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1124
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1125
+ width (`int`, *optional*, defaults to the size of control_image):
1126
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
1127
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1128
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1129
+ strength (`float`, *optional*, defaults to 0.8):
1130
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
1131
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
1132
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
1133
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
1134
+ essentially ignores `image`.
1135
+ num_inference_steps (`int`, *optional*, defaults to 50):
1136
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1137
+ expense of slower inference.
1138
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1139
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1140
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1141
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1142
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1143
+ usually at the expense of lower image quality.
1144
+ negative_prompt (`str` or `List[str]`, *optional*):
1145
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1146
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1147
+ less than `1`).
1148
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1149
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1150
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1151
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1152
+ The number of images to generate per prompt.
1153
+ eta (`float`, *optional*, defaults to 0.0):
1154
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1155
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1156
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1157
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1158
+ to make generation deterministic.
1159
+ latents (`torch.FloatTensor`, *optional*):
1160
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1161
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1162
+ tensor will be generated by sampling using the supplied random `generator`.
1163
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1164
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1165
+ provided, text embeddings will be generated from `prompt` input argument.
1166
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1167
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1168
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1169
+ argument.
1170
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1171
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1172
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1173
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1174
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1175
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1176
+ input argument.
1177
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1178
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
1179
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
1180
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
1181
+ if `do_classifier_free_guidance` is set to `True`.
1182
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
1183
+ output_type (`str`, *optional*, defaults to `"pil"`):
1184
+ The output format of the generate image. Choose between
1185
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1186
+ return_dict (`bool`, *optional*, defaults to `True`):
1187
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1188
+ plain tuple.
1189
+ cross_attention_kwargs (`dict`, *optional*):
1190
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1191
+ `self.processor` in
1192
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1193
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1194
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
1195
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
1196
+ corresponding scale as a list.
1197
+ guess_mode (`bool`, *optional*, defaults to `False`):
1198
+ In this mode, the ControlNet encoder will try best to recognize the content of the input image even if
1199
+ you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
1200
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1201
+ The percentage of total steps at which the controlnet starts applying.
1202
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1203
+ The percentage of total steps at which the controlnet stops applying.
1204
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1205
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1206
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1207
+ explained in section 2.2 of
1208
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1209
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1210
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1211
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1212
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1213
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1214
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1215
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1216
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1217
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1218
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1219
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1220
+ micro-conditioning as explained in section 2.2 of
1221
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1222
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1223
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1224
+ To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
1225
+ micro-conditioning as explained in section 2.2 of
1226
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1227
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1228
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1229
+ To negatively condition the generation process based on a target image resolution. It should be as same
1230
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1231
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1232
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1233
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1234
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1235
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1236
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1237
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1238
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1239
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1240
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1241
+ clip_skip (`int`, *optional*):
1242
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1243
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1244
+ callback_on_step_end (`Callable`, *optional*):
1245
+ A function that calls at the end of each denoising steps during the inference. The function is called
1246
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1247
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1248
+ `callback_on_step_end_tensor_inputs`.
1249
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1250
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1251
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1252
+ `._callback_tensor_inputs` attribute of your pipeline class.
1253
+
1254
+ Examples:
1255
+
1256
+ Returns:
1257
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1258
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
1259
+ containing the output images.
1260
+ """
1261
+
1262
+ callback = kwargs.pop("callback", None)
1263
+ callback_steps = kwargs.pop("callback_steps", None)
1264
+
1265
+ if callback is not None:
1266
+ deprecate(
1267
+ "callback",
1268
+ "1.0.0",
1269
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1270
+ )
1271
+ if callback_steps is not None:
1272
+ deprecate(
1273
+ "callback_steps",
1274
+ "1.0.0",
1275
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1276
+ )
1277
+
1278
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1279
+
1280
+ # align format for control guidance
1281
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1282
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1283
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1284
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1285
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1286
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1287
+ control_guidance_start, control_guidance_end = (
1288
+ mult * [control_guidance_start],
1289
+ mult * [control_guidance_end],
1290
+ )
1291
+
1292
+ # 1. Check inputs. Raise error if not correct
1293
+ self.check_inputs(
1294
+ prompt,
1295
+ prompt_2,
1296
+ control_image,
1297
+ strength,
1298
+ num_inference_steps,
1299
+ callback_steps,
1300
+ negative_prompt,
1301
+ negative_prompt_2,
1302
+ prompt_embeds,
1303
+ negative_prompt_embeds,
1304
+ pooled_prompt_embeds,
1305
+ negative_pooled_prompt_embeds,
1306
+ ip_adapter_image,
1307
+ ip_adapter_image_embeds,
1308
+ controlnet_conditioning_scale,
1309
+ control_guidance_start,
1310
+ control_guidance_end,
1311
+ callback_on_step_end_tensor_inputs,
1312
+ )
1313
+
1314
+ self._guidance_scale = guidance_scale
1315
+ self._clip_skip = clip_skip
1316
+ self._cross_attention_kwargs = cross_attention_kwargs
1317
+
1318
+ # 2. Define call parameters
1319
+ if prompt is not None and isinstance(prompt, str):
1320
+ batch_size = 1
1321
+ elif prompt is not None and isinstance(prompt, list):
1322
+ batch_size = len(prompt)
1323
+ else:
1324
+ batch_size = prompt_embeds.shape[0]
1325
+
1326
+ device = self._execution_device
1327
+
1328
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1329
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1330
+
1331
+ global_pool_conditions = (
1332
+ controlnet.config.global_pool_conditions
1333
+ if isinstance(controlnet, ControlNetModel)
1334
+ else controlnet.nets[0].config.global_pool_conditions
1335
+ )
1336
+ guess_mode = guess_mode or global_pool_conditions
1337
+
1338
+ # 3.1. Encode input prompt
1339
+ text_encoder_lora_scale = (
1340
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1341
+ )
1342
+ (
1343
+ prompt_embeds,
1344
+ negative_prompt_embeds,
1345
+ pooled_prompt_embeds,
1346
+ negative_pooled_prompt_embeds,
1347
+ ) = self.encode_prompt(
1348
+ prompt,
1349
+ prompt_2,
1350
+ device,
1351
+ num_images_per_prompt,
1352
+ self.do_classifier_free_guidance,
1353
+ negative_prompt,
1354
+ negative_prompt_2,
1355
+ prompt_embeds=prompt_embeds,
1356
+ negative_prompt_embeds=negative_prompt_embeds,
1357
+ pooled_prompt_embeds=pooled_prompt_embeds,
1358
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1359
+ lora_scale=text_encoder_lora_scale,
1360
+ clip_skip=self.clip_skip,
1361
+ )
1362
+
1363
+ # 3.2 Encode ip_adapter_image
1364
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1365
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1366
+ ip_adapter_image,
1367
+ ip_adapter_image_embeds,
1368
+ device,
1369
+ batch_size * num_images_per_prompt,
1370
+ self.do_classifier_free_guidance,
1371
+ )
1372
+
1373
+ # 4. Prepare image and controlnet_conditioning_image
1374
+ image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
1375
+
1376
+ if isinstance(controlnet, ControlNetModel):
1377
+ control_image = self.prepare_control_image(
1378
+ image=control_image,
1379
+ width=width,
1380
+ height=height,
1381
+ batch_size=batch_size * num_images_per_prompt,
1382
+ num_images_per_prompt=num_images_per_prompt,
1383
+ device=device,
1384
+ dtype=controlnet.dtype,
1385
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1386
+ guess_mode=guess_mode,
1387
+ )
1388
+ height, width = control_image.shape[-2:]
1389
+ elif isinstance(controlnet, MultiControlNetModel):
1390
+ control_images = []
1391
+
1392
+ for control_image_ in control_image:
1393
+ control_image_ = self.prepare_control_image(
1394
+ image=control_image_,
1395
+ width=width,
1396
+ height=height,
1397
+ batch_size=batch_size * num_images_per_prompt,
1398
+ num_images_per_prompt=num_images_per_prompt,
1399
+ device=device,
1400
+ dtype=controlnet.dtype,
1401
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1402
+ guess_mode=guess_mode,
1403
+ )
1404
+
1405
+ control_images.append(control_image_)
1406
+
1407
+ control_image = control_images
1408
+ height, width = control_image[0].shape[-2:]
1409
+ else:
1410
+ assert False
1411
+
1412
+ # 5. Prepare timesteps
1413
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1414
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
1415
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1416
+ self._num_timesteps = len(timesteps)
1417
+
1418
+ # 6. Prepare latent variables
1419
+ latents = self.prepare_latents(
1420
+ image,
1421
+ latent_timestep,
1422
+ batch_size,
1423
+ num_images_per_prompt,
1424
+ prompt_embeds.dtype,
1425
+ device,
1426
+ generator,
1427
+ True,
1428
+ )
1429
+
1430
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1431
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1432
+
1433
+ # 7.1 Create tensor stating which controlnets to keep
1434
+ controlnet_keep = []
1435
+ for i in range(len(timesteps)):
1436
+ keeps = [
1437
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1438
+ for s, e in zip(control_guidance_start, control_guidance_end)
1439
+ ]
1440
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1441
+
1442
+ # 7.2 Prepare added time ids & embeddings
1443
+ if isinstance(control_image, list):
1444
+ original_size = original_size or control_image[0].shape[-2:]
1445
+ else:
1446
+ original_size = original_size or control_image.shape[-2:]
1447
+ target_size = target_size or (height, width)
1448
+
1449
+ if negative_original_size is None:
1450
+ negative_original_size = original_size
1451
+ if negative_target_size is None:
1452
+ negative_target_size = target_size
1453
+ add_text_embeds = pooled_prompt_embeds
1454
+
1455
+ if self.text_encoder_2 is None:
1456
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1457
+ else:
1458
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1459
+
1460
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1461
+ original_size,
1462
+ crops_coords_top_left,
1463
+ target_size,
1464
+ aesthetic_score,
1465
+ negative_aesthetic_score,
1466
+ negative_original_size,
1467
+ negative_crops_coords_top_left,
1468
+ negative_target_size,
1469
+ dtype=prompt_embeds.dtype,
1470
+ text_encoder_projection_dim=text_encoder_projection_dim,
1471
+ )
1472
+ add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1473
+
1474
+ if self.do_classifier_free_guidance:
1475
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1476
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1477
+ add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1478
+ add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1479
+
1480
+ prompt_embeds = prompt_embeds.to(device)
1481
+ add_text_embeds = add_text_embeds.to(device)
1482
+ add_time_ids = add_time_ids.to(device)
1483
+
1484
+ # 8. Denoising loop
1485
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1486
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1487
+ for i, t in enumerate(timesteps):
1488
+ # expand the latents if we are doing classifier free guidance
1489
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1490
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1491
+
1492
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1493
+
1494
+ # controlnet(s) inference
1495
+ if guess_mode and self.do_classifier_free_guidance:
1496
+ # Infer ControlNet only for the conditional batch.
1497
+ control_model_input = latents
1498
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1499
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1500
+ controlnet_added_cond_kwargs = {
1501
+ "text_embeds": add_text_embeds.chunk(2)[1],
1502
+ "time_ids": add_time_ids.chunk(2)[1],
1503
+ }
1504
+ else:
1505
+ control_model_input = latent_model_input
1506
+ controlnet_prompt_embeds = prompt_embeds
1507
+ controlnet_added_cond_kwargs = added_cond_kwargs
1508
+
1509
+ if isinstance(controlnet_keep[i], list):
1510
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1511
+ else:
1512
+ controlnet_cond_scale = controlnet_conditioning_scale
1513
+ if isinstance(controlnet_cond_scale, list):
1514
+ controlnet_cond_scale = controlnet_cond_scale[0]
1515
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1516
+
1517
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1518
+ control_model_input,
1519
+ t,
1520
+ encoder_hidden_states=controlnet_prompt_embeds,
1521
+ controlnet_cond=control_image,
1522
+ conditioning_scale=cond_scale,
1523
+ guess_mode=guess_mode,
1524
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1525
+ return_dict=False,
1526
+ )
1527
+
1528
+ if guess_mode and self.do_classifier_free_guidance:
1529
+ # Infered ControlNet only for the conditional batch.
1530
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1531
+ # add 0 to the unconditional batch to keep it unchanged.
1532
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1533
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1534
+
1535
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1536
+ added_cond_kwargs["image_embeds"] = image_embeds
1537
+
1538
+ # predict the noise residual
1539
+ noise_pred = self.unet(
1540
+ latent_model_input,
1541
+ t,
1542
+ encoder_hidden_states=prompt_embeds,
1543
+ cross_attention_kwargs=self.cross_attention_kwargs,
1544
+ down_block_additional_residuals=down_block_res_samples,
1545
+ mid_block_additional_residual=mid_block_res_sample,
1546
+ added_cond_kwargs=added_cond_kwargs,
1547
+ return_dict=False,
1548
+ )[0]
1549
+
1550
+ # perform guidance
1551
+ if self.do_classifier_free_guidance:
1552
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1553
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1554
+
1555
+ # compute the previous noisy sample x_t -> x_t-1
1556
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1557
+
1558
+ if callback_on_step_end is not None:
1559
+ callback_kwargs = {}
1560
+ for k in callback_on_step_end_tensor_inputs:
1561
+ callback_kwargs[k] = locals()[k]
1562
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1563
+
1564
+ latents = callback_outputs.pop("latents", latents)
1565
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1566
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1567
+
1568
+ # call the callback, if provided
1569
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1570
+ progress_bar.update()
1571
+ if callback is not None and i % callback_steps == 0:
1572
+ step_idx = i // getattr(self.scheduler, "order", 1)
1573
+ callback(step_idx, t, latents)
1574
+
1575
+ # If we do sequential model offloading, let's offload unet and controlnet
1576
+ # manually for max memory savings
1577
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1578
+ self.unet.to("cpu")
1579
+ self.controlnet.to("cpu")
1580
+ torch.cuda.empty_cache()
1581
+
1582
+ if not output_type == "latent":
1583
+ # make sure the VAE is in float32 mode, as it overflows in float16
1584
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1585
+
1586
+ if needs_upcasting:
1587
+ self.upcast_vae()
1588
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1589
+
1590
+ # unscale/denormalize the latents
1591
+ # denormalize with the mean and std if available and not None
1592
+ has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
1593
+ has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
1594
+ if has_latents_mean and has_latents_std:
1595
+ latents_mean = (
1596
+ torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1597
+ )
1598
+ latents_std = (
1599
+ torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1600
+ )
1601
+ latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
1602
+ else:
1603
+ latents = latents / self.vae.config.scaling_factor
1604
+
1605
+ image = self.vae.decode(latents, return_dict=False)[0]
1606
+
1607
+ # cast back to fp16 if needed
1608
+ if needs_upcasting:
1609
+ self.vae.to(dtype=torch.float16)
1610
+ else:
1611
+ image = latents
1612
+ return StableDiffusionXLPipelineOutput(images=image)
1613
+
1614
+ # apply watermark if available
1615
+ if self.watermark is not None:
1616
+ image = self.watermark.apply_watermark(image)
1617
+
1618
+ image = self.image_processor.postprocess(image, output_type=output_type)
1619
+
1620
+ # Offload all models
1621
+ self.maybe_free_model_hooks()
1622
+
1623
+ if not return_dict:
1624
+ return (image,)
1625
+
1626
+ return StableDiffusionXLPipelineOutput(images=image)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py ADDED
@@ -0,0 +1,532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from functools import partial
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict
23
+ from flax.jax_utils import unreplicate
24
+ from flax.training.common_utils import shard
25
+ from PIL import Image
26
+ from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel
27
+
28
+ from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel
29
+ from ...schedulers import (
30
+ FlaxDDIMScheduler,
31
+ FlaxDPMSolverMultistepScheduler,
32
+ FlaxLMSDiscreteScheduler,
33
+ FlaxPNDMScheduler,
34
+ )
35
+ from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring
36
+ from ..pipeline_flax_utils import FlaxDiffusionPipeline
37
+ from ..stable_diffusion import FlaxStableDiffusionPipelineOutput
38
+ from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker
39
+
40
+
41
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Set to True to use python for loop instead of jax.fori_loop for easier debugging
DEBUG = False

# Usage example spliced into `__call__`'s docstring via `@replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import jax
        >>> import numpy as np
        >>> import jax.numpy as jnp
        >>> from flax.jax_utils import replicate
        >>> from flax.training.common_utils import shard
        >>> from diffusers.utils import load_image, make_image_grid
        >>> from PIL import Image
        >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel


        >>> def create_key(seed=0):
        ...     return jax.random.PRNGKey(seed)


        >>> rng = create_key(0)

        >>> # get canny image
        >>> canny_image = load_image(
        ...     "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg"
        ... )

        >>> prompts = "best quality, extremely detailed"
        >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality"

        >>> # load control net and stable diffusion v1-5
        >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
        ...     "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
        ... )
        >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
        ... )
        >>> params["controlnet"] = controlnet_params

        >>> num_samples = jax.device_count()
        >>> rng = jax.random.split(rng, jax.device_count())

        >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
        >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        >>> p_params = replicate(params)
        >>> prompt_ids = shard(prompt_ids)
        >>> negative_prompt_ids = shard(negative_prompt_ids)
        >>> processed_image = shard(processed_image)

        >>> output = pipe(
        ...     prompt_ids=prompt_ids,
        ...     image=processed_image,
        ...     params=p_params,
        ...     prng_seed=rng,
        ...     num_inference_steps=50,
        ...     neg_prompt_ids=negative_prompt_ids,
        ...     jit=True,
        ... ).images

        >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
        >>> output_images = make_image_grid(output_images, num_samples // 4, 4)
        >>> output_images.save("generated_image.png")
        ```
"""
109
+
110
+
111
+ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline):
112
+ r"""
113
+ Flax-based pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance.
114
+
115
+ This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
116
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
117
+
118
+ Args:
119
+ vae ([`FlaxAutoencoderKL`]):
120
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
121
+ text_encoder ([`~transformers.FlaxCLIPTextModel`]):
122
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
123
+ tokenizer ([`~transformers.CLIPTokenizer`]):
124
+ A `CLIPTokenizer` to tokenize text.
125
+ unet ([`FlaxUNet2DConditionModel`]):
126
+ A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
127
+ controlnet ([`FlaxControlNetModel`]:
128
+ Provides additional conditioning to the `unet` during the denoising process.
129
+ scheduler ([`SchedulerMixin`]):
130
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
131
+ [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
132
+ [`FlaxDPMSolverMultistepScheduler`].
133
+ safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
134
+ Classification module that estimates whether generated images could be considered offensive or harmful.
135
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
136
+ about a model's potential harms.
137
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
138
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
139
+ """
140
+
141
+ def __init__(
142
+ self,
143
+ vae: FlaxAutoencoderKL,
144
+ text_encoder: FlaxCLIPTextModel,
145
+ tokenizer: CLIPTokenizer,
146
+ unet: FlaxUNet2DConditionModel,
147
+ controlnet: FlaxControlNetModel,
148
+ scheduler: Union[
149
+ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
150
+ ],
151
+ safety_checker: FlaxStableDiffusionSafetyChecker,
152
+ feature_extractor: CLIPFeatureExtractor,
153
+ dtype: jnp.dtype = jnp.float32,
154
+ ):
155
+ super().__init__()
156
+ self.dtype = dtype
157
+
158
+ if safety_checker is None:
159
+ logger.warning(
160
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
161
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
162
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
163
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
164
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
165
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
166
+ )
167
+
168
+ self.register_modules(
169
+ vae=vae,
170
+ text_encoder=text_encoder,
171
+ tokenizer=tokenizer,
172
+ unet=unet,
173
+ controlnet=controlnet,
174
+ scheduler=scheduler,
175
+ safety_checker=safety_checker,
176
+ feature_extractor=feature_extractor,
177
+ )
178
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
179
+
180
+ def prepare_text_inputs(self, prompt: Union[str, List[str]]):
181
+ if not isinstance(prompt, (str, list)):
182
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
183
+
184
+ text_input = self.tokenizer(
185
+ prompt,
186
+ padding="max_length",
187
+ max_length=self.tokenizer.model_max_length,
188
+ truncation=True,
189
+ return_tensors="np",
190
+ )
191
+
192
+ return text_input.input_ids
193
+
194
+ def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]):
195
+ if not isinstance(image, (Image.Image, list)):
196
+ raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")
197
+
198
+ if isinstance(image, Image.Image):
199
+ image = [image]
200
+
201
+ processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image])
202
+
203
+ return processed_images
204
+
205
+ def _get_has_nsfw_concepts(self, features, params):
206
+ has_nsfw_concepts = self.safety_checker(features, params)
207
+ return has_nsfw_concepts
208
+
209
+ def _run_safety_checker(self, images, safety_model_params, jit=False):
210
+ # safety_model_params should already be replicated when jit is True
211
+ pil_images = [Image.fromarray(image) for image in images]
212
+ features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
213
+
214
+ if jit:
215
+ features = shard(features)
216
+ has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
217
+ has_nsfw_concepts = unshard(has_nsfw_concepts)
218
+ safety_model_params = unreplicate(safety_model_params)
219
+ else:
220
+ has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)
221
+
222
+ images_was_copied = False
223
+ for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
224
+ if has_nsfw_concept:
225
+ if not images_was_copied:
226
+ images_was_copied = True
227
+ images = images.copy()
228
+
229
+ images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image
230
+
231
+ if any(has_nsfw_concepts):
232
+ warnings.warn(
233
+ "Potential NSFW content was detected in one or more images. A black image will be returned"
234
+ " instead. Try again with a different prompt and/or seed."
235
+ )
236
+
237
+ return images, has_nsfw_concepts
238
+
239
    def _generate(
        self,
        prompt_ids: jnp.ndarray,
        image: jnp.ndarray,
        params: Union[Dict, FrozenDict],
        prng_seed: jax.Array,
        num_inference_steps: int,
        guidance_scale: float,
        latents: Optional[jnp.ndarray] = None,
        neg_prompt_ids: Optional[jnp.ndarray] = None,
        controlnet_conditioning_scale: float = 1.0,
    ):
        """Denoise latents under ControlNet guidance and decode them with the VAE.

        Returns images as a float array clipped to [0, 1] in NHWC layout.
        NOTE(review): presumably invoked per device under `pmap` on the
        `jit=True` path of `__call__` — confirm against `_p_generate`.
        """
        # The conditioning image fixes the output resolution.
        height, width = image.shape[-2:]
        if height % 64 != 0 or width % 64 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.")

        # get prompt text embeddings
        prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]

        # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
        # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
        batch_size = prompt_ids.shape[0]

        max_length = prompt_ids.shape[-1]

        # Unconditional embeddings for classifier-free guidance: tokenize empty
        # prompts unless caller-provided negative prompt ids are given.
        if neg_prompt_ids is None:
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
            ).input_ids
        else:
            uncond_input = neg_prompt_ids
        negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
        # Unconditional first, conditional second — `loop_body` splits in that order.
        context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])

        # Duplicate the conditioning image to match the doubled (uncond + cond) batch.
        image = jnp.concatenate([image] * 2)

        latents_shape = (
            batch_size,
            self.unet.config.in_channels,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if latents is None:
            latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")

        def loop_body(step, args):
            # One denoising step; signature matches `jax.lax.fori_loop`'s body function.
            latents, scheduler_state = args
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            latents_input = jnp.concatenate([latents] * 2)

            t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
            timestep = jnp.broadcast_to(t, latents_input.shape[0])

            latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)

            # ControlNet produces residuals that are injected into the UNet blocks below.
            down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
                {"params": params["controlnet"]},
                jnp.array(latents_input),
                jnp.array(timestep, dtype=jnp.int32),
                encoder_hidden_states=context,
                controlnet_cond=image,
                conditioning_scale=controlnet_conditioning_scale,
                return_dict=False,
            )

            # predict the noise residual
            noise_pred = self.unet.apply(
                {"params": params["unet"]},
                jnp.array(latents_input),
                jnp.array(timestep, dtype=jnp.int32),
                encoder_hidden_states=context,
                down_block_additional_residuals=down_block_res_samples,
                mid_block_additional_residual=mid_block_res_sample,
            ).sample

            # perform guidance
            noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
            return latents, scheduler_state

        scheduler_state = self.scheduler.set_timesteps(
            params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape
        )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * params["scheduler"].init_noise_sigma

        if DEBUG:
            # run with python for loop
            for i in range(num_inference_steps):
                latents, scheduler_state = loop_body(i, (latents, scheduler_state))
        else:
            latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state))

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample

        # Map from [-1, 1] to [0, 1] and switch NCHW -> NHWC for image output.
        image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
        return image
347
+
348
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
349
+ def __call__(
350
+ self,
351
+ prompt_ids: jnp.ndarray,
352
+ image: jnp.ndarray,
353
+ params: Union[Dict, FrozenDict],
354
+ prng_seed: jax.Array,
355
+ num_inference_steps: int = 50,
356
+ guidance_scale: Union[float, jnp.ndarray] = 7.5,
357
+ latents: jnp.ndarray = None,
358
+ neg_prompt_ids: jnp.ndarray = None,
359
+ controlnet_conditioning_scale: Union[float, jnp.ndarray] = 1.0,
360
+ return_dict: bool = True,
361
+ jit: bool = False,
362
+ ):
363
+ r"""
364
+ The call function to the pipeline for generation.
365
+
366
+ Args:
367
+ prompt_ids (`jnp.ndarray`):
368
+ The prompt or prompts to guide the image generation.
369
+ image (`jnp.ndarray`):
370
+ Array representing the ControlNet input condition to provide guidance to the `unet` for generation.
371
+ params (`Dict` or `FrozenDict`):
372
+ Dictionary containing the model parameters/weights.
373
+ prng_seed (`jax.Array`):
374
+ Array containing random number generator key.
375
+ num_inference_steps (`int`, *optional*, defaults to 50):
376
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
377
+ expense of slower inference.
378
+ guidance_scale (`float`, *optional*, defaults to 7.5):
379
+ A higher guidance scale value encourages the model to generate images closely linked to the text
380
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
381
+ latents (`jnp.ndarray`, *optional*):
382
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
383
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
384
+ array is generated by sampling using the supplied random `generator`.
385
+ controlnet_conditioning_scale (`float` or `jnp.ndarray`, *optional*, defaults to 1.0):
386
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
387
+ to the residual in the original `unet`.
388
+ return_dict (`bool`, *optional*, defaults to `True`):
389
+ Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
390
+ a plain tuple.
391
+ jit (`bool`, defaults to `False`):
392
+ Whether to run `pmap` versions of the generation and safety scoring functions.
393
+
394
+ <Tip warning={true}>
395
+
396
+ This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
397
+ future release.
398
+
399
+ </Tip>
400
+
401
+ Examples:
402
+
403
+ Returns:
404
+ [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
405
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
406
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated images
407
+ and the second element is a list of `bool`s indicating whether the corresponding generated image
408
+ contains "not-safe-for-work" (nsfw) content.
409
+ """
410
+
411
+ height, width = image.shape[-2:]
412
+
413
+ if isinstance(guidance_scale, float):
414
+ # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
415
+ # shape information, as they may be sharded (when `jit` is `True`), or not.
416
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
417
+ if len(prompt_ids.shape) > 2:
418
+ # Assume sharded
419
+ guidance_scale = guidance_scale[:, None]
420
+
421
+ if isinstance(controlnet_conditioning_scale, float):
422
+ # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
423
+ # shape information, as they may be sharded (when `jit` is `True`), or not.
424
+ controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0])
425
+ if len(prompt_ids.shape) > 2:
426
+ # Assume sharded
427
+ controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
428
+
429
+ if jit:
430
+ images = _p_generate(
431
+ self,
432
+ prompt_ids,
433
+ image,
434
+ params,
435
+ prng_seed,
436
+ num_inference_steps,
437
+ guidance_scale,
438
+ latents,
439
+ neg_prompt_ids,
440
+ controlnet_conditioning_scale,
441
+ )
442
+ else:
443
+ images = self._generate(
444
+ prompt_ids,
445
+ image,
446
+ params,
447
+ prng_seed,
448
+ num_inference_steps,
449
+ guidance_scale,
450
+ latents,
451
+ neg_prompt_ids,
452
+ controlnet_conditioning_scale,
453
+ )
454
+
455
+ if self.safety_checker is not None:
456
+ safety_params = params["safety_checker"]
457
+ images_uint8_casted = (images * 255).round().astype("uint8")
458
+ num_devices, batch_size = images.shape[:2]
459
+
460
+ images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
461
+ images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
462
+ images = np.array(images)
463
+
464
+ # block images
465
+ if any(has_nsfw_concept):
466
+ for i, is_nsfw in enumerate(has_nsfw_concept):
467
+ if is_nsfw:
468
+ images[i] = np.asarray(images_uint8_casted[i])
469
+
470
+ images = images.reshape(num_devices, batch_size, height, width, 3)
471
+ else:
472
+ images = np.asarray(images)
473
+ has_nsfw_concept = False
474
+
475
+ if not return_dict:
476
+ return (images, has_nsfw_concept)
477
+
478
+ return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
479
+
480
+
481
+ # Static argnums are pipe, num_inference_steps. A change would trigger recompilation.
482
+ # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
483
+ @partial(
484
+ jax.pmap,
485
+ in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0),
486
+ static_broadcasted_argnums=(0, 5),
487
+ )
488
+ def _p_generate(
489
+ pipe,
490
+ prompt_ids,
491
+ image,
492
+ params,
493
+ prng_seed,
494
+ num_inference_steps,
495
+ guidance_scale,
496
+ latents,
497
+ neg_prompt_ids,
498
+ controlnet_conditioning_scale,
499
+ ):
500
+ return pipe._generate(
501
+ prompt_ids,
502
+ image,
503
+ params,
504
+ prng_seed,
505
+ num_inference_steps,
506
+ guidance_scale,
507
+ latents,
508
+ neg_prompt_ids,
509
+ controlnet_conditioning_scale,
510
+ )
511
+
512
+
513
+ @partial(jax.pmap, static_broadcasted_argnums=(0,))
514
+ def _p_get_has_nsfw_concepts(pipe, features, params):
515
+ return pipe._get_has_nsfw_concepts(features, params)
516
+
517
+
518
+ def unshard(x: jnp.ndarray):
519
+ # einops.rearrange(x, 'd b ... -> (d b) ...')
520
+ num_devices, batch_size = x.shape[:2]
521
+ rest = x.shape[2:]
522
+ return x.reshape(num_devices * batch_size, *rest)
523
+
524
+
525
+ def preprocess(image, dtype):
526
+ image = image.convert("RGB")
527
+ w, h = image.size
528
+ w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64
529
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
530
+ image = jnp.array(image).astype(dtype) / 255.0
531
+ image = image[None].transpose(0, 3, 1, 2)
532
+ return image
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__init__.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {
15
+ "timesteps": [
16
+ "fast27_timesteps",
17
+ "smart100_timesteps",
18
+ "smart185_timesteps",
19
+ "smart27_timesteps",
20
+ "smart50_timesteps",
21
+ "super100_timesteps",
22
+ "super27_timesteps",
23
+ "super40_timesteps",
24
+ ]
25
+ }
26
+
27
+ try:
28
+ if not (is_transformers_available() and is_torch_available()):
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
32
+
33
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
34
+ else:
35
+ _import_structure["pipeline_if"] = ["IFPipeline"]
36
+ _import_structure["pipeline_if_img2img"] = ["IFImg2ImgPipeline"]
37
+ _import_structure["pipeline_if_img2img_superresolution"] = ["IFImg2ImgSuperResolutionPipeline"]
38
+ _import_structure["pipeline_if_inpainting"] = ["IFInpaintingPipeline"]
39
+ _import_structure["pipeline_if_inpainting_superresolution"] = ["IFInpaintingSuperResolutionPipeline"]
40
+ _import_structure["pipeline_if_superresolution"] = ["IFSuperResolutionPipeline"]
41
+ _import_structure["pipeline_output"] = ["IFPipelineOutput"]
42
+ _import_structure["safety_checker"] = ["IFSafetyChecker"]
43
+ _import_structure["watermark"] = ["IFWatermarker"]
44
+
45
+
46
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
47
+ try:
48
+ if not (is_transformers_available() and is_torch_available()):
49
+ raise OptionalDependencyNotAvailable()
50
+
51
+ except OptionalDependencyNotAvailable:
52
+ from ...utils.dummy_torch_and_transformers_objects import *
53
+ else:
54
+ from .pipeline_if import IFPipeline
55
+ from .pipeline_if_img2img import IFImg2ImgPipeline
56
+ from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
57
+ from .pipeline_if_inpainting import IFInpaintingPipeline
58
+ from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
59
+ from .pipeline_if_superresolution import IFSuperResolutionPipeline
60
+ from .pipeline_output import IFPipelineOutput
61
+ from .safety_checker import IFSafetyChecker
62
+ from .timesteps import (
63
+ fast27_timesteps,
64
+ smart27_timesteps,
65
+ smart50_timesteps,
66
+ smart100_timesteps,
67
+ smart185_timesteps,
68
+ super27_timesteps,
69
+ super40_timesteps,
70
+ super100_timesteps,
71
+ )
72
+ from .watermark import IFWatermarker
73
+
74
+ else:
75
+ import sys
76
+
77
+ sys.modules[__name__] = _LazyModule(
78
+ __name__,
79
+ globals()["__file__"],
80
+ _import_structure,
81
+ module_spec=__spec__,
82
+ )
83
+
84
+ for name, value in _dummy_objects.items():
85
+ setattr(sys.modules[__name__], name, value)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.06 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if.cpython-310.pyc ADDED
Binary file (23.4 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_img2img.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_img2img_superresolution.cpython-310.pyc ADDED
Binary file (29.2 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_inpainting.cpython-310.pyc ADDED
Binary file (28.8 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_inpainting_superresolution.cpython-310.pyc ADDED
Binary file (31.7 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_superresolution.cpython-310.pyc ADDED
Binary file (25.7 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_output.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/safety_checker.cpython-310.pyc ADDED
Binary file (1.97 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/timesteps.cpython-310.pyc ADDED
Binary file (3.22 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/watermark.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if.py ADDED
@@ -0,0 +1,788 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import torch
8
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
9
+
10
+ from ...loaders import LoraLoaderMixin
11
+ from ...models import UNet2DConditionModel
12
+ from ...schedulers import DDPMScheduler
13
+ from ...utils import (
14
+ BACKENDS_MAPPING,
15
+ is_accelerate_available,
16
+ is_bs4_available,
17
+ is_ftfy_available,
18
+ logging,
19
+ replace_example_docstring,
20
+ )
21
+ from ...utils.torch_utils import randn_tensor
22
+ from ..pipeline_utils import DiffusionPipeline
23
+ from .pipeline_output import IFPipelineOutput
24
+ from .safety_checker import IFSafetyChecker
25
+ from .watermark import IFWatermarker
26
+
27
+
28
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
29
+
30
+ if is_bs4_available():
31
+ from bs4 import BeautifulSoup
32
+
33
+ if is_ftfy_available():
34
+ import ftfy
35
+
36
+
37
+ EXAMPLE_DOC_STRING = """
38
+ Examples:
39
+ ```py
40
+ >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline
41
+ >>> from diffusers.utils import pt_to_pil
42
+ >>> import torch
43
+
44
+ >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
45
+ >>> pipe.enable_model_cpu_offload()
46
+
47
+ >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
48
+ >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
49
+
50
+ >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images
51
+
52
+ >>> # save intermediate image
53
+ >>> pil_image = pt_to_pil(image)
54
+ >>> pil_image[0].save("./if_stage_I.png")
55
+
56
+ >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(
57
+ ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
58
+ ... )
59
+ >>> super_res_1_pipe.enable_model_cpu_offload()
60
+
61
+ >>> image = super_res_1_pipe(
62
+ ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt"
63
+ ... ).images
64
+
65
+ >>> # save intermediate image
66
+ >>> pil_image = pt_to_pil(image)
67
+ >>> pil_image[0].save("./if_stage_I.png")
68
+
69
+ >>> safety_modules = {
70
+ ... "feature_extractor": pipe.feature_extractor,
71
+ ... "safety_checker": pipe.safety_checker,
72
+ ... "watermarker": pipe.watermarker,
73
+ ... }
74
+ >>> super_res_2_pipe = DiffusionPipeline.from_pretrained(
75
+ ... "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
76
+ ... )
77
+ >>> super_res_2_pipe.enable_model_cpu_offload()
78
+
79
+ >>> image = super_res_2_pipe(
80
+ ... prompt=prompt,
81
+ ... image=image,
82
+ ... ).images
83
+ >>> image[0].save("./if_stage_II.png")
84
+ ```
85
+ """
86
+
87
+
88
+ class IFPipeline(DiffusionPipeline, LoraLoaderMixin):
89
+ tokenizer: T5Tokenizer
90
+ text_encoder: T5EncoderModel
91
+
92
+ unet: UNet2DConditionModel
93
+ scheduler: DDPMScheduler
94
+
95
+ feature_extractor: Optional[CLIPImageProcessor]
96
+ safety_checker: Optional[IFSafetyChecker]
97
+
98
+ watermarker: Optional[IFWatermarker]
99
+
100
+ bad_punct_regex = re.compile(
101
+ r"["
102
+ + "#®•©™&@·º½¾¿¡§~"
103
+ + r"\)"
104
+ + r"\("
105
+ + r"\]"
106
+ + r"\["
107
+ + r"\}"
108
+ + r"\{"
109
+ + r"\|"
110
+ + "\\"
111
+ + r"\/"
112
+ + r"\*"
113
+ + r"]{1,}"
114
+ ) # noqa
115
+
116
+ _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
117
+ model_cpu_offload_seq = "text_encoder->unet"
118
+
119
+ def __init__(
120
+ self,
121
+ tokenizer: T5Tokenizer,
122
+ text_encoder: T5EncoderModel,
123
+ unet: UNet2DConditionModel,
124
+ scheduler: DDPMScheduler,
125
+ safety_checker: Optional[IFSafetyChecker],
126
+ feature_extractor: Optional[CLIPImageProcessor],
127
+ watermarker: Optional[IFWatermarker],
128
+ requires_safety_checker: bool = True,
129
+ ):
130
+ super().__init__()
131
+
132
+ if safety_checker is None and requires_safety_checker:
133
+ logger.warning(
134
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
135
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
136
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
137
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
138
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
139
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
140
+ )
141
+
142
+ if safety_checker is not None and feature_extractor is None:
143
+ raise ValueError(
144
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
145
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
146
+ )
147
+
148
+ self.register_modules(
149
+ tokenizer=tokenizer,
150
+ text_encoder=text_encoder,
151
+ unet=unet,
152
+ scheduler=scheduler,
153
+ safety_checker=safety_checker,
154
+ feature_extractor=feature_extractor,
155
+ watermarker=watermarker,
156
+ )
157
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
158
+
159
+ def remove_all_hooks(self):
160
+ if is_accelerate_available():
161
+ from accelerate.hooks import remove_hook_from_module
162
+ else:
163
+ raise ImportError("Please install accelerate via `pip install accelerate`")
164
+
165
+ for model in [self.text_encoder, self.unet, self.safety_checker]:
166
+ if model is not None:
167
+ remove_hook_from_module(model, recurse=True)
168
+
169
+ self.unet_offload_hook = None
170
+ self.text_encoder_offload_hook = None
171
+ self.final_offload_hook = None
172
+
173
+ @torch.no_grad()
174
+ def encode_prompt(
175
+ self,
176
+ prompt: Union[str, List[str]],
177
+ do_classifier_free_guidance: bool = True,
178
+ num_images_per_prompt: int = 1,
179
+ device: Optional[torch.device] = None,
180
+ negative_prompt: Optional[Union[str, List[str]]] = None,
181
+ prompt_embeds: Optional[torch.FloatTensor] = None,
182
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
183
+ clean_caption: bool = False,
184
+ ):
185
+ r"""
186
+ Encodes the prompt into text encoder hidden states.
187
+
188
+ Args:
189
+ prompt (`str` or `List[str]`, *optional*):
190
+ prompt to be encoded
191
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
192
+ whether to use classifier free guidance or not
193
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
194
+ number of images that should be generated per prompt
195
+ device: (`torch.device`, *optional*):
196
+ torch device to place the resulting embeddings on
197
+ negative_prompt (`str` or `List[str]`, *optional*):
198
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
199
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
200
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
201
+ prompt_embeds (`torch.FloatTensor`, *optional*):
202
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
203
+ provided, text embeddings will be generated from `prompt` input argument.
204
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
205
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
206
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
207
+ argument.
208
+ clean_caption (bool, defaults to `False`):
209
+ If `True`, the function will preprocess and clean the provided caption before encoding.
210
+ """
211
+ if prompt is not None and negative_prompt is not None:
212
+ if type(prompt) is not type(negative_prompt):
213
+ raise TypeError(
214
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
215
+ f" {type(prompt)}."
216
+ )
217
+
218
+ if device is None:
219
+ device = self._execution_device
220
+
221
+ if prompt is not None and isinstance(prompt, str):
222
+ batch_size = 1
223
+ elif prompt is not None and isinstance(prompt, list):
224
+ batch_size = len(prompt)
225
+ else:
226
+ batch_size = prompt_embeds.shape[0]
227
+
228
+ # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
229
+ max_length = 77
230
+
231
+ if prompt_embeds is None:
232
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
233
+ text_inputs = self.tokenizer(
234
+ prompt,
235
+ padding="max_length",
236
+ max_length=max_length,
237
+ truncation=True,
238
+ add_special_tokens=True,
239
+ return_tensors="pt",
240
+ )
241
+ text_input_ids = text_inputs.input_ids
242
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
243
+
244
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
245
+ text_input_ids, untruncated_ids
246
+ ):
247
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
248
+ logger.warning(
249
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
250
+ f" {max_length} tokens: {removed_text}"
251
+ )
252
+
253
+ attention_mask = text_inputs.attention_mask.to(device)
254
+
255
+ prompt_embeds = self.text_encoder(
256
+ text_input_ids.to(device),
257
+ attention_mask=attention_mask,
258
+ )
259
+ prompt_embeds = prompt_embeds[0]
260
+
261
+ if self.text_encoder is not None:
262
+ dtype = self.text_encoder.dtype
263
+ elif self.unet is not None:
264
+ dtype = self.unet.dtype
265
+ else:
266
+ dtype = None
267
+
268
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
269
+
270
+ bs_embed, seq_len, _ = prompt_embeds.shape
271
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
272
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
273
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
274
+
275
+ # get unconditional embeddings for classifier free guidance
276
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
277
+ uncond_tokens: List[str]
278
+ if negative_prompt is None:
279
+ uncond_tokens = [""] * batch_size
280
+ elif isinstance(negative_prompt, str):
281
+ uncond_tokens = [negative_prompt]
282
+ elif batch_size != len(negative_prompt):
283
+ raise ValueError(
284
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
285
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
286
+ " the batch size of `prompt`."
287
+ )
288
+ else:
289
+ uncond_tokens = negative_prompt
290
+
291
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
292
+ max_length = prompt_embeds.shape[1]
293
+ uncond_input = self.tokenizer(
294
+ uncond_tokens,
295
+ padding="max_length",
296
+ max_length=max_length,
297
+ truncation=True,
298
+ return_attention_mask=True,
299
+ add_special_tokens=True,
300
+ return_tensors="pt",
301
+ )
302
+ attention_mask = uncond_input.attention_mask.to(device)
303
+
304
+ negative_prompt_embeds = self.text_encoder(
305
+ uncond_input.input_ids.to(device),
306
+ attention_mask=attention_mask,
307
+ )
308
+ negative_prompt_embeds = negative_prompt_embeds[0]
309
+
310
+ if do_classifier_free_guidance:
311
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
312
+ seq_len = negative_prompt_embeds.shape[1]
313
+
314
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
315
+
316
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
317
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
318
+
319
+ # For classifier free guidance, we need to do two forward passes.
320
+ # Here we concatenate the unconditional and text embeddings into a single batch
321
+ # to avoid doing two forward passes
322
+ else:
323
+ negative_prompt_embeds = None
324
+
325
+ return prompt_embeds, negative_prompt_embeds
326
+
327
+ def run_safety_checker(self, image, device, dtype):
328
+ if self.safety_checker is not None:
329
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
330
+ image, nsfw_detected, watermark_detected = self.safety_checker(
331
+ images=image,
332
+ clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
333
+ )
334
+ else:
335
+ nsfw_detected = None
336
+ watermark_detected = None
337
+
338
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
339
+ self.unet_offload_hook.offload()
340
+
341
+ return image, nsfw_detected, watermark_detected
342
+
343
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
344
+ def prepare_extra_step_kwargs(self, generator, eta):
345
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
346
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
347
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
348
+ # and should be between [0, 1]
349
+
350
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
351
+ extra_step_kwargs = {}
352
+ if accepts_eta:
353
+ extra_step_kwargs["eta"] = eta
354
+
355
+ # check if the scheduler accepts generator
356
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
357
+ if accepts_generator:
358
+ extra_step_kwargs["generator"] = generator
359
+ return extra_step_kwargs
360
+
361
+ def check_inputs(
362
+ self,
363
+ prompt,
364
+ callback_steps,
365
+ negative_prompt=None,
366
+ prompt_embeds=None,
367
+ negative_prompt_embeds=None,
368
+ ):
369
+ if (callback_steps is None) or (
370
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
371
+ ):
372
+ raise ValueError(
373
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
374
+ f" {type(callback_steps)}."
375
+ )
376
+
377
+ if prompt is not None and prompt_embeds is not None:
378
+ raise ValueError(
379
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
380
+ " only forward one of the two."
381
+ )
382
+ elif prompt is None and prompt_embeds is None:
383
+ raise ValueError(
384
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
385
+ )
386
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
387
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
388
+
389
+ if negative_prompt is not None and negative_prompt_embeds is not None:
390
+ raise ValueError(
391
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
392
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
393
+ )
394
+
395
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
396
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
397
+ raise ValueError(
398
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
399
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
400
+ f" {negative_prompt_embeds.shape}."
401
+ )
402
+
403
+ def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator):
404
+ shape = (batch_size, num_channels, height, width)
405
+ if isinstance(generator, list) and len(generator) != batch_size:
406
+ raise ValueError(
407
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
408
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
409
+ )
410
+
411
+ intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
412
+
413
+ # scale the initial noise by the standard deviation required by the scheduler
414
+ intermediate_images = intermediate_images * self.scheduler.init_noise_sigma
415
+ return intermediate_images
416
+
417
+ def _text_preprocessing(self, text, clean_caption=False):
418
+ if clean_caption and not is_bs4_available():
419
+ logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
420
+ logger.warning("Setting `clean_caption` to False...")
421
+ clean_caption = False
422
+
423
+ if clean_caption and not is_ftfy_available():
424
+ logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
425
+ logger.warning("Setting `clean_caption` to False...")
426
+ clean_caption = False
427
+
428
+ if not isinstance(text, (tuple, list)):
429
+ text = [text]
430
+
431
+ def process(text: str):
432
+ if clean_caption:
433
+ text = self._clean_caption(text)
434
+ text = self._clean_caption(text)
435
+ else:
436
+ text = text.lower().strip()
437
+ return text
438
+
439
+ return [process(t) for t in text]
440
+
441
+ def _clean_caption(self, caption):
442
+ caption = str(caption)
443
+ caption = ul.unquote_plus(caption)
444
+ caption = caption.strip().lower()
445
+ caption = re.sub("<person>", "person", caption)
446
+ # urls:
447
+ caption = re.sub(
448
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
449
+ "",
450
+ caption,
451
+ ) # regex for urls
452
+ caption = re.sub(
453
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
454
+ "",
455
+ caption,
456
+ ) # regex for urls
457
+ # html:
458
+ caption = BeautifulSoup(caption, features="html.parser").text
459
+
460
+ # @<nickname>
461
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
462
+
463
+ # 31C0—31EF CJK Strokes
464
+ # 31F0—31FF Katakana Phonetic Extensions
465
+ # 3200—32FF Enclosed CJK Letters and Months
466
+ # 3300—33FF CJK Compatibility
467
+ # 3400—4DBF CJK Unified Ideographs Extension A
468
+ # 4DC0—4DFF Yijing Hexagram Symbols
469
+ # 4E00—9FFF CJK Unified Ideographs
470
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
471
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
472
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
473
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
474
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
475
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
476
+ caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
477
+ #######################################################
478
+
479
+ # все виды тире / all types of dash --> "-"
480
+ caption = re.sub(
481
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
482
+ "-",
483
+ caption,
484
+ )
485
+
486
+ # кавычки к одному стандарту
487
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
488
+ caption = re.sub(r"[‘’]", "'", caption)
489
+
490
+ # &quot;
491
+ caption = re.sub(r"&quot;?", "", caption)
492
+ # &amp
493
+ caption = re.sub(r"&amp", "", caption)
494
+
495
+ # ip adresses:
496
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
497
+
498
+ # article ids:
499
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
500
+
501
+ # \n
502
+ caption = re.sub(r"\\n", " ", caption)
503
+
504
+ # "#123"
505
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
506
+ # "#12345.."
507
+ caption = re.sub(r"#\d{5,}\b", "", caption)
508
+ # "123456.."
509
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
510
+ # filenames:
511
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
512
+
513
+ #
514
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
515
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
516
+
517
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
518
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
519
+
520
+ # this-is-my-cute-cat / this_is_my_cute_cat
521
+ regex2 = re.compile(r"(?:\-|\_)")
522
+ if len(re.findall(regex2, caption)) > 3:
523
+ caption = re.sub(regex2, " ", caption)
524
+
525
+ caption = ftfy.fix_text(caption)
526
+ caption = html.unescape(html.unescape(caption))
527
+
528
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
529
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
530
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
531
+
532
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
533
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
534
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
535
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
536
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
537
+
538
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
539
+
540
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
541
+
542
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
543
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
544
+ caption = re.sub(r"\s+", " ", caption)
545
+
546
+ caption.strip()
547
+
548
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
549
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
550
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
551
+ caption = re.sub(r"^\.\S+$", "", caption)
552
+
553
+ return caption.strip()
554
+
555
+ @torch.no_grad()
556
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
557
+ def __call__(
558
+ self,
559
+ prompt: Union[str, List[str]] = None,
560
+ num_inference_steps: int = 100,
561
+ timesteps: List[int] = None,
562
+ guidance_scale: float = 7.0,
563
+ negative_prompt: Optional[Union[str, List[str]]] = None,
564
+ num_images_per_prompt: Optional[int] = 1,
565
+ height: Optional[int] = None,
566
+ width: Optional[int] = None,
567
+ eta: float = 0.0,
568
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
569
+ prompt_embeds: Optional[torch.FloatTensor] = None,
570
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
571
+ output_type: Optional[str] = "pil",
572
+ return_dict: bool = True,
573
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
574
+ callback_steps: int = 1,
575
+ clean_caption: bool = True,
576
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
577
+ ):
578
+ """
579
+ Function invoked when calling the pipeline for generation.
580
+
581
+ Args:
582
+ prompt (`str` or `List[str]`, *optional*):
583
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
584
+ instead.
585
+ num_inference_steps (`int`, *optional*, defaults to 100):
586
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
587
+ expense of slower inference.
588
+ timesteps (`List[int]`, *optional*):
589
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
590
+ timesteps are used. Must be in descending order.
591
+ guidance_scale (`float`, *optional*, defaults to 7.0):
592
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
593
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
594
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
595
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
596
+ usually at the expense of lower image quality.
597
+ negative_prompt (`str` or `List[str]`, *optional*):
598
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
599
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
600
+ less than `1`).
601
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
602
+ The number of images to generate per prompt.
603
+ height (`int`, *optional*, defaults to self.unet.config.sample_size):
604
+ The height in pixels of the generated image.
605
+ width (`int`, *optional*, defaults to self.unet.config.sample_size):
606
+ The width in pixels of the generated image.
607
+ eta (`float`, *optional*, defaults to 0.0):
608
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
609
+ [`schedulers.DDIMScheduler`], will be ignored for others.
610
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
611
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
612
+ to make generation deterministic.
613
+ prompt_embeds (`torch.FloatTensor`, *optional*):
614
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
615
+ provided, text embeddings will be generated from `prompt` input argument.
616
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
617
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
618
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
619
+ argument.
620
+ output_type (`str`, *optional*, defaults to `"pil"`):
621
+ The output format of the generate image. Choose between
622
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
623
+ return_dict (`bool`, *optional*, defaults to `True`):
624
+ Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
625
+ callback (`Callable`, *optional*):
626
+ A function that will be called every `callback_steps` steps during inference. The function will be
627
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
628
+ callback_steps (`int`, *optional*, defaults to 1):
629
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
630
+ called at every step.
631
+ clean_caption (`bool`, *optional*, defaults to `True`):
632
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
633
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
634
+ prompt.
635
+ cross_attention_kwargs (`dict`, *optional*):
636
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
637
+ `self.processor` in
638
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
639
+
640
+ Examples:
641
+
642
+ Returns:
643
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
644
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
645
+ returning a tuple, the first element is a list with the generated images, and the second element is a list
646
+ of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
647
+ or watermarked content, according to the `safety_checker`.
648
+ """
649
+ # 1. Check inputs. Raise error if not correct
650
+ self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
651
+
652
+ # 2. Define call parameters
653
+ height = height or self.unet.config.sample_size
654
+ width = width or self.unet.config.sample_size
655
+
656
+ if prompt is not None and isinstance(prompt, str):
657
+ batch_size = 1
658
+ elif prompt is not None and isinstance(prompt, list):
659
+ batch_size = len(prompt)
660
+ else:
661
+ batch_size = prompt_embeds.shape[0]
662
+
663
+ device = self._execution_device
664
+
665
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
666
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
667
+ # corresponds to doing no classifier free guidance.
668
+ do_classifier_free_guidance = guidance_scale > 1.0
669
+
670
+ # 3. Encode input prompt
671
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
672
+ prompt,
673
+ do_classifier_free_guidance,
674
+ num_images_per_prompt=num_images_per_prompt,
675
+ device=device,
676
+ negative_prompt=negative_prompt,
677
+ prompt_embeds=prompt_embeds,
678
+ negative_prompt_embeds=negative_prompt_embeds,
679
+ clean_caption=clean_caption,
680
+ )
681
+
682
+ if do_classifier_free_guidance:
683
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
684
+
685
+ # 4. Prepare timesteps
686
+ if timesteps is not None:
687
+ self.scheduler.set_timesteps(timesteps=timesteps, device=device)
688
+ timesteps = self.scheduler.timesteps
689
+ num_inference_steps = len(timesteps)
690
+ else:
691
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
692
+ timesteps = self.scheduler.timesteps
693
+
694
+ # 5. Prepare intermediate images
695
+ intermediate_images = self.prepare_intermediate_images(
696
+ batch_size * num_images_per_prompt,
697
+ self.unet.config.in_channels,
698
+ height,
699
+ width,
700
+ prompt_embeds.dtype,
701
+ device,
702
+ generator,
703
+ )
704
+
705
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
706
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
707
+
708
+ # HACK: see comment in `enable_model_cpu_offload`
709
+ if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
710
+ self.text_encoder_offload_hook.offload()
711
+
712
+ # 7. Denoising loop
713
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
714
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
715
+ for i, t in enumerate(timesteps):
716
+ model_input = (
717
+ torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images
718
+ )
719
+ model_input = self.scheduler.scale_model_input(model_input, t)
720
+
721
+ # predict the noise residual
722
+ noise_pred = self.unet(
723
+ model_input,
724
+ t,
725
+ encoder_hidden_states=prompt_embeds,
726
+ cross_attention_kwargs=cross_attention_kwargs,
727
+ return_dict=False,
728
+ )[0]
729
+
730
+ # perform guidance
731
+ if do_classifier_free_guidance:
732
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
733
+ noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1)
734
+ noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1)
735
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
736
+ noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
737
+
738
+ if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
739
+ noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1)
740
+
741
+ # compute the previous noisy sample x_t -> x_t-1
742
+ intermediate_images = self.scheduler.step(
743
+ noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
744
+ )[0]
745
+
746
+ # call the callback, if provided
747
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
748
+ progress_bar.update()
749
+ if callback is not None and i % callback_steps == 0:
750
+ callback(i, t, intermediate_images)
751
+
752
+ image = intermediate_images
753
+
754
+ if output_type == "pil":
755
+ # 8. Post-processing
756
+ image = (image / 2 + 0.5).clamp(0, 1)
757
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
758
+
759
+ # 9. Run safety checker
760
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
761
+
762
+ # 10. Convert to PIL
763
+ image = self.numpy_to_pil(image)
764
+
765
+ # 11. Apply watermark
766
+ if self.watermarker is not None:
767
+ image = self.watermarker.apply_watermark(image, self.unet.config.sample_size)
768
+ elif output_type == "pt":
769
+ nsfw_detected = None
770
+ watermark_detected = None
771
+
772
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
773
+ self.unet_offload_hook.offload()
774
+ else:
775
+ # 8. Post-processing
776
+ image = (image / 2 + 0.5).clamp(0, 1)
777
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
778
+
779
+ # 9. Run safety checker
780
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
781
+
782
+ # Offload all models
783
+ self.maybe_free_model_hooks()
784
+
785
+ if not return_dict:
786
+ return (image, nsfw_detected, watermark_detected)
787
+
788
+ return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py ADDED
@@ -0,0 +1,910 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
11
+
12
+ from ...loaders import LoraLoaderMixin
13
+ from ...models import UNet2DConditionModel
14
+ from ...schedulers import DDPMScheduler
15
+ from ...utils import (
16
+ BACKENDS_MAPPING,
17
+ PIL_INTERPOLATION,
18
+ is_accelerate_available,
19
+ is_bs4_available,
20
+ is_ftfy_available,
21
+ logging,
22
+ replace_example_docstring,
23
+ )
24
+ from ...utils.torch_utils import randn_tensor
25
+ from ..pipeline_utils import DiffusionPipeline
26
+ from .pipeline_output import IFPipelineOutput
27
+ from .safety_checker import IFSafetyChecker
28
+ from .watermark import IFWatermarker
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
+ if is_bs4_available():
34
+ from bs4 import BeautifulSoup
35
+
36
+ if is_ftfy_available():
37
+ import ftfy
38
+
39
+
40
+ def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image:
41
+ w, h = images.size
42
+
43
+ coef = w / h
44
+
45
+ w, h = img_size, img_size
46
+
47
+ if coef >= 1:
48
+ w = int(round(img_size / 8 * coef) * 8)
49
+ else:
50
+ h = int(round(img_size / 8 / coef) * 8)
51
+
52
+ images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None)
53
+
54
+ return images
55
+
56
+
57
+ EXAMPLE_DOC_STRING = """
58
+ Examples:
59
+ ```py
60
+ >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
61
+ >>> from diffusers.utils import pt_to_pil
62
+ >>> import torch
63
+ >>> from PIL import Image
64
+ >>> import requests
65
+ >>> from io import BytesIO
66
+
67
+ >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
68
+ >>> response = requests.get(url)
69
+ >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")
70
+ >>> original_image = original_image.resize((768, 512))
71
+
72
+ >>> pipe = IFImg2ImgPipeline.from_pretrained(
73
+ ... "DeepFloyd/IF-I-XL-v1.0",
74
+ ... variant="fp16",
75
+ ... torch_dtype=torch.float16,
76
+ ... )
77
+ >>> pipe.enable_model_cpu_offload()
78
+
79
+ >>> prompt = "A fantasy landscape in style minecraft"
80
+ >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
81
+
82
+ >>> image = pipe(
83
+ ... image=original_image,
84
+ ... prompt_embeds=prompt_embeds,
85
+ ... negative_prompt_embeds=negative_embeds,
86
+ ... output_type="pt",
87
+ ... ).images
88
+
89
+ >>> # save intermediate image
90
+ >>> pil_image = pt_to_pil(image)
91
+ >>> pil_image[0].save("./if_stage_I.png")
92
+
93
+ >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
94
+ ... "DeepFloyd/IF-II-L-v1.0",
95
+ ... text_encoder=None,
96
+ ... variant="fp16",
97
+ ... torch_dtype=torch.float16,
98
+ ... )
99
+ >>> super_res_1_pipe.enable_model_cpu_offload()
100
+
101
+ >>> image = super_res_1_pipe(
102
+ ... image=image,
103
+ ... original_image=original_image,
104
+ ... prompt_embeds=prompt_embeds,
105
+ ... negative_prompt_embeds=negative_embeds,
106
+ ... ).images
107
+ >>> image[0].save("./if_stage_II.png")
108
+ ```
109
+ """
110
+
111
+
112
+ class IFImg2ImgPipeline(DiffusionPipeline, LoraLoaderMixin):
113
+ tokenizer: T5Tokenizer
114
+ text_encoder: T5EncoderModel
115
+
116
+ unet: UNet2DConditionModel
117
+ scheduler: DDPMScheduler
118
+
119
+ feature_extractor: Optional[CLIPImageProcessor]
120
+ safety_checker: Optional[IFSafetyChecker]
121
+
122
+ watermarker: Optional[IFWatermarker]
123
+
124
+ bad_punct_regex = re.compile(
125
+ r"["
126
+ + "#®•©™&@·º½¾¿¡§~"
127
+ + r"\)"
128
+ + r"\("
129
+ + r"\]"
130
+ + r"\["
131
+ + r"\}"
132
+ + r"\{"
133
+ + r"\|"
134
+ + "\\"
135
+ + r"\/"
136
+ + r"\*"
137
+ + r"]{1,}"
138
+ ) # noqa
139
+
140
+ _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
141
+ model_cpu_offload_seq = "text_encoder->unet"
142
+
143
+ def __init__(
144
+ self,
145
+ tokenizer: T5Tokenizer,
146
+ text_encoder: T5EncoderModel,
147
+ unet: UNet2DConditionModel,
148
+ scheduler: DDPMScheduler,
149
+ safety_checker: Optional[IFSafetyChecker],
150
+ feature_extractor: Optional[CLIPImageProcessor],
151
+ watermarker: Optional[IFWatermarker],
152
+ requires_safety_checker: bool = True,
153
+ ):
154
+ super().__init__()
155
+
156
+ if safety_checker is None and requires_safety_checker:
157
+ logger.warning(
158
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
159
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
160
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
161
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
162
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
163
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
164
+ )
165
+
166
+ if safety_checker is not None and feature_extractor is None:
167
+ raise ValueError(
168
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
169
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
170
+ )
171
+
172
+ self.register_modules(
173
+ tokenizer=tokenizer,
174
+ text_encoder=text_encoder,
175
+ unet=unet,
176
+ scheduler=scheduler,
177
+ safety_checker=safety_checker,
178
+ feature_extractor=feature_extractor,
179
+ watermarker=watermarker,
180
+ )
181
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
182
+
183
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
184
+ def remove_all_hooks(self):
185
+ if is_accelerate_available():
186
+ from accelerate.hooks import remove_hook_from_module
187
+ else:
188
+ raise ImportError("Please install accelerate via `pip install accelerate`")
189
+
190
+ for model in [self.text_encoder, self.unet, self.safety_checker]:
191
+ if model is not None:
192
+ remove_hook_from_module(model, recurse=True)
193
+
194
+ self.unet_offload_hook = None
195
+ self.text_encoder_offload_hook = None
196
+ self.final_offload_hook = None
197
+
198
+ @torch.no_grad()
199
+ def encode_prompt(
200
+ self,
201
+ prompt: Union[str, List[str]],
202
+ do_classifier_free_guidance: bool = True,
203
+ num_images_per_prompt: int = 1,
204
+ device: Optional[torch.device] = None,
205
+ negative_prompt: Optional[Union[str, List[str]]] = None,
206
+ prompt_embeds: Optional[torch.FloatTensor] = None,
207
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
208
+ clean_caption: bool = False,
209
+ ):
210
+ r"""
211
+ Encodes the prompt into text encoder hidden states.
212
+
213
+ Args:
214
+ prompt (`str` or `List[str]`, *optional*):
215
+ prompt to be encoded
216
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
217
+ whether to use classifier free guidance or not
218
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
219
+ number of images that should be generated per prompt
220
+ device: (`torch.device`, *optional*):
221
+ torch device to place the resulting embeddings on
222
+ negative_prompt (`str` or `List[str]`, *optional*):
223
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
224
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
225
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
226
+ prompt_embeds (`torch.FloatTensor`, *optional*):
227
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
228
+ provided, text embeddings will be generated from `prompt` input argument.
229
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
230
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
231
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
232
+ argument.
233
+ clean_caption (bool, defaults to `False`):
234
+ If `True`, the function will preprocess and clean the provided caption before encoding.
235
+ """
236
+ if prompt is not None and negative_prompt is not None:
237
+ if type(prompt) is not type(negative_prompt):
238
+ raise TypeError(
239
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
240
+ f" {type(prompt)}."
241
+ )
242
+
243
+ if device is None:
244
+ device = self._execution_device
245
+
246
+ if prompt is not None and isinstance(prompt, str):
247
+ batch_size = 1
248
+ elif prompt is not None and isinstance(prompt, list):
249
+ batch_size = len(prompt)
250
+ else:
251
+ batch_size = prompt_embeds.shape[0]
252
+
253
+ # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
254
+ max_length = 77
255
+
256
+ if prompt_embeds is None:
257
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
258
+ text_inputs = self.tokenizer(
259
+ prompt,
260
+ padding="max_length",
261
+ max_length=max_length,
262
+ truncation=True,
263
+ add_special_tokens=True,
264
+ return_tensors="pt",
265
+ )
266
+ text_input_ids = text_inputs.input_ids
267
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
268
+
269
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
270
+ text_input_ids, untruncated_ids
271
+ ):
272
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
273
+ logger.warning(
274
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
275
+ f" {max_length} tokens: {removed_text}"
276
+ )
277
+
278
+ attention_mask = text_inputs.attention_mask.to(device)
279
+
280
+ prompt_embeds = self.text_encoder(
281
+ text_input_ids.to(device),
282
+ attention_mask=attention_mask,
283
+ )
284
+ prompt_embeds = prompt_embeds[0]
285
+
286
+ if self.text_encoder is not None:
287
+ dtype = self.text_encoder.dtype
288
+ elif self.unet is not None:
289
+ dtype = self.unet.dtype
290
+ else:
291
+ dtype = None
292
+
293
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
294
+
295
+ bs_embed, seq_len, _ = prompt_embeds.shape
296
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
297
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
298
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
299
+
300
+ # get unconditional embeddings for classifier free guidance
301
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
302
+ uncond_tokens: List[str]
303
+ if negative_prompt is None:
304
+ uncond_tokens = [""] * batch_size
305
+ elif isinstance(negative_prompt, str):
306
+ uncond_tokens = [negative_prompt]
307
+ elif batch_size != len(negative_prompt):
308
+ raise ValueError(
309
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
310
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
311
+ " the batch size of `prompt`."
312
+ )
313
+ else:
314
+ uncond_tokens = negative_prompt
315
+
316
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
317
+ max_length = prompt_embeds.shape[1]
318
+ uncond_input = self.tokenizer(
319
+ uncond_tokens,
320
+ padding="max_length",
321
+ max_length=max_length,
322
+ truncation=True,
323
+ return_attention_mask=True,
324
+ add_special_tokens=True,
325
+ return_tensors="pt",
326
+ )
327
+ attention_mask = uncond_input.attention_mask.to(device)
328
+
329
+ negative_prompt_embeds = self.text_encoder(
330
+ uncond_input.input_ids.to(device),
331
+ attention_mask=attention_mask,
332
+ )
333
+ negative_prompt_embeds = negative_prompt_embeds[0]
334
+
335
+ if do_classifier_free_guidance:
336
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
337
+ seq_len = negative_prompt_embeds.shape[1]
338
+
339
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
340
+
341
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
342
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
343
+
344
+ # For classifier free guidance, we need to do two forward passes.
345
+ # Here we concatenate the unconditional and text embeddings into a single batch
346
+ # to avoid doing two forward passes
347
+ else:
348
+ negative_prompt_embeds = None
349
+
350
+ return prompt_embeds, negative_prompt_embeds
351
+
352
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
353
+ def run_safety_checker(self, image, device, dtype):
354
+ if self.safety_checker is not None:
355
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
356
+ image, nsfw_detected, watermark_detected = self.safety_checker(
357
+ images=image,
358
+ clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
359
+ )
360
+ else:
361
+ nsfw_detected = None
362
+ watermark_detected = None
363
+
364
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
365
+ self.unet_offload_hook.offload()
366
+
367
+ return image, nsfw_detected, watermark_detected
368
+
369
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
370
+ def prepare_extra_step_kwargs(self, generator, eta):
371
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
372
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
373
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
374
+ # and should be between [0, 1]
375
+
376
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
377
+ extra_step_kwargs = {}
378
+ if accepts_eta:
379
+ extra_step_kwargs["eta"] = eta
380
+
381
+ # check if the scheduler accepts generator
382
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
383
+ if accepts_generator:
384
+ extra_step_kwargs["generator"] = generator
385
+ return extra_step_kwargs
386
+
387
+ def check_inputs(
388
+ self,
389
+ prompt,
390
+ image,
391
+ batch_size,
392
+ callback_steps,
393
+ negative_prompt=None,
394
+ prompt_embeds=None,
395
+ negative_prompt_embeds=None,
396
+ ):
397
+ if (callback_steps is None) or (
398
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
399
+ ):
400
+ raise ValueError(
401
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
402
+ f" {type(callback_steps)}."
403
+ )
404
+
405
+ if prompt is not None and prompt_embeds is not None:
406
+ raise ValueError(
407
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
408
+ " only forward one of the two."
409
+ )
410
+ elif prompt is None and prompt_embeds is None:
411
+ raise ValueError(
412
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
413
+ )
414
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
415
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
416
+
417
+ if negative_prompt is not None and negative_prompt_embeds is not None:
418
+ raise ValueError(
419
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
420
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
421
+ )
422
+
423
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
424
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
425
+ raise ValueError(
426
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
427
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
428
+ f" {negative_prompt_embeds.shape}."
429
+ )
430
+
431
+ if isinstance(image, list):
432
+ check_image_type = image[0]
433
+ else:
434
+ check_image_type = image
435
+
436
+ if (
437
+ not isinstance(check_image_type, torch.Tensor)
438
+ and not isinstance(check_image_type, PIL.Image.Image)
439
+ and not isinstance(check_image_type, np.ndarray)
440
+ ):
441
+ raise ValueError(
442
+ "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
443
+ f" {type(check_image_type)}"
444
+ )
445
+
446
+ if isinstance(image, list):
447
+ image_batch_size = len(image)
448
+ elif isinstance(image, torch.Tensor):
449
+ image_batch_size = image.shape[0]
450
+ elif isinstance(image, PIL.Image.Image):
451
+ image_batch_size = 1
452
+ elif isinstance(image, np.ndarray):
453
+ image_batch_size = image.shape[0]
454
+ else:
455
+ assert False
456
+
457
+ if batch_size != image_batch_size:
458
+ raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
459
+
460
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
461
+ def _text_preprocessing(self, text, clean_caption=False):
462
+ if clean_caption and not is_bs4_available():
463
+ logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
464
+ logger.warning("Setting `clean_caption` to False...")
465
+ clean_caption = False
466
+
467
+ if clean_caption and not is_ftfy_available():
468
+ logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
469
+ logger.warning("Setting `clean_caption` to False...")
470
+ clean_caption = False
471
+
472
+ if not isinstance(text, (tuple, list)):
473
+ text = [text]
474
+
475
+ def process(text: str):
476
+ if clean_caption:
477
+ text = self._clean_caption(text)
478
+ text = self._clean_caption(text)
479
+ else:
480
+ text = text.lower().strip()
481
+ return text
482
+
483
+ return [process(t) for t in text]
484
+
485
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
486
+ def _clean_caption(self, caption):
487
+ caption = str(caption)
488
+ caption = ul.unquote_plus(caption)
489
+ caption = caption.strip().lower()
490
+ caption = re.sub("<person>", "person", caption)
491
+ # urls:
492
+ caption = re.sub(
493
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
494
+ "",
495
+ caption,
496
+ ) # regex for urls
497
+ caption = re.sub(
498
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
499
+ "",
500
+ caption,
501
+ ) # regex for urls
502
+ # html:
503
+ caption = BeautifulSoup(caption, features="html.parser").text
504
+
505
+ # @<nickname>
506
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
507
+
508
+ # 31C0—31EF CJK Strokes
509
+ # 31F0—31FF Katakana Phonetic Extensions
510
+ # 3200—32FF Enclosed CJK Letters and Months
511
+ # 3300—33FF CJK Compatibility
512
+ # 3400—4DBF CJK Unified Ideographs Extension A
513
+ # 4DC0—4DFF Yijing Hexagram Symbols
514
+ # 4E00—9FFF CJK Unified Ideographs
515
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
516
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
517
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
518
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
519
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
520
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
521
+ caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
522
+ #######################################################
523
+
524
+ # все виды тире / all types of dash --> "-"
525
+ caption = re.sub(
526
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
527
+ "-",
528
+ caption,
529
+ )
530
+
531
+ # кавычки к одному стандарту
532
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
533
+ caption = re.sub(r"[‘’]", "'", caption)
534
+
535
+ # &quot;
536
+ caption = re.sub(r"&quot;?", "", caption)
537
+ # &amp
538
+ caption = re.sub(r"&amp", "", caption)
539
+
540
+ # ip adresses:
541
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
542
+
543
+ # article ids:
544
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
545
+
546
+ # \n
547
+ caption = re.sub(r"\\n", " ", caption)
548
+
549
+ # "#123"
550
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
551
+ # "#12345.."
552
+ caption = re.sub(r"#\d{5,}\b", "", caption)
553
+ # "123456.."
554
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
555
+ # filenames:
556
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
557
+
558
+ #
559
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
560
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
561
+
562
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
563
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
564
+
565
+ # this-is-my-cute-cat / this_is_my_cute_cat
566
+ regex2 = re.compile(r"(?:\-|\_)")
567
+ if len(re.findall(regex2, caption)) > 3:
568
+ caption = re.sub(regex2, " ", caption)
569
+
570
+ caption = ftfy.fix_text(caption)
571
+ caption = html.unescape(html.unescape(caption))
572
+
573
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
574
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
575
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
576
+
577
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
578
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
579
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
580
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
581
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
582
+
583
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
584
+
585
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
586
+
587
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
588
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
589
+ caption = re.sub(r"\s+", " ", caption)
590
+
591
+ caption.strip()
592
+
593
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
594
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
595
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
596
+ caption = re.sub(r"^\.\S+$", "", caption)
597
+
598
+ return caption.strip()
599
+
600
+ def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor:
601
+ if not isinstance(image, list):
602
+ image = [image]
603
+
604
+ def numpy_to_pt(images):
605
+ if images.ndim == 3:
606
+ images = images[..., None]
607
+
608
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
609
+ return images
610
+
611
+ if isinstance(image[0], PIL.Image.Image):
612
+ new_image = []
613
+
614
+ for image_ in image:
615
+ image_ = image_.convert("RGB")
616
+ image_ = resize(image_, self.unet.sample_size)
617
+ image_ = np.array(image_)
618
+ image_ = image_.astype(np.float32)
619
+ image_ = image_ / 127.5 - 1
620
+ new_image.append(image_)
621
+
622
+ image = new_image
623
+
624
+ image = np.stack(image, axis=0) # to np
625
+ image = numpy_to_pt(image) # to pt
626
+
627
+ elif isinstance(image[0], np.ndarray):
628
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
629
+ image = numpy_to_pt(image)
630
+
631
+ elif isinstance(image[0], torch.Tensor):
632
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
633
+
634
+ return image
635
+
636
+ def get_timesteps(self, num_inference_steps, strength):
637
+ # get the original timestep using init_timestep
638
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
639
+
640
+ t_start = max(num_inference_steps - init_timestep, 0)
641
+ timesteps = self.scheduler.timesteps[t_start:]
642
+
643
+ return timesteps, num_inference_steps - t_start
644
+
645
+ def prepare_intermediate_images(
646
+ self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None
647
+ ):
648
+ _, channels, height, width = image.shape
649
+
650
+ batch_size = batch_size * num_images_per_prompt
651
+
652
+ shape = (batch_size, channels, height, width)
653
+
654
+ if isinstance(generator, list) and len(generator) != batch_size:
655
+ raise ValueError(
656
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
657
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
658
+ )
659
+
660
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
661
+
662
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
663
+ image = self.scheduler.add_noise(image, noise, timestep)
664
+
665
+ return image
666
+
667
+ @torch.no_grad()
668
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
669
+ def __call__(
670
+ self,
671
+ prompt: Union[str, List[str]] = None,
672
+ image: Union[
673
+ PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
674
+ ] = None,
675
+ strength: float = 0.7,
676
+ num_inference_steps: int = 80,
677
+ timesteps: List[int] = None,
678
+ guidance_scale: float = 10.0,
679
+ negative_prompt: Optional[Union[str, List[str]]] = None,
680
+ num_images_per_prompt: Optional[int] = 1,
681
+ eta: float = 0.0,
682
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
683
+ prompt_embeds: Optional[torch.FloatTensor] = None,
684
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
685
+ output_type: Optional[str] = "pil",
686
+ return_dict: bool = True,
687
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
688
+ callback_steps: int = 1,
689
+ clean_caption: bool = True,
690
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
691
+ ):
692
+ """
693
+ Function invoked when calling the pipeline for generation.
694
+
695
+ Args:
696
+ prompt (`str` or `List[str]`, *optional*):
697
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
698
+ instead.
699
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
700
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
701
+ process.
702
+ strength (`float`, *optional*, defaults to 0.7):
703
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
704
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
705
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
706
+ be maximum and the denoising process will run for the full number of iterations specified in
707
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
708
+ num_inference_steps (`int`, *optional*, defaults to 80):
709
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
710
+ expense of slower inference.
711
+ timesteps (`List[int]`, *optional*):
712
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
713
+ timesteps are used. Must be in descending order.
714
+ guidance_scale (`float`, *optional*, defaults to 10.0):
715
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
716
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
717
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
718
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
719
+ usually at the expense of lower image quality.
720
+ negative_prompt (`str` or `List[str]`, *optional*):
721
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
722
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
723
+ less than `1`).
724
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
725
+ The number of images to generate per prompt.
726
+ eta (`float`, *optional*, defaults to 0.0):
727
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
728
+ [`schedulers.DDIMScheduler`], will be ignored for others.
729
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
730
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
731
+ to make generation deterministic.
732
+ prompt_embeds (`torch.FloatTensor`, *optional*):
733
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
734
+ provided, text embeddings will be generated from `prompt` input argument.
735
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
736
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
737
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
738
+ argument.
739
+ output_type (`str`, *optional*, defaults to `"pil"`):
740
+ The output format of the generate image. Choose between
741
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
742
+ return_dict (`bool`, *optional*, defaults to `True`):
743
+ Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
744
+ callback (`Callable`, *optional*):
745
+ A function that will be called every `callback_steps` steps during inference. The function will be
746
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
747
+ callback_steps (`int`, *optional*, defaults to 1):
748
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
749
+ called at every step.
750
+ clean_caption (`bool`, *optional*, defaults to `True`):
751
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
752
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
753
+ prompt.
754
+ cross_attention_kwargs (`dict`, *optional*):
755
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
756
+ `self.processor` in
757
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
758
+
759
+ Examples:
760
+
761
+ Returns:
762
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
763
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
764
+ returning a tuple, the first element is a list with the generated images, and the second element is a list
765
+ of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
766
+ or watermarked content, according to the `safety_checker`.
767
+ """
768
+ # 1. Check inputs. Raise error if not correct
769
+ if prompt is not None and isinstance(prompt, str):
770
+ batch_size = 1
771
+ elif prompt is not None and isinstance(prompt, list):
772
+ batch_size = len(prompt)
773
+ else:
774
+ batch_size = prompt_embeds.shape[0]
775
+
776
+ self.check_inputs(
777
+ prompt, image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
778
+ )
779
+
780
+ # 2. Define call parameters
781
+ device = self._execution_device
782
+
783
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
784
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
785
+ # corresponds to doing no classifier free guidance.
786
+ do_classifier_free_guidance = guidance_scale > 1.0
787
+
788
+ # 3. Encode input prompt
789
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
790
+ prompt,
791
+ do_classifier_free_guidance,
792
+ num_images_per_prompt=num_images_per_prompt,
793
+ device=device,
794
+ negative_prompt=negative_prompt,
795
+ prompt_embeds=prompt_embeds,
796
+ negative_prompt_embeds=negative_prompt_embeds,
797
+ clean_caption=clean_caption,
798
+ )
799
+
800
+ if do_classifier_free_guidance:
801
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
802
+
803
+ dtype = prompt_embeds.dtype
804
+
805
+ # 4. Prepare timesteps
806
+ if timesteps is not None:
807
+ self.scheduler.set_timesteps(timesteps=timesteps, device=device)
808
+ timesteps = self.scheduler.timesteps
809
+ num_inference_steps = len(timesteps)
810
+ else:
811
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
812
+ timesteps = self.scheduler.timesteps
813
+
814
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
815
+
816
+ # 5. Prepare intermediate images
817
+ image = self.preprocess_image(image)
818
+ image = image.to(device=device, dtype=dtype)
819
+
820
+ noise_timestep = timesteps[0:1]
821
+ noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt)
822
+
823
+ intermediate_images = self.prepare_intermediate_images(
824
+ image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, generator
825
+ )
826
+
827
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
828
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
829
+
830
+ # HACK: see comment in `enable_model_cpu_offload`
831
+ if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
832
+ self.text_encoder_offload_hook.offload()
833
+
834
+ # 7. Denoising loop
835
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
836
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
837
+ for i, t in enumerate(timesteps):
838
+ model_input = (
839
+ torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images
840
+ )
841
+ model_input = self.scheduler.scale_model_input(model_input, t)
842
+
843
+ # predict the noise residual
844
+ noise_pred = self.unet(
845
+ model_input,
846
+ t,
847
+ encoder_hidden_states=prompt_embeds,
848
+ cross_attention_kwargs=cross_attention_kwargs,
849
+ return_dict=False,
850
+ )[0]
851
+
852
+ # perform guidance
853
+ if do_classifier_free_guidance:
854
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
855
+ noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1)
856
+ noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1)
857
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
858
+ noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
859
+
860
+ if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
861
+ noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1)
862
+
863
+ # compute the previous noisy sample x_t -> x_t-1
864
+ intermediate_images = self.scheduler.step(
865
+ noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
866
+ )[0]
867
+
868
+ # call the callback, if provided
869
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
870
+ progress_bar.update()
871
+ if callback is not None and i % callback_steps == 0:
872
+ callback(i, t, intermediate_images)
873
+
874
+ image = intermediate_images
875
+
876
+ if output_type == "pil":
877
+ # 8. Post-processing
878
+ image = (image / 2 + 0.5).clamp(0, 1)
879
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
880
+
881
+ # 9. Run safety checker
882
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
883
+
884
+ # 10. Convert to PIL
885
+ image = self.numpy_to_pil(image)
886
+
887
+ # 11. Apply watermark
888
+ if self.watermarker is not None:
889
+ self.watermarker.apply_watermark(image, self.unet.config.sample_size)
890
+ elif output_type == "pt":
891
+ nsfw_detected = None
892
+ watermark_detected = None
893
+
894
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
895
+ self.unet_offload_hook.offload()
896
+ else:
897
+ # 8. Post-processing
898
+ image = (image / 2 + 0.5).clamp(0, 1)
899
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
900
+
901
+ # 9. Run safety checker
902
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
903
+
904
+ # Offload all models
905
+ self.maybe_free_model_hooks()
906
+
907
+ if not return_dict:
908
+ return (image, nsfw_detected, watermark_detected)
909
+
910
+ return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py ADDED
@@ -0,0 +1,1029 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
12
+
13
+ from ...loaders import LoraLoaderMixin
14
+ from ...models import UNet2DConditionModel
15
+ from ...schedulers import DDPMScheduler
16
+ from ...utils import (
17
+ BACKENDS_MAPPING,
18
+ PIL_INTERPOLATION,
19
+ is_accelerate_available,
20
+ is_bs4_available,
21
+ is_ftfy_available,
22
+ logging,
23
+ replace_example_docstring,
24
+ )
25
+ from ...utils.torch_utils import randn_tensor
26
+ from ..pipeline_utils import DiffusionPipeline
27
+ from .pipeline_output import IFPipelineOutput
28
+ from .safety_checker import IFSafetyChecker
29
+ from .watermark import IFWatermarker
30
+
31
+
32
+ if is_bs4_available():
33
+ from bs4 import BeautifulSoup
34
+
35
+ if is_ftfy_available():
36
+ import ftfy
37
+
38
+
39
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
+
41
+
42
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize
43
+ def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image:
44
+ w, h = images.size
45
+
46
+ coef = w / h
47
+
48
+ w, h = img_size, img_size
49
+
50
+ if coef >= 1:
51
+ w = int(round(img_size / 8 * coef) * 8)
52
+ else:
53
+ h = int(round(img_size / 8 / coef) * 8)
54
+
55
+ images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None)
56
+
57
+ return images
58
+
59
+
60
+ EXAMPLE_DOC_STRING = """
61
+ Examples:
62
+ ```py
63
+ >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
64
+ >>> from diffusers.utils import pt_to_pil
65
+ >>> import torch
66
+ >>> from PIL import Image
67
+ >>> import requests
68
+ >>> from io import BytesIO
69
+
70
+ >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
71
+ >>> response = requests.get(url)
72
+ >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")
73
+ >>> original_image = original_image.resize((768, 512))
74
+
75
+ >>> pipe = IFImg2ImgPipeline.from_pretrained(
76
+ ... "DeepFloyd/IF-I-XL-v1.0",
77
+ ... variant="fp16",
78
+ ... torch_dtype=torch.float16,
79
+ ... )
80
+ >>> pipe.enable_model_cpu_offload()
81
+
82
+ >>> prompt = "A fantasy landscape in style minecraft"
83
+ >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
84
+
85
+ >>> image = pipe(
86
+ ... image=original_image,
87
+ ... prompt_embeds=prompt_embeds,
88
+ ... negative_prompt_embeds=negative_embeds,
89
+ ... output_type="pt",
90
+ ... ).images
91
+
92
+ >>> # save intermediate image
93
+ >>> pil_image = pt_to_pil(image)
94
+ >>> pil_image[0].save("./if_stage_I.png")
95
+
96
+ >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
97
+ ... "DeepFloyd/IF-II-L-v1.0",
98
+ ... text_encoder=None,
99
+ ... variant="fp16",
100
+ ... torch_dtype=torch.float16,
101
+ ... )
102
+ >>> super_res_1_pipe.enable_model_cpu_offload()
103
+
104
+ >>> image = super_res_1_pipe(
105
+ ... image=image,
106
+ ... original_image=original_image,
107
+ ... prompt_embeds=prompt_embeds,
108
+ ... negative_prompt_embeds=negative_embeds,
109
+ ... ).images
110
+ >>> image[0].save("./if_stage_II.png")
111
+ ```
112
+ """
113
+
114
+
115
+ class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
116
+ tokenizer: T5Tokenizer
117
+ text_encoder: T5EncoderModel
118
+
119
+ unet: UNet2DConditionModel
120
+ scheduler: DDPMScheduler
121
+ image_noising_scheduler: DDPMScheduler
122
+
123
+ feature_extractor: Optional[CLIPImageProcessor]
124
+ safety_checker: Optional[IFSafetyChecker]
125
+
126
+ watermarker: Optional[IFWatermarker]
127
+
128
+ bad_punct_regex = re.compile(
129
+ r"["
130
+ + "#®•©™&@·º½¾¿¡§~"
131
+ + r"\)"
132
+ + r"\("
133
+ + r"\]"
134
+ + r"\["
135
+ + r"\}"
136
+ + r"\{"
137
+ + r"\|"
138
+ + "\\"
139
+ + r"\/"
140
+ + r"\*"
141
+ + r"]{1,}"
142
+ ) # noqa
143
+
144
+ _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"]
145
+ model_cpu_offload_seq = "text_encoder->unet"
146
+
147
+ def __init__(
148
+ self,
149
+ tokenizer: T5Tokenizer,
150
+ text_encoder: T5EncoderModel,
151
+ unet: UNet2DConditionModel,
152
+ scheduler: DDPMScheduler,
153
+ image_noising_scheduler: DDPMScheduler,
154
+ safety_checker: Optional[IFSafetyChecker],
155
+ feature_extractor: Optional[CLIPImageProcessor],
156
+ watermarker: Optional[IFWatermarker],
157
+ requires_safety_checker: bool = True,
158
+ ):
159
+ super().__init__()
160
+
161
+ if safety_checker is None and requires_safety_checker:
162
+ logger.warning(
163
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
164
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
165
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
166
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
167
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
168
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
169
+ )
170
+
171
+ if safety_checker is not None and feature_extractor is None:
172
+ raise ValueError(
173
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
174
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
175
+ )
176
+
177
+ if unet.config.in_channels != 6:
178
+ logger.warning(
179
+ "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
180
+ )
181
+
182
+ self.register_modules(
183
+ tokenizer=tokenizer,
184
+ text_encoder=text_encoder,
185
+ unet=unet,
186
+ scheduler=scheduler,
187
+ image_noising_scheduler=image_noising_scheduler,
188
+ safety_checker=safety_checker,
189
+ feature_extractor=feature_extractor,
190
+ watermarker=watermarker,
191
+ )
192
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
193
+
194
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
195
+ def remove_all_hooks(self):
196
+ if is_accelerate_available():
197
+ from accelerate.hooks import remove_hook_from_module
198
+ else:
199
+ raise ImportError("Please install accelerate via `pip install accelerate`")
200
+
201
+ for model in [self.text_encoder, self.unet, self.safety_checker]:
202
+ if model is not None:
203
+ remove_hook_from_module(model, recurse=True)
204
+
205
+ self.unet_offload_hook = None
206
+ self.text_encoder_offload_hook = None
207
+ self.final_offload_hook = None
208
+
209
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
210
+ def _text_preprocessing(self, text, clean_caption=False):
211
+ if clean_caption and not is_bs4_available():
212
+ logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
213
+ logger.warning("Setting `clean_caption` to False...")
214
+ clean_caption = False
215
+
216
+ if clean_caption and not is_ftfy_available():
217
+ logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
218
+ logger.warning("Setting `clean_caption` to False...")
219
+ clean_caption = False
220
+
221
+ if not isinstance(text, (tuple, list)):
222
+ text = [text]
223
+
224
+ def process(text: str):
225
+ if clean_caption:
226
+ text = self._clean_caption(text)
227
+ text = self._clean_caption(text)
228
+ else:
229
+ text = text.lower().strip()
230
+ return text
231
+
232
+ return [process(t) for t in text]
233
+
234
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
235
+ def _clean_caption(self, caption):
236
+ caption = str(caption)
237
+ caption = ul.unquote_plus(caption)
238
+ caption = caption.strip().lower()
239
+ caption = re.sub("<person>", "person", caption)
240
+ # urls:
241
+ caption = re.sub(
242
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
243
+ "",
244
+ caption,
245
+ ) # regex for urls
246
+ caption = re.sub(
247
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
248
+ "",
249
+ caption,
250
+ ) # regex for urls
251
+ # html:
252
+ caption = BeautifulSoup(caption, features="html.parser").text
253
+
254
+ # @<nickname>
255
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
256
+
257
+ # 31C0—31EF CJK Strokes
258
+ # 31F0—31FF Katakana Phonetic Extensions
259
+ # 3200—32FF Enclosed CJK Letters and Months
260
+ # 3300—33FF CJK Compatibility
261
+ # 3400—4DBF CJK Unified Ideographs Extension A
262
+ # 4DC0—4DFF Yijing Hexagram Symbols
263
+ # 4E00—9FFF CJK Unified Ideographs
264
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
265
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
266
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
267
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
268
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
269
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
270
+ caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
271
+ #######################################################
272
+
273
+ # все виды тире / all types of dash --> "-"
274
+ caption = re.sub(
275
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
276
+ "-",
277
+ caption,
278
+ )
279
+
280
+ # кавычки к одному стандарту
281
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
282
+ caption = re.sub(r"[‘’]", "'", caption)
283
+
284
+ # &quot;
285
+ caption = re.sub(r"&quot;?", "", caption)
286
+ # &amp
287
+ caption = re.sub(r"&amp", "", caption)
288
+
289
+ # ip adresses:
290
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
291
+
292
+ # article ids:
293
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
294
+
295
+ # \n
296
+ caption = re.sub(r"\\n", " ", caption)
297
+
298
+ # "#123"
299
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
300
+ # "#12345.."
301
+ caption = re.sub(r"#\d{5,}\b", "", caption)
302
+ # "123456.."
303
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
304
+ # filenames:
305
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
306
+
307
+ #
308
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
309
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
310
+
311
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
312
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
313
+
314
+ # this-is-my-cute-cat / this_is_my_cute_cat
315
+ regex2 = re.compile(r"(?:\-|\_)")
316
+ if len(re.findall(regex2, caption)) > 3:
317
+ caption = re.sub(regex2, " ", caption)
318
+
319
+ caption = ftfy.fix_text(caption)
320
+ caption = html.unescape(html.unescape(caption))
321
+
322
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
323
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
324
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
325
+
326
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
327
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
328
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
329
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
330
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
331
+
332
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
333
+
334
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
335
+
336
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
337
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
338
+ caption = re.sub(r"\s+", " ", caption)
339
+
340
+ caption.strip()
341
+
342
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
343
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
344
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
345
+ caption = re.sub(r"^\.\S+$", "", caption)
346
+
347
+ return caption.strip()
348
+
349
    @torch.no_grad()
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        do_classifier_free_guidance: bool = True,
        num_images_per_prompt: int = 1,
        device: Optional[torch.device] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        clean_caption: bool = False,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            clean_caption (bool, defaults to `False`):
                If `True`, the function will preprocess and clean the provided caption before encoding.

        Returns:
            `(prompt_embeds, negative_prompt_embeds)`; the second element is `None` when
            `do_classifier_free_guidance` is `False`.
        """
        # `prompt` and `negative_prompt` must both be str or both be list so batching lines up.
        if prompt is not None and negative_prompt is not None:
            if type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )

        if device is None:
            device = self._execution_device

        # Derive batch size from the prompt when given, otherwise from precomputed embeddings.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
        max_length = 77

        if prompt_embeds is None:
            prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            # Re-tokenize without truncation to detect (and warn about) dropped tokens.
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
                # NOTE(review): the message says "CLIP" but this pipeline's text encoder is T5;
                # wording inherited from the upstream "Copied from" source.
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {max_length} tokens: {removed_text}"
                )

            attention_mask = text_inputs.attention_mask.to(device)

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            # The encoder returns a tuple/ModelOutput; the hidden states are its first element.
            prompt_embeds = prompt_embeds[0]

        # Cast embeddings to the dtype of whichever module is available.
        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        elif self.unet is not None:
            dtype = self.unet.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                # Default unconditional input: empty prompt per batch item.
                uncond_tokens = [""] * batch_size
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
            # Pad/truncate the negative prompt to the same sequence length as the positive embeddings.
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            attention_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
        else:
            negative_prompt_embeds = None

        return prompt_embeds, negative_prompt_embeds
503
+
504
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
505
+ def run_safety_checker(self, image, device, dtype):
506
+ if self.safety_checker is not None:
507
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
508
+ image, nsfw_detected, watermark_detected = self.safety_checker(
509
+ images=image,
510
+ clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
511
+ )
512
+ else:
513
+ nsfw_detected = None
514
+ watermark_detected = None
515
+
516
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
517
+ self.unet_offload_hook.offload()
518
+
519
+ return image, nsfw_detected, watermark_detected
520
+
521
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
522
+ def prepare_extra_step_kwargs(self, generator, eta):
523
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
524
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
525
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
526
+ # and should be between [0, 1]
527
+
528
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
529
+ extra_step_kwargs = {}
530
+ if accepts_eta:
531
+ extra_step_kwargs["eta"] = eta
532
+
533
+ # check if the scheduler accepts generator
534
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
535
+ if accepts_generator:
536
+ extra_step_kwargs["generator"] = generator
537
+ return extra_step_kwargs
538
+
539
+ def check_inputs(
540
+ self,
541
+ prompt,
542
+ image,
543
+ original_image,
544
+ batch_size,
545
+ callback_steps,
546
+ negative_prompt=None,
547
+ prompt_embeds=None,
548
+ negative_prompt_embeds=None,
549
+ ):
550
+ if (callback_steps is None) or (
551
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
552
+ ):
553
+ raise ValueError(
554
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
555
+ f" {type(callback_steps)}."
556
+ )
557
+
558
+ if prompt is not None and prompt_embeds is not None:
559
+ raise ValueError(
560
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
561
+ " only forward one of the two."
562
+ )
563
+ elif prompt is None and prompt_embeds is None:
564
+ raise ValueError(
565
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
566
+ )
567
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
568
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
569
+
570
+ if negative_prompt is not None and negative_prompt_embeds is not None:
571
+ raise ValueError(
572
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
573
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
574
+ )
575
+
576
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
577
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
578
+ raise ValueError(
579
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
580
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
581
+ f" {negative_prompt_embeds.shape}."
582
+ )
583
+
584
+ # image
585
+
586
+ if isinstance(image, list):
587
+ check_image_type = image[0]
588
+ else:
589
+ check_image_type = image
590
+
591
+ if (
592
+ not isinstance(check_image_type, torch.Tensor)
593
+ and not isinstance(check_image_type, PIL.Image.Image)
594
+ and not isinstance(check_image_type, np.ndarray)
595
+ ):
596
+ raise ValueError(
597
+ "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
598
+ f" {type(check_image_type)}"
599
+ )
600
+
601
+ if isinstance(image, list):
602
+ image_batch_size = len(image)
603
+ elif isinstance(image, torch.Tensor):
604
+ image_batch_size = image.shape[0]
605
+ elif isinstance(image, PIL.Image.Image):
606
+ image_batch_size = 1
607
+ elif isinstance(image, np.ndarray):
608
+ image_batch_size = image.shape[0]
609
+ else:
610
+ assert False
611
+
612
+ if batch_size != image_batch_size:
613
+ raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
614
+
615
+ # original_image
616
+
617
+ if isinstance(original_image, list):
618
+ check_image_type = original_image[0]
619
+ else:
620
+ check_image_type = original_image
621
+
622
+ if (
623
+ not isinstance(check_image_type, torch.Tensor)
624
+ and not isinstance(check_image_type, PIL.Image.Image)
625
+ and not isinstance(check_image_type, np.ndarray)
626
+ ):
627
+ raise ValueError(
628
+ "`original_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
629
+ f" {type(check_image_type)}"
630
+ )
631
+
632
+ if isinstance(original_image, list):
633
+ image_batch_size = len(original_image)
634
+ elif isinstance(original_image, torch.Tensor):
635
+ image_batch_size = original_image.shape[0]
636
+ elif isinstance(original_image, PIL.Image.Image):
637
+ image_batch_size = 1
638
+ elif isinstance(original_image, np.ndarray):
639
+ image_batch_size = original_image.shape[0]
640
+ else:
641
+ assert False
642
+
643
+ if batch_size != image_batch_size:
644
+ raise ValueError(
645
+ f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}"
646
+ )
647
+
648
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image
649
+ def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor:
650
+ if not isinstance(image, list):
651
+ image = [image]
652
+
653
+ def numpy_to_pt(images):
654
+ if images.ndim == 3:
655
+ images = images[..., None]
656
+
657
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
658
+ return images
659
+
660
+ if isinstance(image[0], PIL.Image.Image):
661
+ new_image = []
662
+
663
+ for image_ in image:
664
+ image_ = image_.convert("RGB")
665
+ image_ = resize(image_, self.unet.sample_size)
666
+ image_ = np.array(image_)
667
+ image_ = image_.astype(np.float32)
668
+ image_ = image_ / 127.5 - 1
669
+ new_image.append(image_)
670
+
671
+ image = new_image
672
+
673
+ image = np.stack(image, axis=0) # to np
674
+ image = numpy_to_pt(image) # to pt
675
+
676
+ elif isinstance(image[0], np.ndarray):
677
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
678
+ image = numpy_to_pt(image)
679
+
680
+ elif isinstance(image[0], torch.Tensor):
681
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
682
+
683
+ return image
684
+
685
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image
686
+ def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor:
687
+ if not isinstance(image, torch.Tensor) and not isinstance(image, list):
688
+ image = [image]
689
+
690
+ if isinstance(image[0], PIL.Image.Image):
691
+ image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image]
692
+
693
+ image = np.stack(image, axis=0) # to np
694
+ image = torch.from_numpy(image.transpose(0, 3, 1, 2))
695
+ elif isinstance(image[0], np.ndarray):
696
+ image = np.stack(image, axis=0) # to np
697
+ if image.ndim == 5:
698
+ image = image[0]
699
+
700
+ image = torch.from_numpy(image.transpose(0, 3, 1, 2))
701
+ elif isinstance(image, list) and isinstance(image[0], torch.Tensor):
702
+ dims = image[0].ndim
703
+
704
+ if dims == 3:
705
+ image = torch.stack(image, dim=0)
706
+ elif dims == 4:
707
+ image = torch.concat(image, dim=0)
708
+ else:
709
+ raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}")
710
+
711
+ image = image.to(device=device, dtype=self.unet.dtype)
712
+
713
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
714
+
715
+ return image
716
+
717
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps
718
+ def get_timesteps(self, num_inference_steps, strength):
719
+ # get the original timestep using init_timestep
720
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
721
+
722
+ t_start = max(num_inference_steps - init_timestep, 0)
723
+ timesteps = self.scheduler.timesteps[t_start:]
724
+
725
+ return timesteps, num_inference_steps - t_start
726
+
727
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.prepare_intermediate_images
728
+ def prepare_intermediate_images(
729
+ self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None
730
+ ):
731
+ _, channels, height, width = image.shape
732
+
733
+ batch_size = batch_size * num_images_per_prompt
734
+
735
+ shape = (batch_size, channels, height, width)
736
+
737
+ if isinstance(generator, list) and len(generator) != batch_size:
738
+ raise ValueError(
739
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
740
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
741
+ )
742
+
743
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
744
+
745
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
746
+ image = self.scheduler.add_noise(image, noise, timestep)
747
+
748
+ return image
749
+
750
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor],
        original_image: Union[
            PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
        ] = None,
        strength: float = 0.8,
        prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        guidance_scale: float = 4.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        noise_level: int = 250,
        clean_caption: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            original_image (`torch.FloatTensor` or `PIL.Image.Image`):
                The original image that `image` was varied from.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
                timesteps are used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            noise_level (`int`, *optional*, defaults to 250):
                The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)`
            clean_caption (`bool`, *optional*, defaults to `True`):
                Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
                be installed. If the dependencies are not installed, the embeddings will be created from the raw
                prompt.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
                [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
                returning a tuple, the first element is a list with the generated images, and the second element is a
                list of `bool`s denoting whether the corresponding generated image likely represents
                "not-safe-for-work" (nsfw) or watermarked content, according to the `safety_checker`.
        """
        # 1. Check inputs. Raise error if not correct
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            # prompt_embeds must have been provided (enforced by check_inputs below)
            batch_size = prompt_embeds.shape[0]

        self.check_inputs(
            prompt,
            image,
            original_image,
            batch_size,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        device = self._execution_device

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            clean_caption=clean_caption,
        )

        if do_classifier_free_guidance:
            # unconditional embeddings first: the batched UNet output is later
            # split back into (uncond, text) halves in the same order
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        dtype = prompt_embeds.dtype

        # 4. Prepare timesteps
        if timesteps is not None:
            # custom schedule: the scheduler re-derives the step count from it
            self.scheduler.set_timesteps(timesteps=timesteps, device=device)
            timesteps = self.scheduler.timesteps
            num_inference_steps = len(timesteps)
        else:
            self.scheduler.set_timesteps(num_inference_steps, device=device)
            timesteps = self.scheduler.timesteps

        # trim the schedule according to `strength` (img2img partial denoising)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)

        # 5. prepare original image
        original_image = self.preprocess_original_image(original_image)
        original_image = original_image.to(device=device, dtype=dtype)

        # 6. Prepare intermediate images
        # noise the original image to the first kept timestep, one entry per generated sample
        noise_timestep = timesteps[0:1]
        noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt)

        intermediate_images = self.prepare_intermediate_images(
            original_image,
            noise_timestep,
            batch_size,
            num_images_per_prompt,
            dtype,
            device,
            generator,
        )

        # 7. Prepare upscaled image and noise level
        _, _, height, width = original_image.shape

        image = self.preprocess_image(image, num_images_per_prompt, device)

        # bring the low-res conditioning image to the working resolution
        upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True)

        # noise-augment the upscaled conditioning image; `noise_level` is also fed
        # to the UNet as a class label so it knows how much noise was added
        noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)
        noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)
        upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)

        if do_classifier_free_guidance:
            # duplicate for the (uncond, text) batched forward pass
            noise_level = torch.cat([noise_level] * 2)

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # HACK: see comment in `enable_model_cpu_offload`
        if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
            self.text_encoder_offload_hook.offload()

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # condition the UNet by concatenating the noisy latents with the
                # (noise-augmented) upscaled image along the channel axis
                model_input = torch.cat([intermediate_images, upscaled], dim=1)

                model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
                model_input = self.scheduler.scale_model_input(model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    class_labels=noise_level,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    # the UNet predicts noise and variance stacked along channels;
                    # guidance is applied to the noise half only, then the text
                    # branch's predicted variance is re-attached
                    noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)
                    noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                    noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)

                if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
                    # scheduler does not consume the variance channels: drop them
                    noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)

                # compute the previous noisy sample x_t -> x_t-1
                intermediate_images = self.scheduler.step(
                    noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
                )[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, intermediate_images)

        image = intermediate_images

        if output_type == "pil":
            # 10. Post-processing: [-1, 1] -> [0, 1], NCHW -> NHWC numpy
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            # 11. Run safety checker
            image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)

            # 12. Convert to PIL
            image = self.numpy_to_pil(image)

            # 13. Apply watermark
            # NOTE(review): the return value is not assigned — presumably the
            # watermarker mutates the PIL images in place; confirm upstream.
            if self.watermarker is not None:
                self.watermarker.apply_watermark(image, self.unet.config.sample_size)
        elif output_type == "pt":
            # raw tensors are returned unprocessed; no safety checking is applied
            nsfw_detected = None
            watermark_detected = None

            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
                self.unet_offload_hook.offload()
        else:
            # 10. Post-processing (numpy output path)
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            # 11. Run safety checker
            image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, nsfw_detected, watermark_detected)

        return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py ADDED
@@ -0,0 +1,1030 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
11
+
12
+ from ...loaders import LoraLoaderMixin
13
+ from ...models import UNet2DConditionModel
14
+ from ...schedulers import DDPMScheduler
15
+ from ...utils import (
16
+ BACKENDS_MAPPING,
17
+ PIL_INTERPOLATION,
18
+ is_accelerate_available,
19
+ is_bs4_available,
20
+ is_ftfy_available,
21
+ logging,
22
+ replace_example_docstring,
23
+ )
24
+ from ...utils.torch_utils import randn_tensor
25
+ from ..pipeline_utils import DiffusionPipeline
26
+ from .pipeline_output import IFPipelineOutput
27
+ from .safety_checker import IFSafetyChecker
28
+ from .watermark import IFWatermarker
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
+ if is_bs4_available():
34
+ from bs4 import BeautifulSoup
35
+
36
+ if is_ftfy_available():
37
+ import ftfy
38
+
39
+
40
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize
41
+ def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image:
42
+ w, h = images.size
43
+
44
+ coef = w / h
45
+
46
+ w, h = img_size, img_size
47
+
48
+ if coef >= 1:
49
+ w = int(round(img_size / 8 * coef) * 8)
50
+ else:
51
+ h = int(round(img_size / 8 / coef) * 8)
52
+
53
+ images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None)
54
+
55
+ return images
56
+
57
+
58
+ EXAMPLE_DOC_STRING = """
59
+ Examples:
60
+ ```py
61
+ >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
62
+ >>> from diffusers.utils import pt_to_pil
63
+ >>> import torch
64
+ >>> from PIL import Image
65
+ >>> import requests
66
+ >>> from io import BytesIO
67
+
68
+ >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"
69
+ >>> response = requests.get(url)
70
+ >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")
71
+ >>> original_image = original_image
72
+
73
+ >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"
74
+ >>> response = requests.get(url)
75
+ >>> mask_image = Image.open(BytesIO(response.content))
76
+ >>> mask_image = mask_image
77
+
78
+ >>> pipe = IFInpaintingPipeline.from_pretrained(
79
+ ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
80
+ ... )
81
+ >>> pipe.enable_model_cpu_offload()
82
+
83
+ >>> prompt = "blue sunglasses"
84
+ >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
85
+
86
+ >>> image = pipe(
87
+ ... image=original_image,
88
+ ... mask_image=mask_image,
89
+ ... prompt_embeds=prompt_embeds,
90
+ ... negative_prompt_embeds=negative_embeds,
91
+ ... output_type="pt",
92
+ ... ).images
93
+
94
+ >>> # save intermediate image
95
+ >>> pil_image = pt_to_pil(image)
96
+ >>> pil_image[0].save("./if_stage_I.png")
97
+
98
+ >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
99
+ ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
100
+ ... )
101
+ >>> super_res_1_pipe.enable_model_cpu_offload()
102
+
103
+ >>> image = super_res_1_pipe(
104
+ ... image=image,
105
+ ... mask_image=mask_image,
106
+ ... original_image=original_image,
107
+ ... prompt_embeds=prompt_embeds,
108
+ ... negative_prompt_embeds=negative_embeds,
109
+ ... ).images
110
+ >>> image[0].save("./if_stage_II.png")
111
+ ```
112
+ """
113
+
114
+
115
+ class IFInpaintingPipeline(DiffusionPipeline, LoraLoaderMixin):
116
+ tokenizer: T5Tokenizer
117
+ text_encoder: T5EncoderModel
118
+
119
+ unet: UNet2DConditionModel
120
+ scheduler: DDPMScheduler
121
+
122
+ feature_extractor: Optional[CLIPImageProcessor]
123
+ safety_checker: Optional[IFSafetyChecker]
124
+
125
+ watermarker: Optional[IFWatermarker]
126
+
127
+ bad_punct_regex = re.compile(
128
+ r"["
129
+ + "#®•©™&@·º½¾¿¡§~"
130
+ + r"\)"
131
+ + r"\("
132
+ + r"\]"
133
+ + r"\["
134
+ + r"\}"
135
+ + r"\{"
136
+ + r"\|"
137
+ + "\\"
138
+ + r"\/"
139
+ + r"\*"
140
+ + r"]{1,}"
141
+ ) # noqa
142
+
143
+ _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
144
+ model_cpu_offload_seq = "text_encoder->unet"
145
+
146
+ def __init__(
147
+ self,
148
+ tokenizer: T5Tokenizer,
149
+ text_encoder: T5EncoderModel,
150
+ unet: UNet2DConditionModel,
151
+ scheduler: DDPMScheduler,
152
+ safety_checker: Optional[IFSafetyChecker],
153
+ feature_extractor: Optional[CLIPImageProcessor],
154
+ watermarker: Optional[IFWatermarker],
155
+ requires_safety_checker: bool = True,
156
+ ):
157
+ super().__init__()
158
+
159
+ if safety_checker is None and requires_safety_checker:
160
+ logger.warning(
161
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
162
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
163
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
164
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
165
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
166
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
167
+ )
168
+
169
+ if safety_checker is not None and feature_extractor is None:
170
+ raise ValueError(
171
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
172
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
173
+ )
174
+
175
+ self.register_modules(
176
+ tokenizer=tokenizer,
177
+ text_encoder=text_encoder,
178
+ unet=unet,
179
+ scheduler=scheduler,
180
+ safety_checker=safety_checker,
181
+ feature_extractor=feature_extractor,
182
+ watermarker=watermarker,
183
+ )
184
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
185
+
186
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
187
+ def remove_all_hooks(self):
188
+ if is_accelerate_available():
189
+ from accelerate.hooks import remove_hook_from_module
190
+ else:
191
+ raise ImportError("Please install accelerate via `pip install accelerate`")
192
+
193
+ for model in [self.text_encoder, self.unet, self.safety_checker]:
194
+ if model is not None:
195
+ remove_hook_from_module(model, recurse=True)
196
+
197
+ self.unet_offload_hook = None
198
+ self.text_encoder_offload_hook = None
199
+ self.final_offload_hook = None
200
+
201
+ @torch.no_grad()
202
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
203
+ def encode_prompt(
204
+ self,
205
+ prompt: Union[str, List[str]],
206
+ do_classifier_free_guidance: bool = True,
207
+ num_images_per_prompt: int = 1,
208
+ device: Optional[torch.device] = None,
209
+ negative_prompt: Optional[Union[str, List[str]]] = None,
210
+ prompt_embeds: Optional[torch.FloatTensor] = None,
211
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
212
+ clean_caption: bool = False,
213
+ ):
214
+ r"""
215
+ Encodes the prompt into text encoder hidden states.
216
+
217
+ Args:
218
+ prompt (`str` or `List[str]`, *optional*):
219
+ prompt to be encoded
220
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
221
+ whether to use classifier free guidance or not
222
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
223
+ number of images that should be generated per prompt
224
+ device: (`torch.device`, *optional*):
225
+ torch device to place the resulting embeddings on
226
+ negative_prompt (`str` or `List[str]`, *optional*):
227
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
228
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
229
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
230
+ prompt_embeds (`torch.FloatTensor`, *optional*):
231
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
232
+ provided, text embeddings will be generated from `prompt` input argument.
233
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
234
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
235
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
236
+ argument.
237
+ clean_caption (bool, defaults to `False`):
238
+ If `True`, the function will preprocess and clean the provided caption before encoding.
239
+ """
240
+ if prompt is not None and negative_prompt is not None:
241
+ if type(prompt) is not type(negative_prompt):
242
+ raise TypeError(
243
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
244
+ f" {type(prompt)}."
245
+ )
246
+
247
+ if device is None:
248
+ device = self._execution_device
249
+
250
+ if prompt is not None and isinstance(prompt, str):
251
+ batch_size = 1
252
+ elif prompt is not None and isinstance(prompt, list):
253
+ batch_size = len(prompt)
254
+ else:
255
+ batch_size = prompt_embeds.shape[0]
256
+
257
+ # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
258
+ max_length = 77
259
+
260
+ if prompt_embeds is None:
261
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
262
+ text_inputs = self.tokenizer(
263
+ prompt,
264
+ padding="max_length",
265
+ max_length=max_length,
266
+ truncation=True,
267
+ add_special_tokens=True,
268
+ return_tensors="pt",
269
+ )
270
+ text_input_ids = text_inputs.input_ids
271
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
272
+
273
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
274
+ text_input_ids, untruncated_ids
275
+ ):
276
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
277
+ logger.warning(
278
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
279
+ f" {max_length} tokens: {removed_text}"
280
+ )
281
+
282
+ attention_mask = text_inputs.attention_mask.to(device)
283
+
284
+ prompt_embeds = self.text_encoder(
285
+ text_input_ids.to(device),
286
+ attention_mask=attention_mask,
287
+ )
288
+ prompt_embeds = prompt_embeds[0]
289
+
290
+ if self.text_encoder is not None:
291
+ dtype = self.text_encoder.dtype
292
+ elif self.unet is not None:
293
+ dtype = self.unet.dtype
294
+ else:
295
+ dtype = None
296
+
297
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
298
+
299
+ bs_embed, seq_len, _ = prompt_embeds.shape
300
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
301
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
302
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
303
+
304
+ # get unconditional embeddings for classifier free guidance
305
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
306
+ uncond_tokens: List[str]
307
+ if negative_prompt is None:
308
+ uncond_tokens = [""] * batch_size
309
+ elif isinstance(negative_prompt, str):
310
+ uncond_tokens = [negative_prompt]
311
+ elif batch_size != len(negative_prompt):
312
+ raise ValueError(
313
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
314
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
315
+ " the batch size of `prompt`."
316
+ )
317
+ else:
318
+ uncond_tokens = negative_prompt
319
+
320
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
321
+ max_length = prompt_embeds.shape[1]
322
+ uncond_input = self.tokenizer(
323
+ uncond_tokens,
324
+ padding="max_length",
325
+ max_length=max_length,
326
+ truncation=True,
327
+ return_attention_mask=True,
328
+ add_special_tokens=True,
329
+ return_tensors="pt",
330
+ )
331
+ attention_mask = uncond_input.attention_mask.to(device)
332
+
333
+ negative_prompt_embeds = self.text_encoder(
334
+ uncond_input.input_ids.to(device),
335
+ attention_mask=attention_mask,
336
+ )
337
+ negative_prompt_embeds = negative_prompt_embeds[0]
338
+
339
+ if do_classifier_free_guidance:
340
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
341
+ seq_len = negative_prompt_embeds.shape[1]
342
+
343
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
344
+
345
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
346
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
347
+
348
+ # For classifier free guidance, we need to do two forward passes.
349
+ # Here we concatenate the unconditional and text embeddings into a single batch
350
+ # to avoid doing two forward passes
351
+ else:
352
+ negative_prompt_embeds = None
353
+
354
+ return prompt_embeds, negative_prompt_embeds
355
+
356
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
357
+ def run_safety_checker(self, image, device, dtype):
358
+ if self.safety_checker is not None:
359
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
360
+ image, nsfw_detected, watermark_detected = self.safety_checker(
361
+ images=image,
362
+ clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
363
+ )
364
+ else:
365
+ nsfw_detected = None
366
+ watermark_detected = None
367
+
368
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
369
+ self.unet_offload_hook.offload()
370
+
371
+ return image, nsfw_detected, watermark_detected
372
+
373
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
374
+ def prepare_extra_step_kwargs(self, generator, eta):
375
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
376
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
377
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
378
+ # and should be between [0, 1]
379
+
380
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
381
+ extra_step_kwargs = {}
382
+ if accepts_eta:
383
+ extra_step_kwargs["eta"] = eta
384
+
385
+ # check if the scheduler accepts generator
386
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
387
+ if accepts_generator:
388
+ extra_step_kwargs["generator"] = generator
389
+ return extra_step_kwargs
390
+
391
+ def check_inputs(
392
+ self,
393
+ prompt,
394
+ image,
395
+ mask_image,
396
+ batch_size,
397
+ callback_steps,
398
+ negative_prompt=None,
399
+ prompt_embeds=None,
400
+ negative_prompt_embeds=None,
401
+ ):
402
+ if (callback_steps is None) or (
403
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
404
+ ):
405
+ raise ValueError(
406
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
407
+ f" {type(callback_steps)}."
408
+ )
409
+
410
+ if prompt is not None and prompt_embeds is not None:
411
+ raise ValueError(
412
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
413
+ " only forward one of the two."
414
+ )
415
+ elif prompt is None and prompt_embeds is None:
416
+ raise ValueError(
417
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
418
+ )
419
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
420
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
421
+
422
+ if negative_prompt is not None and negative_prompt_embeds is not None:
423
+ raise ValueError(
424
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
425
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
426
+ )
427
+
428
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
429
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
430
+ raise ValueError(
431
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
432
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
433
+ f" {negative_prompt_embeds.shape}."
434
+ )
435
+
436
+ # image
437
+
438
+ if isinstance(image, list):
439
+ check_image_type = image[0]
440
+ else:
441
+ check_image_type = image
442
+
443
+ if (
444
+ not isinstance(check_image_type, torch.Tensor)
445
+ and not isinstance(check_image_type, PIL.Image.Image)
446
+ and not isinstance(check_image_type, np.ndarray)
447
+ ):
448
+ raise ValueError(
449
+ "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
450
+ f" {type(check_image_type)}"
451
+ )
452
+
453
+ if isinstance(image, list):
454
+ image_batch_size = len(image)
455
+ elif isinstance(image, torch.Tensor):
456
+ image_batch_size = image.shape[0]
457
+ elif isinstance(image, PIL.Image.Image):
458
+ image_batch_size = 1
459
+ elif isinstance(image, np.ndarray):
460
+ image_batch_size = image.shape[0]
461
+ else:
462
+ assert False
463
+
464
+ if batch_size != image_batch_size:
465
+ raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
466
+
467
+ # mask_image
468
+
469
+ if isinstance(mask_image, list):
470
+ check_image_type = mask_image[0]
471
+ else:
472
+ check_image_type = mask_image
473
+
474
+ if (
475
+ not isinstance(check_image_type, torch.Tensor)
476
+ and not isinstance(check_image_type, PIL.Image.Image)
477
+ and not isinstance(check_image_type, np.ndarray)
478
+ ):
479
+ raise ValueError(
480
+ "`mask_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
481
+ f" {type(check_image_type)}"
482
+ )
483
+
484
+ if isinstance(mask_image, list):
485
+ image_batch_size = len(mask_image)
486
+ elif isinstance(mask_image, torch.Tensor):
487
+ image_batch_size = mask_image.shape[0]
488
+ elif isinstance(mask_image, PIL.Image.Image):
489
+ image_batch_size = 1
490
+ elif isinstance(mask_image, np.ndarray):
491
+ image_batch_size = mask_image.shape[0]
492
+ else:
493
+ assert False
494
+
495
+ if image_batch_size != 1 and batch_size != image_batch_size:
496
+ raise ValueError(
497
+ f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}"
498
+ )
499
+
500
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
501
+ def _text_preprocessing(self, text, clean_caption=False):
502
+ if clean_caption and not is_bs4_available():
503
+ logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
504
+ logger.warning("Setting `clean_caption` to False...")
505
+ clean_caption = False
506
+
507
+ if clean_caption and not is_ftfy_available():
508
+ logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
509
+ logger.warning("Setting `clean_caption` to False...")
510
+ clean_caption = False
511
+
512
+ if not isinstance(text, (tuple, list)):
513
+ text = [text]
514
+
515
+ def process(text: str):
516
+ if clean_caption:
517
+ text = self._clean_caption(text)
518
+ text = self._clean_caption(text)
519
+ else:
520
+ text = text.lower().strip()
521
+ return text
522
+
523
+ return [process(t) for t in text]
524
+
525
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
526
+ def _clean_caption(self, caption):
527
+ caption = str(caption)
528
+ caption = ul.unquote_plus(caption)
529
+ caption = caption.strip().lower()
530
+ caption = re.sub("<person>", "person", caption)
531
+ # urls:
532
+ caption = re.sub(
533
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
534
+ "",
535
+ caption,
536
+ ) # regex for urls
537
+ caption = re.sub(
538
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
539
+ "",
540
+ caption,
541
+ ) # regex for urls
542
+ # html:
543
+ caption = BeautifulSoup(caption, features="html.parser").text
544
+
545
+ # @<nickname>
546
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
547
+
548
+ # 31C0—31EF CJK Strokes
549
+ # 31F0—31FF Katakana Phonetic Extensions
550
+ # 3200—32FF Enclosed CJK Letters and Months
551
+ # 3300—33FF CJK Compatibility
552
+ # 3400—4DBF CJK Unified Ideographs Extension A
553
+ # 4DC0—4DFF Yijing Hexagram Symbols
554
+ # 4E00—9FFF CJK Unified Ideographs
555
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
556
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
557
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
558
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
559
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
560
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
561
+ caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
562
+ #######################################################
563
+
564
+ # все виды тире / all types of dash --> "-"
565
+ caption = re.sub(
566
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
567
+ "-",
568
+ caption,
569
+ )
570
+
571
+ # кавычки к одному стандарту
572
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
573
+ caption = re.sub(r"[‘’]", "'", caption)
574
+
575
+ # &quot;
576
+ caption = re.sub(r"&quot;?", "", caption)
577
+ # &amp
578
+ caption = re.sub(r"&amp", "", caption)
579
+
580
+ # ip adresses:
581
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
582
+
583
+ # article ids:
584
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
585
+
586
+ # \n
587
+ caption = re.sub(r"\\n", " ", caption)
588
+
589
+ # "#123"
590
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
591
+ # "#12345.."
592
+ caption = re.sub(r"#\d{5,}\b", "", caption)
593
+ # "123456.."
594
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
595
+ # filenames:
596
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
597
+
598
+ #
599
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
600
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
601
+
602
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
603
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
604
+
605
+ # this-is-my-cute-cat / this_is_my_cute_cat
606
+ regex2 = re.compile(r"(?:\-|\_)")
607
+ if len(re.findall(regex2, caption)) > 3:
608
+ caption = re.sub(regex2, " ", caption)
609
+
610
+ caption = ftfy.fix_text(caption)
611
+ caption = html.unescape(html.unescape(caption))
612
+
613
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
614
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
615
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
616
+
617
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
618
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
619
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
620
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
621
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
622
+
623
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
624
+
625
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
626
+
627
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
628
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
629
+ caption = re.sub(r"\s+", " ", caption)
630
+
631
+ caption.strip()
632
+
633
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
634
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
635
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
636
+ caption = re.sub(r"^\.\S+$", "", caption)
637
+
638
+ return caption.strip()
639
+
640
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image
641
+ def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor:
642
+ if not isinstance(image, list):
643
+ image = [image]
644
+
645
+ def numpy_to_pt(images):
646
+ if images.ndim == 3:
647
+ images = images[..., None]
648
+
649
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
650
+ return images
651
+
652
+ if isinstance(image[0], PIL.Image.Image):
653
+ new_image = []
654
+
655
+ for image_ in image:
656
+ image_ = image_.convert("RGB")
657
+ image_ = resize(image_, self.unet.sample_size)
658
+ image_ = np.array(image_)
659
+ image_ = image_.astype(np.float32)
660
+ image_ = image_ / 127.5 - 1
661
+ new_image.append(image_)
662
+
663
+ image = new_image
664
+
665
+ image = np.stack(image, axis=0) # to np
666
+ image = numpy_to_pt(image) # to pt
667
+
668
+ elif isinstance(image[0], np.ndarray):
669
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
670
+ image = numpy_to_pt(image)
671
+
672
+ elif isinstance(image[0], torch.Tensor):
673
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
674
+
675
+ return image
676
+
677
+ def preprocess_mask_image(self, mask_image) -> torch.Tensor:
678
+ if not isinstance(mask_image, list):
679
+ mask_image = [mask_image]
680
+
681
+ if isinstance(mask_image[0], torch.Tensor):
682
+ mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0)
683
+
684
+ if mask_image.ndim == 2:
685
+ # Batch and add channel dim for single mask
686
+ mask_image = mask_image.unsqueeze(0).unsqueeze(0)
687
+ elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
688
+ # Single mask, the 0'th dimension is considered to be
689
+ # the existing batch size of 1
690
+ mask_image = mask_image.unsqueeze(0)
691
+ elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
692
+ # Batch of mask, the 0'th dimension is considered to be
693
+ # the batching dimension
694
+ mask_image = mask_image.unsqueeze(1)
695
+
696
+ mask_image[mask_image < 0.5] = 0
697
+ mask_image[mask_image >= 0.5] = 1
698
+
699
+ elif isinstance(mask_image[0], PIL.Image.Image):
700
+ new_mask_image = []
701
+
702
+ for mask_image_ in mask_image:
703
+ mask_image_ = mask_image_.convert("L")
704
+ mask_image_ = resize(mask_image_, self.unet.sample_size)
705
+ mask_image_ = np.array(mask_image_)
706
+ mask_image_ = mask_image_[None, None, :]
707
+ new_mask_image.append(mask_image_)
708
+
709
+ mask_image = new_mask_image
710
+
711
+ mask_image = np.concatenate(mask_image, axis=0)
712
+ mask_image = mask_image.astype(np.float32) / 255.0
713
+ mask_image[mask_image < 0.5] = 0
714
+ mask_image[mask_image >= 0.5] = 1
715
+ mask_image = torch.from_numpy(mask_image)
716
+
717
+ elif isinstance(mask_image[0], np.ndarray):
718
+ mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)
719
+
720
+ mask_image[mask_image < 0.5] = 0
721
+ mask_image[mask_image >= 0.5] = 1
722
+ mask_image = torch.from_numpy(mask_image)
723
+
724
+ return mask_image
725
+
726
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps
727
+ def get_timesteps(self, num_inference_steps, strength):
728
+ # get the original timestep using init_timestep
729
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
730
+
731
+ t_start = max(num_inference_steps - init_timestep, 0)
732
+ timesteps = self.scheduler.timesteps[t_start:]
733
+
734
+ return timesteps, num_inference_steps - t_start
735
+
736
+ def prepare_intermediate_images(
737
+ self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None
738
+ ):
739
+ image_batch_size, channels, height, width = image.shape
740
+
741
+ batch_size = batch_size * num_images_per_prompt
742
+
743
+ shape = (batch_size, channels, height, width)
744
+
745
+ if isinstance(generator, list) and len(generator) != batch_size:
746
+ raise ValueError(
747
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
748
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
749
+ )
750
+
751
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
752
+
753
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
754
+ noised_image = self.scheduler.add_noise(image, noise, timestep)
755
+
756
+ image = (1 - mask_image) * image + mask_image * noised_image
757
+
758
+ return image
759
+
760
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: Union[
            PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
        ] = None,
        mask_image: Union[
            PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
        ] = None,
        strength: float = 1.0,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        guidance_scale: float = 7.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        clean_caption: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            mask_image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
                to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
                instead of 3, so the expected shape would be `(B, H, W, 1)`.
            strength (`float`, *optional*, defaults to 1.0):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
                timesteps are used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 7.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            clean_caption (`bool`, *optional*, defaults to `True`):
                Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
                be installed. If the dependencies are not installed, the embeddings will be created from the raw
                prompt.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
            returning a tuple, the first element is a list with the generated images, and the second element is a list
            of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
            or watermarked content, according to the `safety_checker`.
        """
        # 1. Check inputs. Raise error if not correct
        # Batch size is derived from the prompt when given, else from the
        # pre-computed embeddings.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        self.check_inputs(
            prompt,
            image,
            mask_image,
            batch_size,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters
        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            clean_caption=clean_caption,
        )

        if do_classifier_free_guidance:
            # Unconditional embeddings first, so `chunk(2)` below yields
            # (uncond, text) in that order.
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        dtype = prompt_embeds.dtype

        # 4. Prepare timesteps
        if timesteps is not None:
            self.scheduler.set_timesteps(timesteps=timesteps, device=device)
            timesteps = self.scheduler.timesteps
            num_inference_steps = len(timesteps)
        else:
            self.scheduler.set_timesteps(num_inference_steps, device=device)
            timesteps = self.scheduler.timesteps

        # Truncate the schedule according to `strength` (img2img-style start).
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)

        # 5. Prepare intermediate images
        image = self.preprocess_image(image)
        image = image.to(device=device, dtype=dtype)

        mask_image = self.preprocess_mask_image(mask_image)
        mask_image = mask_image.to(device=device, dtype=dtype)

        # Broadcast a single mask to the whole batch, or repeat a per-prompt
        # mask for each image generated per prompt.
        if mask_image.shape[0] == 1:
            mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0)
        else:
            mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0)

        # Noise the input image to the first (largest) remaining timestep.
        noise_timestep = timesteps[0:1]
        noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt)

        intermediate_images = self.prepare_intermediate_images(
            image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # HACK: see comment in `enable_model_cpu_offload`
        if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
            self.text_encoder_offload_hook.offload()

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                model_input = (
                    torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images
                )
                model_input = self.scheduler.scale_model_input(model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    # The UNet output carries extra learned-variance channels;
                    # guidance is applied only to the noise channels, then the
                    # (text-conditioned) variance channels are re-attached.
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1)
                    noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                    noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)

                if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
                    # Scheduler does not use the variance channels; drop them.
                    noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1)

                # compute the previous noisy sample x_t -> x_t-1
                prev_intermediate_images = intermediate_images

                intermediate_images = self.scheduler.step(
                    noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
                )[0]

                # Inpainting: restore the unmasked region from the previous
                # sample so only masked pixels evolve with the denoiser.
                intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, intermediate_images)

        image = intermediate_images

        if output_type == "pil":
            # 8. Post-processing: map from [-1, 1] to [0, 1] and move to numpy HWC.
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            # 9. Run safety checker
            image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)

            # 10. Convert to PIL
            image = self.numpy_to_pil(image)

            # 11. Apply watermark
            if self.watermarker is not None:
                self.watermarker.apply_watermark(image, self.unet.config.sample_size)
        elif output_type == "pt":
            # Raw tensor output: skip post-processing and safety checking.
            nsfw_detected = None
            watermark_detected = None

            if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
                self.unet_offload_hook.offload()
        else:
            # 8. Post-processing (numpy output)
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            # 9. Run safety checker
            image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, nsfw_detected, watermark_detected)

        return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py ADDED
@@ -0,0 +1,1137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
12
+
13
+ from ...loaders import LoraLoaderMixin
14
+ from ...models import UNet2DConditionModel
15
+ from ...schedulers import DDPMScheduler
16
+ from ...utils import (
17
+ BACKENDS_MAPPING,
18
+ PIL_INTERPOLATION,
19
+ is_accelerate_available,
20
+ is_bs4_available,
21
+ is_ftfy_available,
22
+ logging,
23
+ replace_example_docstring,
24
+ )
25
+ from ...utils.torch_utils import randn_tensor
26
+ from ..pipeline_utils import DiffusionPipeline
27
+ from .pipeline_output import IFPipelineOutput
28
+ from .safety_checker import IFSafetyChecker
29
+ from .watermark import IFWatermarker
30
+
31
+
32
+ if is_bs4_available():
33
+ from bs4 import BeautifulSoup
34
+
35
+ if is_ftfy_available():
36
+ import ftfy
37
+
38
+
39
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
+
41
+
42
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize
def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image:
    """Resize a PIL image so its shorter side becomes `img_size`, preserving the
    aspect ratio and rounding the longer side to a multiple of 8.

    NOTE(review): despite the plural name, this operates on a single PIL image.
    """
    w, h = images.size

    # Aspect ratio: >= 1 means landscape (or square), < 1 means portrait.
    coef = w / h

    w, h = img_size, img_size

    if coef >= 1:
        # Landscape: height is img_size; width scaled by the ratio, snapped to /8.
        w = int(round(img_size / 8 * coef) * 8)
    else:
        # Portrait: width is img_size; height scaled by the ratio, snapped to /8.
        h = int(round(img_size / 8 / coef) * 8)

    images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None)

    return images
58
+
59
+
60
+ EXAMPLE_DOC_STRING = """
61
+ Examples:
62
+ ```py
63
+ >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
64
+ >>> from diffusers.utils import pt_to_pil
65
+ >>> import torch
66
+ >>> from PIL import Image
67
+ >>> import requests
68
+ >>> from io import BytesIO
69
+
70
+ >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"
71
+ >>> response = requests.get(url)
72
+ >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")
73
+ >>> original_image = original_image
74
+
75
+ >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"
76
+ >>> response = requests.get(url)
77
+ >>> mask_image = Image.open(BytesIO(response.content))
78
+ >>> mask_image = mask_image
79
+
80
+ >>> pipe = IFInpaintingPipeline.from_pretrained(
81
+ ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
82
+ ... )
83
+ >>> pipe.enable_model_cpu_offload()
84
+
85
+ >>> prompt = "blue sunglasses"
86
+
87
+ >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
88
+ >>> image = pipe(
89
+ ... image=original_image,
90
+ ... mask_image=mask_image,
91
+ ... prompt_embeds=prompt_embeds,
92
+ ... negative_prompt_embeds=negative_embeds,
93
+ ... output_type="pt",
94
+ ... ).images
95
+
96
+ >>> # save intermediate image
97
+ >>> pil_image = pt_to_pil(image)
98
+ >>> pil_image[0].save("./if_stage_I.png")
99
+
100
+ >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
101
+ ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
102
+ ... )
103
+ >>> super_res_1_pipe.enable_model_cpu_offload()
104
+
105
+ >>> image = super_res_1_pipe(
106
+ ... image=image,
107
+ ... mask_image=mask_image,
108
+ ... original_image=original_image,
109
+ ... prompt_embeds=prompt_embeds,
110
+ ... negative_prompt_embeds=negative_embeds,
111
+ ... ).images
112
+ >>> image[0].save("./if_stage_II.png")
113
+ ```
114
+ """
115
+
116
+
117
+ class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
118
+ tokenizer: T5Tokenizer
119
+ text_encoder: T5EncoderModel
120
+
121
+ unet: UNet2DConditionModel
122
+ scheduler: DDPMScheduler
123
+ image_noising_scheduler: DDPMScheduler
124
+
125
+ feature_extractor: Optional[CLIPImageProcessor]
126
+ safety_checker: Optional[IFSafetyChecker]
127
+
128
+ watermarker: Optional[IFWatermarker]
129
+
130
+ bad_punct_regex = re.compile(
131
+ r"["
132
+ + "#®•©™&@·º½¾¿¡§~"
133
+ + r"\)"
134
+ + r"\("
135
+ + r"\]"
136
+ + r"\["
137
+ + r"\}"
138
+ + r"\{"
139
+ + r"\|"
140
+ + "\\"
141
+ + r"\/"
142
+ + r"\*"
143
+ + r"]{1,}"
144
+ ) # noqa
145
+
146
+ model_cpu_offload_seq = "text_encoder->unet"
147
+ _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
148
+
149
+ def __init__(
150
+ self,
151
+ tokenizer: T5Tokenizer,
152
+ text_encoder: T5EncoderModel,
153
+ unet: UNet2DConditionModel,
154
+ scheduler: DDPMScheduler,
155
+ image_noising_scheduler: DDPMScheduler,
156
+ safety_checker: Optional[IFSafetyChecker],
157
+ feature_extractor: Optional[CLIPImageProcessor],
158
+ watermarker: Optional[IFWatermarker],
159
+ requires_safety_checker: bool = True,
160
+ ):
161
+ super().__init__()
162
+
163
+ if safety_checker is None and requires_safety_checker:
164
+ logger.warning(
165
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
166
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
167
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
168
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
169
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
170
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
171
+ )
172
+
173
+ if safety_checker is not None and feature_extractor is None:
174
+ raise ValueError(
175
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
176
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
177
+ )
178
+
179
+ if unet.config.in_channels != 6:
180
+ logger.warning(
181
+ "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
182
+ )
183
+
184
+ self.register_modules(
185
+ tokenizer=tokenizer,
186
+ text_encoder=text_encoder,
187
+ unet=unet,
188
+ scheduler=scheduler,
189
+ image_noising_scheduler=image_noising_scheduler,
190
+ safety_checker=safety_checker,
191
+ feature_extractor=feature_extractor,
192
+ watermarker=watermarker,
193
+ )
194
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
195
+
196
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
    def remove_all_hooks(self):
        """Detach all accelerate offload hooks from the pipeline's sub-models.

        Also clears the cached hook attributes so a later call to an offload
        helper can install fresh hooks.

        Raises:
            ImportError: if `accelerate` is not installed.
        """
        if is_accelerate_available():
            from accelerate.hooks import remove_hook_from_module
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        # Optional components (e.g. safety_checker) may be None; skip those.
        for model in [self.text_encoder, self.unet, self.safety_checker]:
            if model is not None:
                remove_hook_from_module(model, recurse=True)

        self.unet_offload_hook = None
        self.text_encoder_offload_hook = None
        self.final_offload_hook = None
210
+
211
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
    def _text_preprocessing(self, text, clean_caption=False):
        """Normalize a prompt (or list of prompts) before tokenization.

        Falls back to simple lower-casing/stripping when the optional `bs4` or
        `ftfy` dependencies required for full caption cleaning are missing.
        Always returns a list of processed strings.
        """
        if clean_caption and not is_bs4_available():
            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if clean_caption and not is_ftfy_available():
            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        # A single string is wrapped so the result is uniformly a list.
        if not isinstance(text, (tuple, list)):
            text = [text]

        def process(text: str):
            if clean_caption:
                text = self._clean_caption(text)
                # NOTE(review): cleaning is applied twice in the upstream copy,
                # presumably so patterns exposed by the first pass are caught —
                # confirm before changing.
                text = self._clean_caption(text)
            else:
                text = text.lower().strip()
            return text

        return [process(t) for t in text]
235
+
236
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
237
+ def _clean_caption(self, caption):
238
+ caption = str(caption)
239
+ caption = ul.unquote_plus(caption)
240
+ caption = caption.strip().lower()
241
+ caption = re.sub("<person>", "person", caption)
242
+ # urls:
243
+ caption = re.sub(
244
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
245
+ "",
246
+ caption,
247
+ ) # regex for urls
248
+ caption = re.sub(
249
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
250
+ "",
251
+ caption,
252
+ ) # regex for urls
253
+ # html:
254
+ caption = BeautifulSoup(caption, features="html.parser").text
255
+
256
+ # @<nickname>
257
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
258
+
259
+ # 31C0—31EF CJK Strokes
260
+ # 31F0—31FF Katakana Phonetic Extensions
261
+ # 3200—32FF Enclosed CJK Letters and Months
262
+ # 3300—33FF CJK Compatibility
263
+ # 3400—4DBF CJK Unified Ideographs Extension A
264
+ # 4DC0—4DFF Yijing Hexagram Symbols
265
+ # 4E00—9FFF CJK Unified Ideographs
266
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
267
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
268
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
269
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
270
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
271
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
272
+ caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
273
+ #######################################################
274
+
275
+ # все виды тире / all types of dash --> "-"
276
+ caption = re.sub(
277
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
278
+ "-",
279
+ caption,
280
+ )
281
+
282
+ # кавычки к одному стандарту
283
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
284
+ caption = re.sub(r"[‘’]", "'", caption)
285
+
286
+ # &quot;
287
+ caption = re.sub(r"&quot;?", "", caption)
288
+ # &amp
289
+ caption = re.sub(r"&amp", "", caption)
290
+
291
+ # ip adresses:
292
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
293
+
294
+ # article ids:
295
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
296
+
297
+ # \n
298
+ caption = re.sub(r"\\n", " ", caption)
299
+
300
+ # "#123"
301
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
302
+ # "#12345.."
303
+ caption = re.sub(r"#\d{5,}\b", "", caption)
304
+ # "123456.."
305
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
306
+ # filenames:
307
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
308
+
309
+ #
310
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
311
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
312
+
313
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
314
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
315
+
316
+ # this-is-my-cute-cat / this_is_my_cute_cat
317
+ regex2 = re.compile(r"(?:\-|\_)")
318
+ if len(re.findall(regex2, caption)) > 3:
319
+ caption = re.sub(regex2, " ", caption)
320
+
321
+ caption = ftfy.fix_text(caption)
322
+ caption = html.unescape(html.unescape(caption))
323
+
324
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
325
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
326
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
327
+
328
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
329
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
330
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
331
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
332
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
333
+
334
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
335
+
336
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
337
+
338
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
339
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
340
+ caption = re.sub(r"\s+", " ", caption)
341
+
342
+ caption.strip()
343
+
344
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
345
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
346
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
347
+ caption = re.sub(r"^\.\S+$", "", caption)
348
+
349
+ return caption.strip()
350
+
351
+ @torch.no_grad()
352
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
353
+ def encode_prompt(
354
+ self,
355
+ prompt: Union[str, List[str]],
356
+ do_classifier_free_guidance: bool = True,
357
+ num_images_per_prompt: int = 1,
358
+ device: Optional[torch.device] = None,
359
+ negative_prompt: Optional[Union[str, List[str]]] = None,
360
+ prompt_embeds: Optional[torch.FloatTensor] = None,
361
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
362
+ clean_caption: bool = False,
363
+ ):
364
+ r"""
365
+ Encodes the prompt into text encoder hidden states.
366
+
367
+ Args:
368
+ prompt (`str` or `List[str]`, *optional*):
369
+ prompt to be encoded
370
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
371
+ whether to use classifier free guidance or not
372
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
373
+ number of images that should be generated per prompt
374
+ device: (`torch.device`, *optional*):
375
+ torch device to place the resulting embeddings on
376
+ negative_prompt (`str` or `List[str]`, *optional*):
377
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
378
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
379
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
380
+ prompt_embeds (`torch.FloatTensor`, *optional*):
381
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
382
+ provided, text embeddings will be generated from `prompt` input argument.
383
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
384
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
385
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
386
+ argument.
387
+ clean_caption (bool, defaults to `False`):
388
+ If `True`, the function will preprocess and clean the provided caption before encoding.
389
+ """
390
+ if prompt is not None and negative_prompt is not None:
391
+ if type(prompt) is not type(negative_prompt):
392
+ raise TypeError(
393
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
394
+ f" {type(prompt)}."
395
+ )
396
+
397
+ if device is None:
398
+ device = self._execution_device
399
+
400
+ if prompt is not None and isinstance(prompt, str):
401
+ batch_size = 1
402
+ elif prompt is not None and isinstance(prompt, list):
403
+ batch_size = len(prompt)
404
+ else:
405
+ batch_size = prompt_embeds.shape[0]
406
+
407
+ # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
408
+ max_length = 77
409
+
410
+ if prompt_embeds is None:
411
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
412
+ text_inputs = self.tokenizer(
413
+ prompt,
414
+ padding="max_length",
415
+ max_length=max_length,
416
+ truncation=True,
417
+ add_special_tokens=True,
418
+ return_tensors="pt",
419
+ )
420
+ text_input_ids = text_inputs.input_ids
421
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
422
+
423
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
424
+ text_input_ids, untruncated_ids
425
+ ):
426
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
427
+ logger.warning(
428
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
429
+ f" {max_length} tokens: {removed_text}"
430
+ )
431
+
432
+ attention_mask = text_inputs.attention_mask.to(device)
433
+
434
+ prompt_embeds = self.text_encoder(
435
+ text_input_ids.to(device),
436
+ attention_mask=attention_mask,
437
+ )
438
+ prompt_embeds = prompt_embeds[0]
439
+
440
+ if self.text_encoder is not None:
441
+ dtype = self.text_encoder.dtype
442
+ elif self.unet is not None:
443
+ dtype = self.unet.dtype
444
+ else:
445
+ dtype = None
446
+
447
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
448
+
449
+ bs_embed, seq_len, _ = prompt_embeds.shape
450
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
451
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
452
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
453
+
454
+ # get unconditional embeddings for classifier free guidance
455
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
456
+ uncond_tokens: List[str]
457
+ if negative_prompt is None:
458
+ uncond_tokens = [""] * batch_size
459
+ elif isinstance(negative_prompt, str):
460
+ uncond_tokens = [negative_prompt]
461
+ elif batch_size != len(negative_prompt):
462
+ raise ValueError(
463
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
464
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
465
+ " the batch size of `prompt`."
466
+ )
467
+ else:
468
+ uncond_tokens = negative_prompt
469
+
470
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
471
+ max_length = prompt_embeds.shape[1]
472
+ uncond_input = self.tokenizer(
473
+ uncond_tokens,
474
+ padding="max_length",
475
+ max_length=max_length,
476
+ truncation=True,
477
+ return_attention_mask=True,
478
+ add_special_tokens=True,
479
+ return_tensors="pt",
480
+ )
481
+ attention_mask = uncond_input.attention_mask.to(device)
482
+
483
+ negative_prompt_embeds = self.text_encoder(
484
+ uncond_input.input_ids.to(device),
485
+ attention_mask=attention_mask,
486
+ )
487
+ negative_prompt_embeds = negative_prompt_embeds[0]
488
+
489
+ if do_classifier_free_guidance:
490
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
491
+ seq_len = negative_prompt_embeds.shape[1]
492
+
493
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
494
+
495
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
496
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
497
+
498
+ # For classifier free guidance, we need to do two forward passes.
499
+ # Here we concatenate the unconditional and text embeddings into a single batch
500
+ # to avoid doing two forward passes
501
+ else:
502
+ negative_prompt_embeds = None
503
+
504
+ return prompt_embeds, negative_prompt_embeds
505
+
506
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        """Screen the generated images for NSFW / watermarked content.

        Args:
            image: Batch of generated images as a numpy array; the callers in
                this file pass values already clamped to [0, 1].
            device: Device the feature-extractor inputs are moved to.
            dtype: dtype the safety-checker pixel values are cast to.

        Returns:
            Tuple `(image, nsfw_detected, watermark_detected)`. The two flags
            are `None` when no safety checker is configured.
        """
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, nsfw_detected, watermark_detected = self.safety_checker(
                images=image,
                clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
            )
        else:
            # No checker configured: pass the images through unflagged.
            nsfw_detected = None
            watermark_detected = None

        # Safety checking is the last step that needs the UNet resident, so
        # release it here when model CPU offload installed a hook.
        if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
            self.unet_offload_hook.offload()

        return image, nsfw_detected, watermark_detected
+
523
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
524
+ def prepare_extra_step_kwargs(self, generator, eta):
525
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
526
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
527
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
528
+ # and should be between [0, 1]
529
+
530
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
531
+ extra_step_kwargs = {}
532
+ if accepts_eta:
533
+ extra_step_kwargs["eta"] = eta
534
+
535
+ # check if the scheduler accepts generator
536
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
537
+ if accepts_generator:
538
+ extra_step_kwargs["generator"] = generator
539
+ return extra_step_kwargs
540
+
541
+ def check_inputs(
542
+ self,
543
+ prompt,
544
+ image,
545
+ original_image,
546
+ mask_image,
547
+ batch_size,
548
+ callback_steps,
549
+ negative_prompt=None,
550
+ prompt_embeds=None,
551
+ negative_prompt_embeds=None,
552
+ ):
553
+ if (callback_steps is None) or (
554
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
555
+ ):
556
+ raise ValueError(
557
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
558
+ f" {type(callback_steps)}."
559
+ )
560
+
561
+ if prompt is not None and prompt_embeds is not None:
562
+ raise ValueError(
563
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
564
+ " only forward one of the two."
565
+ )
566
+ elif prompt is None and prompt_embeds is None:
567
+ raise ValueError(
568
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
569
+ )
570
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
571
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
572
+
573
+ if negative_prompt is not None and negative_prompt_embeds is not None:
574
+ raise ValueError(
575
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
576
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
577
+ )
578
+
579
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
580
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
581
+ raise ValueError(
582
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
583
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
584
+ f" {negative_prompt_embeds.shape}."
585
+ )
586
+
587
+ # image
588
+
589
+ if isinstance(image, list):
590
+ check_image_type = image[0]
591
+ else:
592
+ check_image_type = image
593
+
594
+ if (
595
+ not isinstance(check_image_type, torch.Tensor)
596
+ and not isinstance(check_image_type, PIL.Image.Image)
597
+ and not isinstance(check_image_type, np.ndarray)
598
+ ):
599
+ raise ValueError(
600
+ "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
601
+ f" {type(check_image_type)}"
602
+ )
603
+
604
+ if isinstance(image, list):
605
+ image_batch_size = len(image)
606
+ elif isinstance(image, torch.Tensor):
607
+ image_batch_size = image.shape[0]
608
+ elif isinstance(image, PIL.Image.Image):
609
+ image_batch_size = 1
610
+ elif isinstance(image, np.ndarray):
611
+ image_batch_size = image.shape[0]
612
+ else:
613
+ assert False
614
+
615
+ if batch_size != image_batch_size:
616
+ raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
617
+
618
+ # original_image
619
+
620
+ if isinstance(original_image, list):
621
+ check_image_type = original_image[0]
622
+ else:
623
+ check_image_type = original_image
624
+
625
+ if (
626
+ not isinstance(check_image_type, torch.Tensor)
627
+ and not isinstance(check_image_type, PIL.Image.Image)
628
+ and not isinstance(check_image_type, np.ndarray)
629
+ ):
630
+ raise ValueError(
631
+ "`original_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
632
+ f" {type(check_image_type)}"
633
+ )
634
+
635
+ if isinstance(original_image, list):
636
+ image_batch_size = len(original_image)
637
+ elif isinstance(original_image, torch.Tensor):
638
+ image_batch_size = original_image.shape[0]
639
+ elif isinstance(original_image, PIL.Image.Image):
640
+ image_batch_size = 1
641
+ elif isinstance(original_image, np.ndarray):
642
+ image_batch_size = original_image.shape[0]
643
+ else:
644
+ assert False
645
+
646
+ if batch_size != image_batch_size:
647
+ raise ValueError(
648
+ f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}"
649
+ )
650
+
651
+ # mask_image
652
+
653
+ if isinstance(mask_image, list):
654
+ check_image_type = mask_image[0]
655
+ else:
656
+ check_image_type = mask_image
657
+
658
+ if (
659
+ not isinstance(check_image_type, torch.Tensor)
660
+ and not isinstance(check_image_type, PIL.Image.Image)
661
+ and not isinstance(check_image_type, np.ndarray)
662
+ ):
663
+ raise ValueError(
664
+ "`mask_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
665
+ f" {type(check_image_type)}"
666
+ )
667
+
668
+ if isinstance(mask_image, list):
669
+ image_batch_size = len(mask_image)
670
+ elif isinstance(mask_image, torch.Tensor):
671
+ image_batch_size = mask_image.shape[0]
672
+ elif isinstance(mask_image, PIL.Image.Image):
673
+ image_batch_size = 1
674
+ elif isinstance(mask_image, np.ndarray):
675
+ image_batch_size = mask_image.shape[0]
676
+ else:
677
+ assert False
678
+
679
+ if image_batch_size != 1 and batch_size != image_batch_size:
680
+ raise ValueError(
681
+ f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}"
682
+ )
683
+
684
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image
    def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor:
        """Normalize `image` into a single (B, C, H, W) torch tensor.

        Accepts a single item or list of PIL images, numpy arrays or tensors.
        PIL inputs are converted to RGB, resized to `self.unet.sample_size`
        and rescaled from [0, 255] to [-1, 1]; numpy/tensor inputs are only
        batched/stacked — presumably they are already in [-1, 1], TODO confirm
        against callers.
        """
        if not isinstance(image, list):
            image = [image]

        def numpy_to_pt(images):
            # (B, H, W[, C]) numpy -> (B, C, H, W) torch tensor
            if images.ndim == 3:
                images = images[..., None]

            images = torch.from_numpy(images.transpose(0, 3, 1, 2))
            return images

        if isinstance(image[0], PIL.Image.Image):
            new_image = []

            for image_ in image:
                image_ = image_.convert("RGB")
                image_ = resize(image_, self.unet.sample_size)
                image_ = np.array(image_)
                image_ = image_.astype(np.float32)
                # map [0, 255] -> [-1, 1]
                image_ = image_ / 127.5 - 1
                new_image.append(image_)

            image = new_image

            image = np.stack(image, axis=0)  # to np
            image = numpy_to_pt(image)  # to pt

        elif isinstance(image[0], np.ndarray):
            # 4-D items already carry a batch dim -> concatenate; else stack
            image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
            image = numpy_to_pt(image)

        elif isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        return image
+
721
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image
    def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor:
        """Prepare the low-resolution conditioning `image` for the UNet.

        Returns a (B, C, H, W) tensor on `device` with dtype `self.unet.dtype`,
        repeated `num_images_per_prompt` times along the batch dimension.

        PIL inputs are rescaled from [0, 255] to [-1, 1]. Numpy inputs are not
        rescaled — presumably already in [-1, 1], TODO confirm. A raw tensor
        (not wrapped in a list) skips all branches and only gets the
        device/dtype move and the repeat.
        """
        if not isinstance(image, torch.Tensor) and not isinstance(image, list):
            image = [image]

        if isinstance(image[0], PIL.Image.Image):
            image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image]

            image = np.stack(image, axis=0)  # to np
            image = torch.from_numpy(image.transpose(0, 3, 1, 2))
        elif isinstance(image[0], np.ndarray):
            image = np.stack(image, axis=0)  # to np
            # drop an accidental extra leading batch dimension
            if image.ndim == 5:
                image = image[0]

            image = torch.from_numpy(image.transpose(0, 3, 1, 2))
        elif isinstance(image, list) and isinstance(image[0], torch.Tensor):
            dims = image[0].ndim

            if dims == 3:
                image = torch.stack(image, dim=0)
            elif dims == 4:
                image = torch.concat(image, dim=0)
            else:
                raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}")

        image = image.to(device=device, dtype=self.unet.dtype)

        image = image.repeat_interleave(num_images_per_prompt, dim=0)

        return image
+
753
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.preprocess_mask_image
    def preprocess_mask_image(self, mask_image) -> torch.Tensor:
        """Normalize `mask_image` into a binarized (B, 1, H, W) torch tensor.

        Accepts a single item or list of tensors, PIL images or numpy arrays.
        All paths threshold at 0.5 so the result contains only 0s and 1s
        (1 = region to repaint, 0 = region to preserve, per the `__call__`
        docstring). Tensor/numpy inputs are assumed to already be in [0, 1] —
        TODO confirm; PIL inputs are converted from [0, 255].
        """
        if not isinstance(mask_image, list):
            mask_image = [mask_image]

        if isinstance(mask_image[0], torch.Tensor):
            # 4-D items already carry a batch dim -> concatenate; else stack
            mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0)

            if mask_image.ndim == 2:
                # Batch and add channel dim for single mask
                mask_image = mask_image.unsqueeze(0).unsqueeze(0)
            elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
                # Single mask, the 0'th dimension is considered to be
                # the existing batch size of 1
                mask_image = mask_image.unsqueeze(0)
            elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
                # Batch of mask, the 0'th dimension is considered to be
                # the batching dimension
                mask_image = mask_image.unsqueeze(1)

            # binarize in place
            mask_image[mask_image < 0.5] = 0
            mask_image[mask_image >= 0.5] = 1

        elif isinstance(mask_image[0], PIL.Image.Image):
            new_mask_image = []

            for mask_image_ in mask_image:
                mask_image_ = mask_image_.convert("L")  # single (luminance) channel
                mask_image_ = resize(mask_image_, self.unet.sample_size)
                mask_image_ = np.array(mask_image_)
                mask_image_ = mask_image_[None, None, :]  # add batch + channel dims
                new_mask_image.append(mask_image_)

            mask_image = new_mask_image

            mask_image = np.concatenate(mask_image, axis=0)
            mask_image = mask_image.astype(np.float32) / 255.0  # [0, 255] -> [0, 1]
            mask_image[mask_image < 0.5] = 0
            mask_image[mask_image >= 0.5] = 1
            mask_image = torch.from_numpy(mask_image)

        elif isinstance(mask_image[0], np.ndarray):
            # add batch + channel dims to each mask, then batch them together
            mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)

            mask_image[mask_image < 0.5] = 0
            mask_image[mask_image >= 0.5] = 1
            mask_image = torch.from_numpy(mask_image)

        return mask_image
+
803
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps
804
+ def get_timesteps(self, num_inference_steps, strength):
805
+ # get the original timestep using init_timestep
806
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
807
+
808
+ t_start = max(num_inference_steps - init_timestep, 0)
809
+ timesteps = self.scheduler.timesteps[t_start:]
810
+
811
+ return timesteps, num_inference_steps - t_start
812
+
813
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.prepare_intermediate_images
    def prepare_intermediate_images(
        self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None
    ):
        """Create the starting latents: `image` noised to `timestep`, with the
        noise applied only where `mask_image` is 1.

        Args:
            image: Clean (B, C, H, W) image tensor to start from.
            timestep: Timestep(s) used by `self.scheduler.add_noise`.
            batch_size: Prompt batch size (multiplied by
                `num_images_per_prompt` to get the effective batch).
            mask_image: Binarized mask; 1 = region that receives noise and
                will be repainted, 0 = region kept as-is.
            generator: Optional torch generator(s); a list must match the
                effective batch size.
        """
        image_batch_size, channels, height, width = image.shape

        batch_size = batch_size * num_images_per_prompt

        shape = (batch_size, channels, height, width)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        image = image.repeat_interleave(num_images_per_prompt, dim=0)
        noised_image = self.scheduler.add_noise(image, noise, timestep)

        # keep unmasked (mask == 0) regions clean; noise only masked regions
        image = (1 - mask_image) * image + mask_image * noised_image

        return image
+
838
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor],
        original_image: Union[
            PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
        ] = None,
        mask_image: Union[
            PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
        ] = None,
        strength: float = 0.8,
        prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 100,
        timesteps: List[int] = None,
        guidance_scale: float = 4.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        noise_level: int = 0,
        clean_caption: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            original_image (`torch.FloatTensor` or `PIL.Image.Image`):
                The original image that `image` was varied from.
            mask_image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
                to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
                instead of 3, so the expected shape would be `(B, H, W, 1)`.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
                timesteps are used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            noise_level (`int`, *optional*, defaults to 0):
                The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)`
            clean_caption (`bool`, *optional*, defaults to `True`):
                Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
                be installed. If the dependencies are not installed, the embeddings will be created from the raw
                prompt.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
            returning a tuple, the first element is a list with the generated images, and the second element is a list
            of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
            or watermarked content, according to the `safety_checker`.
        """
        # 1. Check inputs. Raise error if not correct
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        self.check_inputs(
            prompt,
            image,
            original_image,
            mask_image,
            batch_size,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        device = self._execution_device

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            clean_caption=clean_caption,
        )

        if do_classifier_free_guidance:
            # [uncond; cond] ordering — matched by the chunk(2) in the loop below
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        dtype = prompt_embeds.dtype

        # 4. Prepare timesteps
        if timesteps is not None:
            self.scheduler.set_timesteps(timesteps=timesteps, device=device)
            timesteps = self.scheduler.timesteps
            num_inference_steps = len(timesteps)
        else:
            self.scheduler.set_timesteps(num_inference_steps, device=device)
            timesteps = self.scheduler.timesteps

        # img2img-style: `strength` trims the schedule to its final fraction
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)

        # 5. prepare original image
        original_image = self.preprocess_original_image(original_image)
        original_image = original_image.to(device=device, dtype=dtype)

        # 6. prepare mask image
        mask_image = self.preprocess_mask_image(mask_image)
        mask_image = mask_image.to(device=device, dtype=dtype)

        if mask_image.shape[0] == 1:
            # broadcast a single mask over the whole effective batch
            mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0)
        else:
            mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0)

        # 7. Prepare intermediate images
        noise_timestep = timesteps[0:1]
        noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt)

        intermediate_images = self.prepare_intermediate_images(
            original_image,
            noise_timestep,
            batch_size,
            num_images_per_prompt,
            dtype,
            device,
            mask_image,
            generator,
        )

        # 8. Prepare upscaled image and noise level
        _, _, height, width = original_image.shape

        image = self.preprocess_image(image, num_images_per_prompt, device)

        upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True)

        noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)
        noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)
        upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)

        if do_classifier_free_guidance:
            noise_level = torch.cat([noise_level] * 2)

        # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # HACK: see comment in `enable_model_cpu_offload`
        if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
            self.text_encoder_offload_hook.offload()

        # 10. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # channel-concatenate the upscaled conditioning image
                model_input = torch.cat([intermediate_images, upscaled], dim=1)

                model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
                model_input = self.scheduler.scale_model_input(model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    class_labels=noise_level,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    # split off the predicted variance channels before mixing
                    noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)
                    noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                    noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)

                if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
                    # scheduler cannot consume the variance channels — drop them
                    noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)

                # compute the previous noisy sample x_t -> x_t-1
                prev_intermediate_images = intermediate_images

                intermediate_images = self.scheduler.step(
                    noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
                )[0]

                # re-impose the unmasked (mask == 0) regions after every step
                intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, intermediate_images)

        image = intermediate_images

        if output_type == "pil":
            # 11. Post-processing: map [-1, 1] -> [0, 1] and move to numpy HWC
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            # 12. Run safety checker
            image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)

            # 13. Convert to PIL
            image = self.numpy_to_pil(image)

            # 14. Apply watermark
            if self.watermarker is not None:
                self.watermarker.apply_watermark(image, self.unet.config.sample_size)
        elif output_type == "pt":
            # raw tensors are returned as-is, without the safety checker
            nsfw_detected = None
            watermark_detected = None

        else:
            # 11. Post-processing: map [-1, 1] -> [0, 1] and move to numpy HWC
            image = (image / 2 + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            # 12. Run safety checker
            image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)

        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, nsfw_detected, watermark_detected)

        return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py ADDED
@@ -0,0 +1,885 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
12
+
13
+ from ...loaders import LoraLoaderMixin
14
+ from ...models import UNet2DConditionModel
15
+ from ...schedulers import DDPMScheduler
16
+ from ...utils import (
17
+ BACKENDS_MAPPING,
18
+ is_accelerate_available,
19
+ is_bs4_available,
20
+ is_ftfy_available,
21
+ logging,
22
+ replace_example_docstring,
23
+ )
24
+ from ...utils.torch_utils import randn_tensor
25
+ from ..pipeline_utils import DiffusionPipeline
26
+ from .pipeline_output import IFPipelineOutput
27
+ from .safety_checker import IFSafetyChecker
28
+ from .watermark import IFWatermarker
29
+
30
+
31
# BeautifulSoup / ftfy are optional dependencies; they are only needed when
# caption cleaning is requested (see `_text_preprocessing` / `_clean_caption`).
if is_bs4_available():
    from bs4 import BeautifulSoup

if is_ftfy_available():
    import ftfy


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Usage example injected into `__call__`'s docstring via `replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline
        >>> from diffusers.utils import pt_to_pil
        >>> import torch

        >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        >>> pipe.enable_model_cpu_offload()

        >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
        >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)

        >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images

        >>> # save intermediate image
        >>> pil_image = pt_to_pil(image)
        >>> pil_image[0].save("./if_stage_I.png")

        >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(
        ...     "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
        ... )
        >>> super_res_1_pipe.enable_model_cpu_offload()

        >>> image = super_res_1_pipe(
        ...     image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds
        ... ).images
        >>> image[0].save("./if_stage_II.png")
        ```
"""
71
+
72
+
73
class IFSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
    """Second-stage (super-resolution) pipeline for DeepFloyd IF.

    Upscales a stage-I image conditioned on the text prompt. The unet is
    expected to take 6 input channels (noisy latents concatenated with the
    noised upscaled image — see the channel check in `__init__`).
    """

    # Text encoding components (optional at load time — see `_optional_components`).
    tokenizer: T5Tokenizer
    text_encoder: T5EncoderModel

    # Denoising model and schedulers; `image_noising_scheduler` is used to add
    # `noise_level` noise to the upscaled conditioning image in `__call__`.
    unet: UNet2DConditionModel
    scheduler: DDPMScheduler
    image_noising_scheduler: DDPMScheduler

    # Safety components (all optional).
    feature_extractor: Optional[CLIPImageProcessor]
    safety_checker: Optional[IFSafetyChecker]

    watermarker: Optional[IFWatermarker]

    # Matches runs of "bad" punctuation stripped during caption cleaning.
    bad_punct_regex = re.compile(
        r"["
        + "#®•©™&@·º½¾¿¡§~"
        + r"\)"
        + r"\("
        + r"\]"
        + r"\["
        + r"\}"
        + r"\{"
        + r"\|"
        + "\\"
        + r"\/"
        + r"\*"
        + r"]{1,}"
    )  # noqa

    _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
    model_cpu_offload_seq = "text_encoder->unet"
104
+
105
+ def __init__(
106
+ self,
107
+ tokenizer: T5Tokenizer,
108
+ text_encoder: T5EncoderModel,
109
+ unet: UNet2DConditionModel,
110
+ scheduler: DDPMScheduler,
111
+ image_noising_scheduler: DDPMScheduler,
112
+ safety_checker: Optional[IFSafetyChecker],
113
+ feature_extractor: Optional[CLIPImageProcessor],
114
+ watermarker: Optional[IFWatermarker],
115
+ requires_safety_checker: bool = True,
116
+ ):
117
+ super().__init__()
118
+
119
+ if safety_checker is None and requires_safety_checker:
120
+ logger.warning(
121
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
122
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
123
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
124
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
125
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
126
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
127
+ )
128
+
129
+ if safety_checker is not None and feature_extractor is None:
130
+ raise ValueError(
131
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
132
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
133
+ )
134
+
135
+ if unet.config.in_channels != 6:
136
+ logger.warning(
137
+ "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
138
+ )
139
+
140
+ self.register_modules(
141
+ tokenizer=tokenizer,
142
+ text_encoder=text_encoder,
143
+ unet=unet,
144
+ scheduler=scheduler,
145
+ image_noising_scheduler=image_noising_scheduler,
146
+ safety_checker=safety_checker,
147
+ feature_extractor=feature_extractor,
148
+ watermarker=watermarker,
149
+ )
150
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
151
+
152
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
153
+ def remove_all_hooks(self):
154
+ if is_accelerate_available():
155
+ from accelerate.hooks import remove_hook_from_module
156
+ else:
157
+ raise ImportError("Please install accelerate via `pip install accelerate`")
158
+
159
+ for model in [self.text_encoder, self.unet, self.safety_checker]:
160
+ if model is not None:
161
+ remove_hook_from_module(model, recurse=True)
162
+
163
+ self.unet_offload_hook = None
164
+ self.text_encoder_offload_hook = None
165
+ self.final_offload_hook = None
166
+
167
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
    def _text_preprocessing(self, text, clean_caption=False):
        """Normalize prompt text before tokenization.

        Always returns a list of strings. With `clean_caption=True` (and bs4 +
        ftfy installed) each entry is run through `_clean_caption`; otherwise
        it is only lower-cased and stripped.
        """
        # Caption cleaning needs both bs4 and ftfy; silently fall back when missing.
        if clean_caption and not is_bs4_available():
            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if clean_caption and not is_ftfy_available():
            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if not isinstance(text, (tuple, list)):
            text = [text]

        def process(text: str):
            if clean_caption:
                # NOTE(review): cleaning is applied twice on purpose in the
                # upstream IF pipeline this is copied from — a second pass can
                # simplify residue left by the first (confirm before changing).
                text = self._clean_caption(text)
                text = self._clean_caption(text)
            else:
                text = text.lower().strip()
            return text

        return [process(t) for t in text]
191
+
192
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
    def _clean_caption(self, caption):
        """Aggressively scrub a web-scraped caption.

        Removes URLs, HTML, @-mentions, CJK ranges, IDs/filenames, repeated
        punctuation and boilerplate ("free shipping", "click for ..."), and
        normalizes dashes/quotes/whitespace. The order of the substitutions
        below is significant — later patterns assume earlier normalization.
        """
        caption = str(caption)
        caption = ul.unquote_plus(caption)
        caption = caption.strip().lower()
        caption = re.sub("<person>", "person", caption)
        # urls:
        caption = re.sub(
            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        caption = re.sub(
            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        # html: strip tags, keep text content
        caption = BeautifulSoup(caption, features="html.parser").text

        # @<nickname>
        caption = re.sub(r"@[\w\d]+\b", "", caption)

        # 31C0—31EF CJK Strokes
        # 31F0—31FF Katakana Phonetic Extensions
        # 3200—32FF Enclosed CJK Letters and Months
        # 3300—33FF CJK Compatibility
        # 3400—4DBF CJK Unified Ideographs Extension A
        # 4DC0—4DFF Yijing Hexagram Symbols
        # 4E00—9FFF CJK Unified Ideographs
        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
        #######################################################

        # all types of dash --> "-"
        caption = re.sub(
            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
            "-",
            caption,
        )

        # normalize quotation marks to one standard
        caption = re.sub(r"[`´«»“”¨]", '"', caption)
        caption = re.sub(r"[‘’]", "'", caption)

        # &quot;
        caption = re.sub(r"&quot;?", "", caption)
        # &amp
        caption = re.sub(r"&amp", "", caption)

        # ip adresses:
        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)

        # article ids:
        caption = re.sub(r"\d:\d\d\s+$", "", caption)

        # literal backslash-n sequences
        caption = re.sub(r"\\n", " ", caption)

        # "#123"
        caption = re.sub(r"#\d{1,3}\b", "", caption)
        # "#12345.."
        caption = re.sub(r"#\d{5,}\b", "", caption)
        # "123456.."
        caption = re.sub(r"\b\d{6,}\b", "", caption)
        # filenames:
        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)

        # collapse repeated quotes / dots
        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""

        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "

        # this-is-my-cute-cat / this_is_my_cute_cat
        regex2 = re.compile(r"(?:\-|\_)")
        if len(re.findall(regex2, caption)) > 3:
            caption = re.sub(regex2, " ", caption)

        # fix mojibake and double-unescape HTML entities
        caption = ftfy.fix_text(caption)
        caption = html.unescape(html.unescape(caption))

        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231

        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
        caption = re.sub(r"\bpage\s+\d+\b", "", caption)

        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...

        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)

        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
        caption = re.sub(r"\s+", " ", caption)

        # NOTE(review): the result of this `strip()` is discarded (no-op); the
        # following `^`/`$` anchored patterns therefore see unstripped text.
        # Kept as-is to match the upstream IF pipeline this is copied from.
        caption.strip()

        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
        caption = re.sub(r"^\.\S+$", "", caption)

        return caption.strip()
306
+
307
    @torch.no_grad()
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        do_classifier_free_guidance: bool = True,
        num_images_per_prompt: int = 1,
        device: Optional[torch.device] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        clean_caption: bool = False,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            clean_caption (bool, defaults to `False`):
                If `True`, the function will preprocess and clean the provided caption before encoding.

        Returns:
            `(prompt_embeds, negative_prompt_embeds)`; the negative embeddings are `None`
            when `do_classifier_free_guidance` is `False`.
        """
        if prompt is not None and negative_prompt is not None:
            if type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )

        if device is None:
            device = self._execution_device

        # Batch size comes from `prompt` when given, else from precomputed embeddings.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
        max_length = 77

        if prompt_embeds is None:
            prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            # Re-tokenize without truncation only to detect and report truncated text.
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {max_length} tokens: {removed_text}"
                )

            attention_mask = text_inputs.attention_mask.to(device)

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        # Choose an embedding dtype; text encoder may be absent in stage-II use.
        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        elif self.unet is not None:
            dtype = self.unet.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
            # Pad the unconditional branch to the same sequence length as the prompt.
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            attention_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
        else:
            negative_prompt_embeds = None

        return prompt_embeds, negative_prompt_embeds
461
+
462
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
463
+ def run_safety_checker(self, image, device, dtype):
464
+ if self.safety_checker is not None:
465
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
466
+ image, nsfw_detected, watermark_detected = self.safety_checker(
467
+ images=image,
468
+ clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
469
+ )
470
+ else:
471
+ nsfw_detected = None
472
+ watermark_detected = None
473
+
474
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
475
+ self.unet_offload_hook.offload()
476
+
477
+ return image, nsfw_detected, watermark_detected
478
+
479
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
480
+ def prepare_extra_step_kwargs(self, generator, eta):
481
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
482
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
483
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
484
+ # and should be between [0, 1]
485
+
486
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
487
+ extra_step_kwargs = {}
488
+ if accepts_eta:
489
+ extra_step_kwargs["eta"] = eta
490
+
491
+ # check if the scheduler accepts generator
492
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
493
+ if accepts_generator:
494
+ extra_step_kwargs["generator"] = generator
495
+ return extra_step_kwargs
496
+
497
    def check_inputs(
        self,
        prompt,
        image,
        batch_size,
        noise_level,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        """Validate `__call__` arguments; raises `ValueError` on the first violation.

        Checks: `callback_steps` is a positive int; exactly one of
        `prompt`/`prompt_embeds` is given; negative prompt inputs are not
        doubled up; embedding shapes match; `noise_level` is a valid timestep
        of `image_noising_scheduler`; `image` is a supported type whose batch
        size matches `batch_size`.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        # `noise_level` is later used as a timestep of `image_noising_scheduler`.
        if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps:
            raise ValueError(
                f"`noise_level`: {noise_level} must be a valid timestep in `self.noising_scheduler`, [0, {self.image_noising_scheduler.config.num_train_timesteps})"
            )

        # For list inputs, only the first element's type is inspected.
        if isinstance(image, list):
            check_image_type = image[0]
        else:
            check_image_type = image

        if (
            not isinstance(check_image_type, torch.Tensor)
            and not isinstance(check_image_type, PIL.Image.Image)
            and not isinstance(check_image_type, np.ndarray)
        ):
            raise ValueError(
                "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
                f" {type(check_image_type)}"
            )

        if isinstance(image, list):
            image_batch_size = len(image)
        elif isinstance(image, torch.Tensor):
            image_batch_size = image.shape[0]
        elif isinstance(image, PIL.Image.Image):
            image_batch_size = 1
        elif isinstance(image, np.ndarray):
            image_batch_size = image.shape[0]
        else:
            # Unreachable: the type check above already rejected other types.
            # NOTE(review): `assert` is stripped under `python -O`; harmless
            # here since this branch cannot be reached.
            assert False

        if batch_size != image_batch_size:
            raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
575
+
576
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_intermediate_images
577
+ def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator):
578
+ shape = (batch_size, num_channels, height, width)
579
+ if isinstance(generator, list) and len(generator) != batch_size:
580
+ raise ValueError(
581
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
582
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
583
+ )
584
+
585
+ intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
586
+
587
+ # scale the initial noise by the standard deviation required by the scheduler
588
+ intermediate_images = intermediate_images * self.scheduler.init_noise_sigma
589
+ return intermediate_images
590
+
591
    def preprocess_image(self, image, num_images_per_prompt, device):
        """Convert the conditioning image(s) to a batched float tensor on `device`.

        Accepts a PIL image, numpy array, torch tensor, or a list of any of
        these. PIL inputs are rescaled from [0, 255] to [-1, 1]; numpy/PIL
        inputs are transposed HWC -> CHW. The result is repeated
        `num_images_per_prompt` times along the batch dimension and cast to
        the unet's dtype.
        """
        # Wrap single non-tensor inputs; note a bare torch.Tensor is NOT
        # wrapped and falls through all branches below, so it is assumed to
        # already be a batched NCHW tensor in [-1, 1].
        if not isinstance(image, torch.Tensor) and not isinstance(image, list):
            image = [image]

        if isinstance(image[0], PIL.Image.Image):
            # PIL -> float32 in [-1, 1]
            image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image]

            image = np.stack(image, axis=0)  # to np
            image = torch.from_numpy(image.transpose(0, 3, 1, 2))
        elif isinstance(image[0], np.ndarray):
            image = np.stack(image, axis=0)  # to np
            # A list of already-batched arrays stacks to 5 dims; keep the first batch.
            if image.ndim == 5:
                image = image[0]

            image = torch.from_numpy(image.transpose(0, 3, 1, 2))
        elif isinstance(image, list) and isinstance(image[0], torch.Tensor):
            dims = image[0].ndim

            if dims == 3:
                image = torch.stack(image, dim=0)
            elif dims == 4:
                image = torch.concat(image, dim=0)
            else:
                raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}")

        image = image.to(device=device, dtype=self.unet.dtype)

        # One copy of each conditioning image per generated sample.
        image = image.repeat_interleave(num_images_per_prompt, dim=0)

        return image
621
+
622
+ @torch.no_grad()
623
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
624
+ def __call__(
625
+ self,
626
+ prompt: Union[str, List[str]] = None,
627
+ height: int = None,
628
+ width: int = None,
629
+ image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor] = None,
630
+ num_inference_steps: int = 50,
631
+ timesteps: List[int] = None,
632
+ guidance_scale: float = 4.0,
633
+ negative_prompt: Optional[Union[str, List[str]]] = None,
634
+ num_images_per_prompt: Optional[int] = 1,
635
+ eta: float = 0.0,
636
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
637
+ prompt_embeds: Optional[torch.FloatTensor] = None,
638
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
639
+ output_type: Optional[str] = "pil",
640
+ return_dict: bool = True,
641
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
642
+ callback_steps: int = 1,
643
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
644
+ noise_level: int = 250,
645
+ clean_caption: bool = True,
646
+ ):
647
+ """
648
+ Function invoked when calling the pipeline for generation.
649
+
650
+ Args:
651
+ prompt (`str` or `List[str]`, *optional*):
652
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
653
+ instead.
654
+ height (`int`, *optional*, defaults to None):
655
+ The height in pixels of the generated image.
656
+ width (`int`, *optional*, defaults to None):
657
+ The width in pixels of the generated image.
658
+ image (`PIL.Image.Image`, `np.ndarray`, `torch.FloatTensor`):
659
+ The image to be upscaled.
660
+ num_inference_steps (`int`, *optional*, defaults to 50):
661
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
662
+ expense of slower inference.
663
+ timesteps (`List[int]`, *optional*, defaults to None):
664
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
665
+ timesteps are used. Must be in descending order.
666
+ guidance_scale (`float`, *optional*, defaults to 4.0):
667
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
668
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
669
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
670
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
671
+ usually at the expense of lower image quality.
672
+ negative_prompt (`str` or `List[str]`, *optional*):
673
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
674
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
675
+ less than `1`).
676
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
677
+ The number of images to generate per prompt.
678
+ eta (`float`, *optional*, defaults to 0.0):
679
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
680
+ [`schedulers.DDIMScheduler`], will be ignored for others.
681
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
682
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
683
+ to make generation deterministic.
684
+ prompt_embeds (`torch.FloatTensor`, *optional*):
685
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
686
+ provided, text embeddings will be generated from `prompt` input argument.
687
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
688
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
689
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
690
+ argument.
691
+ output_type (`str`, *optional*, defaults to `"pil"`):
692
+ The output format of the generate image. Choose between
693
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
694
+ return_dict (`bool`, *optional*, defaults to `True`):
695
+ Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
696
+ callback (`Callable`, *optional*):
697
+ A function that will be called every `callback_steps` steps during inference. The function will be
698
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
699
+ callback_steps (`int`, *optional*, defaults to 1):
700
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
701
+ called at every step.
702
+ cross_attention_kwargs (`dict`, *optional*):
703
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
704
+ `self.processor` in
705
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
706
+ noise_level (`int`, *optional*, defaults to 250):
707
+ The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)`
708
+ clean_caption (`bool`, *optional*, defaults to `True`):
709
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
710
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
711
+ prompt.
712
+
713
+ Examples:
714
+
715
+ Returns:
716
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
717
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
718
+ returning a tuple, the first element is a list with the generated images, and the second element is a list
719
+ of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
720
+ or watermarked content, according to the `safety_checker`.
721
+ """
722
+ # 1. Check inputs. Raise error if not correct
723
+
724
+ if prompt is not None and isinstance(prompt, str):
725
+ batch_size = 1
726
+ elif prompt is not None and isinstance(prompt, list):
727
+ batch_size = len(prompt)
728
+ else:
729
+ batch_size = prompt_embeds.shape[0]
730
+
731
+ self.check_inputs(
732
+ prompt,
733
+ image,
734
+ batch_size,
735
+ noise_level,
736
+ callback_steps,
737
+ negative_prompt,
738
+ prompt_embeds,
739
+ negative_prompt_embeds,
740
+ )
741
+
742
+ # 2. Define call parameters
743
+
744
+ height = height or self.unet.config.sample_size
745
+ width = width or self.unet.config.sample_size
746
+
747
+ device = self._execution_device
748
+
749
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
750
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
751
+ # corresponds to doing no classifier free guidance.
752
+ do_classifier_free_guidance = guidance_scale > 1.0
753
+
754
+ # 3. Encode input prompt
755
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
756
+ prompt,
757
+ do_classifier_free_guidance,
758
+ num_images_per_prompt=num_images_per_prompt,
759
+ device=device,
760
+ negative_prompt=negative_prompt,
761
+ prompt_embeds=prompt_embeds,
762
+ negative_prompt_embeds=negative_prompt_embeds,
763
+ clean_caption=clean_caption,
764
+ )
765
+
766
+ if do_classifier_free_guidance:
767
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
768
+
769
+ # 4. Prepare timesteps
770
+ if timesteps is not None:
771
+ self.scheduler.set_timesteps(timesteps=timesteps, device=device)
772
+ timesteps = self.scheduler.timesteps
773
+ num_inference_steps = len(timesteps)
774
+ else:
775
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
776
+ timesteps = self.scheduler.timesteps
777
+
778
+ # 5. Prepare intermediate images
779
+ num_channels = self.unet.config.in_channels // 2
780
+ intermediate_images = self.prepare_intermediate_images(
781
+ batch_size * num_images_per_prompt,
782
+ num_channels,
783
+ height,
784
+ width,
785
+ prompt_embeds.dtype,
786
+ device,
787
+ generator,
788
+ )
789
+
790
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
791
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
792
+
793
+ # 7. Prepare upscaled image and noise level
794
+ image = self.preprocess_image(image, num_images_per_prompt, device)
795
+ upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True)
796
+
797
+ noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)
798
+ noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)
799
+ upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)
800
+
801
+ if do_classifier_free_guidance:
802
+ noise_level = torch.cat([noise_level] * 2)
803
+
804
+ # HACK: see comment in `enable_model_cpu_offload`
805
+ if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
806
+ self.text_encoder_offload_hook.offload()
807
+
808
+ # 8. Denoising loop
809
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
810
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
811
+ for i, t in enumerate(timesteps):
812
+ model_input = torch.cat([intermediate_images, upscaled], dim=1)
813
+
814
+ model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
815
+ model_input = self.scheduler.scale_model_input(model_input, t)
816
+
817
+ # predict the noise residual
818
+ noise_pred = self.unet(
819
+ model_input,
820
+ t,
821
+ encoder_hidden_states=prompt_embeds,
822
+ class_labels=noise_level,
823
+ cross_attention_kwargs=cross_attention_kwargs,
824
+ return_dict=False,
825
+ )[0]
826
+
827
+ # perform guidance
828
+ if do_classifier_free_guidance:
829
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
830
+ noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)
831
+ noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)
832
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
833
+ noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
834
+
835
+ if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
836
+ noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)
837
+
838
+ # compute the previous noisy sample x_t -> x_t-1
839
+ intermediate_images = self.scheduler.step(
840
+ noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
841
+ )[0]
842
+
843
+ # call the callback, if provided
844
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
845
+ progress_bar.update()
846
+ if callback is not None and i % callback_steps == 0:
847
+ callback(i, t, intermediate_images)
848
+
849
+ image = intermediate_images
850
+
851
+ if output_type == "pil":
852
+ # 9. Post-processing
853
+ image = (image / 2 + 0.5).clamp(0, 1)
854
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
855
+
856
+ # 10. Run safety checker
857
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
858
+
859
+ # 11. Convert to PIL
860
+ image = self.numpy_to_pil(image)
861
+
862
+ # 12. Apply watermark
863
+ if self.watermarker is not None:
864
+ self.watermarker.apply_watermark(image, self.unet.config.sample_size)
865
+ elif output_type == "pt":
866
+ nsfw_detected = None
867
+ watermark_detected = None
868
+
869
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
870
+ self.unet_offload_hook.offload()
871
+ else:
872
+ # 9. Post-processing
873
+ image = (image / 2 + 0.5).clamp(0, 1)
874
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
875
+
876
+ # 10. Run safety checker
877
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
878
+
879
+ # Offload all models
880
+ self.maybe_free_model_hooks()
881
+
882
+ if not return_dict:
883
+ return (image, nsfw_detected, watermark_detected)
884
+
885
+ return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_output.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+
7
+ from ...utils import BaseOutput
8
+
9
+
10
@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for DeepFloyd IF pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
        nsfw_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content or a watermark. `None` if safety checking could not be performed.
        watermark_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety
            checking could not be performed.
    """

    # Generated images: PIL images or one numpy array batch.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags; None when the safety checker was not run.
    nsfw_detected: Optional[List[bool]]
    # Per-image watermark flags; None when the safety checker was not run.
    watermark_detected: Optional[List[bool]]
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/safety_checker.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn as nn
4
+ from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
5
+
6
+ from ...utils import logging
7
+
8
+
9
+ logger = logging.get_logger(__name__)
10
+
11
+
12
class IFSafetyChecker(PreTrainedModel):
    """Flags generated images that look NSFW or watermarked and blacks them out."""

    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        # CLIP vision tower whose projected embedding both probes score.
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        # Single-logit linear probes on top of the projected image embedding.
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW probe
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark probe

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Score `clip_input`, zero out flagged `images` in place, and return
        (images, nsfw_flags, watermark_flags)."""
        image_embeds = self.vision_model(clip_input)[0]

        # NSFW probe: one boolean per image.
        nsfw_detected = (self.p_head(image_embeds).flatten() > p_threshold).tolist()
        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, flagged in enumerate(nsfw_detected):
            if flagged:
                images[idx] = np.zeros(images[idx].shape)

        # Watermark probe: one boolean per image.
        watermark_detected = (self.w_head(image_embeds).flatten() > w_threshold).tolist()
        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, flagged in enumerate(watermark_detected):
            if flagged:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/timesteps.py ADDED
@@ -0,0 +1,579 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Hand-tuned 27-step schedule for fast DeepFloyd IF sampling (descending DDPM timesteps).
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
30
+
31
# Hand-tuned 27-step "smart" schedule (descending DDPM timesteps).
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
60
+
61
# Hand-tuned 50-step "smart" schedule (descending DDPM timesteps).
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
113
+
114
# Hand-tuned 100-step "smart" schedule (descending DDPM timesteps).
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
216
+
217
# Hand-tuned 185-step "smart" schedule (descending DDPM timesteps).
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
404
+
405
# Hand-tuned 27-step super-resolution schedule (descending DDPM timesteps).
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
434
+
435
# Hand-tuned 40-step super-resolution schedule (descending DDPM timesteps).
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
477
+
478
# Hand-tuned 100-step super-resolution schedule (descending DDPM timesteps).
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/watermark.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ import PIL.Image
4
+ import torch
5
+ from PIL import Image
6
+
7
+ from ...configuration_utils import ConfigMixin
8
+ from ...models.modeling_utils import ModelMixin
9
+ from ...utils import PIL_INTERPOLATION
10
+
11
+
12
class IFWatermarker(ModelMixin, ConfigMixin):
    """Pastes the DeepFloyd IF watermark into the corner of generated PIL images."""

    def __init__(self):
        super().__init__()

        # 62x62 RGBA watermark pixels; registered as a buffer so the actual
        # image data comes from the loaded checkpoint rather than this zero init.
        self.register_buffer("watermark_image", torch.zeros((62, 62, 4)))
        # Cached PIL rendering of the buffer above; built lazily on first use.
        self.watermark_image_as_pil = None

    def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None):
        """Paste the watermark into the bottom-right corner of each image, in place.

        Args:
            images: PIL images to stamp; modified in place and also returned.
            sample_size: Nominal sample resolution used to scale the watermark.
                Defaults to the height of the first image.
        """
        # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287

        # All images are assumed to share the first image's dimensions — TODO confirm.
        h = images[0].height
        w = images[0].width

        sample_size = sample_size or h

        # Ratio of actual image size to the nominal sample size; when the image
        # is smaller than sample_size (coef < 1) the geometry is computed on an
        # upscaled virtual canvas.
        coef = min(h / sample_size, w / sample_size)
        img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w)

        # K scales the 62px watermark (and its 14px margin) relative to a
        # 1024x1024 reference canvas.
        S1, S2 = 1024**2, img_w * img_h
        K = (S2 / S1) ** 0.5
        wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K)

        # Lazily convert the buffer to a PIL RGBA image the first time.
        if self.watermark_image_as_pil is None:
            watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy()
            watermark_image = Image.fromarray(watermark_image, mode="RGBA")
            self.watermark_image_as_pil = watermark_image

        wm_img = self.watermark_image_as_pil.resize(
            (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None
        )

        # Paste using the alpha channel as mask so transparent pixels pass through.
        for pil_img in images:
            pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1])

        return images
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/dit/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
4
+
5
+
6
+ _import_structure = {"pipeline_dit": ["DiTPipeline"]}
7
+
8
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
9
+ from .pipeline_dit import DiTPipeline
10
+
11
+ else:
12
+ import sys
13
+
14
+ sys.modules[__name__] = _LazyModule(
15
+ __name__,
16
+ globals()["__file__"],
17
+ _import_structure,
18
+ module_spec=__spec__,
19
+ )
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/dit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (530 Bytes). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/pipelines/dit/__pycache__/pipeline_dit.cpython-310.pyc ADDED
Binary file (7.38 kB). View file