ZTWHHH committed on
Commit
dc0c613
·
verified ·
1 Parent(s): 4e091da

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__init__.py +62 -0
  3. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/__init__.cpython-310.pyc +0 -0
  4. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused.cpython-310.pyc +0 -0
  5. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused_img2img.cpython-310.pyc +0 -0
  6. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused_inpaint.cpython-310.pyc +0 -0
  7. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused.py +328 -0
  8. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused_img2img.py +349 -0
  9. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused_inpaint.py +380 -0
  10. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__init__.py +50 -0
  11. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/__init__.cpython-310.pyc +0 -0
  12. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/modeling_audioldm2.cpython-310.pyc +0 -0
  13. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/pipeline_audioldm2.cpython-310.pyc +0 -0
  14. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/modeling_audioldm2.py +1530 -0
  15. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  16. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__pycache__/__init__.cpython-310.pyc +0 -0
  17. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__pycache__/pipeline_controlnet_xs.cpython-310.pyc +0 -0
  18. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__pycache__/pipeline_controlnet_xs_sd_xl.cpython-310.pyc +0 -0
  19. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  20. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  21. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__init__.py +85 -0
  22. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +1121 -0
  23. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +870 -0
  24. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_output.py +29 -0
  25. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/timesteps.py +579 -0
  26. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/watermark.py +46 -0
  27. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__init__.py +202 -0
  28. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/clip_image_project_model.cpython-310.pyc +0 -0
  29. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/convert_from_ckpt.cpython-310.pyc +0 -0
  30. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_flax_stable_diffusion.cpython-310.pyc +0 -0
  31. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_flax_stable_diffusion_img2img.cpython-310.pyc +0 -0
  32. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_flax_stable_diffusion_inpaint.cpython-310.pyc +0 -0
  33. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion.cpython-310.pyc +0 -0
  34. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion_img2img.cpython-310.pyc +0 -0
  35. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion_inpaint.cpython-310.pyc +0 -0
  36. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion_upscale.cpython-310.pyc +0 -0
  37. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_output.cpython-310.pyc +0 -0
  38. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_depth2img.cpython-310.pyc +0 -0
  39. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_image_variation.cpython-310.pyc +0 -0
  40. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_img2img.cpython-310.pyc +0 -0
  41. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_inpaint.cpython-310.pyc +0 -0
  42. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_instruct_pix2pix.cpython-310.pyc +0 -0
  43. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_latent_upscale.cpython-310.pyc +0 -0
  44. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_upscale.cpython-310.pyc +0 -0
  45. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_unclip.cpython-310.pyc +0 -0
  46. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_unclip_img2img.cpython-310.pyc +0 -0
  47. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/safety_checker.cpython-310.pyc +0 -0
  48. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/safety_checker_flax.cpython-310.pyc +0 -0
  49. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/stable_unclip_image_normalizer.cpython-310.pyc +0 -0
  50. mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +1869 -0
.gitattributes CHANGED
@@ -533,3 +533,4 @@ moondream/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pyc
533
  moondream/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
534
  moondream/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
535
  moondream/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
533
  moondream/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
534
  moondream/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
535
  moondream/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
536
+ mantis_evalkit/lib/python3.10/site-packages/pip/_vendor/__pycache__/typing_extensions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__init__.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_transformers_available,
)


# Dummy placeholder objects registered on the module when the required
# backends (torch + transformers) are missing.
_dummy_objects = {}
# Mapping of submodule name -> exported names, consumed by _LazyModule.
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        AmusedImg2ImgPipeline,
        AmusedInpaintPipeline,
        AmusedPipeline,
    )

    _dummy_objects.update(
        {
            "AmusedPipeline": AmusedPipeline,
            "AmusedImg2ImgPipeline": AmusedImg2ImgPipeline,
            "AmusedInpaintPipeline": AmusedInpaintPipeline,
        }
    )
else:
    _import_structure["pipeline_amused"] = ["AmusedPipeline"]
    _import_structure["pipeline_amused_img2img"] = ["AmusedImg2ImgPipeline"]
    _import_structure["pipeline_amused_inpaint"] = ["AmusedInpaintPipeline"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Fix: import all three dummy pipelines here. The original imported
        # only `AmusedPipeline`, so an eager import of `AmusedImg2ImgPipeline`
        # or `AmusedInpaintPipeline` without torch/transformers raised
        # ImportError instead of resolving to the dummy object, inconsistent
        # with the lazy path above which registers all three dummies.
        from ...utils.dummy_torch_and_transformers_objects import (
            AmusedImg2ImgPipeline,
            AmusedInpaintPipeline,
            AmusedPipeline,
        )
    else:
        from .pipeline_amused import AmusedPipeline
        from .pipeline_amused_img2img import AmusedImg2ImgPipeline
        from .pipeline_amused_inpaint import AmusedInpaintPipeline

else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused_img2img.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/__pycache__/pipeline_amused_inpaint.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused.py ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModelWithProjection, CLIPTokenizer

from ...image_processor import VaeImageProcessor
from ...models import UVit2DModel, VQModel
from ...schedulers import AmusedScheduler
from ...utils import replace_example_docstring
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import AmusedPipeline

        >>> pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        ```
"""


class AmusedPipeline(DiffusionPipeline):
    """Text-to-image pipeline for the aMUSEd masked-token transformer model.

    Generates images by iteratively un-masking VQ token latents with a
    `UVit2DModel` conditioned on CLIP text embeddings, then decoding the
    tokens with a `VQModel`.
    """

    image_processor: VaeImageProcessor
    vqvae: VQModel
    tokenizer: CLIPTokenizer
    text_encoder: CLIPTextModelWithProjection
    transformer: UVit2DModel
    scheduler: AmusedScheduler

    # Order in which sub-models are moved to GPU when CPU offload is enabled.
    model_cpu_offload_seq = "text_encoder->transformer->vqvae"

    def __init__(
        self,
        vqvae: VQModel,
        tokenizer: CLIPTokenizer,
        text_encoder: CLIPTextModelWithProjection,
        transformer: UVit2DModel,
        scheduler: AmusedScheduler,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            transformer=transformer,
            scheduler=scheduler,
        )
        # Each VQ-VAE downsampling stage halves the spatial resolution.
        self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
        # Token latents are not in [-1, 1]; skip the usual normalization.
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False)

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[Union[List[str], str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 12,
        guidance_scale: float = 10.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.IntTensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_encoder_hidden_states: Optional[torch.Tensor] = None,
        output_type="pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        micro_conditioning_aesthetic_score: int = 6,
        micro_conditioning_crop_coord: Tuple[int, int] = (0, 0),
        temperature: Union[int, Tuple[int, int], List[int]] = (2, 0),
    ):
        """
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 12):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 10.0):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.IntTensor`, *optional*):
                Pre-generated tokens representing latent vectors in `self.vqvae`, to be used as inputs for image
                generation. If not provided, the starting latents will be completely masked.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument. A single vector from the
                pooled and projected final hidden states.
            encoder_hidden_states (`torch.Tensor`, *optional*):
                Pre-generated penultimate hidden states from the text encoder providing additional text conditioning.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            negative_encoder_hidden_states (`torch.Tensor`, *optional*):
                Analogous to `encoder_hidden_states` for the positive prompt.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6):
                The targeted aesthetic score according to the laion aesthetic classifier. See
                https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of
                https://arxiv.org/abs/2307.01952.
            micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)):
                The targeted height, width crop coordinates. See the micro-conditioning section of
                https://arxiv.org/abs/2307.01952.
            temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)):
                Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`.

        Examples:

        Returns:
            [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a
                `tuple` is returned where the first element is a list with the generated images.
        """
        # `prompt_embeds` (pooled) and `encoder_hidden_states` (sequence) are
        # consumed together by the transformer — require both or neither.
        if (prompt_embeds is not None and encoder_hidden_states is None) or (
            prompt_embeds is None and encoder_hidden_states is not None
        ):
            raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither")

        if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or (
            negative_prompt_embeds is None and negative_encoder_hidden_states is not None
        ):
            raise ValueError(
                "pass either both `negative_prompt_embeds` and `negative_encoder_hidden_states` or neither"
            )

        if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None):
            raise ValueError("pass only one of `prompt` or `prompt_embeds`")

        if isinstance(prompt, str):
            prompt = [prompt]

        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        batch_size = batch_size * num_images_per_prompt

        if height is None:
            height = self.transformer.config.sample_size * self.vae_scale_factor

        if width is None:
            width = self.transformer.config.sample_size * self.vae_scale_factor

        if prompt_embeds is None:
            input_ids = self.tokenizer(
                prompt,
                return_tensors="pt",
                padding="max_length",
                truncation=True,
                max_length=self.tokenizer.model_max_length,
            ).input_ids.to(self._execution_device)

            outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
            prompt_embeds = outputs.text_embeds
            # Penultimate hidden layer is used for cross-attention conditioning.
            encoder_hidden_states = outputs.hidden_states[-2]

        prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1)
        encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)

        if guidance_scale > 1.0:
            if negative_prompt_embeds is None:
                if negative_prompt is None:
                    # Fix: `prompt` is None when the caller supplied
                    # `prompt_embeds` directly; derive the per-prompt count
                    # from the pre-repeat batch size instead of crashing on
                    # `len(None)`.
                    num_prompts = len(prompt) if prompt is not None else batch_size // num_images_per_prompt
                    negative_prompt = [""] * num_prompts

                if isinstance(negative_prompt, str):
                    negative_prompt = [negative_prompt]

                input_ids = self.tokenizer(
                    negative_prompt,
                    return_tensors="pt",
                    padding="max_length",
                    truncation=True,
                    max_length=self.tokenizer.model_max_length,
                ).input_ids.to(self._execution_device)

                outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
                negative_prompt_embeds = outputs.text_embeds
                negative_encoder_hidden_states = outputs.hidden_states[-2]

            negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1)
            negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)

            # Classifier-free guidance: stack [unconditional, conditional].
            prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds])
            encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states])

        # Note that the micro conditionings _do_ flip the order of width, height for the original size
        # and the crop coordinates. This is how it was done in the original code base
        micro_conds = torch.tensor(
            [
                width,
                height,
                micro_conditioning_crop_coord[0],
                micro_conditioning_crop_coord[1],
                micro_conditioning_aesthetic_score,
            ],
            device=self._execution_device,
            dtype=encoder_hidden_states.dtype,
        )
        micro_conds = micro_conds.unsqueeze(0)
        micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1)

        shape = (batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor)

        if latents is None:
            # Start fully masked: every token position holds the mask token id.
            latents = torch.full(
                shape, self.scheduler.config.mask_token_id, dtype=torch.long, device=self._execution_device
            )

        self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device)

        num_warmup_steps = len(self.scheduler.timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, timestep in enumerate(self.scheduler.timesteps):
                if guidance_scale > 1.0:
                    # Duplicate latents so the unconditional and conditional
                    # branches run in a single forward pass.
                    model_input = torch.cat([latents] * 2)
                else:
                    model_input = latents

                model_output = self.transformer(
                    model_input,
                    micro_conds=micro_conds,
                    pooled_text_emb=prompt_embeds,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                )

                if guidance_scale > 1.0:
                    uncond_logits, cond_logits = model_output.chunk(2)
                    model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits)

                latents = self.scheduler.step(
                    model_output=model_output,
                    timestep=timestep,
                    sample=latents,
                    generator=generator,
                ).prev_sample

                if i == len(self.scheduler.timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, timestep, latents)

        if output_type == "latent":
            output = latents
        else:
            # VQ-VAE in fp16 may need a temporary upcast to float32 to decode
            # without overflow when the config requests it.
            needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast

            if needs_upcasting:
                self.vqvae.float()

            output = self.vqvae.decode(
                latents,
                force_not_quantize=True,
                shape=(
                    batch_size,
                    height // self.vae_scale_factor,
                    width // self.vae_scale_factor,
                    self.vqvae.config.latent_channels,
                ),
            ).sample.clip(0, 1)
            output = self.image_processor.postprocess(output, output_type)

            if needs_upcasting:
                self.vqvae.half()

        self.maybe_free_model_hooks()

        if not return_dict:
            return (output,)

        return ImagePipelineOutput(output)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused_img2img.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ from transformers import CLIPTextModelWithProjection, CLIPTokenizer
19
+
20
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
21
+ from ...models import UVit2DModel, VQModel
22
+ from ...schedulers import AmusedScheduler
23
+ from ...utils import replace_example_docstring
24
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
25
+
26
+
27
+ EXAMPLE_DOC_STRING = """
28
+ Examples:
29
+ ```py
30
+ >>> import torch
31
+ >>> from diffusers import AmusedImg2ImgPipeline
32
+ >>> from diffusers.utils import load_image
33
+
34
+ >>> pipe = AmusedImg2ImgPipeline.from_pretrained(
35
+ ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16
36
+ ... )
37
+ >>> pipe = pipe.to("cuda")
38
+
39
+ >>> prompt = "winter mountains"
40
+ >>> input_image = (
41
+ ... load_image(
42
+ ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg"
43
+ ... )
44
+ ... .resize((512, 512))
45
+ ... .convert("RGB")
46
+ ... )
47
+ >>> image = pipe(prompt, input_image).images[0]
48
+ ```
49
+ """
50
+
51
+
52
+ class AmusedImg2ImgPipeline(DiffusionPipeline):
53
+ image_processor: VaeImageProcessor
54
+ vqvae: VQModel
55
+ tokenizer: CLIPTokenizer
56
+ text_encoder: CLIPTextModelWithProjection
57
+ transformer: UVit2DModel
58
+ scheduler: AmusedScheduler
59
+
60
+ model_cpu_offload_seq = "text_encoder->transformer->vqvae"
61
+
62
+ # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before
63
+ # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter
64
+ # off the meta device. There should be a way to fix this instead of just not offloading it
65
+ _exclude_from_cpu_offload = ["vqvae"]
66
+
67
+ def __init__(
68
+ self,
69
+ vqvae: VQModel,
70
+ tokenizer: CLIPTokenizer,
71
+ text_encoder: CLIPTextModelWithProjection,
72
+ transformer: UVit2DModel,
73
+ scheduler: AmusedScheduler,
74
+ ):
75
+ super().__init__()
76
+
77
+ self.register_modules(
78
+ vqvae=vqvae,
79
+ tokenizer=tokenizer,
80
+ text_encoder=text_encoder,
81
+ transformer=transformer,
82
+ scheduler=scheduler,
83
+ )
84
+ self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
85
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False)
86
+
87
+ @torch.no_grad()
88
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
89
+ def __call__(
90
+ self,
91
+ prompt: Optional[Union[List[str], str]] = None,
92
+ image: PipelineImageInput = None,
93
+ strength: float = 0.5,
94
+ num_inference_steps: int = 12,
95
+ guidance_scale: float = 10.0,
96
+ negative_prompt: Optional[Union[str, List[str]]] = None,
97
+ num_images_per_prompt: Optional[int] = 1,
98
+ generator: Optional[torch.Generator] = None,
99
+ prompt_embeds: Optional[torch.Tensor] = None,
100
+ encoder_hidden_states: Optional[torch.Tensor] = None,
101
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
102
+ negative_encoder_hidden_states: Optional[torch.Tensor] = None,
103
+ output_type="pil",
104
+ return_dict: bool = True,
105
+ callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
106
+ callback_steps: int = 1,
107
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
108
+ micro_conditioning_aesthetic_score: int = 6,
109
+ micro_conditioning_crop_coord: Tuple[int, int] = (0, 0),
110
+ temperature: Union[int, Tuple[int, int], List[int]] = (2, 0),
111
+ ):
112
+ """
113
+ The call function to the pipeline for generation.
114
+
115
+ Args:
116
+ prompt (`str` or `List[str]`, *optional*):
117
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
118
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
119
+ `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
120
+ numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
121
+ or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
122
+ list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
123
+ latents as `image`, but if passing latents directly it is not encoded again.
124
+ strength (`float`, *optional*, defaults to 0.5):
125
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
126
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
127
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
128
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
129
+ essentially ignores `image`.
130
+ num_inference_steps (`int`, *optional*, defaults to 12):
131
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
132
+ expense of slower inference.
133
+ guidance_scale (`float`, *optional*, defaults to 10.0):
134
+ A higher guidance scale value encourages the model to generate images closely linked to the text
135
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
136
+ negative_prompt (`str` or `List[str]`, *optional*):
137
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
138
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
139
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
140
+ The number of images to generate per prompt.
141
+ generator (`torch.Generator`, *optional*):
142
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
143
+ generation deterministic.
144
+ prompt_embeds (`torch.Tensor`, *optional*):
145
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
146
+ provided, text embeddings are generated from the `prompt` input argument. A single vector from the
147
+ pooled and projected final hidden states.
148
+ encoder_hidden_states (`torch.Tensor`, *optional*):
149
+ Pre-generated penultimate hidden states from the text encoder providing additional text conditioning.
150
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
151
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
152
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
153
+ negative_encoder_hidden_states (`torch.Tensor`, *optional*):
154
+ Analogous to `encoder_hidden_states` for the positive prompt.
155
+ output_type (`str`, *optional*, defaults to `"pil"`):
156
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
157
+ return_dict (`bool`, *optional*, defaults to `True`):
158
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
159
+ plain tuple.
160
+ callback (`Callable`, *optional*):
161
+ A function that calls every `callback_steps` steps during inference. The function is called with the
162
+ following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
163
+ callback_steps (`int`, *optional*, defaults to 1):
164
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
165
+ every step.
166
+ cross_attention_kwargs (`dict`, *optional*):
167
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
168
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
169
+ micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6):
170
+ The targeted aesthetic score according to the laion aesthetic classifier. See
171
+ https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of
172
+ https://arxiv.org/abs/2307.01952.
173
+ micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)):
174
+ The targeted height, width crop coordinates. See the micro-conditioning section of
175
+ https://arxiv.org/abs/2307.01952.
176
+ temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)):
177
+ Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`.
178
+
179
+ Examples:
180
+
181
+ Returns:
182
+ [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
183
+ If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a
184
+ `tuple` is returned where the first element is a list with the generated images.
185
+ """
186
+
187
+ if (prompt_embeds is not None and encoder_hidden_states is None) or (
188
+ prompt_embeds is None and encoder_hidden_states is not None
189
+ ):
190
+ raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither")
191
+
192
+ if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or (
193
+ negative_prompt_embeds is None and negative_encoder_hidden_states is not None
194
+ ):
195
+ raise ValueError(
196
+ "pass either both `negative_prompt_embeds` and `negative_encoder_hidden_states` or neither"
197
+ )
198
+
199
+ if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None):
200
+ raise ValueError("pass only one of `prompt` or `prompt_embeds`")
201
+
202
+ if isinstance(prompt, str):
203
+ prompt = [prompt]
204
+
205
+ if prompt is not None:
206
+ batch_size = len(prompt)
207
+ else:
208
+ batch_size = prompt_embeds.shape[0]
209
+
210
+ batch_size = batch_size * num_images_per_prompt
211
+
212
+ if prompt_embeds is None:
213
+ input_ids = self.tokenizer(
214
+ prompt,
215
+ return_tensors="pt",
216
+ padding="max_length",
217
+ truncation=True,
218
+ max_length=self.tokenizer.model_max_length,
219
+ ).input_ids.to(self._execution_device)
220
+
221
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
222
+ prompt_embeds = outputs.text_embeds
223
+ encoder_hidden_states = outputs.hidden_states[-2]
224
+
225
+ prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1)
226
+ encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
227
+
228
+ if guidance_scale > 1.0:
229
+ if negative_prompt_embeds is None:
230
+ if negative_prompt is None:
231
+ negative_prompt = [""] * len(prompt)
232
+
233
+ if isinstance(negative_prompt, str):
234
+ negative_prompt = [negative_prompt]
235
+
236
+ input_ids = self.tokenizer(
237
+ negative_prompt,
238
+ return_tensors="pt",
239
+ padding="max_length",
240
+ truncation=True,
241
+ max_length=self.tokenizer.model_max_length,
242
+ ).input_ids.to(self._execution_device)
243
+
244
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
245
+ negative_prompt_embeds = outputs.text_embeds
246
+ negative_encoder_hidden_states = outputs.hidden_states[-2]
247
+
248
+ negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1)
249
+ negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
250
+
251
+ prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds])
252
+ encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states])
253
+
254
+ image = self.image_processor.preprocess(image)
255
+
256
+ height, width = image.shape[-2:]
257
+
258
+ # Note that the micro conditionings _do_ flip the order of width, height for the original size
259
+ # and the crop coordinates. This is how it was done in the original code base
260
+ micro_conds = torch.tensor(
261
+ [
262
+ width,
263
+ height,
264
+ micro_conditioning_crop_coord[0],
265
+ micro_conditioning_crop_coord[1],
266
+ micro_conditioning_aesthetic_score,
267
+ ],
268
+ device=self._execution_device,
269
+ dtype=encoder_hidden_states.dtype,
270
+ )
271
+
272
+ micro_conds = micro_conds.unsqueeze(0)
273
+ micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1)
274
+
275
+ self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device)
276
+ num_inference_steps = int(len(self.scheduler.timesteps) * strength)
277
+ start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps
278
+
279
+ needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast
280
+
281
+ if needs_upcasting:
282
+ self.vqvae.float()
283
+
284
+ latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents
285
+ latents_bsz, channels, latents_height, latents_width = latents.shape
286
+ latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width)
287
+ latents = self.scheduler.add_noise(
288
+ latents, self.scheduler.timesteps[start_timestep_idx - 1], generator=generator
289
+ )
290
+ latents = latents.repeat(num_images_per_prompt, 1, 1)
291
+
292
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
293
+ for i in range(start_timestep_idx, len(self.scheduler.timesteps)):
294
+ timestep = self.scheduler.timesteps[i]
295
+
296
+ if guidance_scale > 1.0:
297
+ model_input = torch.cat([latents] * 2)
298
+ else:
299
+ model_input = latents
300
+
301
+ model_output = self.transformer(
302
+ model_input,
303
+ micro_conds=micro_conds,
304
+ pooled_text_emb=prompt_embeds,
305
+ encoder_hidden_states=encoder_hidden_states,
306
+ cross_attention_kwargs=cross_attention_kwargs,
307
+ )
308
+
309
+ if guidance_scale > 1.0:
310
+ uncond_logits, cond_logits = model_output.chunk(2)
311
+ model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits)
312
+
313
+ latents = self.scheduler.step(
314
+ model_output=model_output,
315
+ timestep=timestep,
316
+ sample=latents,
317
+ generator=generator,
318
+ ).prev_sample
319
+
320
+ if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
321
+ progress_bar.update()
322
+ if callback is not None and i % callback_steps == 0:
323
+ step_idx = i // getattr(self.scheduler, "order", 1)
324
+ callback(step_idx, timestep, latents)
325
+
326
+ if output_type == "latent":
327
+ output = latents
328
+ else:
329
+ output = self.vqvae.decode(
330
+ latents,
331
+ force_not_quantize=True,
332
+ shape=(
333
+ batch_size,
334
+ height // self.vae_scale_factor,
335
+ width // self.vae_scale_factor,
336
+ self.vqvae.config.latent_channels,
337
+ ),
338
+ ).sample.clip(0, 1)
339
+ output = self.image_processor.postprocess(output, output_type)
340
+
341
+ if needs_upcasting:
342
+ self.vqvae.half()
343
+
344
+ self.maybe_free_model_hooks()
345
+
346
+ if not return_dict:
347
+ return (output,)
348
+
349
+ return ImagePipelineOutput(output)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/amused/pipeline_amused_inpaint.py ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from transformers import CLIPTextModelWithProjection, CLIPTokenizer
20
+
21
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
22
+ from ...models import UVit2DModel, VQModel
23
+ from ...schedulers import AmusedScheduler
24
+ from ...utils import replace_example_docstring
25
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
26
+
27
+
28
+ EXAMPLE_DOC_STRING = """
29
+ Examples:
30
+ ```py
31
+ >>> import torch
32
+ >>> from diffusers import AmusedInpaintPipeline
33
+ >>> from diffusers.utils import load_image
34
+
35
+ >>> pipe = AmusedInpaintPipeline.from_pretrained(
36
+ ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16
37
+ ... )
38
+ >>> pipe = pipe.to("cuda")
39
+
40
+ >>> prompt = "fall mountains"
41
+ >>> input_image = (
42
+ ... load_image(
43
+ ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg"
44
+ ... )
45
+ ... .resize((512, 512))
46
+ ... .convert("RGB")
47
+ ... )
48
+ >>> mask = (
49
+ ... load_image(
50
+ ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"
51
+ ... )
52
+ ... .resize((512, 512))
53
+ ... .convert("L")
54
+ ... )
55
+ >>> pipe(prompt, input_image, mask).images[0].save("out.png")
56
+ ```
57
+ """
58
+
59
+
60
+ class AmusedInpaintPipeline(DiffusionPipeline):
61
+ image_processor: VaeImageProcessor
62
+ vqvae: VQModel
63
+ tokenizer: CLIPTokenizer
64
+ text_encoder: CLIPTextModelWithProjection
65
+ transformer: UVit2DModel
66
+ scheduler: AmusedScheduler
67
+
68
+ model_cpu_offload_seq = "text_encoder->transformer->vqvae"
69
+
70
+ # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before
71
+ # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter
72
+ # off the meta device. There should be a way to fix this instead of just not offloading it
73
+ _exclude_from_cpu_offload = ["vqvae"]
74
+
75
+ def __init__(
76
+ self,
77
+ vqvae: VQModel,
78
+ tokenizer: CLIPTokenizer,
79
+ text_encoder: CLIPTextModelWithProjection,
80
+ transformer: UVit2DModel,
81
+ scheduler: AmusedScheduler,
82
+ ):
83
+ super().__init__()
84
+
85
+ self.register_modules(
86
+ vqvae=vqvae,
87
+ tokenizer=tokenizer,
88
+ text_encoder=text_encoder,
89
+ transformer=transformer,
90
+ scheduler=scheduler,
91
+ )
92
+ self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
93
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False)
94
+ self.mask_processor = VaeImageProcessor(
95
+ vae_scale_factor=self.vae_scale_factor,
96
+ do_normalize=False,
97
+ do_binarize=True,
98
+ do_convert_grayscale=True,
99
+ do_resize=True,
100
+ )
101
+ self.scheduler.register_to_config(masking_schedule="linear")
102
+
103
+ @torch.no_grad()
104
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
105
+ def __call__(
106
+ self,
107
+ prompt: Optional[Union[List[str], str]] = None,
108
+ image: PipelineImageInput = None,
109
+ mask_image: PipelineImageInput = None,
110
+ strength: float = 1.0,
111
+ num_inference_steps: int = 12,
112
+ guidance_scale: float = 10.0,
113
+ negative_prompt: Optional[Union[str, List[str]]] = None,
114
+ num_images_per_prompt: Optional[int] = 1,
115
+ generator: Optional[torch.Generator] = None,
116
+ prompt_embeds: Optional[torch.Tensor] = None,
117
+ encoder_hidden_states: Optional[torch.Tensor] = None,
118
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
119
+ negative_encoder_hidden_states: Optional[torch.Tensor] = None,
120
+ output_type="pil",
121
+ return_dict: bool = True,
122
+ callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
123
+ callback_steps: int = 1,
124
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
125
+ micro_conditioning_aesthetic_score: int = 6,
126
+ micro_conditioning_crop_coord: Tuple[int, int] = (0, 0),
127
+ temperature: Union[int, Tuple[int, int], List[int]] = (2, 0),
128
+ ):
129
+ """
130
+ The call function to the pipeline for generation.
131
+
132
+ Args:
133
+ prompt (`str` or `List[str]`, *optional*):
134
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
135
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
136
+ `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
137
+ numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
138
+ or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
139
+ list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
140
+ latents as `image`, but if passing latents directly it is not encoded again.
141
+ mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
142
+ `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask
143
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
144
+ single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
145
+ color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B,
146
+ H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
147
+ 1)`, or `(H, W)`.
148
+ strength (`float`, *optional*, defaults to 1.0):
149
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
150
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
151
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
152
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
153
+ essentially ignores `image`.
154
+ num_inference_steps (`int`, *optional*, defaults to 16):
155
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
156
+ expense of slower inference.
157
+ guidance_scale (`float`, *optional*, defaults to 10.0):
158
+ A higher guidance scale value encourages the model to generate images closely linked to the text
159
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
160
+ negative_prompt (`str` or `List[str]`, *optional*):
161
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
162
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
163
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
164
+ The number of images to generate per prompt.
165
+ generator (`torch.Generator`, *optional*):
166
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
167
+ generation deterministic.
168
+ prompt_embeds (`torch.Tensor`, *optional*):
169
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
170
+ provided, text embeddings are generated from the `prompt` input argument. A single vector from the
171
+ pooled and projected final hidden states.
172
+ encoder_hidden_states (`torch.Tensor`, *optional*):
173
+ Pre-generated penultimate hidden states from the text encoder providing additional text conditioning.
174
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
175
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
176
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
177
+ negative_encoder_hidden_states (`torch.Tensor`, *optional*):
178
+ Analogous to `encoder_hidden_states` for the positive prompt.
179
+ output_type (`str`, *optional*, defaults to `"pil"`):
180
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
181
+ return_dict (`bool`, *optional*, defaults to `True`):
182
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
183
+ plain tuple.
184
+ callback (`Callable`, *optional*):
185
+ A function that calls every `callback_steps` steps during inference. The function is called with the
186
+ following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
187
+ callback_steps (`int`, *optional*, defaults to 1):
188
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
189
+ every step.
190
+ cross_attention_kwargs (`dict`, *optional*):
191
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
192
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
193
+ micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6):
194
+ The targeted aesthetic score according to the laion aesthetic classifier. See
195
+ https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of
196
+ https://arxiv.org/abs/2307.01952.
197
+ micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)):
198
+ The targeted height, width crop coordinates. See the micro-conditioning section of
199
+ https://arxiv.org/abs/2307.01952.
200
+ temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)):
201
+ Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`.
202
+
203
+ Examples:
204
+
205
+ Returns:
206
+ [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
207
+ If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a
208
+ `tuple` is returned where the first element is a list with the generated images.
209
+ """
210
+
211
+ if (prompt_embeds is not None and encoder_hidden_states is None) or (
212
+ prompt_embeds is None and encoder_hidden_states is not None
213
+ ):
214
+ raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither")
215
+
216
+ if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or (
217
+ negative_prompt_embeds is None and negative_encoder_hidden_states is not None
218
+ ):
219
+ raise ValueError(
220
+ "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither"
221
+ )
222
+
223
+ if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None):
224
+ raise ValueError("pass only one of `prompt` or `prompt_embeds`")
225
+
226
+ if isinstance(prompt, str):
227
+ prompt = [prompt]
228
+
229
+ if prompt is not None:
230
+ batch_size = len(prompt)
231
+ else:
232
+ batch_size = prompt_embeds.shape[0]
233
+
234
+ batch_size = batch_size * num_images_per_prompt
235
+
236
+ if prompt_embeds is None:
237
+ input_ids = self.tokenizer(
238
+ prompt,
239
+ return_tensors="pt",
240
+ padding="max_length",
241
+ truncation=True,
242
+ max_length=self.tokenizer.model_max_length,
243
+ ).input_ids.to(self._execution_device)
244
+
245
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
246
+ prompt_embeds = outputs.text_embeds
247
+ encoder_hidden_states = outputs.hidden_states[-2]
248
+
249
+ prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1)
250
+ encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
251
+
252
+ if guidance_scale > 1.0:
253
+ if negative_prompt_embeds is None:
254
+ if negative_prompt is None:
255
+ negative_prompt = [""] * len(prompt)
256
+
257
+ if isinstance(negative_prompt, str):
258
+ negative_prompt = [negative_prompt]
259
+
260
+ input_ids = self.tokenizer(
261
+ negative_prompt,
262
+ return_tensors="pt",
263
+ padding="max_length",
264
+ truncation=True,
265
+ max_length=self.tokenizer.model_max_length,
266
+ ).input_ids.to(self._execution_device)
267
+
268
+ outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True)
269
+ negative_prompt_embeds = outputs.text_embeds
270
+ negative_encoder_hidden_states = outputs.hidden_states[-2]
271
+
272
+ negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1)
273
+ negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1)
274
+
275
+ prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds])
276
+ encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states])
277
+
278
+ image = self.image_processor.preprocess(image)
279
+
280
+ height, width = image.shape[-2:]
281
+
282
+ # Note that the micro conditionings _do_ flip the order of width, height for the original size
283
+ # and the crop coordinates. This is how it was done in the original code base
284
+ micro_conds = torch.tensor(
285
+ [
286
+ width,
287
+ height,
288
+ micro_conditioning_crop_coord[0],
289
+ micro_conditioning_crop_coord[1],
290
+ micro_conditioning_aesthetic_score,
291
+ ],
292
+ device=self._execution_device,
293
+ dtype=encoder_hidden_states.dtype,
294
+ )
295
+
296
+ micro_conds = micro_conds.unsqueeze(0)
297
+ micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1)
298
+
299
+ self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device)
300
+ num_inference_steps = int(len(self.scheduler.timesteps) * strength)
301
+ start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps
302
+
303
+ needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast
304
+
305
+ if needs_upcasting:
306
+ self.vqvae.float()
307
+
308
+ latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents
309
+ latents_bsz, channels, latents_height, latents_width = latents.shape
310
+ latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width)
311
+
312
+ mask = self.mask_processor.preprocess(
313
+ mask_image, height // self.vae_scale_factor, width // self.vae_scale_factor
314
+ )
315
+ mask = mask.reshape(mask.shape[0], latents_height, latents_width).bool().to(latents.device)
316
+ latents[mask] = self.scheduler.config.mask_token_id
317
+
318
+ starting_mask_ratio = mask.sum() / latents.numel()
319
+
320
+ latents = latents.repeat(num_images_per_prompt, 1, 1)
321
+
322
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
323
+ for i in range(start_timestep_idx, len(self.scheduler.timesteps)):
324
+ timestep = self.scheduler.timesteps[i]
325
+
326
+ if guidance_scale > 1.0:
327
+ model_input = torch.cat([latents] * 2)
328
+ else:
329
+ model_input = latents
330
+
331
+ model_output = self.transformer(
332
+ model_input,
333
+ micro_conds=micro_conds,
334
+ pooled_text_emb=prompt_embeds,
335
+ encoder_hidden_states=encoder_hidden_states,
336
+ cross_attention_kwargs=cross_attention_kwargs,
337
+ )
338
+
339
+ if guidance_scale > 1.0:
340
+ uncond_logits, cond_logits = model_output.chunk(2)
341
+ model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits)
342
+
343
+ latents = self.scheduler.step(
344
+ model_output=model_output,
345
+ timestep=timestep,
346
+ sample=latents,
347
+ generator=generator,
348
+ starting_mask_ratio=starting_mask_ratio,
349
+ ).prev_sample
350
+
351
+ if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
352
+ progress_bar.update()
353
+ if callback is not None and i % callback_steps == 0:
354
+ step_idx = i // getattr(self.scheduler, "order", 1)
355
+ callback(step_idx, timestep, latents)
356
+
357
+ if output_type == "latent":
358
+ output = latents
359
+ else:
360
+ output = self.vqvae.decode(
361
+ latents,
362
+ force_not_quantize=True,
363
+ shape=(
364
+ batch_size,
365
+ height // self.vae_scale_factor,
366
+ width // self.vae_scale_factor,
367
+ self.vqvae.config.latent_channels,
368
+ ),
369
+ ).sample.clip(0, 1)
370
+ output = self.image_processor.postprocess(output, output_type)
371
+
372
+ if needs_upcasting:
373
+ self.vqvae.half()
374
+
375
+ self.maybe_free_model_hooks()
376
+
377
+ if not return_dict:
378
+ return (output,)
379
+
380
+ return ImagePipelineOutput(output)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__init__.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ is_transformers_version,
11
+ )
12
+
13
+
14
+ _dummy_objects = {}
15
+ _import_structure = {}
16
+
17
+ try:
18
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
19
+ raise OptionalDependencyNotAvailable()
20
+ except OptionalDependencyNotAvailable:
21
+ from ...utils import dummy_torch_and_transformers_objects
22
+
23
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
24
+ else:
25
+ _import_structure["modeling_audioldm2"] = ["AudioLDM2ProjectionModel", "AudioLDM2UNet2DConditionModel"]
26
+ _import_structure["pipeline_audioldm2"] = ["AudioLDM2Pipeline"]
27
+
28
+
29
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
30
+ try:
31
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ from ...utils.dummy_torch_and_transformers_objects import *
35
+
36
+ else:
37
+ from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel
38
+ from .pipeline_audioldm2 import AudioLDM2Pipeline
39
+
40
+ else:
41
+ import sys
42
+
43
+ sys.modules[__name__] = _LazyModule(
44
+ __name__,
45
+ globals()["__file__"],
46
+ _import_structure,
47
+ module_spec=__spec__,
48
+ )
49
+ for name, value in _dummy_objects.items():
50
+ setattr(sys.modules[__name__], name, value)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/modeling_audioldm2.cpython-310.pyc ADDED
Binary file (38.8 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/__pycache__/pipeline_audioldm2.cpython-310.pyc ADDED
Binary file (33.9 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/audioldm2/modeling_audioldm2.py ADDED
@@ -0,0 +1,1530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.utils.checkpoint
21
+
22
+ from ...configuration_utils import ConfigMixin, register_to_config
23
+ from ...loaders import UNet2DConditionLoadersMixin
24
+ from ...models.activations import get_activation
25
+ from ...models.attention_processor import (
26
+ ADDED_KV_ATTENTION_PROCESSORS,
27
+ CROSS_ATTENTION_PROCESSORS,
28
+ AttentionProcessor,
29
+ AttnAddedKVProcessor,
30
+ AttnProcessor,
31
+ )
32
+ from ...models.embeddings import (
33
+ TimestepEmbedding,
34
+ Timesteps,
35
+ )
36
+ from ...models.modeling_utils import ModelMixin
37
+ from ...models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
38
+ from ...models.transformers.transformer_2d import Transformer2DModel
39
+ from ...models.unets.unet_2d_blocks import DownBlock2D, UpBlock2D
40
+ from ...models.unets.unet_2d_condition import UNet2DConditionOutput
41
+ from ...utils import BaseOutput, is_torch_version, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+
47
+ def add_special_tokens(hidden_states, attention_mask, sos_token, eos_token):
48
+ batch_size = hidden_states.shape[0]
49
+
50
+ if attention_mask is not None:
51
+ # Add two more steps to attn mask
52
+ new_attn_mask_step = attention_mask.new_ones((batch_size, 1))
53
+ attention_mask = torch.concat([new_attn_mask_step, attention_mask, new_attn_mask_step], dim=-1)
54
+
55
+ # Add the SOS / EOS tokens at the start / end of the sequence respectively
56
+ sos_token = sos_token.expand(batch_size, 1, -1)
57
+ eos_token = eos_token.expand(batch_size, 1, -1)
58
+ hidden_states = torch.concat([sos_token, hidden_states, eos_token], dim=1)
59
+ return hidden_states, attention_mask
60
+
61
+
62
+ @dataclass
63
+ class AudioLDM2ProjectionModelOutput(BaseOutput):
64
+ """
65
+ Args:
66
+ Class for AudioLDM2 projection layer's outputs.
67
+ hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
68
+ Sequence of hidden-states obtained by linearly projecting the hidden-states for each of the text
69
+ encoders and subsequently concatenating them together.
70
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
71
+ Mask to avoid performing attention on padding token indices, formed by concatenating the attention masks
72
+ for the two text encoders together. Mask values selected in `[0, 1]`:
73
+
74
+ - 1 for tokens that are **not masked**,
75
+ - 0 for tokens that are **masked**.
76
+ """
77
+
78
+ hidden_states: torch.Tensor
79
+ attention_mask: Optional[torch.LongTensor] = None
80
+
81
+
82
+ class AudioLDM2ProjectionModel(ModelMixin, ConfigMixin):
83
+ """
84
+ A simple linear projection model to map two text embeddings to a shared latent space. It also inserts learned
85
+ embedding vectors at the start and end of each text embedding sequence respectively. Each variable appended with
86
+ `_1` refers to that corresponding to the second text encoder. Otherwise, it is from the first.
87
+
88
+ Args:
89
+ text_encoder_dim (`int`):
90
+ Dimensionality of the text embeddings from the first text encoder (CLAP).
91
+ text_encoder_1_dim (`int`):
92
+ Dimensionality of the text embeddings from the second text encoder (T5 or VITS).
93
+ langauge_model_dim (`int`):
94
+ Dimensionality of the text embeddings from the language model (GPT2).
95
+ """
96
+
97
+ @register_to_config
98
+ def __init__(
99
+ self,
100
+ text_encoder_dim,
101
+ text_encoder_1_dim,
102
+ langauge_model_dim,
103
+ use_learned_position_embedding=None,
104
+ max_seq_length=None,
105
+ ):
106
+ super().__init__()
107
+ # additional projection layers for each text encoder
108
+ self.projection = nn.Linear(text_encoder_dim, langauge_model_dim)
109
+ self.projection_1 = nn.Linear(text_encoder_1_dim, langauge_model_dim)
110
+
111
+ # learnable SOS / EOS token embeddings for each text encoder
112
+ self.sos_embed = nn.Parameter(torch.ones(langauge_model_dim))
113
+ self.eos_embed = nn.Parameter(torch.ones(langauge_model_dim))
114
+
115
+ self.sos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim))
116
+ self.eos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim))
117
+
118
+ self.use_learned_position_embedding = use_learned_position_embedding
119
+
120
+ # learable positional embedding for vits encoder
121
+ if self.use_learned_position_embedding is not None:
122
+ self.learnable_positional_embedding = torch.nn.Parameter(
123
+ torch.zeros((1, text_encoder_1_dim, max_seq_length))
124
+ )
125
+
126
+ def forward(
127
+ self,
128
+ hidden_states: Optional[torch.Tensor] = None,
129
+ hidden_states_1: Optional[torch.Tensor] = None,
130
+ attention_mask: Optional[torch.LongTensor] = None,
131
+ attention_mask_1: Optional[torch.LongTensor] = None,
132
+ ):
133
+ hidden_states = self.projection(hidden_states)
134
+ hidden_states, attention_mask = add_special_tokens(
135
+ hidden_states, attention_mask, sos_token=self.sos_embed, eos_token=self.eos_embed
136
+ )
137
+
138
+ # Add positional embedding for Vits hidden state
139
+ if self.use_learned_position_embedding is not None:
140
+ hidden_states_1 = (hidden_states_1.permute(0, 2, 1) + self.learnable_positional_embedding).permute(0, 2, 1)
141
+
142
+ hidden_states_1 = self.projection_1(hidden_states_1)
143
+ hidden_states_1, attention_mask_1 = add_special_tokens(
144
+ hidden_states_1, attention_mask_1, sos_token=self.sos_embed_1, eos_token=self.eos_embed_1
145
+ )
146
+
147
+ # concatenate clap and t5 text encoding
148
+ hidden_states = torch.cat([hidden_states, hidden_states_1], dim=1)
149
+
150
+ # concatenate attention masks
151
+ if attention_mask is None and attention_mask_1 is not None:
152
+ attention_mask = attention_mask_1.new_ones((hidden_states[:2]))
153
+ elif attention_mask is not None and attention_mask_1 is None:
154
+ attention_mask_1 = attention_mask.new_ones((hidden_states_1[:2]))
155
+
156
+ if attention_mask is not None and attention_mask_1 is not None:
157
+ attention_mask = torch.cat([attention_mask, attention_mask_1], dim=-1)
158
+ else:
159
+ attention_mask = None
160
+
161
+ return AudioLDM2ProjectionModelOutput(
162
+ hidden_states=hidden_states,
163
+ attention_mask=attention_mask,
164
+ )
165
+
166
+
167
+ class AudioLDM2UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
168
+ r"""
169
+ A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
170
+ shaped output. Compared to the vanilla [`UNet2DConditionModel`], this variant optionally includes an additional
171
+ self-attention layer in each Transformer block, as well as multiple cross-attention layers. It also allows for up
172
+ to two cross-attention embeddings, `encoder_hidden_states` and `encoder_hidden_states_1`.
173
+
174
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
175
+ for all models (such as downloading or saving).
176
+
177
+ Parameters:
178
+ sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
179
+ Height and width of input/output sample.
180
+ in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
181
+ out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
182
+ flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
183
+ Whether to flip the sin to cos in the time embedding.
184
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
185
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
186
+ The tuple of downsample blocks to use.
187
+ mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
188
+ Block type for middle of UNet, it can only be `UNetMidBlock2DCrossAttn` for AudioLDM2.
189
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
190
+ The tuple of upsample blocks to use.
191
+ only_cross_attention (`bool` or `Tuple[bool]`, *optional*, default to `False`):
192
+ Whether to include self-attention in the basic transformer blocks, see
193
+ [`~models.attention.BasicTransformerBlock`].
194
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
195
+ The tuple of output channels for each block.
196
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
197
+ downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
198
+ mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
199
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
200
+ norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
201
+ If `None`, normalization and activation layers is skipped in post-processing.
202
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
203
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
204
+ The dimension of the cross attention features.
205
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
206
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
207
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
208
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
209
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
210
+ num_attention_heads (`int`, *optional*):
211
+ The number of attention heads. If not defined, defaults to `attention_head_dim`
212
+ resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
213
+ for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
214
+ class_embed_type (`str`, *optional*, defaults to `None`):
215
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
216
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
217
+ num_class_embeds (`int`, *optional*, defaults to `None`):
218
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
219
+ class conditioning with `class_embed_type` equal to `None`.
220
+ time_embedding_type (`str`, *optional*, defaults to `positional`):
221
+ The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
222
+ time_embedding_dim (`int`, *optional*, defaults to `None`):
223
+ An optional override for the dimension of the projected time embedding.
224
+ time_embedding_act_fn (`str`, *optional*, defaults to `None`):
225
+ Optional activation function to use only once on the time embeddings before they are passed to the rest of
226
+ the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
227
+ timestep_post_act (`str`, *optional*, defaults to `None`):
228
+ The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
229
+ time_cond_proj_dim (`int`, *optional*, defaults to `None`):
230
+ The dimension of `cond_proj` layer in the timestep embedding.
231
+ conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
232
+ conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
233
+ projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
234
+ `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
235
+ class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
236
+ embeddings with the class embeddings.
237
+ """
238
+
239
+ _supports_gradient_checkpointing = True
240
+
241
+ @register_to_config
242
+ def __init__(
243
+ self,
244
+ sample_size: Optional[int] = None,
245
+ in_channels: int = 4,
246
+ out_channels: int = 4,
247
+ flip_sin_to_cos: bool = True,
248
+ freq_shift: int = 0,
249
+ down_block_types: Tuple[str] = (
250
+ "CrossAttnDownBlock2D",
251
+ "CrossAttnDownBlock2D",
252
+ "CrossAttnDownBlock2D",
253
+ "DownBlock2D",
254
+ ),
255
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
256
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
257
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
258
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
259
+ layers_per_block: Union[int, Tuple[int]] = 2,
260
+ downsample_padding: int = 1,
261
+ mid_block_scale_factor: float = 1,
262
+ act_fn: str = "silu",
263
+ norm_num_groups: Optional[int] = 32,
264
+ norm_eps: float = 1e-5,
265
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
266
+ transformer_layers_per_block: Union[int, Tuple[int]] = 1,
267
+ attention_head_dim: Union[int, Tuple[int]] = 8,
268
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
269
+ use_linear_projection: bool = False,
270
+ class_embed_type: Optional[str] = None,
271
+ num_class_embeds: Optional[int] = None,
272
+ upcast_attention: bool = False,
273
+ resnet_time_scale_shift: str = "default",
274
+ time_embedding_type: str = "positional",
275
+ time_embedding_dim: Optional[int] = None,
276
+ time_embedding_act_fn: Optional[str] = None,
277
+ timestep_post_act: Optional[str] = None,
278
+ time_cond_proj_dim: Optional[int] = None,
279
+ conv_in_kernel: int = 3,
280
+ conv_out_kernel: int = 3,
281
+ projection_class_embeddings_input_dim: Optional[int] = None,
282
+ class_embeddings_concat: bool = False,
283
+ ):
284
+ super().__init__()
285
+
286
+ self.sample_size = sample_size
287
+
288
+ if num_attention_heads is not None:
289
+ raise ValueError(
290
+ "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
291
+ )
292
+
293
+ # If `num_attention_heads` is not defined (which is the case for most models)
294
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
295
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
296
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
297
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
298
+ # which is why we correct for the naming here.
299
+ num_attention_heads = num_attention_heads or attention_head_dim
300
+
301
+ # Check inputs
302
+ if len(down_block_types) != len(up_block_types):
303
+ raise ValueError(
304
+ f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
305
+ )
306
+
307
+ if len(block_out_channels) != len(down_block_types):
308
+ raise ValueError(
309
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
310
+ )
311
+
312
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
313
+ raise ValueError(
314
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
315
+ )
316
+
317
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
318
+ raise ValueError(
319
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
320
+ )
321
+
322
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
323
+ raise ValueError(
324
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
325
+ )
326
+
327
+ if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
328
+ raise ValueError(
329
+ f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
330
+ )
331
+
332
+ if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
333
+ raise ValueError(
334
+ f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
335
+ )
336
+
337
+ # input
338
+ conv_in_padding = (conv_in_kernel - 1) // 2
339
+ self.conv_in = nn.Conv2d(
340
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
341
+ )
342
+
343
+ # time
344
+ if time_embedding_type == "positional":
345
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
346
+
347
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
348
+ timestep_input_dim = block_out_channels[0]
349
+ else:
350
+ raise ValueError(f"{time_embedding_type} does not exist. Please make sure to use `positional`.")
351
+
352
+ self.time_embedding = TimestepEmbedding(
353
+ timestep_input_dim,
354
+ time_embed_dim,
355
+ act_fn=act_fn,
356
+ post_act_fn=timestep_post_act,
357
+ cond_proj_dim=time_cond_proj_dim,
358
+ )
359
+
360
+ # class embedding
361
+ if class_embed_type is None and num_class_embeds is not None:
362
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
363
+ elif class_embed_type == "timestep":
364
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
365
+ elif class_embed_type == "identity":
366
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
367
+ elif class_embed_type == "projection":
368
+ if projection_class_embeddings_input_dim is None:
369
+ raise ValueError(
370
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
371
+ )
372
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
373
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
374
+ # 2. it projects from an arbitrary input dimension.
375
+ #
376
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
377
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
378
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
379
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
380
+ elif class_embed_type == "simple_projection":
381
+ if projection_class_embeddings_input_dim is None:
382
+ raise ValueError(
383
+ "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
384
+ )
385
+ self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
386
+ else:
387
+ self.class_embedding = None
388
+
389
+ if time_embedding_act_fn is None:
390
+ self.time_embed_act = None
391
+ else:
392
+ self.time_embed_act = get_activation(time_embedding_act_fn)
393
+
394
+ self.down_blocks = nn.ModuleList([])
395
+ self.up_blocks = nn.ModuleList([])
396
+
397
+ if isinstance(only_cross_attention, bool):
398
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
399
+
400
+ if isinstance(num_attention_heads, int):
401
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
402
+
403
+ if isinstance(cross_attention_dim, int):
404
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
405
+
406
+ if isinstance(layers_per_block, int):
407
+ layers_per_block = [layers_per_block] * len(down_block_types)
408
+
409
+ if isinstance(transformer_layers_per_block, int):
410
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
411
+
412
+ if class_embeddings_concat:
413
+ # The time embeddings are concatenated with the class embeddings. The dimension of the
414
+ # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
415
+ # regular time embeddings
416
+ blocks_time_embed_dim = time_embed_dim * 2
417
+ else:
418
+ blocks_time_embed_dim = time_embed_dim
419
+
420
+ # down
421
+ output_channel = block_out_channels[0]
422
+ for i, down_block_type in enumerate(down_block_types):
423
+ input_channel = output_channel
424
+ output_channel = block_out_channels[i]
425
+ is_final_block = i == len(block_out_channels) - 1
426
+
427
+ down_block = get_down_block(
428
+ down_block_type,
429
+ num_layers=layers_per_block[i],
430
+ transformer_layers_per_block=transformer_layers_per_block[i],
431
+ in_channels=input_channel,
432
+ out_channels=output_channel,
433
+ temb_channels=blocks_time_embed_dim,
434
+ add_downsample=not is_final_block,
435
+ resnet_eps=norm_eps,
436
+ resnet_act_fn=act_fn,
437
+ resnet_groups=norm_num_groups,
438
+ cross_attention_dim=cross_attention_dim[i],
439
+ num_attention_heads=num_attention_heads[i],
440
+ downsample_padding=downsample_padding,
441
+ use_linear_projection=use_linear_projection,
442
+ only_cross_attention=only_cross_attention[i],
443
+ upcast_attention=upcast_attention,
444
+ resnet_time_scale_shift=resnet_time_scale_shift,
445
+ )
446
+ self.down_blocks.append(down_block)
447
+
448
+ # mid
449
+ if mid_block_type == "UNetMidBlock2DCrossAttn":
450
+ self.mid_block = UNetMidBlock2DCrossAttn(
451
+ transformer_layers_per_block=transformer_layers_per_block[-1],
452
+ in_channels=block_out_channels[-1],
453
+ temb_channels=blocks_time_embed_dim,
454
+ resnet_eps=norm_eps,
455
+ resnet_act_fn=act_fn,
456
+ output_scale_factor=mid_block_scale_factor,
457
+ resnet_time_scale_shift=resnet_time_scale_shift,
458
+ cross_attention_dim=cross_attention_dim[-1],
459
+ num_attention_heads=num_attention_heads[-1],
460
+ resnet_groups=norm_num_groups,
461
+ use_linear_projection=use_linear_projection,
462
+ upcast_attention=upcast_attention,
463
+ )
464
+ else:
465
+ raise ValueError(
466
+ f"unknown mid_block_type : {mid_block_type}. Should be `UNetMidBlock2DCrossAttn` for AudioLDM2."
467
+ )
468
+
469
+ # count how many layers upsample the images
470
+ self.num_upsamplers = 0
471
+
472
+ # up
473
+ reversed_block_out_channels = list(reversed(block_out_channels))
474
+ reversed_num_attention_heads = list(reversed(num_attention_heads))
475
+ reversed_layers_per_block = list(reversed(layers_per_block))
476
+ reversed_cross_attention_dim = list(reversed(cross_attention_dim))
477
+ reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
478
+ only_cross_attention = list(reversed(only_cross_attention))
479
+
480
+ output_channel = reversed_block_out_channels[0]
481
+ for i, up_block_type in enumerate(up_block_types):
482
+ is_final_block = i == len(block_out_channels) - 1
483
+
484
+ prev_output_channel = output_channel
485
+ output_channel = reversed_block_out_channels[i]
486
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
487
+
488
+ # add upsample block for all BUT final layer
489
+ if not is_final_block:
490
+ add_upsample = True
491
+ self.num_upsamplers += 1
492
+ else:
493
+ add_upsample = False
494
+
495
+ up_block = get_up_block(
496
+ up_block_type,
497
+ num_layers=reversed_layers_per_block[i] + 1,
498
+ transformer_layers_per_block=reversed_transformer_layers_per_block[i],
499
+ in_channels=input_channel,
500
+ out_channels=output_channel,
501
+ prev_output_channel=prev_output_channel,
502
+ temb_channels=blocks_time_embed_dim,
503
+ add_upsample=add_upsample,
504
+ resnet_eps=norm_eps,
505
+ resnet_act_fn=act_fn,
506
+ resnet_groups=norm_num_groups,
507
+ cross_attention_dim=reversed_cross_attention_dim[i],
508
+ num_attention_heads=reversed_num_attention_heads[i],
509
+ use_linear_projection=use_linear_projection,
510
+ only_cross_attention=only_cross_attention[i],
511
+ upcast_attention=upcast_attention,
512
+ resnet_time_scale_shift=resnet_time_scale_shift,
513
+ )
514
+ self.up_blocks.append(up_block)
515
+ prev_output_channel = output_channel
516
+
517
+ # out
518
+ if norm_num_groups is not None:
519
+ self.conv_norm_out = nn.GroupNorm(
520
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
521
+ )
522
+
523
+ self.conv_act = get_activation(act_fn)
524
+
525
+ else:
526
+ self.conv_norm_out = None
527
+ self.conv_act = None
528
+
529
+ conv_out_padding = (conv_out_kernel - 1) // 2
530
+ self.conv_out = nn.Conv2d(
531
+ block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
532
+ )
533
+
534
+ @property
535
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
536
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
537
+ r"""
538
+ Returns:
539
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
540
+ indexed by its weight name.
541
+ """
542
+ # set recursively
543
+ processors = {}
544
+
545
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
546
+ if hasattr(module, "get_processor"):
547
+ processors[f"{name}.processor"] = module.get_processor()
548
+
549
+ for sub_name, child in module.named_children():
550
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
551
+
552
+ return processors
553
+
554
+ for name, module in self.named_children():
555
+ fn_recursive_add_processors(name, module, processors)
556
+
557
+ return processors
558
+
559
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
560
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
561
+ r"""
562
+ Sets the attention processor to use to compute attention.
563
+
564
+ Parameters:
565
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
566
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
567
+ for **all** `Attention` layers.
568
+
569
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
570
+ processor. This is strongly recommended when setting trainable attention processors.
571
+
572
+ """
573
+ count = len(self.attn_processors.keys())
574
+
575
+ if isinstance(processor, dict) and len(processor) != count:
576
+ raise ValueError(
577
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
578
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
579
+ )
580
+
581
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
582
+ if hasattr(module, "set_processor"):
583
+ if not isinstance(processor, dict):
584
+ module.set_processor(processor)
585
+ else:
586
+ module.set_processor(processor.pop(f"{name}.processor"))
587
+
588
+ for sub_name, child in module.named_children():
589
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
590
+
591
+ for name, module in self.named_children():
592
+ fn_recursive_attn_processor(name, module, processor)
593
+
594
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
595
+ def set_default_attn_processor(self):
596
+ """
597
+ Disables custom attention processors and sets the default attention implementation.
598
+ """
599
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
600
+ processor = AttnAddedKVProcessor()
601
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
602
+ processor = AttnProcessor()
603
+ else:
604
+ raise ValueError(
605
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
606
+ )
607
+
608
+ self.set_attn_processor(processor)
609
+
610
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice
611
+ def set_attention_slice(self, slice_size):
612
+ r"""
613
+ Enable sliced attention computation.
614
+
615
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
616
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
617
+
618
+ Args:
619
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
620
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
621
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
622
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
623
+ must be a multiple of `slice_size`.
624
+ """
625
+ sliceable_head_dims = []
626
+
627
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
628
+ if hasattr(module, "set_attention_slice"):
629
+ sliceable_head_dims.append(module.sliceable_head_dim)
630
+
631
+ for child in module.children():
632
+ fn_recursive_retrieve_sliceable_dims(child)
633
+
634
+ # retrieve number of attention layers
635
+ for module in self.children():
636
+ fn_recursive_retrieve_sliceable_dims(module)
637
+
638
+ num_sliceable_layers = len(sliceable_head_dims)
639
+
640
+ if slice_size == "auto":
641
+ # half the attention head size is usually a good trade-off between
642
+ # speed and memory
643
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
644
+ elif slice_size == "max":
645
+ # make smallest slice possible
646
+ slice_size = num_sliceable_layers * [1]
647
+
648
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
649
+
650
+ if len(slice_size) != len(sliceable_head_dims):
651
+ raise ValueError(
652
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
653
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
654
+ )
655
+
656
+ for i in range(len(slice_size)):
657
+ size = slice_size[i]
658
+ dim = sliceable_head_dims[i]
659
+ if size is not None and size > dim:
660
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
661
+
662
+ # Recursively walk through all the children.
663
+ # Any children which exposes the set_attention_slice method
664
+ # gets the message
665
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
666
+ if hasattr(module, "set_attention_slice"):
667
+ module.set_attention_slice(slice_size.pop())
668
+
669
+ for child in module.children():
670
+ fn_recursive_set_attention_slice(child, slice_size)
671
+
672
+ reversed_slice_size = list(reversed(slice_size))
673
+ for module in self.children():
674
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
675
+
676
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel._set_gradient_checkpointing
677
+ def _set_gradient_checkpointing(self, module, value=False):
678
+ if hasattr(module, "gradient_checkpointing"):
679
+ module.gradient_checkpointing = value
680
+
681
    def forward(
        self,
        sample: torch.Tensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
        encoder_hidden_states_1: Optional[torch.Tensor] = None,
        encoder_attention_mask_1: Optional[torch.Tensor] = None,
    ) -> Union[UNet2DConditionOutput, Tuple]:
        r"""
        The [`AudioLDM2UNet2DConditionModel`] forward method.

        Args:
            sample (`torch.Tensor`):
                The noisy input tensor with the following shape `(batch, channel, height, width)`.
            timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
            class_labels (`torch.Tensor`, *optional*):
                Class labels for conditioning; required when the model was configured with class embeddings.
            timestep_cond (`torch.Tensor`, *optional*):
                Extra conditioning passed through to the time-embedding MLP.
            attention_mask (`torch.Tensor`, *optional*):
                A self-attention mask of shape `(batch, key_tokens)` (1 = keep, 0 = discard).
            encoder_attention_mask (`torch.Tensor`):
                A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
                `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
                which adds large negative values to the attention scores corresponding to "discard" tokens.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
                tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
            encoder_hidden_states_1 (`torch.Tensor`, *optional*):
                A second set of encoder hidden states with shape `(batch, sequence_length_2, feature_dim_2)`. Can be
                used to condition the model on a different set of embeddings to `encoder_hidden_states`.
            encoder_attention_mask_1 (`torch.Tensor`, *optional*):
                A cross-attention mask of shape `(batch, sequence_length_2)` is applied to `encoder_hidden_states_1`.
                If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
                which adds large negative values to the attention scores corresponding to "discard" tokens.

        Returns:
            [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
                If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
                otherwise a `tuple` is returned where the first element is the sample tensor.
        """
        # By default samples have to be at least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
        # However, the upsampling interpolation output size can be forced to fit any upsampling size
        # on the fly if necessary.
        default_overall_up_factor = 2**self.num_upsamplers

        # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
        forward_upsample_size = False
        upsample_size = None

        if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
            logger.info("Forward upsample size to force interpolation output size.")
            forward_upsample_size = True

        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch,                    1, key_tokens]
        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch,  heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        if attention_mask is not None:
            # assume that mask is expressed as:
            #   (1 = keep,      0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #   (keep = +0,     discard = -10000.0)
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None:
            encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)

        # the second conditioning mask gets the same (1=keep, 0=discard) -> additive-bias conversion
        if encoder_attention_mask_1 is not None:
            encoder_attention_mask_1 = (1 - encoder_attention_mask_1.to(sample.dtype)) * -10000.0
            encoder_attention_mask_1 = encoder_attention_mask_1.unsqueeze(1)

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            # mps does not support float64 / int64, so downcast the promoted dtype there
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # `Timesteps` does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=sample.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)
        # aug_emb is kept for structural parity with UNet2DConditionModel; no addition-embedding
        # path exists here, so it stays None and the `emb + aug_emb` below is a no-op.
        aug_emb = None

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

                # `Timesteps` does not contain any weights and will always return f32 tensors
                # there might be better ways to encapsulate this.
                class_labels = class_labels.to(dtype=sample.dtype)

            class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)

            if self.config.class_embeddings_concat:
                emb = torch.cat([emb, class_emb], dim=-1)
            else:
                emb = emb + class_emb

        emb = emb + aug_emb if aug_emb is not None else emb

        if self.time_embed_act is not None:
            emb = self.time_embed_act(emb)

        # 2. pre-process
        sample = self.conv_in(sample)

        # 3. down
        # Collect every down-block residual for the skip connections consumed in step 5.
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                    encoder_attention_mask=encoder_attention_mask,
                    encoder_hidden_states_1=encoder_hidden_states_1,
                    encoder_attention_mask_1=encoder_attention_mask_1,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        if self.mid_block is not None:
            sample = self.mid_block(
                sample,
                emb,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                encoder_attention_mask=encoder_attention_mask,
                encoder_hidden_states_1=encoder_hidden_states_1,
                encoder_attention_mask_1=encoder_attention_mask_1,
            )

        # 5. up
        for i, upsample_block in enumerate(self.up_blocks):
            is_final_block = i == len(self.up_blocks) - 1

            # Pop this block's skip connections off the end of the stored residuals.
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            # if we have not reached the final block and need to forward the
            # upsample size, we do it here
            if not is_final_block and forward_upsample_size:
                upsample_size = down_block_res_samples[-1].shape[2:]

            if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
                sample = upsample_block(
                    hidden_states=sample,
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    upsample_size=upsample_size,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    encoder_hidden_states_1=encoder_hidden_states_1,
                    encoder_attention_mask_1=encoder_attention_mask_1,
                )
            else:
                sample = upsample_block(
                    hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
                )

        # 6. post-process
        if self.conv_norm_out:
            sample = self.conv_norm_out(sample)
            sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if not return_dict:
            return (sample,)

        return UNet2DConditionOutput(sample=sample)
890
+
891
+
892
def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
):
    """
    Factory that builds a down block for the AudioLDM2 UNet.

    Supports `"DownBlock2D"` and `"CrossAttnDownBlock2D"` (optionally prefixed with the
    legacy `"UNetRes"` marker, which is stripped). Raises `ValueError` for any other
    block type, or when `cross_attention_dim` is missing for a cross-attention block.
    """
    # Accept and strip the legacy "UNetRes" prefix before dispatching.
    if down_block_type.startswith("UNetRes"):
        down_block_type = down_block_type[len("UNetRes") :]

    if down_block_type == "DownBlock2D":
        return DownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )

    if down_block_type == "CrossAttnDownBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
        return CrossAttnDownBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )

    raise ValueError(f"{down_block_type} does not exist.")
947
+
948
+
949
def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
):
    """
    Factory that builds an up block for the AudioLDM2 UNet.

    Supports `"UpBlock2D"` and `"CrossAttnUpBlock2D"` (optionally prefixed with the
    legacy `"UNetRes"` marker, which is stripped). Raises `ValueError` for any other
    block type, or when `cross_attention_dim` is missing for a cross-attention block.
    """
    # Accept and strip the legacy "UNetRes" prefix before dispatching.
    if up_block_type.startswith("UNetRes"):
        up_block_type = up_block_type[len("UNetRes") :]

    if up_block_type == "UpBlock2D":
        return UpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )

    if up_block_type == "CrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
        return CrossAttnUpBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )

    raise ValueError(f"{up_block_type} does not exist.")
1004
+
1005
+
1006
class CrossAttnDownBlock2D(nn.Module):
    """
    Down block interleaving resnets with multiple cross-attention transformers per layer.

    Each of the `num_layers` resnets is followed by `len(cross_attention_dim)` transformer
    blocks, one per conditioning source. Transformers at index 0-1 attend over
    `encoder_hidden_states`, those at index > 1 over `encoder_hidden_states_1`; an entry of
    `None` in `cross_attention_dim` yields a double-self-attention transformer instead.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        downsample_padding=1,
        add_downsample=True,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        # Normalize to a tuple: one transformer is built per entry, per resnet layer.
        if isinstance(cross_attention_dim, int):
            cross_attention_dim = (cross_attention_dim,)
        if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4:
            raise ValueError(
                "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention "
                f"dims is less than or equal to 4. Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}"
            )
        self.cross_attention_dim = cross_attention_dim

        for i in range(num_layers):
            # Only the first resnet changes the channel count; later ones keep out_channels.
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            for j in range(len(cross_attention_dim)):
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block,
                        cross_attention_dim=cross_attention_dim[j],
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        # A `None` dim means no conditioning source for this transformer:
                        # fall back to double self-attention.
                        double_self_attention=True if cross_attention_dim[j] is None else False,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states_1: Optional[torch.Tensor] = None,
        encoder_attention_mask_1: Optional[torch.Tensor] = None,
    ):
        # Returns (hidden_states, output_states): the residuals collected after each
        # resnet+attention layer (and after downsampling) feed the UNet skip connections.
        output_states = ()
        num_layers = len(self.resnets)
        num_attention_per_layer = len(self.attentions) // num_layers

        # Fall back to the primary conditioning when the secondary one is absent.
        encoder_hidden_states_1 = (
            encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states
        )
        # NOTE(review): this condition tests `encoder_hidden_states_1` (already re-assigned above,
        # so non-None whenever `encoder_hidden_states` is given), not `encoder_attention_mask_1`.
        # The mask therefore does NOT fall back to `encoder_attention_mask` when only the hidden
        # states fell back — confirm against upstream intent before changing.
        encoder_attention_mask_1 = (
            encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask
        )

        for i in range(num_layers):
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                # Gradient-checkpointed path: wrap each sub-module so activations are
                # recomputed in backward instead of stored.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.resnets[i]),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                for idx, cross_attention_dim in enumerate(self.cross_attention_dim):
                    # Route each transformer to its conditioning source: idx 0-1 -> primary,
                    # idx > 1 -> secondary, `None` dim -> unconditioned (self-attention only).
                    if cross_attention_dim is not None and idx <= 1:
                        forward_encoder_hidden_states = encoder_hidden_states
                        forward_encoder_attention_mask = encoder_attention_mask
                    elif cross_attention_dim is not None and idx > 1:
                        forward_encoder_hidden_states = encoder_hidden_states_1
                        forward_encoder_attention_mask = encoder_attention_mask_1
                    else:
                        forward_encoder_hidden_states = None
                        forward_encoder_attention_mask = None
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False),
                        hidden_states,
                        forward_encoder_hidden_states,
                        None,  # timestep
                        None,  # class_labels
                        cross_attention_kwargs,
                        attention_mask,
                        forward_encoder_attention_mask,
                        **ckpt_kwargs,
                    )[0]
            else:
                hidden_states = self.resnets[i](hidden_states, temb)
                for idx, cross_attention_dim in enumerate(self.cross_attention_dim):
                    # Same conditioning routing as the checkpointed branch above.
                    if cross_attention_dim is not None and idx <= 1:
                        forward_encoder_hidden_states = encoder_hidden_states
                        forward_encoder_attention_mask = encoder_attention_mask
                    elif cross_attention_dim is not None and idx > 1:
                        forward_encoder_hidden_states = encoder_hidden_states_1
                        forward_encoder_attention_mask = encoder_attention_mask_1
                    else:
                        forward_encoder_hidden_states = None
                        forward_encoder_attention_mask = None
                    hidden_states = self.attentions[i * num_attention_per_layer + idx](
                        hidden_states,
                        attention_mask=attention_mask,
                        encoder_hidden_states=forward_encoder_hidden_states,
                        encoder_attention_mask=forward_encoder_attention_mask,
                        return_dict=False,
                    )[0]

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states
1183
+
1184
+
1185
class UNetMidBlock2DCrossAttn(nn.Module):
    """
    Mid block: a leading resnet, then `num_layers` repetitions of
    (`len(cross_attention_dim)` transformers followed by a resnet).

    Transformers at index 0-1 attend over `encoder_hidden_states`, those at index > 1 over
    `encoder_hidden_states_1`; an entry of `None` in `cross_attention_dim` yields a
    double-self-attention transformer instead.
    """

    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        use_linear_projection=False,
        upcast_attention=False,
    ):
        super().__init__()

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        # Normalize to a tuple: one transformer is built per entry, per layer.
        if isinstance(cross_attention_dim, int):
            cross_attention_dim = (cross_attention_dim,)
        if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4:
            raise ValueError(
                "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention "
                f"dims is less than or equal to 4. Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}"
            )
        self.cross_attention_dim = cross_attention_dim

        # there is always at least one resnet
        resnets = [
            ResnetBlock2D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
            )
        ]
        attentions = []

        for i in range(num_layers):
            for j in range(len(cross_attention_dim)):
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        in_channels // num_attention_heads,
                        in_channels=in_channels,
                        num_layers=transformer_layers_per_block,
                        cross_attention_dim=cross_attention_dim[j],
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        upcast_attention=upcast_attention,
                        # A `None` dim means no conditioning source for this transformer:
                        # fall back to double self-attention.
                        double_self_attention=True if cross_attention_dim[j] is None else False,
                    )
                )
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states_1: Optional[torch.Tensor] = None,
        encoder_attention_mask_1: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Leading resnet, then alternating (attentions, resnet) per layer.
        hidden_states = self.resnets[0](hidden_states, temb)
        # self.resnets has one extra leading resnet, hence the `- 1`.
        num_attention_per_layer = len(self.attentions) // (len(self.resnets) - 1)

        # Fall back to the primary conditioning when the secondary one is absent.
        encoder_hidden_states_1 = (
            encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states
        )
        # NOTE(review): this condition tests `encoder_hidden_states_1` (already re-assigned above),
        # not `encoder_attention_mask_1`, so the mask does not fall back together with the hidden
        # states — confirm against upstream intent before changing.
        encoder_attention_mask_1 = (
            encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask
        )

        for i in range(len(self.resnets[1:])):
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                # Gradient-checkpointed path: recompute activations in backward.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                for idx, cross_attention_dim in enumerate(self.cross_attention_dim):
                    # Route each transformer to its conditioning source: idx 0-1 -> primary,
                    # idx > 1 -> secondary, `None` dim -> unconditioned (self-attention only).
                    if cross_attention_dim is not None and idx <= 1:
                        forward_encoder_hidden_states = encoder_hidden_states
                        forward_encoder_attention_mask = encoder_attention_mask
                    elif cross_attention_dim is not None and idx > 1:
                        forward_encoder_hidden_states = encoder_hidden_states_1
                        forward_encoder_attention_mask = encoder_attention_mask_1
                    else:
                        forward_encoder_hidden_states = None
                        forward_encoder_attention_mask = None
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False),
                        hidden_states,
                        forward_encoder_hidden_states,
                        None,  # timestep
                        None,  # class_labels
                        cross_attention_kwargs,
                        attention_mask,
                        forward_encoder_attention_mask,
                        **ckpt_kwargs,
                    )[0]
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.resnets[i + 1]),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
            else:
                for idx, cross_attention_dim in enumerate(self.cross_attention_dim):
                    # Same conditioning routing as the checkpointed branch above.
                    if cross_attention_dim is not None and idx <= 1:
                        forward_encoder_hidden_states = encoder_hidden_states
                        forward_encoder_attention_mask = encoder_attention_mask
                    elif cross_attention_dim is not None and idx > 1:
                        forward_encoder_hidden_states = encoder_hidden_states_1
                        forward_encoder_attention_mask = encoder_attention_mask_1
                    else:
                        forward_encoder_hidden_states = None
                        forward_encoder_attention_mask = None
                    hidden_states = self.attentions[i * num_attention_per_layer + idx](
                        hidden_states,
                        attention_mask=attention_mask,
                        encoder_hidden_states=forward_encoder_hidden_states,
                        encoder_attention_mask=forward_encoder_attention_mask,
                        return_dict=False,
                    )[0]

                hidden_states = self.resnets[i + 1](hidden_states, temb)

        return hidden_states
1354
+
1355
+
1356
+ class CrossAttnUpBlock2D(nn.Module):
1357
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_upsample=True,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
    ):
        """
        Up block builder: `num_layers` resnets (each consuming a skip connection),
        each followed by `len(cross_attention_dim)` transformer blocks, plus an
        optional final upsampler.
        """
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        # Normalize to a tuple: one transformer is built per entry, per resnet layer.
        if isinstance(cross_attention_dim, int):
            cross_attention_dim = (cross_attention_dim,)
        if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4:
            raise ValueError(
                "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention "
                f"dims is less than or equal to 4. Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}"
            )
        self.cross_attention_dim = cross_attention_dim

        for i in range(num_layers):
            # Each resnet concatenates a skip connection from the matching down block;
            # the last layer's skip carries `in_channels`, earlier ones `out_channels`.
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            for j in range(len(cross_attention_dim)):
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block,
                        cross_attention_dim=cross_attention_dim[j],
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        # A `None` dim means no conditioning source for this transformer:
                        # fall back to double self-attention.
                        double_self_attention=True if cross_attention_dim[j] is None else False,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
1437
+
1438
+ def forward(
1439
+ self,
1440
+ hidden_states: torch.Tensor,
1441
+ res_hidden_states_tuple: Tuple[torch.Tensor, ...],
1442
+ temb: Optional[torch.Tensor] = None,
1443
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1444
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1445
+ upsample_size: Optional[int] = None,
1446
+ attention_mask: Optional[torch.Tensor] = None,
1447
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1448
+ encoder_hidden_states_1: Optional[torch.Tensor] = None,
1449
+ encoder_attention_mask_1: Optional[torch.Tensor] = None,
1450
+ ):
1451
+ num_layers = len(self.resnets)
1452
+ num_attention_per_layer = len(self.attentions) // num_layers
1453
+
1454
+ encoder_hidden_states_1 = (
1455
+ encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states
1456
+ )
1457
+ encoder_attention_mask_1 = (
1458
+ encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask
1459
+ )
1460
+
1461
+ for i in range(num_layers):
1462
+ # pop res hidden states
1463
+ res_hidden_states = res_hidden_states_tuple[-1]
1464
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
1465
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
1466
+
1467
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
1468
+
1469
+ def create_custom_forward(module, return_dict=None):
1470
+ def custom_forward(*inputs):
1471
+ if return_dict is not None:
1472
+ return module(*inputs, return_dict=return_dict)
1473
+ else:
1474
+ return module(*inputs)
1475
+
1476
+ return custom_forward
1477
+
1478
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
1479
+ hidden_states = torch.utils.checkpoint.checkpoint(
1480
+ create_custom_forward(self.resnets[i]),
1481
+ hidden_states,
1482
+ temb,
1483
+ **ckpt_kwargs,
1484
+ )
1485
+ for idx, cross_attention_dim in enumerate(self.cross_attention_dim):
1486
+ if cross_attention_dim is not None and idx <= 1:
1487
+ forward_encoder_hidden_states = encoder_hidden_states
1488
+ forward_encoder_attention_mask = encoder_attention_mask
1489
+ elif cross_attention_dim is not None and idx > 1:
1490
+ forward_encoder_hidden_states = encoder_hidden_states_1
1491
+ forward_encoder_attention_mask = encoder_attention_mask_1
1492
+ else:
1493
+ forward_encoder_hidden_states = None
1494
+ forward_encoder_attention_mask = None
1495
+ hidden_states = torch.utils.checkpoint.checkpoint(
1496
+ create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False),
1497
+ hidden_states,
1498
+ forward_encoder_hidden_states,
1499
+ None, # timestep
1500
+ None, # class_labels
1501
+ cross_attention_kwargs,
1502
+ attention_mask,
1503
+ forward_encoder_attention_mask,
1504
+ **ckpt_kwargs,
1505
+ )[0]
1506
+ else:
1507
+ hidden_states = self.resnets[i](hidden_states, temb)
1508
+ for idx, cross_attention_dim in enumerate(self.cross_attention_dim):
1509
+ if cross_attention_dim is not None and idx <= 1:
1510
+ forward_encoder_hidden_states = encoder_hidden_states
1511
+ forward_encoder_attention_mask = encoder_attention_mask
1512
+ elif cross_attention_dim is not None and idx > 1:
1513
+ forward_encoder_hidden_states = encoder_hidden_states_1
1514
+ forward_encoder_attention_mask = encoder_attention_mask_1
1515
+ else:
1516
+ forward_encoder_hidden_states = None
1517
+ forward_encoder_attention_mask = None
1518
+ hidden_states = self.attentions[i * num_attention_per_layer + idx](
1519
+ hidden_states,
1520
+ attention_mask=attention_mask,
1521
+ encoder_hidden_states=forward_encoder_hidden_states,
1522
+ encoder_attention_mask=forward_encoder_attention_mask,
1523
+ return_dict=False,
1524
+ )[0]
1525
+
1526
+ if self.upsamplers is not None:
1527
+ for upsampler in self.upsamplers:
1528
+ hidden_states = upsampler(hidden_states, upsample_size)
1529
+
1530
+ return hidden_states
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__init__.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_flax_available,
9
+ is_torch_available,
10
+ is_transformers_available,
11
+ )
12
+
13
+
14
+ _dummy_objects = {}
15
+ _import_structure = {}
16
+
17
+ try:
18
+ if not (is_transformers_available() and is_torch_available()):
19
+ raise OptionalDependencyNotAvailable()
20
+ except OptionalDependencyNotAvailable:
21
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
22
+
23
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
24
+ else:
25
+ _import_structure["pipeline_controlnet_xs"] = ["StableDiffusionControlNetXSPipeline"]
26
+ _import_structure["pipeline_controlnet_xs_sd_xl"] = ["StableDiffusionXLControlNetXSPipeline"]
27
+ try:
28
+ if not (is_transformers_available() and is_flax_available()):
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ from ...utils import dummy_flax_and_transformers_objects # noqa F403
32
+
33
+ _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects))
34
+ else:
35
+ pass # _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"]
36
+
37
+
38
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
39
+ try:
40
+ if not (is_transformers_available() and is_torch_available()):
41
+ raise OptionalDependencyNotAvailable()
42
+
43
+ except OptionalDependencyNotAvailable:
44
+ from ...utils.dummy_torch_and_transformers_objects import *
45
+ else:
46
+ from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline
47
+ from .pipeline_controlnet_xs_sd_xl import StableDiffusionXLControlNetXSPipeline
48
+
49
+ try:
50
+ if not (is_transformers_available() and is_flax_available()):
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ from ...utils.dummy_flax_and_transformers_objects import * # noqa F403
54
+ else:
55
+ pass # from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
56
+
57
+
58
+ else:
59
+ import sys
60
+
61
+ sys.modules[__name__] = _LazyModule(
62
+ __name__,
63
+ globals()["__file__"],
64
+ _import_structure,
65
+ module_spec=__spec__,
66
+ )
67
+ for name, value in _dummy_objects.items():
68
+ setattr(sys.modules[__name__], name, value)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__pycache__/pipeline_controlnet_xs.cpython-310.pyc ADDED
Binary file (30.1 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/__pycache__/pipeline_controlnet_xs_sd_xl.cpython-310.pyc ADDED
Binary file (37.2 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py ADDED
@@ -0,0 +1,916 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
23
+
24
+ from ...callbacks import MultiPipelineCallbacks, PipelineCallback
25
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
26
+ from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
27
+ from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel
28
+ from ...models.lora import adjust_lora_scale_text_encoder
29
+ from ...schedulers import KarrasDiffusionSchedulers
30
+ from ...utils import (
31
+ USE_PEFT_BACKEND,
32
+ deprecate,
33
+ logging,
34
+ replace_example_docstring,
35
+ scale_lora_layers,
36
+ unscale_lora_layers,
37
+ )
38
+ from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
39
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
40
+ from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
41
+ from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+
47
+ EXAMPLE_DOC_STRING = """
48
+ Examples:
49
+ ```py
50
+ >>> # !pip install opencv-python transformers accelerate
51
+ >>> from diffusers import StableDiffusionControlNetXSPipeline, ControlNetXSAdapter
52
+ >>> from diffusers.utils import load_image
53
+ >>> import numpy as np
54
+ >>> import torch
55
+
56
+ >>> import cv2
57
+ >>> from PIL import Image
58
+
59
+ >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
60
+ >>> negative_prompt = "low quality, bad quality, sketches"
61
+
62
+ >>> # download an image
63
+ >>> image = load_image(
64
+ ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
65
+ ... )
66
+
67
+ >>> # initialize the models and pipeline
68
+ >>> controlnet_conditioning_scale = 0.5
69
+
70
+ >>> controlnet = ControlNetXSAdapter.from_pretrained(
71
+ ... "UmerHA/Testing-ConrolNetXS-SD2.1-canny", torch_dtype=torch.float16
72
+ ... )
73
+ >>> pipe = StableDiffusionControlNetXSPipeline.from_pretrained(
74
+ ... "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16
75
+ ... )
76
+ >>> pipe.enable_model_cpu_offload()
77
+
78
+ >>> # get canny image
79
+ >>> image = np.array(image)
80
+ >>> image = cv2.Canny(image, 100, 200)
81
+ >>> image = image[:, :, None]
82
+ >>> image = np.concatenate([image, image, image], axis=2)
83
+ >>> canny_image = Image.fromarray(image)
84
+ >>> # generate image
85
+ >>> image = pipe(
86
+ ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
87
+ ... ).images[0]
88
+ ```
89
+ """
90
+
91
+
92
+ class StableDiffusionControlNetXSPipeline(
93
+ DiffusionPipeline,
94
+ StableDiffusionMixin,
95
+ TextualInversionLoaderMixin,
96
+ StableDiffusionLoraLoaderMixin,
97
+ FromSingleFileMixin,
98
+ ):
99
+ r"""
100
+ Pipeline for text-to-image generation using Stable Diffusion with ControlNet-XS guidance.
101
+
102
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
103
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
104
+
105
+ The pipeline also inherits the following loading methods:
106
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
107
+ - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
108
+ - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
109
+ - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
110
+
111
+ Args:
112
+ vae ([`AutoencoderKL`]):
113
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
114
+ text_encoder ([`~transformers.CLIPTextModel`]):
115
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
116
+ tokenizer ([`~transformers.CLIPTokenizer`]):
117
+ A `CLIPTokenizer` to tokenize text.
118
+ unet ([`UNet2DConditionModel`]):
119
+ A [`UNet2DConditionModel`] used to create a UNetControlNetXSModel to denoise the encoded image latents.
120
+ controlnet ([`ControlNetXSAdapter`]):
121
+ A [`ControlNetXSAdapter`] to be used in combination with `unet` to denoise the encoded image latents.
122
+ scheduler ([`SchedulerMixin`]):
123
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
124
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
125
+ safety_checker ([`StableDiffusionSafetyChecker`]):
126
+ Classification module that estimates whether generated images could be considered offensive or harmful.
127
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
128
+ about a model's potential harms.
129
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
130
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
131
+ """
132
+
133
+ model_cpu_offload_seq = "text_encoder->unet->vae"
134
+ _optional_components = ["safety_checker", "feature_extractor"]
135
+ _exclude_from_cpu_offload = ["safety_checker"]
136
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
137
+
138
+ def __init__(
139
+ self,
140
+ vae: AutoencoderKL,
141
+ text_encoder: CLIPTextModel,
142
+ tokenizer: CLIPTokenizer,
143
+ unet: Union[UNet2DConditionModel, UNetControlNetXSModel],
144
+ controlnet: ControlNetXSAdapter,
145
+ scheduler: KarrasDiffusionSchedulers,
146
+ safety_checker: StableDiffusionSafetyChecker,
147
+ feature_extractor: CLIPImageProcessor,
148
+ requires_safety_checker: bool = True,
149
+ ):
150
+ super().__init__()
151
+
152
+ if isinstance(unet, UNet2DConditionModel):
153
+ unet = UNetControlNetXSModel.from_unet(unet, controlnet)
154
+
155
+ if safety_checker is None and requires_safety_checker:
156
+ logger.warning(
157
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
158
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
159
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
160
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
161
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
162
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
163
+ )
164
+
165
+ if safety_checker is not None and feature_extractor is None:
166
+ raise ValueError(
167
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
168
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
169
+ )
170
+
171
+ self.register_modules(
172
+ vae=vae,
173
+ text_encoder=text_encoder,
174
+ tokenizer=tokenizer,
175
+ unet=unet,
176
+ controlnet=controlnet,
177
+ scheduler=scheduler,
178
+ safety_checker=safety_checker,
179
+ feature_extractor=feature_extractor,
180
+ )
181
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
182
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
183
+ self.control_image_processor = VaeImageProcessor(
184
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
185
+ )
186
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
187
+
188
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
189
+ def _encode_prompt(
190
+ self,
191
+ prompt,
192
+ device,
193
+ num_images_per_prompt,
194
+ do_classifier_free_guidance,
195
+ negative_prompt=None,
196
+ prompt_embeds: Optional[torch.Tensor] = None,
197
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
198
+ lora_scale: Optional[float] = None,
199
+ **kwargs,
200
+ ):
201
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
202
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
203
+
204
+ prompt_embeds_tuple = self.encode_prompt(
205
+ prompt=prompt,
206
+ device=device,
207
+ num_images_per_prompt=num_images_per_prompt,
208
+ do_classifier_free_guidance=do_classifier_free_guidance,
209
+ negative_prompt=negative_prompt,
210
+ prompt_embeds=prompt_embeds,
211
+ negative_prompt_embeds=negative_prompt_embeds,
212
+ lora_scale=lora_scale,
213
+ **kwargs,
214
+ )
215
+
216
+ # concatenate for backwards comp
217
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
218
+
219
+ return prompt_embeds
220
+
221
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
222
+ def encode_prompt(
223
+ self,
224
+ prompt,
225
+ device,
226
+ num_images_per_prompt,
227
+ do_classifier_free_guidance,
228
+ negative_prompt=None,
229
+ prompt_embeds: Optional[torch.Tensor] = None,
230
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
231
+ lora_scale: Optional[float] = None,
232
+ clip_skip: Optional[int] = None,
233
+ ):
234
+ r"""
235
+ Encodes the prompt into text encoder hidden states.
236
+
237
+ Args:
238
+ prompt (`str` or `List[str]`, *optional*):
239
+ prompt to be encoded
240
+ device: (`torch.device`):
241
+ torch device
242
+ num_images_per_prompt (`int`):
243
+ number of images that should be generated per prompt
244
+ do_classifier_free_guidance (`bool`):
245
+ whether to use classifier free guidance or not
246
+ negative_prompt (`str` or `List[str]`, *optional*):
247
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
248
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
249
+ less than `1`).
250
+ prompt_embeds (`torch.Tensor`, *optional*):
251
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
252
+ provided, text embeddings will be generated from `prompt` input argument.
253
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
254
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
255
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
256
+ argument.
257
+ lora_scale (`float`, *optional*):
258
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
259
+ clip_skip (`int`, *optional*):
260
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
261
+ the output of the pre-final layer will be used for computing the prompt embeddings.
262
+ """
263
+ # set lora scale so that monkey patched LoRA
264
+ # function of text encoder can correctly access it
265
+ if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
266
+ self._lora_scale = lora_scale
267
+
268
+ # dynamically adjust the LoRA scale
269
+ if not USE_PEFT_BACKEND:
270
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
271
+ else:
272
+ scale_lora_layers(self.text_encoder, lora_scale)
273
+
274
+ if prompt is not None and isinstance(prompt, str):
275
+ batch_size = 1
276
+ elif prompt is not None and isinstance(prompt, list):
277
+ batch_size = len(prompt)
278
+ else:
279
+ batch_size = prompt_embeds.shape[0]
280
+
281
+ if prompt_embeds is None:
282
+ # textual inversion: process multi-vector tokens if necessary
283
+ if isinstance(self, TextualInversionLoaderMixin):
284
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
285
+
286
+ text_inputs = self.tokenizer(
287
+ prompt,
288
+ padding="max_length",
289
+ max_length=self.tokenizer.model_max_length,
290
+ truncation=True,
291
+ return_tensors="pt",
292
+ )
293
+ text_input_ids = text_inputs.input_ids
294
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
295
+
296
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
297
+ text_input_ids, untruncated_ids
298
+ ):
299
+ removed_text = self.tokenizer.batch_decode(
300
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
301
+ )
302
+ logger.warning(
303
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
304
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
305
+ )
306
+
307
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
308
+ attention_mask = text_inputs.attention_mask.to(device)
309
+ else:
310
+ attention_mask = None
311
+
312
+ if clip_skip is None:
313
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
314
+ prompt_embeds = prompt_embeds[0]
315
+ else:
316
+ prompt_embeds = self.text_encoder(
317
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
318
+ )
319
+ # Access the `hidden_states` first, that contains a tuple of
320
+ # all the hidden states from the encoder layers. Then index into
321
+ # the tuple to access the hidden states from the desired layer.
322
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
323
+ # We also need to apply the final LayerNorm here to not mess with the
324
+ # representations. The `last_hidden_states` that we typically use for
325
+ # obtaining the final prompt representations passes through the LayerNorm
326
+ # layer.
327
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
328
+
329
+ if self.text_encoder is not None:
330
+ prompt_embeds_dtype = self.text_encoder.dtype
331
+ elif self.unet is not None:
332
+ prompt_embeds_dtype = self.unet.dtype
333
+ else:
334
+ prompt_embeds_dtype = prompt_embeds.dtype
335
+
336
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
337
+
338
+ bs_embed, seq_len, _ = prompt_embeds.shape
339
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
340
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
341
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
342
+
343
+ # get unconditional embeddings for classifier free guidance
344
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
345
+ uncond_tokens: List[str]
346
+ if negative_prompt is None:
347
+ uncond_tokens = [""] * batch_size
348
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
349
+ raise TypeError(
350
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
351
+ f" {type(prompt)}."
352
+ )
353
+ elif isinstance(negative_prompt, str):
354
+ uncond_tokens = [negative_prompt]
355
+ elif batch_size != len(negative_prompt):
356
+ raise ValueError(
357
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
358
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
359
+ " the batch size of `prompt`."
360
+ )
361
+ else:
362
+ uncond_tokens = negative_prompt
363
+
364
+ # textual inversion: process multi-vector tokens if necessary
365
+ if isinstance(self, TextualInversionLoaderMixin):
366
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
367
+
368
+ max_length = prompt_embeds.shape[1]
369
+ uncond_input = self.tokenizer(
370
+ uncond_tokens,
371
+ padding="max_length",
372
+ max_length=max_length,
373
+ truncation=True,
374
+ return_tensors="pt",
375
+ )
376
+
377
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
378
+ attention_mask = uncond_input.attention_mask.to(device)
379
+ else:
380
+ attention_mask = None
381
+
382
+ negative_prompt_embeds = self.text_encoder(
383
+ uncond_input.input_ids.to(device),
384
+ attention_mask=attention_mask,
385
+ )
386
+ negative_prompt_embeds = negative_prompt_embeds[0]
387
+
388
+ if do_classifier_free_guidance:
389
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
390
+ seq_len = negative_prompt_embeds.shape[1]
391
+
392
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
393
+
394
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
395
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
396
+
397
+ if self.text_encoder is not None:
398
+ if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
399
+ # Retrieve the original scale by scaling back the LoRA layers
400
+ unscale_lora_layers(self.text_encoder, lora_scale)
401
+
402
+ return prompt_embeds, negative_prompt_embeds
403
+
404
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
405
+ def run_safety_checker(self, image, device, dtype):
406
+ if self.safety_checker is None:
407
+ has_nsfw_concept = None
408
+ else:
409
+ if torch.is_tensor(image):
410
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
411
+ else:
412
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
413
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
414
+ image, has_nsfw_concept = self.safety_checker(
415
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
416
+ )
417
+ return image, has_nsfw_concept
418
+
419
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
420
+ def decode_latents(self, latents):
421
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
422
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
423
+
424
+ latents = 1 / self.vae.config.scaling_factor * latents
425
+ image = self.vae.decode(latents, return_dict=False)[0]
426
+ image = (image / 2 + 0.5).clamp(0, 1)
427
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
428
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
429
+ return image
430
+
431
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
432
+ def prepare_extra_step_kwargs(self, generator, eta):
433
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
434
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
435
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
436
+ # and should be between [0, 1]
437
+
438
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
439
+ extra_step_kwargs = {}
440
+ if accepts_eta:
441
+ extra_step_kwargs["eta"] = eta
442
+
443
+ # check if the scheduler accepts generator
444
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
445
+ if accepts_generator:
446
+ extra_step_kwargs["generator"] = generator
447
+ return extra_step_kwargs
448
+
449
+ def check_inputs(
450
+ self,
451
+ prompt,
452
+ image,
453
+ negative_prompt=None,
454
+ prompt_embeds=None,
455
+ negative_prompt_embeds=None,
456
+ controlnet_conditioning_scale=1.0,
457
+ control_guidance_start=0.0,
458
+ control_guidance_end=1.0,
459
+ callback_on_step_end_tensor_inputs=None,
460
+ ):
461
+ if callback_on_step_end_tensor_inputs is not None and not all(
462
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
463
+ ):
464
+ raise ValueError(
465
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
466
+ )
467
+
468
+ if prompt is not None and prompt_embeds is not None:
469
+ raise ValueError(
470
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
471
+ " only forward one of the two."
472
+ )
473
+ elif prompt is None and prompt_embeds is None:
474
+ raise ValueError(
475
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
476
+ )
477
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
478
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
479
+
480
+ if negative_prompt is not None and negative_prompt_embeds is not None:
481
+ raise ValueError(
482
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
483
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
484
+ )
485
+
486
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
487
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
488
+ raise ValueError(
489
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
490
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
491
+ f" {negative_prompt_embeds.shape}."
492
+ )
493
+
494
+ # Check `image` and `controlnet_conditioning_scale`
495
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
496
+ self.unet, torch._dynamo.eval_frame.OptimizedModule
497
+ )
498
+ if (
499
+ isinstance(self.unet, UNetControlNetXSModel)
500
+ or is_compiled
501
+ and isinstance(self.unet._orig_mod, UNetControlNetXSModel)
502
+ ):
503
+ self.check_image(image, prompt, prompt_embeds)
504
+ if not isinstance(controlnet_conditioning_scale, float):
505
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
506
+ else:
507
+ assert False
508
+
509
+ start, end = control_guidance_start, control_guidance_end
510
+ if start >= end:
511
+ raise ValueError(
512
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
513
+ )
514
+ if start < 0.0:
515
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
516
+ if end > 1.0:
517
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
518
+
519
def check_image(self, image, prompt, prompt_embeds):
    """Validate the conditioning `image` type and its batch compatibility with the prompt.

    Accepts a single PIL image, torch tensor, or numpy array, or a list whose first
    element is one of those. Raises `TypeError` for any other type and `ValueError`
    when the image batch size is neither 1 nor equal to the prompt batch size.
    """
    image_is_pil = isinstance(image, PIL.Image.Image)
    supported = (
        image_is_pil
        or isinstance(image, torch.Tensor)
        or isinstance(image, np.ndarray)
        or (
            isinstance(image, list)
            and isinstance(image[0], (PIL.Image.Image, torch.Tensor, np.ndarray))
        )
    )
    if not supported:
        raise TypeError(
            f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
        )

    # A lone PIL image counts as batch size 1; every other accepted form has a length
    # (tensors/arrays report their leading dimension, lists their element count).
    image_batch_size = 1 if image_is_pil else len(image)

    if isinstance(prompt, str):
        prompt_batch_size = 1
    elif isinstance(prompt, list):
        prompt_batch_size = len(prompt)
    elif prompt_embeds is not None:
        prompt_batch_size = prompt_embeds.shape[0]

    if image_batch_size != 1 and image_batch_size != prompt_batch_size:
        raise ValueError(
            f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
        )
555
+
556
def prepare_image(
    self,
    image,
    width,
    height,
    batch_size,
    num_images_per_prompt,
    device,
    dtype,
    do_classifier_free_guidance=False,
):
    """Preprocess the ControlNet conditioning image and replicate it to the batch size.

    The image is resized/converted by `self.control_image_processor`, repeated so its
    batch dimension matches `batch_size` (or `num_images_per_prompt` copies per image
    when a full batch of images was supplied), moved to `device`/`dtype`, and finally
    doubled along the batch dimension when classifier-free guidance is enabled.
    """
    image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)

    # A single conditioning image is shared across the whole effective batch; when
    # each prompt already has its own image, only `num_images_per_prompt` copies are needed.
    repeats = batch_size if image.shape[0] == 1 else num_images_per_prompt
    image = image.repeat_interleave(repeats, dim=0).to(device=device, dtype=dtype)

    if do_classifier_free_guidance:
        # Unconditional + conditional passes share the same conditioning image.
        image = torch.cat([image, image])

    return image
584
+
585
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
    """Create (or reuse) the initial latents and scale them by the scheduler's initial noise sigma.

    `height`/`width` are pixel sizes; latent spatial dims are divided by `self.vae_scale_factor`.
    When `latents` is given it is only moved to `device`; otherwise Gaussian noise is sampled
    with `generator`. A generator list must match `batch_size` exactly.
    """
    latent_height = int(height) // self.vae_scale_factor
    latent_width = int(width) // self.vae_scale_factor
    shape = (batch_size, num_channels_latents, latent_height, latent_width)

    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )

    if latents is not None:
        latents = latents.to(device)
    else:
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

    # The scheduler expects the initial noise at its own standard deviation.
    return latents * self.scheduler.init_noise_sigma
607
+
608
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
def guidance_scale(self):
    """Classifier-free guidance weight `w` recorded by the last `__call__` invocation."""
    return self._guidance_scale
612
+
613
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
def clip_skip(self):
    """Number of final CLIP layers skipped when encoding, as recorded by the last `__call__`."""
    return self._clip_skip
617
+
618
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
def do_classifier_free_guidance(self):
    """Whether CFG is active: guidance scale above 1 and no timestep-conditioning projection on the UNet."""
    return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
622
+
623
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
def cross_attention_kwargs(self):
    """Extra kwargs forwarded to the attention processors, as recorded by the last `__call__`."""
    return self._cross_attention_kwargs
627
+
628
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
def num_timesteps(self):
    """Total number of scheduler timesteps used by the last `__call__` invocation."""
    return self._num_timesteps
632
+
633
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    prompt: Union[str, List[str]] = None,
    image: PipelineImageInput = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    num_images_per_prompt: Optional[int] = 1,
    eta: float = 0.0,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.Tensor] = None,
    prompt_embeds: Optional[torch.Tensor] = None,
    negative_prompt_embeds: Optional[torch.Tensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
    control_guidance_start: float = 0.0,
    control_guidance_end: float = 1.0,
    clip_skip: Optional[int] = None,
    callback_on_step_end: Optional[
        Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
    ] = None,
    callback_on_step_end_tensor_inputs: List[str] = ["latents"],
):
    r"""
    The call function to the pipeline for generation.

    Args:
        prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide image generation. Required unless `prompt_embeds` is given.
        image (`PipelineImageInput`):
            The ControlNet-XS conditioning input (PIL image, numpy array, torch tensor, or list thereof). The
            output dimensions default to the preprocessed conditioning image's dimensions unless `height`/`width`
            are passed.
        height (`int`, *optional*):
            The height in pixels of the generated image.
        width (`int`, *optional*):
            The width in pixels of the generated image.
        num_inference_steps (`int`, *optional*, defaults to 50):
            Number of denoising steps; more steps trade speed for quality.
        guidance_scale (`float`, *optional*, defaults to 7.5):
            Classifier-free guidance weight; guidance is enabled when `guidance_scale > 1`.
        negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts guiding what to exclude; ignored when guidance is disabled.
        num_images_per_prompt (`int`, *optional*, defaults to 1):
            The number of images to generate per prompt.
        eta (`float`, *optional*, defaults to 0.0):
            DDIM eta parameter; ignored by other schedulers.
        generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
            RNG(s) to make generation deterministic.
        latents (`torch.Tensor`, *optional*):
            Pre-generated noisy latents; sampled with `generator` when omitted.
        prompt_embeds (`torch.Tensor`, *optional*):
            Pre-generated text embeddings used instead of encoding `prompt`.
        negative_prompt_embeds (`torch.Tensor`, *optional*):
            Pre-generated negative text embeddings used instead of encoding `negative_prompt`.
        output_type (`str`, *optional*, defaults to `"pil"`):
            Output format: `"pil"`, `"np"`, or `"latent"` (skips VAE decode and safety checking).
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a tuple.
        cross_attention_kwargs (`dict`, *optional*):
            Kwargs forwarded to the attention processors.
        controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
            Multiplier applied to the ControlNet outputs before they are added to the UNet residual.
        control_guidance_start (`float`, *optional*, defaults to 0.0):
            Fraction of total steps at which control starts being applied.
        control_guidance_end (`float`, *optional*, defaults to 1.0):
            Fraction of total steps at which control stops being applied.
        clip_skip (`int`, *optional*):
            Number of final CLIP layers to skip when computing prompt embeddings.
        callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
            Called after each denoising step as `callback_on_step_end(self, step, timestep, callback_kwargs)`;
            `callback_kwargs` contains the tensors named in `callback_on_step_end_tensor_inputs`.
        callback_on_step_end_tensor_inputs (`List`, *optional*, defaults to `["latents"]`):
            Names of local tensors exposed to (and optionally overridden by) `callback_on_step_end`; each must
            appear in `self._callback_tensor_inputs`. NOTE(review): the mutable default list is safe here — it is
            only ever rebound, never mutated.

    Examples:

    Returns:
        [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            If `return_dict` is `True`, a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`];
            otherwise a tuple of (generated images, list of per-image NSFW flags or `None`).
    """

    # Pipeline-callback objects declare which tensors they want to receive.
    if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
        callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

    # Work with the underlying module when the UNet has been torch.compile'd
    # (only used below for its dtype when preparing the conditioning image).
    unet = self.unet._orig_mod if is_compiled_module(self.unet) else self.unet

    # 1. Check inputs. Raise error if not correct
    self.check_inputs(
        prompt,
        image,
        negative_prompt,
        prompt_embeds,
        negative_prompt_embeds,
        controlnet_conditioning_scale,
        control_guidance_start,
        control_guidance_end,
        callback_on_step_end_tensor_inputs,
    )

    # Record call parameters so the corresponding properties reflect this run.
    self._guidance_scale = guidance_scale
    self._clip_skip = clip_skip
    self._cross_attention_kwargs = cross_attention_kwargs
    self._interrupt = False

    # 2. Define call parameters
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        # check_inputs guarantees prompt_embeds is set when prompt is None.
        batch_size = prompt_embeds.shape[0]

    device = self._execution_device
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 3. Encode input prompt
    text_encoder_lora_scale = (
        cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
    )
    prompt_embeds, negative_prompt_embeds = self.encode_prompt(
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        lora_scale=text_encoder_lora_scale,
        clip_skip=clip_skip,
    )

    # For classifier free guidance, we need to do two forward passes.
    # Here we concatenate the unconditional and text embeddings into a single batch
    # to avoid doing two forward passes
    if do_classifier_free_guidance:
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

    # 4. Prepare image
    image = self.prepare_image(
        image=image,
        width=width,
        height=height,
        batch_size=batch_size * num_images_per_prompt,
        num_images_per_prompt=num_images_per_prompt,
        device=device,
        dtype=unet.dtype,
        do_classifier_free_guidance=do_classifier_free_guidance,
    )
    # The (possibly resized) conditioning image defines the output resolution.
    height, width = image.shape[-2:]

    # 5. Prepare timesteps
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = self.scheduler.timesteps

    # 6. Prepare latent variables
    num_channels_latents = self.unet.in_channels
    latents = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )

    # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
    extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

    # 8. Denoising loop
    num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
    self._num_timesteps = len(timesteps)
    is_controlnet_compiled = is_compiled_module(self.unet)
    is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
    with self.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            # Relevant thread:
            # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
            if is_controlnet_compiled and is_torch_higher_equal_2_1:
                torch._inductor.cudagraph_mark_step_begin()
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            # Control is only applied inside the [start, end] fraction of the schedule.
            apply_control = (
                i / len(timesteps) >= control_guidance_start and (i + 1) / len(timesteps) <= control_guidance_end
            )
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=prompt_embeds,
                controlnet_cond=image,
                conditioning_scale=controlnet_conditioning_scale,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=True,
                apply_control=apply_control,
            ).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

            if callback_on_step_end is not None:
                # NOTE(review): relies on `locals()` resolving loop-local tensors by
                # name; only names validated against `_callback_tensor_inputs` in
                # check_inputs can appear here.
                callback_kwargs = {}
                for k in callback_on_step_end_tensor_inputs:
                    callback_kwargs[k] = locals()[k]
                callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                # Callbacks may override these tensors for the next step.
                latents = callback_outputs.pop("latents", latents)
                prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                progress_bar.update()

    # If we do sequential model offloading, let's offload unet and controlnet
    # manually for max memory savings
    if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
        self.unet.to("cpu")
        self.controlnet.to("cpu")
        torch.cuda.empty_cache()

    if not output_type == "latent":
        image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
            0
        ]
        image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
    else:
        # Raw latents requested: skip VAE decode and safety checking entirely.
        image = latents
        has_nsfw_concept = None

    if has_nsfw_concept is None:
        do_denormalize = [True] * image.shape[0]
    else:
        # Flagged images were blacked out by the safety checker; don't denormalize them.
        do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

    image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

    # Offload all models
    self.maybe_free_model_hooks()

    if not return_dict:
        return (image, has_nsfw_concept)

    return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py ADDED
@@ -0,0 +1,1111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from transformers import (
23
+ CLIPImageProcessor,
24
+ CLIPTextModel,
25
+ CLIPTextModelWithProjection,
26
+ CLIPTokenizer,
27
+ )
28
+
29
+ from diffusers.utils.import_utils import is_invisible_watermark_available
30
+
31
+ from ...callbacks import MultiPipelineCallbacks, PipelineCallback
32
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
33
+ from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
34
+ from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel
35
+ from ...models.attention_processor import (
36
+ AttnProcessor2_0,
37
+ XFormersAttnProcessor,
38
+ )
39
+ from ...models.lora import adjust_lora_scale_text_encoder
40
+ from ...schedulers import KarrasDiffusionSchedulers
41
+ from ...utils import (
42
+ USE_PEFT_BACKEND,
43
+ logging,
44
+ replace_example_docstring,
45
+ scale_lora_layers,
46
+ unscale_lora_layers,
47
+ )
48
+ from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
49
+ from ..pipeline_utils import DiffusionPipeline
50
+ from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
51
+
52
+
53
# The invisible watermarker is optional: only import it when the
# `invisible_watermark` package is installed (see `add_watermarker` in __init__).
if is_invisible_watermark_available():
    from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Usage example spliced into `__call__`'s docstring via `replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # !pip install opencv-python transformers accelerate
        >>> from diffusers import StableDiffusionXLControlNetXSPipeline, ControlNetXSAdapter, AutoencoderKL
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import torch

        >>> import cv2
        >>> from PIL import Image

        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
        >>> negative_prompt = "low quality, bad quality, sketches"

        >>> # download an image
        >>> image = load_image(
        ...     "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
        ... )

        >>> # initialize the models and pipeline
        >>> controlnet_conditioning_scale = 0.5
        >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
        >>> controlnet = ControlNetXSAdapter.from_pretrained(
        ...     "UmerHA/Testing-ConrolNetXS-SDXL-canny", torch_dtype=torch.float16
        ... )
        >>> pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> # get canny image
        >>> image = np.array(image)
        >>> image = cv2.Canny(image, 100, 200)
        >>> image = image[:, :, None]
        >>> image = np.concatenate([image, image, image], axis=2)
        >>> canny_image = Image.fromarray(image)

        >>> # generate image
        >>> image = pipe(
        ...     prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
        ... ).images[0]
        ```
"""
104
+
105
+
106
+ class StableDiffusionXLControlNetXSPipeline(
107
+ DiffusionPipeline,
108
+ TextualInversionLoaderMixin,
109
+ StableDiffusionXLLoraLoaderMixin,
110
+ FromSingleFileMixin,
111
+ ):
112
+ r"""
113
+ Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet-XS guidance.
114
+
115
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
116
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
117
+
118
+ The pipeline also inherits the following loading methods:
119
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
120
+ - [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
121
+ - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
122
+
123
+ Args:
124
+ vae ([`AutoencoderKL`]):
125
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
126
+ text_encoder ([`~transformers.CLIPTextModel`]):
127
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
128
+ text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
129
+ Second frozen text-encoder
130
+ ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
131
+ tokenizer ([`~transformers.CLIPTokenizer`]):
132
+ A `CLIPTokenizer` to tokenize text.
133
+ tokenizer_2 ([`~transformers.CLIPTokenizer`]):
134
+ A `CLIPTokenizer` to tokenize text.
135
+ unet ([`UNet2DConditionModel`]):
136
+ A [`UNet2DConditionModel`] used to create a UNetControlNetXSModel to denoise the encoded image latents.
137
+ controlnet ([`ControlNetXSAdapter`]):
138
+ A [`ControlNetXSAdapter`] to be used in combination with `unet` to denoise the encoded image latents.
139
+ scheduler ([`SchedulerMixin`]):
140
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
141
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
142
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
143
+ Whether the negative prompt embeddings should always be set to 0. Also see the config of
144
+ `stabilityai/stable-diffusion-xl-base-1-0`.
145
+ add_watermarker (`bool`, *optional*):
146
+ Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
147
+ watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
148
+ watermarker is used.
149
+ """
150
+
151
# Order in which components are moved to GPU by model CPU offloading.
# NOTE(review): consumed by DiffusionPipeline's offload machinery — confirm.
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
# Components that may be `None` when the pipeline is instantiated.
_optional_components = [
    "tokenizer",
    "tokenizer_2",
    "text_encoder",
    "text_encoder_2",
    "feature_extractor",
]
# Tensor names that `callback_on_step_end` callbacks may request/override.
_callback_tensor_inputs = [
    "latents",
    "prompt_embeds",
    "negative_prompt_embeds",
    "add_text_embeds",
    "add_time_ids",
    "negative_pooled_prompt_embeds",
    "negative_add_time_ids",
]
168
+
169
def __init__(
    self,
    vae: AutoencoderKL,
    text_encoder: CLIPTextModel,
    text_encoder_2: CLIPTextModelWithProjection,
    tokenizer: CLIPTokenizer,
    tokenizer_2: CLIPTokenizer,
    unet: Union[UNet2DConditionModel, UNetControlNetXSModel],
    controlnet: ControlNetXSAdapter,
    scheduler: KarrasDiffusionSchedulers,
    force_zeros_for_empty_prompt: bool = True,
    add_watermarker: Optional[bool] = None,
    feature_extractor: CLIPImageProcessor = None,
):
    """Wire up the SDXL ControlNet-XS pipeline components.

    A plain `UNet2DConditionModel` is fused with the given `ControlNetXSAdapter`
    into a single `UNetControlNetXSModel`. Image processors are derived from the
    VAE's downscale factor, and an invisible watermarker is attached when
    requested (or, by default, when the package is available).
    """
    super().__init__()

    # Fuse the base UNet with the ControlNet-XS adapter unless already fused.
    if isinstance(unet, UNet2DConditionModel):
        unet = UNetControlNetXSModel.from_unet(unet, controlnet)

    self.register_modules(
        vae=vae,
        text_encoder=text_encoder,
        text_encoder_2=text_encoder_2,
        tokenizer=tokenizer,
        tokenizer_2=tokenizer_2,
        unet=unet,
        controlnet=controlnet,
        scheduler=scheduler,
        feature_extractor=feature_extractor,
    )

    self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
    self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
    # The conditioning image is only resized/converted, never normalized to [-1, 1].
    self.control_image_processor = VaeImageProcessor(
        vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
    )

    if add_watermarker is None:
        add_watermarker = is_invisible_watermark_available()
    self.watermark = StableDiffusionXLWatermarker() if add_watermarker else None

    self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
212
+
213
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
214
+ def encode_prompt(
215
+ self,
216
+ prompt: str,
217
+ prompt_2: Optional[str] = None,
218
+ device: Optional[torch.device] = None,
219
+ num_images_per_prompt: int = 1,
220
+ do_classifier_free_guidance: bool = True,
221
+ negative_prompt: Optional[str] = None,
222
+ negative_prompt_2: Optional[str] = None,
223
+ prompt_embeds: Optional[torch.Tensor] = None,
224
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
225
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
226
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
227
+ lora_scale: Optional[float] = None,
228
+ clip_skip: Optional[int] = None,
229
+ ):
230
+ r"""
231
+ Encodes the prompt into text encoder hidden states.
232
+
233
+ Args:
234
+ prompt (`str` or `List[str]`, *optional*):
235
+ prompt to be encoded
236
+ prompt_2 (`str` or `List[str]`, *optional*):
237
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
238
+ used in both text-encoders
239
+ device: (`torch.device`):
240
+ torch device
241
+ num_images_per_prompt (`int`):
242
+ number of images that should be generated per prompt
243
+ do_classifier_free_guidance (`bool`):
244
+ whether to use classifier free guidance or not
245
+ negative_prompt (`str` or `List[str]`, *optional*):
246
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
247
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
248
+ less than `1`).
249
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
250
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
251
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
252
+ prompt_embeds (`torch.Tensor`, *optional*):
253
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
254
+ provided, text embeddings will be generated from `prompt` input argument.
255
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
256
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
257
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
258
+ argument.
259
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
260
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
261
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
262
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
263
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
264
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
265
+ input argument.
266
+ lora_scale (`float`, *optional*):
267
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
268
+ clip_skip (`int`, *optional*):
269
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
270
+ the output of the pre-final layer will be used for computing the prompt embeddings.
271
+ """
272
+ device = device or self._execution_device
273
+
274
+ # set lora scale so that monkey patched LoRA
275
+ # function of text encoder can correctly access it
276
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
277
+ self._lora_scale = lora_scale
278
+
279
+ # dynamically adjust the LoRA scale
280
+ if self.text_encoder is not None:
281
+ if not USE_PEFT_BACKEND:
282
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
283
+ else:
284
+ scale_lora_layers(self.text_encoder, lora_scale)
285
+
286
+ if self.text_encoder_2 is not None:
287
+ if not USE_PEFT_BACKEND:
288
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
289
+ else:
290
+ scale_lora_layers(self.text_encoder_2, lora_scale)
291
+
292
+ prompt = [prompt] if isinstance(prompt, str) else prompt
293
+
294
+ if prompt is not None:
295
+ batch_size = len(prompt)
296
+ else:
297
+ batch_size = prompt_embeds.shape[0]
298
+
299
+ # Define tokenizers and text encoders
300
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
301
+ text_encoders = (
302
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
303
+ )
304
+
305
+ if prompt_embeds is None:
306
+ prompt_2 = prompt_2 or prompt
307
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
308
+
309
+ # textual inversion: process multi-vector tokens if necessary
310
+ prompt_embeds_list = []
311
+ prompts = [prompt, prompt_2]
312
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
313
+ if isinstance(self, TextualInversionLoaderMixin):
314
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
315
+
316
+ text_inputs = tokenizer(
317
+ prompt,
318
+ padding="max_length",
319
+ max_length=tokenizer.model_max_length,
320
+ truncation=True,
321
+ return_tensors="pt",
322
+ )
323
+
324
+ text_input_ids = text_inputs.input_ids
325
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
326
+
327
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
328
+ text_input_ids, untruncated_ids
329
+ ):
330
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
331
+ logger.warning(
332
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
333
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
334
+ )
335
+
336
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
337
+
338
+ # We are only ALWAYS interested in the pooled output of the final text encoder
339
+ pooled_prompt_embeds = prompt_embeds[0]
340
+ if clip_skip is None:
341
+ prompt_embeds = prompt_embeds.hidden_states[-2]
342
+ else:
343
+ # "2" because SDXL always indexes from the penultimate layer.
344
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
345
+
346
+ prompt_embeds_list.append(prompt_embeds)
347
+
348
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
349
+
350
+ # get unconditional embeddings for classifier free guidance
351
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
352
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
353
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
354
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
355
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
356
+ negative_prompt = negative_prompt or ""
357
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
358
+
359
+ # normalize str to list
360
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
361
+ negative_prompt_2 = (
362
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
363
+ )
364
+
365
+ uncond_tokens: List[str]
366
+ if prompt is not None and type(prompt) is not type(negative_prompt):
367
+ raise TypeError(
368
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
369
+ f" {type(prompt)}."
370
+ )
371
+ elif batch_size != len(negative_prompt):
372
+ raise ValueError(
373
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
374
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
375
+ " the batch size of `prompt`."
376
+ )
377
+ else:
378
+ uncond_tokens = [negative_prompt, negative_prompt_2]
379
+
380
+ negative_prompt_embeds_list = []
381
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
382
+ if isinstance(self, TextualInversionLoaderMixin):
383
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
384
+
385
+ max_length = prompt_embeds.shape[1]
386
+ uncond_input = tokenizer(
387
+ negative_prompt,
388
+ padding="max_length",
389
+ max_length=max_length,
390
+ truncation=True,
391
+ return_tensors="pt",
392
+ )
393
+
394
+ negative_prompt_embeds = text_encoder(
395
+ uncond_input.input_ids.to(device),
396
+ output_hidden_states=True,
397
+ )
398
+ # We are only ALWAYS interested in the pooled output of the final text encoder
399
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
400
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
401
+
402
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
403
+
404
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
405
+
406
+ if self.text_encoder_2 is not None:
407
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
408
+ else:
409
+ prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
410
+
411
+ bs_embed, seq_len, _ = prompt_embeds.shape
412
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
413
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
414
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
415
+
416
+ if do_classifier_free_guidance:
417
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
418
+ seq_len = negative_prompt_embeds.shape[1]
419
+
420
+ if self.text_encoder_2 is not None:
421
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
422
+ else:
423
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
424
+
425
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
426
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
427
+
428
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
429
+ bs_embed * num_images_per_prompt, -1
430
+ )
431
+ if do_classifier_free_guidance:
432
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
433
+ bs_embed * num_images_per_prompt, -1
434
+ )
435
+
436
+ if self.text_encoder is not None:
437
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
438
+ # Retrieve the original scale by scaling back the LoRA layers
439
+ unscale_lora_layers(self.text_encoder, lora_scale)
440
+
441
+ if self.text_encoder_2 is not None:
442
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
443
+ # Retrieve the original scale by scaling back the LoRA layers
444
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
445
+
446
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
447
+
448
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
449
+ def prepare_extra_step_kwargs(self, generator, eta):
450
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
451
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
452
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
453
+ # and should be between [0, 1]
454
+
455
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
456
+ extra_step_kwargs = {}
457
+ if accepts_eta:
458
+ extra_step_kwargs["eta"] = eta
459
+
460
+ # check if the scheduler accepts generator
461
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
462
+ if accepts_generator:
463
+ extra_step_kwargs["generator"] = generator
464
+ return extra_step_kwargs
465
+
466
+ def check_inputs(
467
+ self,
468
+ prompt,
469
+ prompt_2,
470
+ image,
471
+ negative_prompt=None,
472
+ negative_prompt_2=None,
473
+ prompt_embeds=None,
474
+ negative_prompt_embeds=None,
475
+ pooled_prompt_embeds=None,
476
+ negative_pooled_prompt_embeds=None,
477
+ controlnet_conditioning_scale=1.0,
478
+ control_guidance_start=0.0,
479
+ control_guidance_end=1.0,
480
+ callback_on_step_end_tensor_inputs=None,
481
+ ):
482
+ if callback_on_step_end_tensor_inputs is not None and not all(
483
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
484
+ ):
485
+ raise ValueError(
486
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
487
+ )
488
+
489
+ if prompt is not None and prompt_embeds is not None:
490
+ raise ValueError(
491
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
492
+ " only forward one of the two."
493
+ )
494
+ elif prompt_2 is not None and prompt_embeds is not None:
495
+ raise ValueError(
496
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
497
+ " only forward one of the two."
498
+ )
499
+ elif prompt is None and prompt_embeds is None:
500
+ raise ValueError(
501
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
502
+ )
503
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
504
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
505
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
506
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
507
+
508
+ if negative_prompt is not None and negative_prompt_embeds is not None:
509
+ raise ValueError(
510
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
511
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
512
+ )
513
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
514
+ raise ValueError(
515
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
516
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
517
+ )
518
+
519
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
520
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
521
+ raise ValueError(
522
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
523
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
524
+ f" {negative_prompt_embeds.shape}."
525
+ )
526
+
527
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
528
+ raise ValueError(
529
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
530
+ )
531
+
532
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
533
+ raise ValueError(
534
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
535
+ )
536
+
537
+ # Check `image` and ``controlnet_conditioning_scale``
538
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
539
+ self.unet, torch._dynamo.eval_frame.OptimizedModule
540
+ )
541
+ if (
542
+ isinstance(self.unet, UNetControlNetXSModel)
543
+ or is_compiled
544
+ and isinstance(self.unet._orig_mod, UNetControlNetXSModel)
545
+ ):
546
+ self.check_image(image, prompt, prompt_embeds)
547
+ if not isinstance(controlnet_conditioning_scale, float):
548
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
549
+ else:
550
+ assert False
551
+
552
+ start, end = control_guidance_start, control_guidance_end
553
+ if start >= end:
554
+ raise ValueError(
555
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
556
+ )
557
+ if start < 0.0:
558
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
559
+ if end > 1.0:
560
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
561
+
562
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
563
+ def check_image(self, image, prompt, prompt_embeds):
564
+ image_is_pil = isinstance(image, PIL.Image.Image)
565
+ image_is_tensor = isinstance(image, torch.Tensor)
566
+ image_is_np = isinstance(image, np.ndarray)
567
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
568
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
569
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
570
+
571
+ if (
572
+ not image_is_pil
573
+ and not image_is_tensor
574
+ and not image_is_np
575
+ and not image_is_pil_list
576
+ and not image_is_tensor_list
577
+ and not image_is_np_list
578
+ ):
579
+ raise TypeError(
580
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
581
+ )
582
+
583
+ if image_is_pil:
584
+ image_batch_size = 1
585
+ else:
586
+ image_batch_size = len(image)
587
+
588
+ if prompt is not None and isinstance(prompt, str):
589
+ prompt_batch_size = 1
590
+ elif prompt is not None and isinstance(prompt, list):
591
+ prompt_batch_size = len(prompt)
592
+ elif prompt_embeds is not None:
593
+ prompt_batch_size = prompt_embeds.shape[0]
594
+
595
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
596
+ raise ValueError(
597
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
598
+ )
599
+
600
+ def prepare_image(
601
+ self,
602
+ image,
603
+ width,
604
+ height,
605
+ batch_size,
606
+ num_images_per_prompt,
607
+ device,
608
+ dtype,
609
+ do_classifier_free_guidance=False,
610
+ ):
611
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
612
+ image_batch_size = image.shape[0]
613
+
614
+ if image_batch_size == 1:
615
+ repeat_by = batch_size
616
+ else:
617
+ # image batch size is the same as prompt batch size
618
+ repeat_by = num_images_per_prompt
619
+
620
+ image = image.repeat_interleave(repeat_by, dim=0)
621
+
622
+ image = image.to(device=device, dtype=dtype)
623
+
624
+ if do_classifier_free_guidance:
625
+ image = torch.cat([image] * 2)
626
+
627
+ return image
628
+
629
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
630
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
631
+ shape = (
632
+ batch_size,
633
+ num_channels_latents,
634
+ int(height) // self.vae_scale_factor,
635
+ int(width) // self.vae_scale_factor,
636
+ )
637
+ if isinstance(generator, list) and len(generator) != batch_size:
638
+ raise ValueError(
639
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
640
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
641
+ )
642
+
643
+ if latents is None:
644
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
645
+ else:
646
+ latents = latents.to(device)
647
+
648
+ # scale the initial noise by the standard deviation required by the scheduler
649
+ latents = latents * self.scheduler.init_noise_sigma
650
+ return latents
651
+
652
+ def _get_add_time_ids(
653
+ self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
654
+ ):
655
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
656
+
657
+ passed_add_embed_dim = (
658
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
659
+ )
660
+ expected_add_embed_dim = self.unet.base_add_embedding.linear_1.in_features
661
+
662
+ if expected_add_embed_dim != passed_add_embed_dim:
663
+ raise ValueError(
664
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
665
+ )
666
+
667
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
668
+ return add_time_ids
669
+
670
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
671
+ def upcast_vae(self):
672
+ dtype = self.vae.dtype
673
+ self.vae.to(dtype=torch.float32)
674
+ use_torch_2_0_or_xformers = isinstance(
675
+ self.vae.decoder.mid_block.attentions[0].processor,
676
+ (
677
+ AttnProcessor2_0,
678
+ XFormersAttnProcessor,
679
+ ),
680
+ )
681
+ # if xformers or torch_2_0 is used attention block does not need
682
+ # to be in float32 which can save lots of memory
683
+ if use_torch_2_0_or_xformers:
684
+ self.vae.post_quant_conv.to(dtype)
685
+ self.vae.decoder.conv_in.to(dtype)
686
+ self.vae.decoder.mid_block.to(dtype)
687
+
688
+ @property
689
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
690
+ def guidance_scale(self):
691
+ return self._guidance_scale
692
+
693
+ @property
694
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
695
+ def clip_skip(self):
696
+ return self._clip_skip
697
+
698
+ @property
699
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
700
+ def do_classifier_free_guidance(self):
701
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
702
+
703
+ @property
704
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
705
+ def cross_attention_kwargs(self):
706
+ return self._cross_attention_kwargs
707
+
708
+ @property
709
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
710
+ def num_timesteps(self):
711
+ return self._num_timesteps
712
+
713
+ @torch.no_grad()
714
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
715
+ def __call__(
716
+ self,
717
+ prompt: Union[str, List[str]] = None,
718
+ prompt_2: Optional[Union[str, List[str]]] = None,
719
+ image: PipelineImageInput = None,
720
+ height: Optional[int] = None,
721
+ width: Optional[int] = None,
722
+ num_inference_steps: int = 50,
723
+ guidance_scale: float = 5.0,
724
+ negative_prompt: Optional[Union[str, List[str]]] = None,
725
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
726
+ num_images_per_prompt: Optional[int] = 1,
727
+ eta: float = 0.0,
728
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
729
+ latents: Optional[torch.Tensor] = None,
730
+ prompt_embeds: Optional[torch.Tensor] = None,
731
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
732
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
733
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
734
+ output_type: Optional[str] = "pil",
735
+ return_dict: bool = True,
736
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
737
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
738
+ control_guidance_start: float = 0.0,
739
+ control_guidance_end: float = 1.0,
740
+ original_size: Tuple[int, int] = None,
741
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
742
+ target_size: Tuple[int, int] = None,
743
+ negative_original_size: Optional[Tuple[int, int]] = None,
744
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
745
+ negative_target_size: Optional[Tuple[int, int]] = None,
746
+ clip_skip: Optional[int] = None,
747
+ callback_on_step_end: Optional[
748
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
749
+ ] = None,
750
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
751
+ ):
752
+ r"""
753
+ The call function to the pipeline for generation.
754
+
755
+ Args:
756
+ prompt (`str` or `List[str]`, *optional*):
757
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
758
+ prompt_2 (`str` or `List[str]`, *optional*):
759
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
760
+ used in both text-encoders.
761
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
762
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
763
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
764
+ specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
765
+ as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or
766
+ width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
767
+ images must be passed as a list such that each element of the list can be correctly batched for input
768
+ to a single ControlNet.
769
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
770
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
771
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
772
+ and checkpoints that are not specifically fine-tuned on low resolutions.
773
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
774
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
775
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
776
+ and checkpoints that are not specifically fine-tuned on low resolutions.
777
+ num_inference_steps (`int`, *optional*, defaults to 50):
778
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
779
+ expense of slower inference.
780
+ guidance_scale (`float`, *optional*, defaults to 5.0):
781
+ A higher guidance scale value encourages the model to generate images closely linked to the text
782
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
783
+ negative_prompt (`str` or `List[str]`, *optional*):
784
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
785
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
786
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
787
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
788
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
789
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
790
+ The number of images to generate per prompt.
791
+ eta (`float`, *optional*, defaults to 0.0):
792
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
793
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
794
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
795
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
796
+ generation deterministic.
797
+ latents (`torch.Tensor`, *optional*):
798
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
799
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
800
+ tensor is generated by sampling using the supplied random `generator`.
801
+ prompt_embeds (`torch.Tensor`, *optional*):
802
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
803
+ provided, text embeddings are generated from the `prompt` input argument.
804
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
805
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
806
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
807
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
808
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
809
+ not provided, pooled text embeddings are generated from `prompt` input argument.
810
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
811
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
812
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
813
+ argument.
814
+ output_type (`str`, *optional*, defaults to `"pil"`):
815
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
816
+ return_dict (`bool`, *optional*, defaults to `True`):
817
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
818
+ plain tuple.
819
+ cross_attention_kwargs (`dict`, *optional*):
820
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
821
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
822
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
823
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
824
+ to the residual in the original `unet`.
825
+ control_guidance_start (`float`, *optional*, defaults to 0.0):
826
+ The percentage of total steps at which the ControlNet starts applying.
827
+ control_guidance_end (`float`, *optional*, defaults to 1.0):
828
+ The percentage of total steps at which the ControlNet stops applying.
829
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
830
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
831
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
832
+ explained in section 2.2 of
833
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
834
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
835
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
836
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
837
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
838
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
839
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
840
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
841
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
842
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
843
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
844
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
845
+ micro-conditioning as explained in section 2.2 of
846
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
847
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
848
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
849
+ To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
850
+ micro-conditioning as explained in section 2.2 of
851
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
852
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
853
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
854
+ To negatively condition the generation process based on a target image resolution. It should be as same
855
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
856
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
857
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
858
+ clip_skip (`int`, *optional*):
859
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
860
+ the output of the pre-final layer will be used for computing the prompt embeddings.
861
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
862
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
863
+ each denoising step during inference, with the following arguments: `callback_on_step_end(self:
864
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
865
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
866
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
867
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
868
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
869
+ `._callback_tensor_inputs` attribute of your pipeline class.
870
+
871
+ Examples:
872
+
873
+ Returns:
874
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
875
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] is
876
+ returned, otherwise a `tuple` is returned containing the output images.
877
+ """
878
+
879
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
880
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
881
+
882
+ unet = self.unet._orig_mod if is_compiled_module(self.unet) else self.unet
883
+
884
+ # 1. Check inputs. Raise error if not correct
885
+ self.check_inputs(
886
+ prompt,
887
+ prompt_2,
888
+ image,
889
+ negative_prompt,
890
+ negative_prompt_2,
891
+ prompt_embeds,
892
+ negative_prompt_embeds,
893
+ pooled_prompt_embeds,
894
+ negative_pooled_prompt_embeds,
895
+ controlnet_conditioning_scale,
896
+ control_guidance_start,
897
+ control_guidance_end,
898
+ callback_on_step_end_tensor_inputs,
899
+ )
900
+
901
+ self._guidance_scale = guidance_scale
902
+ self._clip_skip = clip_skip
903
+ self._cross_attention_kwargs = cross_attention_kwargs
904
+ self._interrupt = False
905
+
906
+ # 2. Define call parameters
907
+ if prompt is not None and isinstance(prompt, str):
908
+ batch_size = 1
909
+ elif prompt is not None and isinstance(prompt, list):
910
+ batch_size = len(prompt)
911
+ else:
912
+ batch_size = prompt_embeds.shape[0]
913
+
914
+ device = self._execution_device
915
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
916
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
917
+ # corresponds to doing no classifier free guidance.
918
+ do_classifier_free_guidance = guidance_scale > 1.0
919
+
920
+ # 3. Encode input prompt
921
+ text_encoder_lora_scale = (
922
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
923
+ )
924
+ (
925
+ prompt_embeds,
926
+ negative_prompt_embeds,
927
+ pooled_prompt_embeds,
928
+ negative_pooled_prompt_embeds,
929
+ ) = self.encode_prompt(
930
+ prompt,
931
+ prompt_2,
932
+ device,
933
+ num_images_per_prompt,
934
+ do_classifier_free_guidance,
935
+ negative_prompt,
936
+ negative_prompt_2,
937
+ prompt_embeds=prompt_embeds,
938
+ negative_prompt_embeds=negative_prompt_embeds,
939
+ pooled_prompt_embeds=pooled_prompt_embeds,
940
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
941
+ lora_scale=text_encoder_lora_scale,
942
+ clip_skip=clip_skip,
943
+ )
944
+
945
+ # 4. Prepare image
946
+ if isinstance(unet, UNetControlNetXSModel):
947
+ image = self.prepare_image(
948
+ image=image,
949
+ width=width,
950
+ height=height,
951
+ batch_size=batch_size * num_images_per_prompt,
952
+ num_images_per_prompt=num_images_per_prompt,
953
+ device=device,
954
+ dtype=unet.dtype,
955
+ do_classifier_free_guidance=do_classifier_free_guidance,
956
+ )
957
+ height, width = image.shape[-2:]
958
+ else:
959
+ assert False
960
+
961
+ # 5. Prepare timesteps
962
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
963
+ timesteps = self.scheduler.timesteps
964
+
965
+ # 6. Prepare latent variables
966
+ num_channels_latents = self.unet.in_channels
967
+ latents = self.prepare_latents(
968
+ batch_size * num_images_per_prompt,
969
+ num_channels_latents,
970
+ height,
971
+ width,
972
+ prompt_embeds.dtype,
973
+ device,
974
+ generator,
975
+ latents,
976
+ )
977
+
978
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
979
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
980
+
981
+ # 7.1 Prepare added time ids & embeddings
982
+ if isinstance(image, list):
983
+ original_size = original_size or image[0].shape[-2:]
984
+ else:
985
+ original_size = original_size or image.shape[-2:]
986
+ target_size = target_size or (height, width)
987
+
988
+ add_text_embeds = pooled_prompt_embeds
989
+ if self.text_encoder_2 is None:
990
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
991
+ else:
992
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
993
+
994
+ add_time_ids = self._get_add_time_ids(
995
+ original_size,
996
+ crops_coords_top_left,
997
+ target_size,
998
+ dtype=prompt_embeds.dtype,
999
+ text_encoder_projection_dim=text_encoder_projection_dim,
1000
+ )
1001
+
1002
+ if negative_original_size is not None and negative_target_size is not None:
1003
+ negative_add_time_ids = self._get_add_time_ids(
1004
+ negative_original_size,
1005
+ negative_crops_coords_top_left,
1006
+ negative_target_size,
1007
+ dtype=prompt_embeds.dtype,
1008
+ text_encoder_projection_dim=text_encoder_projection_dim,
1009
+ )
1010
+ else:
1011
+ negative_add_time_ids = add_time_ids
1012
+
1013
+ if do_classifier_free_guidance:
1014
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1015
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1016
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1017
+
1018
+ prompt_embeds = prompt_embeds.to(device)
1019
+ add_text_embeds = add_text_embeds.to(device)
1020
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1021
+
1022
+ # 8. Denoising loop
1023
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1024
+ self._num_timesteps = len(timesteps)
1025
+ is_controlnet_compiled = is_compiled_module(self.unet)
1026
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
1027
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1028
+ for i, t in enumerate(timesteps):
1029
+ # Relevant thread:
1030
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
1031
+ if is_controlnet_compiled and is_torch_higher_equal_2_1:
1032
+ torch._inductor.cudagraph_mark_step_begin()
1033
+ # expand the latents if we are doing classifier free guidance
1034
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1035
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1036
+
1037
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1038
+
1039
+ # predict the noise residual
1040
+ apply_control = (
1041
+ i / len(timesteps) >= control_guidance_start and (i + 1) / len(timesteps) <= control_guidance_end
1042
+ )
1043
+ noise_pred = self.unet(
1044
+ sample=latent_model_input,
1045
+ timestep=t,
1046
+ encoder_hidden_states=prompt_embeds,
1047
+ controlnet_cond=image,
1048
+ conditioning_scale=controlnet_conditioning_scale,
1049
+ cross_attention_kwargs=cross_attention_kwargs,
1050
+ added_cond_kwargs=added_cond_kwargs,
1051
+ return_dict=True,
1052
+ apply_control=apply_control,
1053
+ ).sample
1054
+
1055
+ # perform guidance
1056
+ if do_classifier_free_guidance:
1057
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1058
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1059
+
1060
+ # compute the previous noisy sample x_t -> x_t-1
1061
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1062
+
1063
+ if callback_on_step_end is not None:
1064
+ callback_kwargs = {}
1065
+ for k in callback_on_step_end_tensor_inputs:
1066
+ callback_kwargs[k] = locals()[k]
1067
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1068
+
1069
+ latents = callback_outputs.pop("latents", latents)
1070
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1071
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1072
+
1073
+ # call the callback, if provided
1074
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1075
+ progress_bar.update()
1076
+
1077
+ # manually for max memory savings
1078
+ if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1079
+ self.upcast_vae()
1080
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1081
+
1082
+ if not output_type == "latent":
1083
+ # make sure the VAE is in float32 mode, as it overflows in float16
1084
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1085
+
1086
+ if needs_upcasting:
1087
+ self.upcast_vae()
1088
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1089
+
1090
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1091
+
1092
+ # cast back to fp16 if needed
1093
+ if needs_upcasting:
1094
+ self.vae.to(dtype=torch.float16)
1095
+ else:
1096
+ image = latents
1097
+
1098
+ if not output_type == "latent":
1099
+ # apply watermark if available
1100
+ if self.watermark is not None:
1101
+ image = self.watermark.apply_watermark(image)
1102
+
1103
+ image = self.image_processor.postprocess(image, output_type=output_type)
1104
+
1105
+ # Offload all models
1106
+ self.maybe_free_model_hooks()
1107
+
1108
+ if not return_dict:
1109
+ return (image,)
1110
+
1111
+ return StableDiffusionXLPipelineOutput(images=image)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__init__.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


# Dummy stand-ins registered on the module when torch/transformers are absent.
_dummy_objects = {}

# Submodule name -> names it exports; consumed by _LazyModule below. The
# timestep schedules are plain data and are always importable.
_import_structure = {
    "timesteps": [
        "fast27_timesteps",
        "smart100_timesteps",
        "smart185_timesteps",
        "smart27_timesteps",
        "smart50_timesteps",
        "super100_timesteps",
        "super27_timesteps",
        "super40_timesteps",
    ]
}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    # The real pipelines are only exposed when both optional backends exist.
    _import_structure.update(
        {
            "pipeline_if": ["IFPipeline"],
            "pipeline_if_img2img": ["IFImg2ImgPipeline"],
            "pipeline_if_img2img_superresolution": ["IFImg2ImgSuperResolutionPipeline"],
            "pipeline_if_inpainting": ["IFInpaintingPipeline"],
            "pipeline_if_inpainting_superresolution": ["IFInpaintingSuperResolutionPipeline"],
            "pipeline_if_superresolution": ["IFSuperResolutionPipeline"],
            "pipeline_output": ["IFPipelineOutput"],
            "safety_checker": ["IFSafetyChecker"],
            "watermark": ["IFWatermarker"],
        }
    )


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager-import path: taken by static type checkers and when slow imports
    # are explicitly requested.
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()

    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_if import IFPipeline
        from .pipeline_if_img2img import IFImg2ImgPipeline
        from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
        from .pipeline_if_inpainting import IFInpaintingPipeline
        from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
        from .pipeline_if_superresolution import IFSuperResolutionPipeline
        from .pipeline_output import IFPipelineOutput
        from .safety_checker import IFSafetyChecker
        from .timesteps import (
            fast27_timesteps,
            smart27_timesteps,
            smart50_timesteps,
            smart100_timesteps,
            smart185_timesteps,
            super27_timesteps,
            super40_timesteps,
            super100_timesteps,
        )
        from .watermark import IFWatermarker

else:
    # Lazy path: replace this module object with a _LazyModule that resolves
    # attributes from _import_structure on first access.
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py ADDED
@@ -0,0 +1,1121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
12
+
13
+ from ...loaders import StableDiffusionLoraLoaderMixin
14
+ from ...models import UNet2DConditionModel
15
+ from ...schedulers import DDPMScheduler
16
+ from ...utils import (
17
+ BACKENDS_MAPPING,
18
+ PIL_INTERPOLATION,
19
+ is_bs4_available,
20
+ is_ftfy_available,
21
+ logging,
22
+ replace_example_docstring,
23
+ )
24
+ from ...utils.torch_utils import randn_tensor
25
+ from ..pipeline_utils import DiffusionPipeline
26
+ from .pipeline_output import IFPipelineOutput
27
+ from .safety_checker import IFSafetyChecker
28
+ from .watermark import IFWatermarker
29
+
30
+
31
# bs4 and ftfy are optional extras used only for caption cleaning
# (`clean_caption=True`); import them behind availability checks.
if is_bs4_available():
    from bs4 import BeautifulSoup

if is_ftfy_available():
    import ftfy


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
39
+
40
+
41
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize
def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image:
    """Resize a PIL image so its shorter side is `img_size`, preserving aspect ratio.

    The longer side is scaled to keep the original aspect ratio and snapped to
    a multiple of 8. Resampling is bicubic (`reducing_gap=None`).
    """
    w, h = images.size

    # Aspect ratio; >= 1 means landscape (or square).
    coef = w / h

    w, h = img_size, img_size

    if coef >= 1:
        # Landscape: height stays img_size, width scaled up to a multiple of 8.
        w = int(round(img_size / 8 * coef) * 8)
    else:
        # Portrait: width stays img_size, height scaled up to a multiple of 8.
        h = int(round(img_size / 8 / coef) * 8)

    images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None)

    return images
57
+
58
+
59
# Usage example substituted into the pipeline call docstring via
# `@replace_example_docstring` (imported above from ...utils).
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
        >>> from diffusers.utils import pt_to_pil
        >>> import torch
        >>> from PIL import Image
        >>> import requests
        >>> from io import BytesIO

        >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"
        >>> response = requests.get(url)
        >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")
        >>> original_image = original_image

        >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"
        >>> response = requests.get(url)
        >>> mask_image = Image.open(BytesIO(response.content))
        >>> mask_image = mask_image

        >>> pipe = IFInpaintingPipeline.from_pretrained(
        ...     "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> prompt = "blue sunglasses"

        >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
        >>> image = pipe(
        ...     image=original_image,
        ...     mask_image=mask_image,
        ...     prompt_embeds=prompt_embeds,
        ...     negative_prompt_embeds=negative_embeds,
        ...     output_type="pt",
        ... ).images

        >>> # save intermediate image
        >>> pil_image = pt_to_pil(image)
        >>> pil_image[0].save("./if_stage_I.png")

        >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
        ...     "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
        ... )
        >>> super_res_1_pipe.enable_model_cpu_offload()

        >>> image = super_res_1_pipe(
        ...     image=image,
        ...     mask_image=mask_image,
        ...     original_image=original_image,
        ...     prompt_embeds=prompt_embeds,
        ...     negative_prompt_embeds=negative_embeds,
        ... ).images
        >>> image[0].save("./if_stage_II.png")
        ```
    """
114
+
115
+
116
class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin):
    """DeepFloyd IF stage-II pipeline that super-resolves an inpainted image.

    Sub-models (registered in `__init__`): a T5 tokenizer/encoder for prompt
    embedding, a `UNet2DConditionModel` denoiser (expected to take 6 input
    channels — see the warning in `__init__`), a DDPM scheduler, a second DDPM
    scheduler (`image_noising_scheduler` — presumably used to noise the
    low-resolution conditioning image; confirm against the call path), and
    optional safety checker / feature extractor / watermarker components.
    """

    # Expected types of the registered sub-models (populated by register_modules).
    tokenizer: T5Tokenizer
    text_encoder: T5EncoderModel

    unet: UNet2DConditionModel
    scheduler: DDPMScheduler
    image_noising_scheduler: DDPMScheduler

    feature_extractor: Optional[CLIPImageProcessor]
    safety_checker: Optional[IFSafetyChecker]

    watermarker: Optional[IFWatermarker]

    # Matches runs of unwanted punctuation/symbol characters; consumed by
    # `_clean_caption` to strip them from prompts.
    bad_punct_regex = re.compile(
        r"["
        + "#®•©™&@·º½¾¿¡§~"
        + r"\)"
        + r"\("
        + r"\]"
        + r"\["
        + r"\}"
        + r"\{"
        + r"\|"
        + "\\"
        + r"\/"
        + r"\*"
        + r"]{1,}"
    )  # noqa

    # Order sub-models are cycled through under model CPU offload; the
    # watermarker is explicitly excluded from the offload cycle.
    model_cpu_offload_seq = "text_encoder->unet"
    _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
    _exclude_from_cpu_offload = ["watermarker"]
149
+ def __init__(
150
+ self,
151
+ tokenizer: T5Tokenizer,
152
+ text_encoder: T5EncoderModel,
153
+ unet: UNet2DConditionModel,
154
+ scheduler: DDPMScheduler,
155
+ image_noising_scheduler: DDPMScheduler,
156
+ safety_checker: Optional[IFSafetyChecker],
157
+ feature_extractor: Optional[CLIPImageProcessor],
158
+ watermarker: Optional[IFWatermarker],
159
+ requires_safety_checker: bool = True,
160
+ ):
161
+ super().__init__()
162
+
163
+ if safety_checker is None and requires_safety_checker:
164
+ logger.warning(
165
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
166
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
167
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
168
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
169
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
170
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
171
+ )
172
+
173
+ if safety_checker is not None and feature_extractor is None:
174
+ raise ValueError(
175
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
176
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
177
+ )
178
+
179
+ if unet.config.in_channels != 6:
180
+ logger.warning(
181
+ "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
182
+ )
183
+
184
+ self.register_modules(
185
+ tokenizer=tokenizer,
186
+ text_encoder=text_encoder,
187
+ unet=unet,
188
+ scheduler=scheduler,
189
+ image_noising_scheduler=image_noising_scheduler,
190
+ safety_checker=safety_checker,
191
+ feature_extractor=feature_extractor,
192
+ watermarker=watermarker,
193
+ )
194
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
195
+
196
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
    def _text_preprocessing(self, text, clean_caption=False):
        """Normalize prompt text into a list of processed strings.

        When `clean_caption` is True (and bs4/ftfy are installed) each string
        is run through `_clean_caption`; otherwise it is just lowercased and
        stripped. A bare string input is wrapped into a one-element list.
        """
        # Caption cleaning requires the optional bs4 and ftfy packages;
        # degrade gracefully to plain lowercasing when either is missing.
        if clean_caption and not is_bs4_available():
            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if clean_caption and not is_ftfy_available():
            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        # Operate on a list so single prompts and batches share one code path.
        if not isinstance(text, (tuple, list)):
            text = [text]

        def process(text: str):
            if clean_caption:
                # NOTE(review): `_clean_caption` is applied twice on purpose;
                # this matches the upstream source this method is copied from.
                text = self._clean_caption(text)
                text = self._clean_caption(text)
            else:
                text = text.lower().strip()
            return text

        return [process(t) for t in text]
220
+
221
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
    def _clean_caption(self, caption):
        """Aggressively clean a caption string for prompt encoding.

        Applies, in order: URL decoding and lowercasing, URL/HTML/@mention
        removal, CJK stripping, punctuation normalization, removal of ids /
        filenames / boilerplate marketing phrases, and whitespace collapsing.
        The order of the substitutions below is significant — do not reorder.
        """
        caption = str(caption)
        caption = ul.unquote_plus(caption)
        caption = caption.strip().lower()
        caption = re.sub("<person>", "person", caption)
        # urls:
        caption = re.sub(
            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        caption = re.sub(
            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        # html:
        caption = BeautifulSoup(caption, features="html.parser").text

        # @<nickname>
        caption = re.sub(r"@[\w\d]+\b", "", caption)

        # 31C0—31EF CJK Strokes
        # 31F0—31FF Katakana Phonetic Extensions
        # 3200—32FF Enclosed CJK Letters and Months
        # 3300—33FF CJK Compatibility
        # 3400—4DBF CJK Unified Ideographs Extension A
        # 4DC0—4DFF Yijing Hexagram Symbols
        # 4E00—9FFF CJK Unified Ideographs
        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
        #######################################################

        # all types of dash --> "-"
        caption = re.sub(
            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
            "-",
            caption,
        )

        # normalize quotation marks to a single standard
        caption = re.sub(r"[`´«»“”¨]", '"', caption)
        caption = re.sub(r"[‘’]", "'", caption)

        # &quot;
        caption = re.sub(r"&quot;?", "", caption)
        # &amp
        caption = re.sub(r"&amp", "", caption)

        # IP addresses:
        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)

        # article ids:
        caption = re.sub(r"\d:\d\d\s+$", "", caption)

        # \n
        caption = re.sub(r"\\n", " ", caption)

        # "#123"
        caption = re.sub(r"#\d{1,3}\b", "", caption)
        # "#12345.."
        caption = re.sub(r"#\d{5,}\b", "", caption)
        # "123456.."
        caption = re.sub(r"\b\d{6,}\b", "", caption)
        # filenames:
        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)

        #
        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""

        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "

        # this-is-my-cute-cat / this_is_my_cute_cat
        regex2 = re.compile(r"(?:\-|\_)")
        if len(re.findall(regex2, caption)) > 3:
            caption = re.sub(regex2, " ", caption)

        caption = ftfy.fix_text(caption)
        caption = html.unescape(html.unescape(caption))

        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231

        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
        caption = re.sub(r"\bpage\s+\d+\b", "", caption)

        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...

        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)

        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
        caption = re.sub(r"\s+", " ", caption)

        # NOTE(review): no-op — str.strip() returns a new string and the result
        # is discarded here; the final return strips anyway. Matches upstream.
        caption.strip()

        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
        caption = re.sub(r"^\.\S+$", "", caption)

        return caption.strip()
335
+
336
+ @torch.no_grad()
337
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
338
+ def encode_prompt(
339
+ self,
340
+ prompt: Union[str, List[str]],
341
+ do_classifier_free_guidance: bool = True,
342
+ num_images_per_prompt: int = 1,
343
+ device: Optional[torch.device] = None,
344
+ negative_prompt: Optional[Union[str, List[str]]] = None,
345
+ prompt_embeds: Optional[torch.Tensor] = None,
346
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
347
+ clean_caption: bool = False,
348
+ ):
349
+ r"""
350
+ Encodes the prompt into text encoder hidden states.
351
+
352
+ Args:
353
+ prompt (`str` or `List[str]`, *optional*):
354
+ prompt to be encoded
355
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
356
+ whether to use classifier free guidance or not
357
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
358
+ number of images that should be generated per prompt
359
+ device: (`torch.device`, *optional*):
360
+ torch device to place the resulting embeddings on
361
+ negative_prompt (`str` or `List[str]`, *optional*):
362
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
363
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
364
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
365
+ prompt_embeds (`torch.Tensor`, *optional*):
366
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
367
+ provided, text embeddings will be generated from `prompt` input argument.
368
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
369
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
370
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
371
+ argument.
372
+ clean_caption (bool, defaults to `False`):
373
+ If `True`, the function will preprocess and clean the provided caption before encoding.
374
+ """
375
+ if prompt is not None and negative_prompt is not None:
376
+ if type(prompt) is not type(negative_prompt):
377
+ raise TypeError(
378
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
379
+ f" {type(prompt)}."
380
+ )
381
+
382
+ if device is None:
383
+ device = self._execution_device
384
+
385
+ if prompt is not None and isinstance(prompt, str):
386
+ batch_size = 1
387
+ elif prompt is not None and isinstance(prompt, list):
388
+ batch_size = len(prompt)
389
+ else:
390
+ batch_size = prompt_embeds.shape[0]
391
+
392
+ # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
393
+ max_length = 77
394
+
395
+ if prompt_embeds is None:
396
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
397
+ text_inputs = self.tokenizer(
398
+ prompt,
399
+ padding="max_length",
400
+ max_length=max_length,
401
+ truncation=True,
402
+ add_special_tokens=True,
403
+ return_tensors="pt",
404
+ )
405
+ text_input_ids = text_inputs.input_ids
406
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
407
+
408
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
409
+ text_input_ids, untruncated_ids
410
+ ):
411
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
412
+ logger.warning(
413
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
414
+ f" {max_length} tokens: {removed_text}"
415
+ )
416
+
417
+ attention_mask = text_inputs.attention_mask.to(device)
418
+
419
+ prompt_embeds = self.text_encoder(
420
+ text_input_ids.to(device),
421
+ attention_mask=attention_mask,
422
+ )
423
+ prompt_embeds = prompt_embeds[0]
424
+
425
+ if self.text_encoder is not None:
426
+ dtype = self.text_encoder.dtype
427
+ elif self.unet is not None:
428
+ dtype = self.unet.dtype
429
+ else:
430
+ dtype = None
431
+
432
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
433
+
434
+ bs_embed, seq_len, _ = prompt_embeds.shape
435
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
436
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
437
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
438
+
439
+ # get unconditional embeddings for classifier free guidance
440
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
441
+ uncond_tokens: List[str]
442
+ if negative_prompt is None:
443
+ uncond_tokens = [""] * batch_size
444
+ elif isinstance(negative_prompt, str):
445
+ uncond_tokens = [negative_prompt]
446
+ elif batch_size != len(negative_prompt):
447
+ raise ValueError(
448
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
449
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
450
+ " the batch size of `prompt`."
451
+ )
452
+ else:
453
+ uncond_tokens = negative_prompt
454
+
455
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
456
+ max_length = prompt_embeds.shape[1]
457
+ uncond_input = self.tokenizer(
458
+ uncond_tokens,
459
+ padding="max_length",
460
+ max_length=max_length,
461
+ truncation=True,
462
+ return_attention_mask=True,
463
+ add_special_tokens=True,
464
+ return_tensors="pt",
465
+ )
466
+ attention_mask = uncond_input.attention_mask.to(device)
467
+
468
+ negative_prompt_embeds = self.text_encoder(
469
+ uncond_input.input_ids.to(device),
470
+ attention_mask=attention_mask,
471
+ )
472
+ negative_prompt_embeds = negative_prompt_embeds[0]
473
+
474
+ if do_classifier_free_guidance:
475
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
476
+ seq_len = negative_prompt_embeds.shape[1]
477
+
478
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
479
+
480
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
481
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
482
+
483
+ # For classifier free guidance, we need to do two forward passes.
484
+ # Here we concatenate the unconditional and text embeddings into a single batch
485
+ # to avoid doing two forward passes
486
+ else:
487
+ negative_prompt_embeds = None
488
+
489
+ return prompt_embeds, negative_prompt_embeds
490
+
491
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
492
+ def run_safety_checker(self, image, device, dtype):
493
+ if self.safety_checker is not None:
494
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
495
+ image, nsfw_detected, watermark_detected = self.safety_checker(
496
+ images=image,
497
+ clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
498
+ )
499
+ else:
500
+ nsfw_detected = None
501
+ watermark_detected = None
502
+
503
+ return image, nsfw_detected, watermark_detected
504
+
505
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
506
+ def prepare_extra_step_kwargs(self, generator, eta):
507
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
508
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
509
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
510
+ # and should be between [0, 1]
511
+
512
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
513
+ extra_step_kwargs = {}
514
+ if accepts_eta:
515
+ extra_step_kwargs["eta"] = eta
516
+
517
+ # check if the scheduler accepts generator
518
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
519
+ if accepts_generator:
520
+ extra_step_kwargs["generator"] = generator
521
+ return extra_step_kwargs
522
+
523
+ def check_inputs(
524
+ self,
525
+ prompt,
526
+ image,
527
+ original_image,
528
+ mask_image,
529
+ batch_size,
530
+ callback_steps,
531
+ negative_prompt=None,
532
+ prompt_embeds=None,
533
+ negative_prompt_embeds=None,
534
+ ):
535
+ if (callback_steps is None) or (
536
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
537
+ ):
538
+ raise ValueError(
539
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
540
+ f" {type(callback_steps)}."
541
+ )
542
+
543
+ if prompt is not None and prompt_embeds is not None:
544
+ raise ValueError(
545
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
546
+ " only forward one of the two."
547
+ )
548
+ elif prompt is None and prompt_embeds is None:
549
+ raise ValueError(
550
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
551
+ )
552
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
553
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
554
+
555
+ if negative_prompt is not None and negative_prompt_embeds is not None:
556
+ raise ValueError(
557
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
558
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
559
+ )
560
+
561
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
562
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
563
+ raise ValueError(
564
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
565
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
566
+ f" {negative_prompt_embeds.shape}."
567
+ )
568
+
569
+ # image
570
+
571
+ if isinstance(image, list):
572
+ check_image_type = image[0]
573
+ else:
574
+ check_image_type = image
575
+
576
+ if (
577
+ not isinstance(check_image_type, torch.Tensor)
578
+ and not isinstance(check_image_type, PIL.Image.Image)
579
+ and not isinstance(check_image_type, np.ndarray)
580
+ ):
581
+ raise ValueError(
582
+ "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
583
+ f" {type(check_image_type)}"
584
+ )
585
+
586
+ if isinstance(image, list):
587
+ image_batch_size = len(image)
588
+ elif isinstance(image, torch.Tensor):
589
+ image_batch_size = image.shape[0]
590
+ elif isinstance(image, PIL.Image.Image):
591
+ image_batch_size = 1
592
+ elif isinstance(image, np.ndarray):
593
+ image_batch_size = image.shape[0]
594
+ else:
595
+ assert False
596
+
597
+ if batch_size != image_batch_size:
598
+ raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
599
+
600
+ # original_image
601
+
602
+ if isinstance(original_image, list):
603
+ check_image_type = original_image[0]
604
+ else:
605
+ check_image_type = original_image
606
+
607
+ if (
608
+ not isinstance(check_image_type, torch.Tensor)
609
+ and not isinstance(check_image_type, PIL.Image.Image)
610
+ and not isinstance(check_image_type, np.ndarray)
611
+ ):
612
+ raise ValueError(
613
+ "`original_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
614
+ f" {type(check_image_type)}"
615
+ )
616
+
617
+ if isinstance(original_image, list):
618
+ image_batch_size = len(original_image)
619
+ elif isinstance(original_image, torch.Tensor):
620
+ image_batch_size = original_image.shape[0]
621
+ elif isinstance(original_image, PIL.Image.Image):
622
+ image_batch_size = 1
623
+ elif isinstance(original_image, np.ndarray):
624
+ image_batch_size = original_image.shape[0]
625
+ else:
626
+ assert False
627
+
628
+ if batch_size != image_batch_size:
629
+ raise ValueError(
630
+ f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}"
631
+ )
632
+
633
+ # mask_image
634
+
635
+ if isinstance(mask_image, list):
636
+ check_image_type = mask_image[0]
637
+ else:
638
+ check_image_type = mask_image
639
+
640
+ if (
641
+ not isinstance(check_image_type, torch.Tensor)
642
+ and not isinstance(check_image_type, PIL.Image.Image)
643
+ and not isinstance(check_image_type, np.ndarray)
644
+ ):
645
+ raise ValueError(
646
+ "`mask_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
647
+ f" {type(check_image_type)}"
648
+ )
649
+
650
+ if isinstance(mask_image, list):
651
+ image_batch_size = len(mask_image)
652
+ elif isinstance(mask_image, torch.Tensor):
653
+ image_batch_size = mask_image.shape[0]
654
+ elif isinstance(mask_image, PIL.Image.Image):
655
+ image_batch_size = 1
656
+ elif isinstance(mask_image, np.ndarray):
657
+ image_batch_size = mask_image.shape[0]
658
+ else:
659
+ assert False
660
+
661
+ if image_batch_size != 1 and batch_size != image_batch_size:
662
+ raise ValueError(
663
+ f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}"
664
+ )
665
+
666
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image
667
+ def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor:
668
+ if not isinstance(image, list):
669
+ image = [image]
670
+
671
+ def numpy_to_pt(images):
672
+ if images.ndim == 3:
673
+ images = images[..., None]
674
+
675
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
676
+ return images
677
+
678
+ if isinstance(image[0], PIL.Image.Image):
679
+ new_image = []
680
+
681
+ for image_ in image:
682
+ image_ = image_.convert("RGB")
683
+ image_ = resize(image_, self.unet.config.sample_size)
684
+ image_ = np.array(image_)
685
+ image_ = image_.astype(np.float32)
686
+ image_ = image_ / 127.5 - 1
687
+ new_image.append(image_)
688
+
689
+ image = new_image
690
+
691
+ image = np.stack(image, axis=0) # to np
692
+ image = numpy_to_pt(image) # to pt
693
+
694
+ elif isinstance(image[0], np.ndarray):
695
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
696
+ image = numpy_to_pt(image)
697
+
698
+ elif isinstance(image[0], torch.Tensor):
699
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
700
+
701
+ return image
702
+
703
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image
704
+ def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor:
705
+ if not isinstance(image, torch.Tensor) and not isinstance(image, list):
706
+ image = [image]
707
+
708
+ if isinstance(image[0], PIL.Image.Image):
709
+ image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image]
710
+
711
+ image = np.stack(image, axis=0) # to np
712
+ image = torch.from_numpy(image.transpose(0, 3, 1, 2))
713
+ elif isinstance(image[0], np.ndarray):
714
+ image = np.stack(image, axis=0) # to np
715
+ if image.ndim == 5:
716
+ image = image[0]
717
+
718
+ image = torch.from_numpy(image.transpose(0, 3, 1, 2))
719
+ elif isinstance(image, list) and isinstance(image[0], torch.Tensor):
720
+ dims = image[0].ndim
721
+
722
+ if dims == 3:
723
+ image = torch.stack(image, dim=0)
724
+ elif dims == 4:
725
+ image = torch.concat(image, dim=0)
726
+ else:
727
+ raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}")
728
+
729
+ image = image.to(device=device, dtype=self.unet.dtype)
730
+
731
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
732
+
733
+ return image
734
+
735
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.preprocess_mask_image
736
+ def preprocess_mask_image(self, mask_image) -> torch.Tensor:
737
+ if not isinstance(mask_image, list):
738
+ mask_image = [mask_image]
739
+
740
+ if isinstance(mask_image[0], torch.Tensor):
741
+ mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0)
742
+
743
+ if mask_image.ndim == 2:
744
+ # Batch and add channel dim for single mask
745
+ mask_image = mask_image.unsqueeze(0).unsqueeze(0)
746
+ elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
747
+ # Single mask, the 0'th dimension is considered to be
748
+ # the existing batch size of 1
749
+ mask_image = mask_image.unsqueeze(0)
750
+ elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
751
+ # Batch of mask, the 0'th dimension is considered to be
752
+ # the batching dimension
753
+ mask_image = mask_image.unsqueeze(1)
754
+
755
+ mask_image[mask_image < 0.5] = 0
756
+ mask_image[mask_image >= 0.5] = 1
757
+
758
+ elif isinstance(mask_image[0], PIL.Image.Image):
759
+ new_mask_image = []
760
+
761
+ for mask_image_ in mask_image:
762
+ mask_image_ = mask_image_.convert("L")
763
+ mask_image_ = resize(mask_image_, self.unet.config.sample_size)
764
+ mask_image_ = np.array(mask_image_)
765
+ mask_image_ = mask_image_[None, None, :]
766
+ new_mask_image.append(mask_image_)
767
+
768
+ mask_image = new_mask_image
769
+
770
+ mask_image = np.concatenate(mask_image, axis=0)
771
+ mask_image = mask_image.astype(np.float32) / 255.0
772
+ mask_image[mask_image < 0.5] = 0
773
+ mask_image[mask_image >= 0.5] = 1
774
+ mask_image = torch.from_numpy(mask_image)
775
+
776
+ elif isinstance(mask_image[0], np.ndarray):
777
+ mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)
778
+
779
+ mask_image[mask_image < 0.5] = 0
780
+ mask_image[mask_image >= 0.5] = 1
781
+ mask_image = torch.from_numpy(mask_image)
782
+
783
+ return mask_image
784
+
785
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
786
+ def get_timesteps(self, num_inference_steps, strength):
787
+ # get the original timestep using init_timestep
788
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
789
+
790
+ t_start = max(num_inference_steps - init_timestep, 0)
791
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
792
+ if hasattr(self.scheduler, "set_begin_index"):
793
+ self.scheduler.set_begin_index(t_start * self.scheduler.order)
794
+
795
+ return timesteps, num_inference_steps - t_start
796
+
797
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.prepare_intermediate_images
798
+ def prepare_intermediate_images(
799
+ self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None
800
+ ):
801
+ image_batch_size, channels, height, width = image.shape
802
+
803
+ batch_size = batch_size * num_images_per_prompt
804
+
805
+ shape = (batch_size, channels, height, width)
806
+
807
+ if isinstance(generator, list) and len(generator) != batch_size:
808
+ raise ValueError(
809
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
810
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
811
+ )
812
+
813
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
814
+
815
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
816
+ noised_image = self.scheduler.add_noise(image, noise, timestep)
817
+
818
+ image = (1 - mask_image) * image + mask_image * noised_image
819
+
820
+ return image
821
+
822
+ @torch.no_grad()
823
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
824
+ def __call__(
825
+ self,
826
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
827
+ original_image: Union[
828
+ PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
829
+ ] = None,
830
+ mask_image: Union[
831
+ PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
832
+ ] = None,
833
+ strength: float = 0.8,
834
+ prompt: Union[str, List[str]] = None,
835
+ num_inference_steps: int = 100,
836
+ timesteps: List[int] = None,
837
+ guidance_scale: float = 4.0,
838
+ negative_prompt: Optional[Union[str, List[str]]] = None,
839
+ num_images_per_prompt: Optional[int] = 1,
840
+ eta: float = 0.0,
841
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
842
+ prompt_embeds: Optional[torch.Tensor] = None,
843
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
844
+ output_type: Optional[str] = "pil",
845
+ return_dict: bool = True,
846
+ callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
847
+ callback_steps: int = 1,
848
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
849
+ noise_level: int = 0,
850
+ clean_caption: bool = True,
851
+ ):
852
+ """
853
+ Function invoked when calling the pipeline for generation.
854
+
855
+ Args:
856
+ image (`torch.Tensor` or `PIL.Image.Image`):
857
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
858
+ process.
859
+ original_image (`torch.Tensor` or `PIL.Image.Image`):
860
+ The original image that `image` was varied from.
861
+ mask_image (`PIL.Image.Image`):
862
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
863
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
864
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
865
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
866
+ strength (`float`, *optional*, defaults to 0.8):
867
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
868
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
869
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
870
+ be maximum and the denoising process will run for the full number of iterations specified in
871
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
872
+ prompt (`str` or `List[str]`, *optional*):
873
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
874
+ instead.
875
+ num_inference_steps (`int`, *optional*, defaults to 100):
876
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
877
+ expense of slower inference.
878
+ timesteps (`List[int]`, *optional*):
879
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
880
+ timesteps are used. Must be in descending order.
881
+ guidance_scale (`float`, *optional*, defaults to 4.0):
882
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
883
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
884
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
885
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
886
+ usually at the expense of lower image quality.
887
+ negative_prompt (`str` or `List[str]`, *optional*):
888
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
889
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
890
+ less than `1`).
891
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
892
+ The number of images to generate per prompt.
893
+ eta (`float`, *optional*, defaults to 0.0):
894
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
895
+ [`schedulers.DDIMScheduler`], will be ignored for others.
896
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
897
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
898
+ to make generation deterministic.
899
+ prompt_embeds (`torch.Tensor`, *optional*):
900
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
901
+ provided, text embeddings will be generated from `prompt` input argument.
902
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
903
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
904
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
905
+ argument.
906
+ output_type (`str`, *optional*, defaults to `"pil"`):
907
+ The output format of the generate image. Choose between
908
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
909
+ return_dict (`bool`, *optional*, defaults to `True`):
910
+ Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
911
+ callback (`Callable`, *optional*):
912
+ A function that will be called every `callback_steps` steps during inference. The function will be
913
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
914
+ callback_steps (`int`, *optional*, defaults to 1):
915
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
916
+ called at every step.
917
+ cross_attention_kwargs (`dict`, *optional*):
918
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
919
+ `self.processor` in
920
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
921
+ noise_level (`int`, *optional*, defaults to 0):
922
+ The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)`
923
+ clean_caption (`bool`, *optional*, defaults to `True`):
924
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
925
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
926
+ prompt.
927
+
928
+ Examples:
929
+
930
+ Returns:
931
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
932
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
933
+ returning a tuple, the first element is a list with the generated images, and the second element is a list
934
+ of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
935
+ or watermarked content, according to the `safety_checker`.
936
+ """
937
+ # 1. Check inputs. Raise error if not correct
938
+ if prompt is not None and isinstance(prompt, str):
939
+ batch_size = 1
940
+ elif prompt is not None and isinstance(prompt, list):
941
+ batch_size = len(prompt)
942
+ else:
943
+ batch_size = prompt_embeds.shape[0]
944
+
945
+ self.check_inputs(
946
+ prompt,
947
+ image,
948
+ original_image,
949
+ mask_image,
950
+ batch_size,
951
+ callback_steps,
952
+ negative_prompt,
953
+ prompt_embeds,
954
+ negative_prompt_embeds,
955
+ )
956
+
957
+ # 2. Define call parameters
958
+
959
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
960
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
961
+ # corresponds to doing no classifier free guidance.
962
+ do_classifier_free_guidance = guidance_scale > 1.0
963
+
964
+ device = self._execution_device
965
+
966
+ # 3. Encode input prompt
967
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
968
+ prompt,
969
+ do_classifier_free_guidance,
970
+ num_images_per_prompt=num_images_per_prompt,
971
+ device=device,
972
+ negative_prompt=negative_prompt,
973
+ prompt_embeds=prompt_embeds,
974
+ negative_prompt_embeds=negative_prompt_embeds,
975
+ clean_caption=clean_caption,
976
+ )
977
+
978
+ if do_classifier_free_guidance:
979
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
980
+
981
+ dtype = prompt_embeds.dtype
982
+
983
+ # 4. Prepare timesteps
984
+ if timesteps is not None:
985
+ self.scheduler.set_timesteps(timesteps=timesteps, device=device)
986
+ timesteps = self.scheduler.timesteps
987
+ num_inference_steps = len(timesteps)
988
+ else:
989
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
990
+ timesteps = self.scheduler.timesteps
991
+
992
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
993
+
994
+ # 5. prepare original image
995
+ original_image = self.preprocess_original_image(original_image)
996
+ original_image = original_image.to(device=device, dtype=dtype)
997
+
998
+ # 6. prepare mask image
999
+ mask_image = self.preprocess_mask_image(mask_image)
1000
+ mask_image = mask_image.to(device=device, dtype=dtype)
1001
+
1002
+ if mask_image.shape[0] == 1:
1003
+ mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0)
1004
+ else:
1005
+ mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0)
1006
+
1007
+ # 6. Prepare intermediate images
1008
+ noise_timestep = timesteps[0:1]
1009
+ noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt)
1010
+
1011
+ intermediate_images = self.prepare_intermediate_images(
1012
+ original_image,
1013
+ noise_timestep,
1014
+ batch_size,
1015
+ num_images_per_prompt,
1016
+ dtype,
1017
+ device,
1018
+ mask_image,
1019
+ generator,
1020
+ )
1021
+
1022
+ # 7. Prepare upscaled image and noise level
1023
+ _, _, height, width = original_image.shape
1024
+
1025
+ image = self.preprocess_image(image, num_images_per_prompt, device)
1026
+
1027
+ upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True)
1028
+
1029
+ noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)
1030
+ noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)
1031
+ upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)
1032
+
1033
+ if do_classifier_free_guidance:
1034
+ noise_level = torch.cat([noise_level] * 2)
1035
+
1036
+ # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1037
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1038
+
1039
+ # HACK: see comment in `enable_model_cpu_offload`
1040
+ if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
1041
+ self.text_encoder_offload_hook.offload()
1042
+
1043
+ # 9. Denoising loop
1044
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1045
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1046
+ for i, t in enumerate(timesteps):
1047
+ model_input = torch.cat([intermediate_images, upscaled], dim=1)
1048
+
1049
+ model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
1050
+ model_input = self.scheduler.scale_model_input(model_input, t)
1051
+
1052
+ # predict the noise residual
1053
+ noise_pred = self.unet(
1054
+ model_input,
1055
+ t,
1056
+ encoder_hidden_states=prompt_embeds,
1057
+ class_labels=noise_level,
1058
+ cross_attention_kwargs=cross_attention_kwargs,
1059
+ return_dict=False,
1060
+ )[0]
1061
+
1062
+ # perform guidance
1063
+ if do_classifier_free_guidance:
1064
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1065
+ noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)
1066
+ noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)
1067
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1068
+ noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
1069
+
1070
+ if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
1071
+ noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)
1072
+
1073
+ # compute the previous noisy sample x_t -> x_t-1
1074
+ prev_intermediate_images = intermediate_images
1075
+
1076
+ intermediate_images = self.scheduler.step(
1077
+ noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
1078
+ )[0]
1079
+
1080
+ intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images
1081
+
1082
+ # call the callback, if provided
1083
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1084
+ progress_bar.update()
1085
+ if callback is not None and i % callback_steps == 0:
1086
+ callback(i, t, intermediate_images)
1087
+
1088
+ image = intermediate_images
1089
+
1090
+ if output_type == "pil":
1091
+ # 10. Post-processing
1092
+ image = (image / 2 + 0.5).clamp(0, 1)
1093
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1094
+
1095
+ # 11. Run safety checker
1096
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
1097
+
1098
+ # 12. Convert to PIL
1099
+ image = self.numpy_to_pil(image)
1100
+
1101
+ # 13. Apply watermark
1102
+ if self.watermarker is not None:
1103
+ self.watermarker.apply_watermark(image, self.unet.config.sample_size)
1104
+ elif output_type == "pt":
1105
+ nsfw_detected = None
1106
+ watermark_detected = None
1107
+
1108
+ else:
1109
+ # 10. Post-processing
1110
+ image = (image / 2 + 0.5).clamp(0, 1)
1111
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1112
+
1113
+ # 11. Run safety checker
1114
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
1115
+
1116
+ self.maybe_free_model_hooks()
1117
+
1118
+ if not return_dict:
1119
+ return (image, nsfw_detected, watermark_detected)
1120
+
1121
+ return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py ADDED
@@ -0,0 +1,870 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import html
2
+ import inspect
3
+ import re
4
+ import urllib.parse as ul
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import numpy as np
8
+ import PIL.Image
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
12
+
13
+ from ...loaders import StableDiffusionLoraLoaderMixin
14
+ from ...models import UNet2DConditionModel
15
+ from ...schedulers import DDPMScheduler
16
+ from ...utils import (
17
+ BACKENDS_MAPPING,
18
+ is_bs4_available,
19
+ is_ftfy_available,
20
+ logging,
21
+ replace_example_docstring,
22
+ )
23
+ from ...utils.torch_utils import randn_tensor
24
+ from ..pipeline_utils import DiffusionPipeline
25
+ from .pipeline_output import IFPipelineOutput
26
+ from .safety_checker import IFSafetyChecker
27
+ from .watermark import IFWatermarker
28
+
29
+
30
+ if is_bs4_available():
31
+ from bs4 import BeautifulSoup
32
+
33
+ if is_ftfy_available():
34
+ import ftfy
35
+
36
+
37
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Usage example spliced into `IFSuperResolutionPipeline.__call__`'s docstring by
# the `@replace_example_docstring` decorator.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline
        >>> from diffusers.utils import pt_to_pil
        >>> import torch

        >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        >>> pipe.enable_model_cpu_offload()

        >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
        >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)

        >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images

        >>> # save intermediate image
        >>> pil_image = pt_to_pil(image)
        >>> pil_image[0].save("./if_stage_I.png")

        >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(
        ...     "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
        ... )
        >>> super_res_1_pipe.enable_model_cpu_offload()

        >>> image = super_res_1_pipe(
        ...     image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds
        ... ).images
        >>> image[0].save("./if_stage_II.png")
        ```
"""
70
+
71
+
72
class IFSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin):
    """
    Pipeline for the second (super-resolution) stage of DeepFloyd IF: upscales a
    stage-I output conditioned on the text prompt.
    """

    # T5 tokenizer/encoder used to embed the prompt (optional at load time).
    tokenizer: T5Tokenizer
    text_encoder: T5EncoderModel

    # Denoising UNet and its scheduler; `image_noising_scheduler` is used to add
    # noise to the upscaled conditioning image before denoising.
    unet: UNet2DConditionModel
    scheduler: DDPMScheduler
    image_noising_scheduler: DDPMScheduler

    # Optional safety-checking components (see `run_safety_checker`).
    feature_extractor: Optional[CLIPImageProcessor]
    safety_checker: Optional[IFSafetyChecker]

    # Optional invisible watermarker applied to final PIL output.
    watermarker: Optional[IFWatermarker]

    # Matches runs of punctuation/special characters stripped by `_clean_caption`.
    bad_punct_regex = re.compile(
        r"["
        + "#®•©™&@·º½¾¿¡§~"
        + r"\)"
        + r"\("
        + r"\]"
        + r"\["
        + r"\}"
        + r"\{"
        + r"\|"
        + "\\"
        + r"\/"
        + r"\*"
        + r"]{1,}"
    )  # noqa

    # Components that may be `None` when loading; CPU-offload ordering and the
    # watermarker (no parameters worth offloading) is excluded from offload.
    _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
    model_cpu_offload_seq = "text_encoder->unet"
    _exclude_from_cpu_offload = ["watermarker"]
104
+
105
+ def __init__(
106
+ self,
107
+ tokenizer: T5Tokenizer,
108
+ text_encoder: T5EncoderModel,
109
+ unet: UNet2DConditionModel,
110
+ scheduler: DDPMScheduler,
111
+ image_noising_scheduler: DDPMScheduler,
112
+ safety_checker: Optional[IFSafetyChecker],
113
+ feature_extractor: Optional[CLIPImageProcessor],
114
+ watermarker: Optional[IFWatermarker],
115
+ requires_safety_checker: bool = True,
116
+ ):
117
+ super().__init__()
118
+
119
+ if safety_checker is None and requires_safety_checker:
120
+ logger.warning(
121
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
122
+ " that you abide to the conditions of the IF license and do not expose unfiltered"
123
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
124
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
125
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
126
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
127
+ )
128
+
129
+ if safety_checker is not None and feature_extractor is None:
130
+ raise ValueError(
131
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
132
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
133
+ )
134
+
135
+ if unet.config.in_channels != 6:
136
+ logger.warning(
137
+ "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
138
+ )
139
+
140
+ self.register_modules(
141
+ tokenizer=tokenizer,
142
+ text_encoder=text_encoder,
143
+ unet=unet,
144
+ scheduler=scheduler,
145
+ image_noising_scheduler=image_noising_scheduler,
146
+ safety_checker=safety_checker,
147
+ feature_extractor=feature_extractor,
148
+ watermarker=watermarker,
149
+ )
150
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
151
+
152
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
    def _text_preprocessing(self, text, clean_caption=False):
        """
        Normalize a prompt (or list of prompts) into a list of cleaned strings.

        With `clean_caption=True` (requires `bs4` and `ftfy`) each caption is run
        through `_clean_caption`; otherwise captions are just lowercased/stripped.
        """
        # `clean_caption` depends on bs4 and ftfy; degrade gracefully when missing.
        if clean_caption and not is_bs4_available():
            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if clean_caption and not is_ftfy_available():
            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if not isinstance(text, (tuple, list)):
            text = [text]

        def process(text: str):
            if clean_caption:
                # NOTE(review): cleaning is intentionally applied twice — matches the
                # upstream IF implementation.
                text = self._clean_caption(text)
                text = self._clean_caption(text)
            else:
                text = text.lower().strip()
            return text

        return [process(t) for t in text]
176
+
177
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
    def _clean_caption(self, caption):
        """
        Aggressively normalize a raw caption: remove URLs, HTML markup,
        @-mentions, CJK ranges, ids/filenames and odd punctuation, unify
        dashes/quotes, and collapse whitespace.

        Requires `bs4` and `ftfy`; callers guard on their availability via
        `_text_preprocessing`.
        """
        caption = str(caption)
        caption = ul.unquote_plus(caption)
        caption = caption.strip().lower()
        caption = re.sub("<person>", "person", caption)
        # urls:
        caption = re.sub(
            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        caption = re.sub(
            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        # html:
        caption = BeautifulSoup(caption, features="html.parser").text

        # @<nickname>
        caption = re.sub(r"@[\w\d]+\b", "", caption)

        # 31C0—31EF CJK Strokes
        # 31F0—31FF Katakana Phonetic Extensions
        # 3200—32FF Enclosed CJK Letters and Months
        # 3300—33FF CJK Compatibility
        # 3400—4DBF CJK Unified Ideographs Extension A
        # 4DC0—4DFF Yijing Hexagram Symbols
        # 4E00—9FFF CJK Unified Ideographs
        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
        #######################################################

        # все виды тире / all types of dash --> "-"
        caption = re.sub(
            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
            "-",
            caption,
        )

        # кавычки к одному стандарту / normalize all quotes to one standard
        caption = re.sub(r"[`´«»“”¨]", '"', caption)
        caption = re.sub(r"[‘’]", "'", caption)

        # &quot;
        caption = re.sub(r"&quot;?", "", caption)
        # &amp
        caption = re.sub(r"&amp", "", caption)

        # ip adresses:
        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)

        # article ids:
        caption = re.sub(r"\d:\d\d\s+$", "", caption)

        # \n
        caption = re.sub(r"\\n", " ", caption)

        # "#123"
        caption = re.sub(r"#\d{1,3}\b", "", caption)
        # "#12345.."
        caption = re.sub(r"#\d{5,}\b", "", caption)
        # "123456.."
        caption = re.sub(r"\b\d{6,}\b", "", caption)
        # filenames:
        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)

        #
        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""

        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "

        # this-is-my-cute-cat / this_is_my_cute_cat
        regex2 = re.compile(r"(?:\-|\_)")
        if len(re.findall(regex2, caption)) > 3:
            caption = re.sub(regex2, " ", caption)

        caption = ftfy.fix_text(caption)
        caption = html.unescape(html.unescape(caption))

        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231

        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
        caption = re.sub(r"\bpage\s+\d+\b", "", caption)

        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...

        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)

        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
        caption = re.sub(r"\s+", " ", caption)

        # NOTE(review): `str.strip` returns a new string — this result is discarded
        # (dead statement, kept for byte-parity with the upstream copy; the final
        # `return` strips anyway).
        caption.strip()

        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
        caption = re.sub(r"^\.\S+$", "", caption)

        return caption.strip()
291
+
292
    @torch.no_grad()
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        do_classifier_free_guidance: bool = True,
        num_images_per_prompt: int = 1,
        device: Optional[torch.device] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        clean_caption: bool = False,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
                Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            clean_caption (bool, defaults to `False`):
                If `True`, the function will preprocess and clean the provided caption before encoding.
        """
        if prompt is not None and negative_prompt is not None:
            if type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )

        if device is None:
            device = self._execution_device

        # Derive the batch size from `prompt` when given, else from the embeddings.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
        max_length = 77

        if prompt_embeds is None:
            prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
                # NOTE(review): message says "CLIP" but the encoder here is T5 — kept
                # as-is for byte-parity with the upstream copy.
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {max_length} tokens: {removed_text}"
                )

            attention_mask = text_inputs.attention_mask.to(device)

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        # Pick a dtype from whichever model is available (text encoder may be None
        # in stage-II pipelines loaded with `text_encoder=None`).
        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        elif self.unet is not None:
            dtype = self.unet.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            attention_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
        else:
            negative_prompt_embeds = None

        return prompt_embeds, negative_prompt_embeds
446
+
447
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        """
        Run the optional IF safety checker over `image`.

        Returns the (possibly filtered) image together with per-image
        `nsfw_detected` / `watermark_detected` flags; both flags are `None` when
        no safety checker is loaded.
        """
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, nsfw_detected, watermark_detected = self.safety_checker(
                images=image,
                clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
            )
        else:
            nsfw_detected = None
            watermark_detected = None

        return image, nsfw_detected, watermark_detected
460
+
461
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        """
        Build the kwargs dict for `self.scheduler.step`, forwarding `eta` and/or
        `generator` only when the scheduler's signature accepts them.
        """
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs
478
+
479
+ def check_inputs(
480
+ self,
481
+ prompt,
482
+ image,
483
+ batch_size,
484
+ noise_level,
485
+ callback_steps,
486
+ negative_prompt=None,
487
+ prompt_embeds=None,
488
+ negative_prompt_embeds=None,
489
+ ):
490
+ if (callback_steps is None) or (
491
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
492
+ ):
493
+ raise ValueError(
494
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
495
+ f" {type(callback_steps)}."
496
+ )
497
+
498
+ if prompt is not None and prompt_embeds is not None:
499
+ raise ValueError(
500
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
501
+ " only forward one of the two."
502
+ )
503
+ elif prompt is None and prompt_embeds is None:
504
+ raise ValueError(
505
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
506
+ )
507
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
508
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
509
+
510
+ if negative_prompt is not None and negative_prompt_embeds is not None:
511
+ raise ValueError(
512
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
513
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
514
+ )
515
+
516
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
517
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
518
+ raise ValueError(
519
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
520
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
521
+ f" {negative_prompt_embeds.shape}."
522
+ )
523
+
524
+ if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps:
525
+ raise ValueError(
526
+ f"`noise_level`: {noise_level} must be a valid timestep in `self.noising_scheduler`, [0, {self.image_noising_scheduler.config.num_train_timesteps})"
527
+ )
528
+
529
+ if isinstance(image, list):
530
+ check_image_type = image[0]
531
+ else:
532
+ check_image_type = image
533
+
534
+ if (
535
+ not isinstance(check_image_type, torch.Tensor)
536
+ and not isinstance(check_image_type, PIL.Image.Image)
537
+ and not isinstance(check_image_type, np.ndarray)
538
+ ):
539
+ raise ValueError(
540
+ "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
541
+ f" {type(check_image_type)}"
542
+ )
543
+
544
+ if isinstance(image, list):
545
+ image_batch_size = len(image)
546
+ elif isinstance(image, torch.Tensor):
547
+ image_batch_size = image.shape[0]
548
+ elif isinstance(image, PIL.Image.Image):
549
+ image_batch_size = 1
550
+ elif isinstance(image, np.ndarray):
551
+ image_batch_size = image.shape[0]
552
+ else:
553
+ assert False
554
+
555
+ if batch_size != image_batch_size:
556
+ raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
557
+
558
    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_intermediate_images
    def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator):
        """
        Sample the initial Gaussian latents ("intermediate images") of shape
        (batch_size, num_channels, height, width), scaled by the scheduler's
        initial noise sigma.
        """
        shape = (batch_size, num_channels, height, width)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # scale the initial noise by the standard deviation required by the scheduler
        intermediate_images = intermediate_images * self.scheduler.init_noise_sigma
        return intermediate_images
572
+
573
+ def preprocess_image(self, image, num_images_per_prompt, device):
574
+ if not isinstance(image, torch.Tensor) and not isinstance(image, list):
575
+ image = [image]
576
+
577
+ if isinstance(image[0], PIL.Image.Image):
578
+ image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image]
579
+
580
+ image = np.stack(image, axis=0) # to np
581
+ image = torch.from_numpy(image.transpose(0, 3, 1, 2))
582
+ elif isinstance(image[0], np.ndarray):
583
+ image = np.stack(image, axis=0) # to np
584
+ if image.ndim == 5:
585
+ image = image[0]
586
+
587
+ image = torch.from_numpy(image.transpose(0, 3, 1, 2))
588
+ elif isinstance(image, list) and isinstance(image[0], torch.Tensor):
589
+ dims = image[0].ndim
590
+
591
+ if dims == 3:
592
+ image = torch.stack(image, dim=0)
593
+ elif dims == 4:
594
+ image = torch.concat(image, dim=0)
595
+ else:
596
+ raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}")
597
+
598
+ image = image.to(device=device, dtype=self.unet.dtype)
599
+
600
+ image = image.repeat_interleave(num_images_per_prompt, dim=0)
601
+
602
+ return image
603
+
604
+ @torch.no_grad()
605
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
606
+ def __call__(
607
+ self,
608
+ prompt: Union[str, List[str]] = None,
609
+ height: int = None,
610
+ width: int = None,
611
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor] = None,
612
+ num_inference_steps: int = 50,
613
+ timesteps: List[int] = None,
614
+ guidance_scale: float = 4.0,
615
+ negative_prompt: Optional[Union[str, List[str]]] = None,
616
+ num_images_per_prompt: Optional[int] = 1,
617
+ eta: float = 0.0,
618
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
619
+ prompt_embeds: Optional[torch.Tensor] = None,
620
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
621
+ output_type: Optional[str] = "pil",
622
+ return_dict: bool = True,
623
+ callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
624
+ callback_steps: int = 1,
625
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
626
+ noise_level: int = 250,
627
+ clean_caption: bool = True,
628
+ ):
629
+ """
630
+ Function invoked when calling the pipeline for generation.
631
+
632
+ Args:
633
+ prompt (`str` or `List[str]`, *optional*):
634
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
635
+ instead.
636
+ height (`int`, *optional*, defaults to None):
637
+ The height in pixels of the generated image.
638
+ width (`int`, *optional*, defaults to None):
639
+ The width in pixels of the generated image.
640
+ image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`):
641
+ The image to be upscaled.
642
+ num_inference_steps (`int`, *optional*, defaults to 50):
643
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
644
+ expense of slower inference.
645
+ timesteps (`List[int]`, *optional*, defaults to None):
646
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
647
+ timesteps are used. Must be in descending order.
648
+ guidance_scale (`float`, *optional*, defaults to 4.0):
649
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
650
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
651
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
652
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
653
+ usually at the expense of lower image quality.
654
+ negative_prompt (`str` or `List[str]`, *optional*):
655
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
656
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
657
+ less than `1`).
658
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
659
+ The number of images to generate per prompt.
660
+ eta (`float`, *optional*, defaults to 0.0):
661
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
662
+ [`schedulers.DDIMScheduler`], will be ignored for others.
663
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
664
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
665
+ to make generation deterministic.
666
+ prompt_embeds (`torch.Tensor`, *optional*):
667
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
668
+ provided, text embeddings will be generated from `prompt` input argument.
669
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
670
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
671
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
672
+ argument.
673
+ output_type (`str`, *optional*, defaults to `"pil"`):
674
+ The output format of the generate image. Choose between
675
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
676
+ return_dict (`bool`, *optional*, defaults to `True`):
677
+ Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
678
+ callback (`Callable`, *optional*):
679
+ A function that will be called every `callback_steps` steps during inference. The function will be
680
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
681
+ callback_steps (`int`, *optional*, defaults to 1):
682
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
683
+ called at every step.
684
+ cross_attention_kwargs (`dict`, *optional*):
685
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
686
+ `self.processor` in
687
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
688
+ noise_level (`int`, *optional*, defaults to 250):
689
+ The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)`
690
+ clean_caption (`bool`, *optional*, defaults to `True`):
691
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
692
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
693
+ prompt.
694
+
695
+ Examples:
696
+
697
+ Returns:
698
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
699
+ [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
700
+ returning a tuple, the first element is a list with the generated images, and the second element is a list
701
+ of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
702
+ or watermarked content, according to the `safety_checker`.
703
+ """
704
+ # 1. Check inputs. Raise error if not correct
705
+
706
+ if prompt is not None and isinstance(prompt, str):
707
+ batch_size = 1
708
+ elif prompt is not None and isinstance(prompt, list):
709
+ batch_size = len(prompt)
710
+ else:
711
+ batch_size = prompt_embeds.shape[0]
712
+
713
+ self.check_inputs(
714
+ prompt,
715
+ image,
716
+ batch_size,
717
+ noise_level,
718
+ callback_steps,
719
+ negative_prompt,
720
+ prompt_embeds,
721
+ negative_prompt_embeds,
722
+ )
723
+
724
+ # 2. Define call parameters
725
+
726
+ height = height or self.unet.config.sample_size
727
+ width = width or self.unet.config.sample_size
728
+
729
+ device = self._execution_device
730
+
731
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
732
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
733
+ # corresponds to doing no classifier free guidance.
734
+ do_classifier_free_guidance = guidance_scale > 1.0
735
+
736
+ # 3. Encode input prompt
737
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
738
+ prompt,
739
+ do_classifier_free_guidance,
740
+ num_images_per_prompt=num_images_per_prompt,
741
+ device=device,
742
+ negative_prompt=negative_prompt,
743
+ prompt_embeds=prompt_embeds,
744
+ negative_prompt_embeds=negative_prompt_embeds,
745
+ clean_caption=clean_caption,
746
+ )
747
+
748
+ if do_classifier_free_guidance:
749
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
750
+
751
+ # 4. Prepare timesteps
752
+ if timesteps is not None:
753
+ self.scheduler.set_timesteps(timesteps=timesteps, device=device)
754
+ timesteps = self.scheduler.timesteps
755
+ num_inference_steps = len(timesteps)
756
+ else:
757
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
758
+ timesteps = self.scheduler.timesteps
759
+
760
+ if hasattr(self.scheduler, "set_begin_index"):
761
+ self.scheduler.set_begin_index(0)
762
+
763
+ # 5. Prepare intermediate images
764
+ num_channels = self.unet.config.in_channels // 2
765
+ intermediate_images = self.prepare_intermediate_images(
766
+ batch_size * num_images_per_prompt,
767
+ num_channels,
768
+ height,
769
+ width,
770
+ prompt_embeds.dtype,
771
+ device,
772
+ generator,
773
+ )
774
+
775
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
776
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
777
+
778
+ # 7. Prepare upscaled image and noise level
779
+ image = self.preprocess_image(image, num_images_per_prompt, device)
780
+ upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True)
781
+
782
+ noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)
783
+ noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)
784
+ upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)
785
+
786
+ if do_classifier_free_guidance:
787
+ noise_level = torch.cat([noise_level] * 2)
788
+
789
+ # HACK: see comment in `enable_model_cpu_offload`
790
+ if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
791
+ self.text_encoder_offload_hook.offload()
792
+
793
+ # 8. Denoising loop
794
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
795
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
796
+ for i, t in enumerate(timesteps):
797
+ model_input = torch.cat([intermediate_images, upscaled], dim=1)
798
+
799
+ model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
800
+ model_input = self.scheduler.scale_model_input(model_input, t)
801
+
802
+ # predict the noise residual
803
+ noise_pred = self.unet(
804
+ model_input,
805
+ t,
806
+ encoder_hidden_states=prompt_embeds,
807
+ class_labels=noise_level,
808
+ cross_attention_kwargs=cross_attention_kwargs,
809
+ return_dict=False,
810
+ )[0]
811
+
812
+ # perform guidance
813
+ if do_classifier_free_guidance:
814
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
815
+ noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)
816
+ noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)
817
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
818
+ noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
819
+
820
+ if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
821
+ noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)
822
+
823
+ # compute the previous noisy sample x_t -> x_t-1
824
+ intermediate_images = self.scheduler.step(
825
+ noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
826
+ )[0]
827
+
828
+ # call the callback, if provided
829
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
830
+ progress_bar.update()
831
+ if callback is not None and i % callback_steps == 0:
832
+ callback(i, t, intermediate_images)
833
+
834
+ image = intermediate_images
835
+
836
+ if output_type == "pil":
837
+ # 9. Post-processing
838
+ image = (image / 2 + 0.5).clamp(0, 1)
839
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
840
+
841
+ # 10. Run safety checker
842
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
843
+
844
+ # 11. Convert to PIL
845
+ image = self.numpy_to_pil(image)
846
+
847
+ # 12. Apply watermark
848
+ if self.watermarker is not None:
849
+ self.watermarker.apply_watermark(image, self.unet.config.sample_size)
850
+ elif output_type == "pt":
851
+ nsfw_detected = None
852
+ watermark_detected = None
853
+
854
+ if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
855
+ self.unet_offload_hook.offload()
856
+ else:
857
+ # 9. Post-processing
858
+ image = (image / 2 + 0.5).clamp(0, 1)
859
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
860
+
861
+ # 10. Run safety checker
862
+ image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
863
+
864
+ # Offload all models
865
+ self.maybe_free_model_hooks()
866
+
867
+ if not return_dict:
868
+ return (image, nsfw_detected, watermark_detected)
869
+
870
+ return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/pipeline_output.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+
7
+ from ...utils import BaseOutput
8
+
9
+
10
+ @dataclass
11
+ class IFPipelineOutput(BaseOutput):
12
+ r"""
13
+ Output class for Stable Diffusion pipelines.
14
+
15
+ Args:
16
+ images (`List[PIL.Image.Image]` or `np.ndarray`):
17
+ List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
18
+ num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
19
+ nsfw_detected (`List[bool]`):
20
+ List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
21
+ (nsfw) content or a watermark. `None` if safety checking could not be performed.
22
+ watermark_detected (`List[bool]`):
23
+ List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety
24
+ checking could not be performed.
25
+ """
26
+
27
+ images: Union[List[PIL.Image.Image], np.ndarray]
28
+ nsfw_detected: Optional[List[bool]]
29
+ watermark_detected: Optional[List[bool]]
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/timesteps.py ADDED
@@ -0,0 +1,579 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fast27_timesteps = [
2
+ 999,
3
+ 800,
4
+ 799,
5
+ 600,
6
+ 599,
7
+ 500,
8
+ 400,
9
+ 399,
10
+ 377,
11
+ 355,
12
+ 333,
13
+ 311,
14
+ 288,
15
+ 266,
16
+ 244,
17
+ 222,
18
+ 200,
19
+ 199,
20
+ 177,
21
+ 155,
22
+ 133,
23
+ 111,
24
+ 88,
25
+ 66,
26
+ 44,
27
+ 22,
28
+ 0,
29
+ ]
30
+
31
+ smart27_timesteps = [
32
+ 999,
33
+ 976,
34
+ 952,
35
+ 928,
36
+ 905,
37
+ 882,
38
+ 858,
39
+ 857,
40
+ 810,
41
+ 762,
42
+ 715,
43
+ 714,
44
+ 572,
45
+ 429,
46
+ 428,
47
+ 286,
48
+ 285,
49
+ 238,
50
+ 190,
51
+ 143,
52
+ 142,
53
+ 118,
54
+ 95,
55
+ 71,
56
+ 47,
57
+ 24,
58
+ 0,
59
+ ]
60
+
61
+ smart50_timesteps = [
62
+ 999,
63
+ 988,
64
+ 977,
65
+ 966,
66
+ 955,
67
+ 944,
68
+ 933,
69
+ 922,
70
+ 911,
71
+ 900,
72
+ 899,
73
+ 879,
74
+ 859,
75
+ 840,
76
+ 820,
77
+ 800,
78
+ 799,
79
+ 766,
80
+ 733,
81
+ 700,
82
+ 699,
83
+ 650,
84
+ 600,
85
+ 599,
86
+ 500,
87
+ 499,
88
+ 400,
89
+ 399,
90
+ 350,
91
+ 300,
92
+ 299,
93
+ 266,
94
+ 233,
95
+ 200,
96
+ 199,
97
+ 179,
98
+ 159,
99
+ 140,
100
+ 120,
101
+ 100,
102
+ 99,
103
+ 88,
104
+ 77,
105
+ 66,
106
+ 55,
107
+ 44,
108
+ 33,
109
+ 22,
110
+ 11,
111
+ 0,
112
+ ]
113
+
114
+ smart100_timesteps = [
115
+ 999,
116
+ 995,
117
+ 992,
118
+ 989,
119
+ 985,
120
+ 981,
121
+ 978,
122
+ 975,
123
+ 971,
124
+ 967,
125
+ 964,
126
+ 961,
127
+ 957,
128
+ 956,
129
+ 951,
130
+ 947,
131
+ 942,
132
+ 937,
133
+ 933,
134
+ 928,
135
+ 923,
136
+ 919,
137
+ 914,
138
+ 913,
139
+ 908,
140
+ 903,
141
+ 897,
142
+ 892,
143
+ 887,
144
+ 881,
145
+ 876,
146
+ 871,
147
+ 870,
148
+ 864,
149
+ 858,
150
+ 852,
151
+ 846,
152
+ 840,
153
+ 834,
154
+ 828,
155
+ 827,
156
+ 820,
157
+ 813,
158
+ 806,
159
+ 799,
160
+ 792,
161
+ 785,
162
+ 784,
163
+ 777,
164
+ 770,
165
+ 763,
166
+ 756,
167
+ 749,
168
+ 742,
169
+ 741,
170
+ 733,
171
+ 724,
172
+ 716,
173
+ 707,
174
+ 699,
175
+ 698,
176
+ 688,
177
+ 677,
178
+ 666,
179
+ 656,
180
+ 655,
181
+ 645,
182
+ 634,
183
+ 623,
184
+ 613,
185
+ 612,
186
+ 598,
187
+ 584,
188
+ 570,
189
+ 569,
190
+ 555,
191
+ 541,
192
+ 527,
193
+ 526,
194
+ 505,
195
+ 484,
196
+ 483,
197
+ 462,
198
+ 440,
199
+ 439,
200
+ 396,
201
+ 395,
202
+ 352,
203
+ 351,
204
+ 308,
205
+ 307,
206
+ 264,
207
+ 263,
208
+ 220,
209
+ 219,
210
+ 176,
211
+ 132,
212
+ 88,
213
+ 44,
214
+ 0,
215
+ ]
216
+
217
+ smart185_timesteps = [
218
+ 999,
219
+ 997,
220
+ 995,
221
+ 992,
222
+ 990,
223
+ 988,
224
+ 986,
225
+ 984,
226
+ 981,
227
+ 979,
228
+ 977,
229
+ 975,
230
+ 972,
231
+ 970,
232
+ 968,
233
+ 966,
234
+ 964,
235
+ 961,
236
+ 959,
237
+ 957,
238
+ 956,
239
+ 954,
240
+ 951,
241
+ 949,
242
+ 946,
243
+ 944,
244
+ 941,
245
+ 939,
246
+ 936,
247
+ 934,
248
+ 931,
249
+ 929,
250
+ 926,
251
+ 924,
252
+ 921,
253
+ 919,
254
+ 916,
255
+ 914,
256
+ 913,
257
+ 910,
258
+ 907,
259
+ 905,
260
+ 902,
261
+ 899,
262
+ 896,
263
+ 893,
264
+ 891,
265
+ 888,
266
+ 885,
267
+ 882,
268
+ 879,
269
+ 877,
270
+ 874,
271
+ 871,
272
+ 870,
273
+ 867,
274
+ 864,
275
+ 861,
276
+ 858,
277
+ 855,
278
+ 852,
279
+ 849,
280
+ 846,
281
+ 843,
282
+ 840,
283
+ 837,
284
+ 834,
285
+ 831,
286
+ 828,
287
+ 827,
288
+ 824,
289
+ 821,
290
+ 817,
291
+ 814,
292
+ 811,
293
+ 808,
294
+ 804,
295
+ 801,
296
+ 798,
297
+ 795,
298
+ 791,
299
+ 788,
300
+ 785,
301
+ 784,
302
+ 780,
303
+ 777,
304
+ 774,
305
+ 770,
306
+ 766,
307
+ 763,
308
+ 760,
309
+ 756,
310
+ 752,
311
+ 749,
312
+ 746,
313
+ 742,
314
+ 741,
315
+ 737,
316
+ 733,
317
+ 730,
318
+ 726,
319
+ 722,
320
+ 718,
321
+ 714,
322
+ 710,
323
+ 707,
324
+ 703,
325
+ 699,
326
+ 698,
327
+ 694,
328
+ 690,
329
+ 685,
330
+ 681,
331
+ 677,
332
+ 673,
333
+ 669,
334
+ 664,
335
+ 660,
336
+ 656,
337
+ 655,
338
+ 650,
339
+ 646,
340
+ 641,
341
+ 636,
342
+ 632,
343
+ 627,
344
+ 622,
345
+ 618,
346
+ 613,
347
+ 612,
348
+ 607,
349
+ 602,
350
+ 596,
351
+ 591,
352
+ 586,
353
+ 580,
354
+ 575,
355
+ 570,
356
+ 569,
357
+ 563,
358
+ 557,
359
+ 551,
360
+ 545,
361
+ 539,
362
+ 533,
363
+ 527,
364
+ 526,
365
+ 519,
366
+ 512,
367
+ 505,
368
+ 498,
369
+ 491,
370
+ 484,
371
+ 483,
372
+ 474,
373
+ 466,
374
+ 457,
375
+ 449,
376
+ 440,
377
+ 439,
378
+ 428,
379
+ 418,
380
+ 407,
381
+ 396,
382
+ 395,
383
+ 381,
384
+ 366,
385
+ 352,
386
+ 351,
387
+ 330,
388
+ 308,
389
+ 307,
390
+ 286,
391
+ 264,
392
+ 263,
393
+ 242,
394
+ 220,
395
+ 219,
396
+ 176,
397
+ 175,
398
+ 132,
399
+ 131,
400
+ 88,
401
+ 44,
402
+ 0,
403
+ ]
404
+
405
+ super27_timesteps = [
406
+ 999,
407
+ 991,
408
+ 982,
409
+ 974,
410
+ 966,
411
+ 958,
412
+ 950,
413
+ 941,
414
+ 933,
415
+ 925,
416
+ 916,
417
+ 908,
418
+ 900,
419
+ 899,
420
+ 874,
421
+ 850,
422
+ 825,
423
+ 800,
424
+ 799,
425
+ 700,
426
+ 600,
427
+ 500,
428
+ 400,
429
+ 300,
430
+ 200,
431
+ 100,
432
+ 0,
433
+ ]
434
+
435
+ super40_timesteps = [
436
+ 999,
437
+ 992,
438
+ 985,
439
+ 978,
440
+ 971,
441
+ 964,
442
+ 957,
443
+ 949,
444
+ 942,
445
+ 935,
446
+ 928,
447
+ 921,
448
+ 914,
449
+ 907,
450
+ 900,
451
+ 899,
452
+ 879,
453
+ 859,
454
+ 840,
455
+ 820,
456
+ 800,
457
+ 799,
458
+ 766,
459
+ 733,
460
+ 700,
461
+ 699,
462
+ 650,
463
+ 600,
464
+ 599,
465
+ 500,
466
+ 499,
467
+ 400,
468
+ 399,
469
+ 300,
470
+ 299,
471
+ 200,
472
+ 199,
473
+ 100,
474
+ 99,
475
+ 0,
476
+ ]
477
+
478
+ super100_timesteps = [
479
+ 999,
480
+ 996,
481
+ 992,
482
+ 989,
483
+ 985,
484
+ 982,
485
+ 979,
486
+ 975,
487
+ 972,
488
+ 968,
489
+ 965,
490
+ 961,
491
+ 958,
492
+ 955,
493
+ 951,
494
+ 948,
495
+ 944,
496
+ 941,
497
+ 938,
498
+ 934,
499
+ 931,
500
+ 927,
501
+ 924,
502
+ 920,
503
+ 917,
504
+ 914,
505
+ 910,
506
+ 907,
507
+ 903,
508
+ 900,
509
+ 899,
510
+ 891,
511
+ 884,
512
+ 876,
513
+ 869,
514
+ 861,
515
+ 853,
516
+ 846,
517
+ 838,
518
+ 830,
519
+ 823,
520
+ 815,
521
+ 808,
522
+ 800,
523
+ 799,
524
+ 788,
525
+ 777,
526
+ 766,
527
+ 755,
528
+ 744,
529
+ 733,
530
+ 722,
531
+ 711,
532
+ 700,
533
+ 699,
534
+ 688,
535
+ 677,
536
+ 666,
537
+ 655,
538
+ 644,
539
+ 633,
540
+ 622,
541
+ 611,
542
+ 600,
543
+ 599,
544
+ 585,
545
+ 571,
546
+ 557,
547
+ 542,
548
+ 528,
549
+ 514,
550
+ 500,
551
+ 499,
552
+ 485,
553
+ 471,
554
+ 457,
555
+ 442,
556
+ 428,
557
+ 414,
558
+ 400,
559
+ 399,
560
+ 379,
561
+ 359,
562
+ 340,
563
+ 320,
564
+ 300,
565
+ 299,
566
+ 279,
567
+ 259,
568
+ 240,
569
+ 220,
570
+ 200,
571
+ 199,
572
+ 166,
573
+ 133,
574
+ 100,
575
+ 99,
576
+ 66,
577
+ 33,
578
+ 0,
579
+ ]
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/watermark.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ import PIL.Image
4
+ import torch
5
+ from PIL import Image
6
+
7
+ from ...configuration_utils import ConfigMixin
8
+ from ...models.modeling_utils import ModelMixin
9
+ from ...utils import PIL_INTERPOLATION
10
+
11
+
12
+ class IFWatermarker(ModelMixin, ConfigMixin):
13
+ def __init__(self):
14
+ super().__init__()
15
+
16
+ self.register_buffer("watermark_image", torch.zeros((62, 62, 4)))
17
+ self.watermark_image_as_pil = None
18
+
19
+ def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None):
20
+ # Copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287
21
+
22
+ h = images[0].height
23
+ w = images[0].width
24
+
25
+ sample_size = sample_size or h
26
+
27
+ coef = min(h / sample_size, w / sample_size)
28
+ img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w)
29
+
30
+ S1, S2 = 1024**2, img_w * img_h
31
+ K = (S2 / S1) ** 0.5
32
+ wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K)
33
+
34
+ if self.watermark_image_as_pil is None:
35
+ watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy()
36
+ watermark_image = Image.fromarray(watermark_image, mode="RGBA")
37
+ self.watermark_image_as_pil = watermark_image
38
+
39
+ wm_img = self.watermark_image_as_pil.resize(
40
+ (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None
41
+ )
42
+
43
+ for pil_img in images:
44
+ pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1])
45
+
46
+ return images
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__init__.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_flax_available,
9
+ is_k_diffusion_available,
10
+ is_k_diffusion_version,
11
+ is_onnx_available,
12
+ is_torch_available,
13
+ is_transformers_available,
14
+ is_transformers_version,
15
+ )
16
+
17
+
18
+ _dummy_objects = {}
19
+ _additional_imports = {}
20
+ _import_structure = {"pipeline_output": ["StableDiffusionPipelineOutput"]}
21
+
22
+ if is_transformers_available() and is_flax_available():
23
+ _import_structure["pipeline_output"].extend(["FlaxStableDiffusionPipelineOutput"])
24
+ try:
25
+ if not (is_transformers_available() and is_torch_available()):
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
29
+
30
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
31
+ else:
32
+ _import_structure["clip_image_project_model"] = ["CLIPImageProjection"]
33
+ _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"]
34
+ _import_structure["pipeline_stable_diffusion"] = ["StableDiffusionPipeline"]
35
+ _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"]
36
+ _import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"]
37
+ _import_structure["pipeline_stable_diffusion_gligen_text_image"] = ["StableDiffusionGLIGENTextImagePipeline"]
38
+ _import_structure["pipeline_stable_diffusion_img2img"] = ["StableDiffusionImg2ImgPipeline"]
39
+ _import_structure["pipeline_stable_diffusion_inpaint"] = ["StableDiffusionInpaintPipeline"]
40
+ _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"]
41
+ _import_structure["pipeline_stable_diffusion_instruct_pix2pix"] = ["StableDiffusionInstructPix2PixPipeline"]
42
+ _import_structure["pipeline_stable_diffusion_latent_upscale"] = ["StableDiffusionLatentUpscalePipeline"]
43
+ _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"]
44
+ _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"]
45
+ _import_structure["pipeline_stable_diffusion_upscale"] = ["StableDiffusionUpscalePipeline"]
46
+ _import_structure["pipeline_stable_unclip"] = ["StableUnCLIPPipeline"]
47
+ _import_structure["pipeline_stable_unclip_img2img"] = ["StableUnCLIPImg2ImgPipeline"]
48
+ _import_structure["safety_checker"] = ["StableDiffusionSafetyChecker"]
49
+ _import_structure["stable_unclip_image_normalizer"] = ["StableUnCLIPImageNormalizer"]
50
+ try:
51
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ from ...utils.dummy_torch_and_transformers_objects import (
55
+ StableDiffusionImageVariationPipeline,
56
+ )
57
+
58
+ _dummy_objects.update({"StableDiffusionImageVariationPipeline": StableDiffusionImageVariationPipeline})
59
+ else:
60
+ _import_structure["pipeline_stable_diffusion_image_variation"] = ["StableDiffusionImageVariationPipeline"]
61
+ try:
62
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ from ...utils.dummy_torch_and_transformers_objects import (
66
+ StableDiffusionDepth2ImgPipeline,
67
+ )
68
+
69
+ _dummy_objects.update(
70
+ {
71
+ "StableDiffusionDepth2ImgPipeline": StableDiffusionDepth2ImgPipeline,
72
+ }
73
+ )
74
+ else:
75
+ _import_structure["pipeline_stable_diffusion_depth2img"] = ["StableDiffusionDepth2ImgPipeline"]
76
+
77
+ try:
78
+ if not (is_transformers_available() and is_onnx_available()):
79
+ raise OptionalDependencyNotAvailable()
80
+ except OptionalDependencyNotAvailable:
81
+ from ...utils import dummy_onnx_objects # noqa F403
82
+
83
+ _dummy_objects.update(get_objects_from_module(dummy_onnx_objects))
84
+ else:
85
+ _import_structure["pipeline_onnx_stable_diffusion"] = [
86
+ "OnnxStableDiffusionPipeline",
87
+ "StableDiffusionOnnxPipeline",
88
+ ]
89
+ _import_structure["pipeline_onnx_stable_diffusion_img2img"] = ["OnnxStableDiffusionImg2ImgPipeline"]
90
+ _import_structure["pipeline_onnx_stable_diffusion_inpaint"] = ["OnnxStableDiffusionInpaintPipeline"]
91
+ _import_structure["pipeline_onnx_stable_diffusion_inpaint_legacy"] = ["OnnxStableDiffusionInpaintPipelineLegacy"]
92
+ _import_structure["pipeline_onnx_stable_diffusion_upscale"] = ["OnnxStableDiffusionUpscalePipeline"]
93
+
94
+ if is_transformers_available() and is_flax_available():
95
+ from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
96
+
97
+ _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState})
98
+ _import_structure["pipeline_flax_stable_diffusion"] = ["FlaxStableDiffusionPipeline"]
99
+ _import_structure["pipeline_flax_stable_diffusion_img2img"] = ["FlaxStableDiffusionImg2ImgPipeline"]
100
+ _import_structure["pipeline_flax_stable_diffusion_inpaint"] = ["FlaxStableDiffusionInpaintPipeline"]
101
+ _import_structure["safety_checker_flax"] = ["FlaxStableDiffusionSafetyChecker"]
102
+
103
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
104
+ try:
105
+ if not (is_transformers_available() and is_torch_available()):
106
+ raise OptionalDependencyNotAvailable()
107
+
108
+ except OptionalDependencyNotAvailable:
109
+ from ...utils.dummy_torch_and_transformers_objects import *
110
+
111
+ else:
112
+ from .clip_image_project_model import CLIPImageProjection
113
+ from .pipeline_stable_diffusion import (
114
+ StableDiffusionPipeline,
115
+ StableDiffusionPipelineOutput,
116
+ )
117
+ from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
118
+ from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
119
+ from .pipeline_stable_diffusion_instruct_pix2pix import (
120
+ StableDiffusionInstructPix2PixPipeline,
121
+ )
122
+ from .pipeline_stable_diffusion_latent_upscale import (
123
+ StableDiffusionLatentUpscalePipeline,
124
+ )
125
+ from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
126
+ from .pipeline_stable_unclip import StableUnCLIPPipeline
127
+ from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
128
+ from .safety_checker import StableDiffusionSafetyChecker
129
+ from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
130
+
131
+ try:
132
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
133
+ raise OptionalDependencyNotAvailable()
134
+ except OptionalDependencyNotAvailable:
135
+ from ...utils.dummy_torch_and_transformers_objects import (
136
+ StableDiffusionImageVariationPipeline,
137
+ )
138
+ else:
139
+ from .pipeline_stable_diffusion_image_variation import (
140
+ StableDiffusionImageVariationPipeline,
141
+ )
142
+
143
+ try:
144
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
145
+ raise OptionalDependencyNotAvailable()
146
+ except OptionalDependencyNotAvailable:
147
+ from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline
148
+ else:
149
+ from .pipeline_stable_diffusion_depth2img import (
150
+ StableDiffusionDepth2ImgPipeline,
151
+ )
152
+
153
+ try:
154
+ if not (is_transformers_available() and is_onnx_available()):
155
+ raise OptionalDependencyNotAvailable()
156
+ except OptionalDependencyNotAvailable:
157
+ from ...utils.dummy_onnx_objects import *
158
+ else:
159
+ from .pipeline_onnx_stable_diffusion import (
160
+ OnnxStableDiffusionPipeline,
161
+ StableDiffusionOnnxPipeline,
162
+ )
163
+ from .pipeline_onnx_stable_diffusion_img2img import (
164
+ OnnxStableDiffusionImg2ImgPipeline,
165
+ )
166
+ from .pipeline_onnx_stable_diffusion_inpaint import (
167
+ OnnxStableDiffusionInpaintPipeline,
168
+ )
169
+ from .pipeline_onnx_stable_diffusion_upscale import (
170
+ OnnxStableDiffusionUpscalePipeline,
171
+ )
172
+
173
+ try:
174
+ if not (is_transformers_available() and is_flax_available()):
175
+ raise OptionalDependencyNotAvailable()
176
+ except OptionalDependencyNotAvailable:
177
+ from ...utils.dummy_flax_objects import *
178
+ else:
179
+ from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
180
+ from .pipeline_flax_stable_diffusion_img2img import (
181
+ FlaxStableDiffusionImg2ImgPipeline,
182
+ )
183
+ from .pipeline_flax_stable_diffusion_inpaint import (
184
+ FlaxStableDiffusionInpaintPipeline,
185
+ )
186
+ from .pipeline_output import FlaxStableDiffusionPipelineOutput
187
+ from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
188
+
189
+ else:
190
+ import sys
191
+
192
+ sys.modules[__name__] = _LazyModule(
193
+ __name__,
194
+ globals()["__file__"],
195
+ _import_structure,
196
+ module_spec=__spec__,
197
+ )
198
+
199
+ for name, value in _dummy_objects.items():
200
+ setattr(sys.modules[__name__], name, value)
201
+ for name, value in _additional_imports.items():
202
+ setattr(sys.modules[__name__], name, value)
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/clip_image_project_model.cpython-310.pyc ADDED
Binary file (1 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/convert_from_ckpt.cpython-310.pyc ADDED
Binary file (47.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_flax_stable_diffusion.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_flax_stable_diffusion_img2img.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_flax_stable_diffusion_inpaint.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion.cpython-310.pyc ADDED
Binary file (16.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion_img2img.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion_inpaint.cpython-310.pyc ADDED
Binary file (20 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_onnx_stable_diffusion_upscale.cpython-310.pyc ADDED
Binary file (18.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_output.cpython-310.pyc ADDED
Binary file (2.02 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_depth2img.cpython-310.pyc ADDED
Binary file (28.6 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_image_variation.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_img2img.cpython-310.pyc ADDED
Binary file (39.1 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_inpaint.cpython-310.pyc ADDED
Binary file (44.1 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_instruct_pix2pix.cpython-310.pyc ADDED
Binary file (29.1 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_latent_upscale.cpython-310.pyc ADDED
Binary file (20.2 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion_upscale.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_unclip.cpython-310.pyc ADDED
Binary file (25.3 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/pipeline_stable_unclip_img2img.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/safety_checker.cpython-310.pyc ADDED
Binary file (3.63 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/safety_checker_flax.cpython-310.pyc ADDED
Binary file (3.83 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__pycache__/stable_unclip_image_normalizer.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py ADDED
@@ -0,0 +1,1869 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Conversion script for the Stable Diffusion checkpoints."""
16
+
17
+ import re
18
+ from contextlib import nullcontext
19
+ from io import BytesIO
20
+ from typing import Dict, Optional, Union
21
+
22
+ import requests
23
+ import torch
24
+ import yaml
25
+ from transformers import (
26
+ AutoFeatureExtractor,
27
+ BertTokenizerFast,
28
+ CLIPImageProcessor,
29
+ CLIPTextConfig,
30
+ CLIPTextModel,
31
+ CLIPTextModelWithProjection,
32
+ CLIPTokenizer,
33
+ CLIPVisionConfig,
34
+ CLIPVisionModelWithProjection,
35
+ )
36
+
37
+ from ...models import (
38
+ AutoencoderKL,
39
+ ControlNetModel,
40
+ PriorTransformer,
41
+ UNet2DConditionModel,
42
+ )
43
+ from ...schedulers import (
44
+ DDIMScheduler,
45
+ DDPMScheduler,
46
+ DPMSolverMultistepScheduler,
47
+ EulerAncestralDiscreteScheduler,
48
+ EulerDiscreteScheduler,
49
+ HeunDiscreteScheduler,
50
+ LMSDiscreteScheduler,
51
+ PNDMScheduler,
52
+ UnCLIPScheduler,
53
+ )
54
+ from ...utils import is_accelerate_available, logging
55
+ from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
56
+ from ..paint_by_example import PaintByExampleImageEncoder
57
+ from ..pipeline_utils import DiffusionPipeline
58
+ from .safety_checker import StableDiffusionSafetyChecker
59
+ from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
60
+
61
+
62
+ if is_accelerate_available():
63
+ from accelerate import init_empty_weights
64
+ from accelerate.utils import set_module_tensor_to_device
65
+
66
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
67
+
68
+
69
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    segments = path.split(".")
    if n_shave_prefix_segments >= 0:
        kept = segments[n_shave_prefix_segments:]
    else:
        kept = segments[:n_shave_prefix_segments]
    return ".".join(kept)
77
+
78
+
79
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    # LDM resnet sub-module names -> diffusers names, applied in order.
    renames = (
        ("in_layers.0", "norm1"),
        ("in_layers.2", "conv1"),
        ("out_layers.0", "norm2"),
        ("out_layers.3", "conv2"),
        ("emb_layers.1", "time_emb_proj"),
        ("skip_connection", "conv_shortcut"),
    )
    mapping = []
    for old_item in old_list:
        new_item = old_item
        for src, dst in renames:
            new_item = new_item.replace(src, dst)
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
99
+
100
+
101
def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    # Only the shortcut convolution is renamed for VAE resnets.
    return [
        {
            "old": old_item,
            "new": shave_segments(
                old_item.replace("nin_shortcut", "conv_shortcut"),
                n_shave_prefix_segments=n_shave_prefix_segments,
            ),
        }
        for old_item in old_list
    ]
115
+
116
+
117
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    # UNet attention keys already match the diffusers naming, so each entry maps
    # to itself; only the old/new pairing structure is produced here.
    # (Historical per-key replacements were deliberately disabled.)
    return [{"old": old_item, "new": old_item} for old_item in old_list]
136
+
137
+
138
def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    # LDM VAE attention sub-module names -> diffusers names, applied in order.
    renames = (
        ("norm.weight", "group_norm.weight"),
        ("norm.bias", "group_norm.bias"),
        ("q.weight", "to_q.weight"),
        ("q.bias", "to_q.bias"),
        ("k.weight", "to_k.weight"),
        ("k.bias", "to_k.bias"),
        ("v.weight", "to_v.weight"),
        ("v.bias", "to_v.bias"),
        ("proj_out.weight", "to_out.0.weight"),
        ("proj_out.bias", "to_out.0.bias"),
    )
    mapping = []
    for old_item in old_list:
        new_item = old_item
        for src, dst in renames:
            new_item = new_item.replace(src, dst)
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
166
+
167
+
168
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """
    This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
    attention layers, and takes into account additional replacements that may arise.

    Assigns the weights to the new checkpoint.

    Args:
        paths: list of dicts with "old"/"new" key names produced by the renew_* helpers.
        checkpoint: destination state dict, mutated in place.
        old_checkpoint: source state dict the weights are read from.
        attention_paths_to_split: optional mapping of fused-QKV keys to a dict with
            "query"/"key"/"value" destination keys.
        additional_replacements: optional list of {"old", "new"} substring replacements
            applied to every destination key.
        config: model config; only "num_head_channels" is read here (required when
            attention_paths_to_split is given).
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            # Fused tensor stacks query/key/value along dim 0, hence the // 3.
            channels = old_tensor.shape[0] // 3

            # 3-D (conv1d-style) weights flatten to (-1, channels); biases to (-1).
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            # Group per head so q/k/v can be split out of the fused dimension.
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path)
        shape = old_checkpoint[path["old"]].shape
        if is_attn_weight and len(shape) == 3:
            # Drop the trailing kernel dim of a 1x1 conv weight -> linear weight.
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        elif is_attn_weight and len(shape) == 4:
            # Drop both trailing kernel dims of a 1x1 2-D conv weight.
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
221
+
222
+
223
def conv_attn_to_linear(checkpoint):
    """Convert 1x1-conv attention weights in *checkpoint* to linear form, in place.

    q/k/v weights with more than 2 dims drop both trailing kernel dims;
    ``proj_attn.weight`` drops one. Everything else is left untouched.
    """
    qkv_suffixes = {"query.weight", "key.weight", "value.weight"}
    for key in list(checkpoint.keys()):
        suffix = ".".join(key.split(".")[-2:])
        if suffix in qkv_suffixes:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0, 0]
        elif "proj_attn.weight" in key:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0]
233
+
234
+
235
def create_unet_diffusers_config(original_config, image_size: int, controlnet=False):
    """
    Creates a config for the diffusers based on the config of the LDM model.

    Args:
        original_config: parsed LDM YAML config (nested dict).
        image_size: pixel resolution used to derive the latent ``sample_size``.
        controlnet: if True, read the ControlNet stage config and emit
            ControlNet-specific keys instead of the UNet output keys.

    Returns:
        A plain dict of keyword arguments for the diffusers UNet/ControlNet model.
    """
    if controlnet:
        unet_params = original_config["model"]["params"]["control_stage_config"]["params"]
    else:
        if (
            "unet_config" in original_config["model"]["params"]
            and original_config["model"]["params"]["unet_config"] is not None
        ):
            unet_params = original_config["model"]["params"]["unet_config"]["params"]
        else:
            # Some configs nest the UNet parameters under "network_config" instead.
            unet_params = original_config["model"]["params"]["network_config"]["params"]

    vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"]

    block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]]

    # Walk down the resolution ladder; a block gets cross-attention when its
    # downsample factor appears in "attention_resolutions".
    down_block_types = []
    resolution = 1
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D"
        down_block_types.append(block_type)
        if i != len(block_out_channels) - 1:
            resolution *= 2

    # Mirror the walk back up for the decoder blocks.
    up_block_types = []
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D"
        up_block_types.append(block_type)
        resolution //= 2

    # "transformer_depth" may be a scalar (same depth everywhere) or per-block list.
    if unet_params["transformer_depth"] is not None:
        transformer_layers_per_block = (
            unet_params["transformer_depth"]
            if isinstance(unet_params["transformer_depth"], int)
            else list(unet_params["transformer_depth"])
        )
    else:
        transformer_layers_per_block = 1

    vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1)

    head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None
    use_linear_projection = (
        unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False
    )
    if use_linear_projection:
        # stable diffusion 2-base-512 and 2-768
        if head_dim is None:
            head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"]
            head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])]

    class_embed_type = None
    addition_embed_type = None
    addition_time_embed_dim = None
    projection_class_embeddings_input_dim = None
    context_dim = None

    # "context_dim" may be a scalar or a list; only the first entry is used.
    if unet_params["context_dim"] is not None:
        context_dim = (
            unet_params["context_dim"]
            if isinstance(unet_params["context_dim"], int)
            else unet_params["context_dim"][0]
        )

    if "num_classes" in unet_params:
        if unet_params["num_classes"] == "sequential":
            if context_dim in [2048, 1280]:
                # SDXL
                addition_embed_type = "text_time"
                addition_time_embed_dim = 256
            else:
                class_embed_type = "projection"
                assert "adm_in_channels" in unet_params
                projection_class_embeddings_input_dim = unet_params["adm_in_channels"]

    config = {
        "sample_size": image_size // vae_scale_factor,
        "in_channels": unet_params["in_channels"],
        "down_block_types": tuple(down_block_types),
        "block_out_channels": tuple(block_out_channels),
        "layers_per_block": unet_params["num_res_blocks"],
        "cross_attention_dim": context_dim,
        "attention_head_dim": head_dim,
        "use_linear_projection": use_linear_projection,
        "class_embed_type": class_embed_type,
        "addition_embed_type": addition_embed_type,
        "addition_time_embed_dim": addition_time_embed_dim,
        "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
        "transformer_layers_per_block": transformer_layers_per_block,
    }

    if "disable_self_attentions" in unet_params:
        config["only_cross_attention"] = unet_params["disable_self_attentions"]

    # An integer "num_classes" means plain class conditioning, not "sequential".
    if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int):
        config["num_class_embeds"] = unet_params["num_classes"]

    if controlnet:
        config["conditioning_channels"] = unet_params["hint_channels"]
    else:
        config["out_channels"] = unet_params["out_channels"]
        config["up_block_types"] = tuple(up_block_types)

    return config
342
+
343
+
344
def create_vae_diffusers_config(original_config, image_size: int):
    """
    Creates a config for the diffusers based on the config of the LDM model.
    """
    first_stage = original_config["model"]["params"]["first_stage_config"]["params"]
    vae_params = first_stage["ddconfig"]
    # Accessed only to fail fast when the key is missing; the value is unused.
    _ = first_stage["embed_dim"]

    block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]]
    num_blocks = len(block_out_channels)

    return {
        "sample_size": image_size,
        "in_channels": vae_params["in_channels"],
        "out_channels": vae_params["out_ch"],
        "down_block_types": tuple(["DownEncoderBlock2D"] * num_blocks),
        "up_block_types": tuple(["UpDecoderBlock2D"] * num_blocks),
        "block_out_channels": tuple(block_out_channels),
        "latent_channels": vae_params["z_channels"],
        "layers_per_block": vae_params["num_res_blocks"],
    }
366
+
367
+
368
def create_diffusers_schedular(original_config):
    """Build a DDIM scheduler mirroring the LDM config's linear beta schedule."""
    model_params = original_config["model"]["params"]
    return DDIMScheduler(
        num_train_timesteps=model_params["timesteps"],
        beta_start=model_params["linear_start"],
        beta_end=model_params["linear_end"],
        beta_schedule="scaled_linear",
    )
376
+
377
+
378
def create_ldm_bert_config(original_config):
    """Derive an LDMBertConfig from the LDM cond-stage parameters.

    NOTE(review): ``bert_params`` is read via attribute access (``.n_embed``),
    so the cond-stage params are presumably an attribute-style config object —
    confirm against the loader.
    """
    bert_params = original_config["model"]["params"]["cond_stage_config"]["params"]
    embed_dim = bert_params.n_embed
    return LDMBertConfig(
        d_model=embed_dim,
        encoder_layers=bert_params.n_layer,
        encoder_ffn_dim=embed_dim * 4,
    )
386
+
387
+
388
def convert_ldm_unet_checkpoint(
    checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False
):
    """
    Takes a state dict and a config, and returns a converted checkpoint.

    Args:
        checkpoint: full LDM state dict; matched keys are popped (mutated) from it.
        config: diffusers UNet config dict (from ``create_unet_diffusers_config``).
        path: checkpoint path, used only in warning messages.
        extract_ema: prefer EMA weights when the checkpoint contains both.
        controlnet: convert the "control_model." sub-dict and emit ControlNet keys.
        skip_extract_state_dict: treat ``checkpoint`` as an already-extracted UNet
            state dict (no prefix stripping).
    """

    if skip_extract_state_dict:
        unet_state_dict = checkpoint
    else:
        # extract state_dict for UNet
        unet_state_dict = {}
        keys = list(checkpoint.keys())

        if controlnet:
            unet_key = "control_model."
        else:
            unet_key = "model.diffusion_model."

        # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
        if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema:
            logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.")
            logger.warning(
                "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
                " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
            )
            for key in keys:
                if key.startswith("model.diffusion_model"):
                    # EMA keys are the non-EMA key with dots dropped after the first segment.
                    flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
                    unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
        else:
            if sum(k.startswith("model_ema") for k in keys) > 100:
                logger.warning(
                    "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
                    " weights (usually better for inference), please make sure to add the `--extract_ema` flag."
                )

            for key in keys:
                if key.startswith(unet_key):
                    unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)

    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]

    if config["class_embed_type"] is None:
        # No parameters to port
        ...
    elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
        new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
        new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
        new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
        new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
    else:
        raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")

    if config["addition_embed_type"] == "text_time":
        # SDXL-style added conditioning reuses the same label_emb weights.
        new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
        new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
        new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
        new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]

    # Relevant to StableDiffusionUpscalePipeline
    if "num_class_embeds" in config:
        if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict):
            new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]

    if not controlnet:
        new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
        new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
        new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
        new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    # Block 0 is conv_in (handled above); map the rest to down_blocks.
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["layers_per_block"] + 1)
        layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)

        resnets = [
            key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
        ]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # ".op" entries are the downsampling convs.
        if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
                f"input_blocks.{i}.0.op.weight"
            )
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
                f"input_blocks.{i}.0.op.bias"
            )

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        assign_to_checkpoint(
            paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)

            meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
            )

    # Middle block is always resnet / attention / resnet.
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)

    attentions_paths = renew_attention_paths(attentions)
    meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["layers_per_block"] + 1)
        layer_in_block_id = i % (config["layers_per_block"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        # Group the block's keys by their sub-layer index within the block.
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
            )

            # A sub-layer consisting of exactly conv.bias/conv.weight is an upsampler.
            output_block_list = {k: sorted(v) for k, v in sorted(output_block_list.items())}
            if ["conv.bias", "conv.weight"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
                )
        else:
            # Resnet-only output block: copy weights key by key.
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = unet_state_dict[old_path]

    if controlnet:
        # conditioning embedding

        orig_index = 0

        new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.weight"
        )
        new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.bias"
        )

        # input_hint_block alternates conv / activation; step by 2 to skip activations.
        orig_index += 2

        diffusers_index = 0

        while diffusers_index < 6:
            new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop(
                f"input_hint_block.{orig_index}.weight"
            )
            new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop(
                f"input_hint_block.{orig_index}.bias"
            )
            diffusers_index += 1
            orig_index += 2

        new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.weight"
        )
        new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.bias"
        )

        # down blocks
        for i in range(num_input_blocks):
            new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight")
            new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias")

        # mid block
        new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight")
        new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias")

    return new_checkpoint
634
+
635
+
636
def convert_ldm_vae_checkpoint(checkpoint, config):
    """Convert an LDM first-stage (VAE) state dict to the diffusers layout.

    Args:
        checkpoint: full LDM state dict, or an already-stripped VAE state dict
            (the "first_stage_model." prefix is detected and removed if present).
        config: diffusers VAE config dict (from ``create_vae_diffusers_config``).

    Returns:
        A new state dict with diffusers AutoencoderKL key names.
    """
    # extract state dict for VAE
    vae_state_dict = {}
    keys = list(checkpoint.keys())
    vae_key = "first_stage_model." if any(k.startswith("first_stage_model.") for k in keys) else ""
    for key in keys:
        if key.startswith(vae_key):
            vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid block: two resnets (block_1, block_2) plus one attention.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # LDM orders decoder blocks low-to-high resolution; diffusers is reversed.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid block: same structure as the encoder mid block.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
741
+
742
+
743
def convert_ldm_bert_checkpoint(checkpoint, config):
    """Convert an original LDM BERT text encoder into an `LDMBertModel`.

    Walks the original module tree (``checkpoint.transformer``) and copies
    weights into a freshly constructed ``LDMBertModel(config)``.

    Args:
        checkpoint: Original LDM model object exposing a ``transformer`` attribute.
        config: Configuration used to instantiate the target `LDMBertModel`.

    Returns:
        The populated `LDMBertModel` in eval mode.
    """

    def _copy_attn_layer(hf_attn_layer, pt_attn_layer):
        # q/k/v are copied into the existing tensors via `.data`; the output
        # projection parameters are shared by reference.
        hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight
        hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight
        hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight

        hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight
        hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias

    def _copy_linear(hf_linear, pt_linear):
        # Shares weight/bias by reference. Also used for LayerNorm modules,
        # which expose the same attribute names as Linear.
        hf_linear.weight = pt_linear.weight
        hf_linear.bias = pt_linear.bias

    def _copy_layer(hf_layer, pt_layer):
        # `pt_layer` is a 2-element slice: [(norm, attn), (norm, mlp)].
        # copy layer norms
        _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0])
        _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0])

        # copy attn
        _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1])

        # copy MLP
        pt_mlp = pt_layer[1][1]
        _copy_linear(hf_layer.fc1, pt_mlp.net[0][0])
        _copy_linear(hf_layer.fc2, pt_mlp.net[2])

    def _copy_layers(hf_layers, pt_layers):
        # The original stack stores two entries per HF layer, so HF layer i
        # maps to pt_layers[2*i : 2*i + 2]; `i += i` doubles the index for
        # i > 0 (and leaves 0 unchanged).
        for i, hf_layer in enumerate(hf_layers):
            if i != 0:
                i += i
            pt_layer = pt_layers[i : i + 2]
            _copy_layer(hf_layer, pt_layer)

    hf_model = LDMBertModel(config).eval()

    # copy embeds
    hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight
    hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight

    # copy layer norm
    _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm)

    # copy hidden layers
    _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers)

    _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits)

    return hf_model
791
+
792
+
793
def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None):
    """Convert the CLIP text-encoder weights of an LDM checkpoint into a `CLIPTextModel`.

    Args:
        checkpoint: Flat state dict of the original Stable Diffusion checkpoint.
        local_files_only: Forwarded to `from_pretrained`; no downloads when `True`.
        text_encoder: Optional pre-built text encoder to load the weights into.
            When `None`, a `CLIPTextModel` is created from the
            "openai/clip-vit-large-patch14" config.

    Returns:
        The `CLIPTextModel` (or the provided `text_encoder`) holding the converted weights.

    Raises:
        ValueError: If the CLIP config cannot be resolved under `local_files_only`.
    """
    if text_encoder is None:
        config_name = "openai/clip-vit-large-patch14"
        try:
            config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only)
        except Exception:
            raise ValueError(
                f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'."
            )

        # Instantiate on the meta device when accelerate is available so no memory
        # is allocated for weights that are immediately overwritten below.
        ctx = init_empty_weights if is_accelerate_available() else nullcontext
        with ctx():
            text_model = CLIPTextModel(config)
    else:
        text_model = text_encoder

    keys = list(checkpoint.keys())

    text_model_dict = {}

    # SD v1 and SDXL checkpoints store the text encoder under different prefixes.
    remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"]

    for key in keys:
        for prefix in remove_prefixes:
            if key.startswith(prefix):
                text_model_dict[key[len(prefix + ".") :]] = checkpoint[key]

    if is_accelerate_available():
        for param_name, param in text_model_dict.items():
            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    else:
        # Fix: `hasattr` requires (object, name). The original called
        # `hasattr(text_model.embeddings.position_ids)` with a single argument,
        # which raises TypeError at runtime.
        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
            text_model_dict.pop("text_model.embeddings.position_ids", None)

        text_model.load_state_dict(text_model_dict)

    return text_model
830
+
831
+
832
# (stable-diffusion key, HF Diffusers key) pairs for the top-level
# text-encoder tensors.
textenc_conversion_lst = [
    ("positional_embedding", "text_model.embeddings.position_embedding.weight"),
    ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"),
    ("ln_final.weight", "text_model.final_layer_norm.weight"),
    ("ln_final.bias", "text_model.final_layer_norm.bias"),
    ("text_projection", "text_projection.weight"),
]
textenc_conversion_map = dict(textenc_conversion_lst)

# Substring renames applied to keys inside the transformer blocks.
textenc_transformer_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
# Escaped-source -> replacement map plus a single alternation pattern so every
# rename is applied in one `re.sub` pass.
protected = {re.escape(sd_key): hf_key for sd_key, hf_key in textenc_transformer_conversion_lst}
textenc_pattern = re.compile("|".join(protected))
855
+
856
+
857
def convert_paint_by_example_checkpoint(checkpoint, local_files_only=False):
    """Convert a Paint-by-Example checkpoint into a `PaintByExampleImageEncoder`.

    Loads, in order: the CLIP vision backbone, the mapper (with fused attention
    tensors split apart), the final layer norm, the output projection, and the
    unconditional embedding vector.

    Args:
        checkpoint: Flat state dict of the original Paint-by-Example checkpoint.
        local_files_only: Forwarded to `from_pretrained`; no downloads when `True`.

    Returns:
        The populated `PaintByExampleImageEncoder`.
    """
    config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only)
    model = PaintByExampleImageEncoder(config)

    keys = list(checkpoint.keys())

    text_model_dict = {}

    for key in keys:
        if key.startswith("cond_stage_model.transformer"):
            text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]

    # load clip vision
    model.model.load_state_dict(text_model_dict)

    # load mapper
    keys_mapper = {
        k[len("cond_stage_model.mapper.res") :]: v
        for k, v in checkpoint.items()
        if k.startswith("cond_stage_model.mapper")
    }

    # Original mapper sub-module names -> diffusers names. Fused tensors
    # (e.g. the combined qkv projection) are split evenly across the mapped names.
    MAPPING = {
        "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"],
        "attn.c_proj": ["attn1.to_out.0"],
        "ln_1": ["norm1"],
        "ln_2": ["norm3"],
        "mlp.c_fc": ["ff.net.0.proj"],
        "mlp.c_proj": ["ff.net.2"],
    }

    mapped_weights = {}
    for key, value in keys_mapper.items():
        # NOTE(review): `len("blocks.i")` == 8 assumes a single-digit block
        # index (keys like "blocks.0.attn...") — verify for mappers with >= 10 blocks.
        prefix = key[: len("blocks.i")]
        # Last dotted component is the tensor kind ("weight"/"bias"); the middle
        # part (sans surrounding dots) selects the MAPPING entry.
        suffix = key.split(prefix)[-1].split(".")[-1]
        name = key.split(prefix)[-1].split(suffix)[0][1:-1]
        mapped_names = MAPPING[name]

        num_splits = len(mapped_names)
        for i, mapped_name in enumerate(mapped_names):
            new_name = ".".join([prefix, mapped_name, suffix])
            # Split dim 0 evenly, e.g. fused qkv -> separate q, k, v chunks.
            shape = value.shape[0] // num_splits
            mapped_weights[new_name] = value[i * shape : (i + 1) * shape]

    model.mapper.load_state_dict(mapped_weights)

    # load final layer norm
    model.final_layer_norm.load_state_dict(
        {
            "bias": checkpoint["cond_stage_model.final_ln.bias"],
            "weight": checkpoint["cond_stage_model.final_ln.weight"],
        }
    )

    # load final proj
    model.proj_out.load_state_dict(
        {
            "bias": checkpoint["proj_out.bias"],
            "weight": checkpoint["proj_out.weight"],
        }
    )

    # load uncond vector
    model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"])
    return model
922
+
923
+
924
def convert_open_clip_checkpoint(
    checkpoint,
    config_name,
    prefix="cond_stage_model.model.",
    has_projection=False,
    local_files_only=False,
    **config_kwargs,
):
    """Convert an OpenCLIP text encoder from an original checkpoint.

    Args:
        checkpoint: Flat state dict of the original checkpoint.
        config_name: Hub id of the matching CLIP text config.
        prefix: Key prefix under which the text encoder is stored.
        has_projection: Build a `CLIPTextModelWithProjection` instead of a plain
            `CLIPTextModel`.
        local_files_only: Forwarded to `from_pretrained`; no downloads when `True`.
        **config_kwargs: Extra overrides for `CLIPTextConfig.from_pretrained`.

    Returns:
        The populated `CLIPTextModel` / `CLIPTextModelWithProjection`.

    Raises:
        ValueError: If the config cannot be resolved under `local_files_only`.
    """
    # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder")
    # text_model = CLIPTextModelWithProjection.from_pretrained(
    #     "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280
    # )
    try:
        config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only)
    except Exception:
        raise ValueError(
            f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'."
        )

    ctx = init_empty_weights if is_accelerate_available() else nullcontext
    with ctx():
        text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config)

    keys = list(checkpoint.keys())

    keys_to_ignore = []
    if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23:
        # make sure to remove all keys > 22
        keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")]
        keys_to_ignore += ["cond_stage_model.model.text_projection"]

    text_model_dict = {}

    # Transformer width; needed to split the fused in_proj qkv tensors below.
    if prefix + "text_projection" in checkpoint:
        d_model = int(checkpoint[prefix + "text_projection"].shape[0])
    else:
        d_model = 1024

    text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")

    for key in keys:
        if key in keys_to_ignore:
            continue
        if key[len(prefix) :] in textenc_conversion_map:
            if key.endswith("text_projection"):
                # Stored transposed relative to the HF layout.
                value = checkpoint[key].T.contiguous()
            else:
                value = checkpoint[key]

            text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value

        if key.startswith(prefix + "transformer."):
            new_key = key[len(prefix + "transformer.") :]
            if new_key.endswith(".in_proj_weight"):
                # Split the fused qkv weight into separate q/k/v projections.
                new_key = new_key[: -len(".in_proj_weight")]
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
                text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :]
                text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :]
                text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :]
            elif new_key.endswith(".in_proj_bias"):
                new_key = new_key[: -len(".in_proj_bias")]
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
                text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model]
                text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2]
                text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :]
            else:
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)

                text_model_dict[new_key] = checkpoint[key]

    if is_accelerate_available():
        for param_name, param in text_model_dict.items():
            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    else:
        # Fix: `hasattr` requires (object, name). The original passed a single
        # argument (`hasattr(text_model.embeddings.position_ids)`), which raises
        # TypeError at runtime.
        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
            text_model_dict.pop("text_model.embeddings.position_ids", None)

        text_model.load_state_dict(text_model_dict)

    return text_model
1004
+
1005
+
1006
def stable_unclip_image_encoder(original_config, local_files_only=False):
    """
    Returns the image processor and clip image encoder for the img2img unclip pipeline.

    We currently know of two types of stable unclip models which separately use the clip and the openclip image
    encoders.
    """

    image_embedder_config = original_config["model"]["params"]["embedder_config"]

    sd_clip_image_embedder_class = image_embedder_config["target"]
    sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1]

    if sd_clip_image_embedder_class == "ClipImageEmbedder":
        # Fix: `original_config` is produced by `yaml.safe_load`, which returns
        # plain dicts, so attribute access (`image_embedder_config.params.model`)
        # raises AttributeError — use item access instead.
        clip_model_name = image_embedder_config["params"]["model"]

        if clip_model_name == "ViT-L/14":
            feature_extractor = CLIPImageProcessor()
            image_encoder = CLIPVisionModelWithProjection.from_pretrained(
                "openai/clip-vit-large-patch14", local_files_only=local_files_only
            )
        else:
            raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}")

    elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder":
        feature_extractor = CLIPImageProcessor()
        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
            "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", local_files_only=local_files_only
        )
    else:
        raise NotImplementedError(
            f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}"
        )

    return feature_extractor, image_encoder
1041
+
1042
+
1043
def stable_unclip_image_noising_components(
    original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None
):
    """
    Returns the noising components for the img2img and txt2img unclip pipelines.

    Converts the stability noise augmentor into
    1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats
    2. a `DDPMScheduler` for holding the noise schedule

    If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided.
    """
    noise_aug_config = original_config["model"]["params"]["noise_aug_config"]
    noise_aug_class = noise_aug_config["target"]
    noise_aug_class = noise_aug_class.split(".")[-1]

    if noise_aug_class == "CLIPEmbeddingNoiseAugmentation":
        # Fix: `original_config` comes from `yaml.safe_load` and is made of plain
        # dicts, so attribute-style access (`noise_aug_config.params`,
        # `.timestep_dim`, ...) raises AttributeError — use item access throughout.
        noise_aug_config = noise_aug_config["params"]
        embedding_dim = noise_aug_config["timestep_dim"]
        max_noise_level = noise_aug_config["noise_schedule_config"]["timesteps"]
        beta_schedule = noise_aug_config["noise_schedule_config"]["beta_schedule"]

        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim)
        image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule)

        if "clip_stats_path" in noise_aug_config:
            if clip_stats_path is None:
                raise ValueError("This stable unclip config requires a `clip_stats_path`")

            # Stats file holds a (mean, std) tensor pair; add a batch dim for the
            # normalizer's state dict layout.
            clip_mean, clip_std = torch.load(clip_stats_path, map_location=device)
            clip_mean = clip_mean[None, :]
            clip_std = clip_std[None, :]

            clip_stats_state_dict = {
                "mean": clip_mean,
                "std": clip_std,
            }

            image_normalizer.load_state_dict(clip_stats_state_dict)
    else:
        raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}")

    return image_normalizer, image_noising_scheduler
1086
+
1087
+
1088
def convert_controlnet_checkpoint(
    checkpoint,
    original_config,
    checkpoint_path,
    image_size,
    upcast_attention,
    extract_ema,
    use_linear_projection=None,
    cross_attention_dim=None,
):
    """Instantiate a `ControlNetModel` and load converted weights from an original checkpoint.

    Args:
        checkpoint: Flat state dict of the original checkpoint.
        original_config: Parsed original `.yaml` configuration.
        checkpoint_path: Path of the checkpoint, forwarded to the unet converter.
        image_size: Sample size used to derive the unet config.
        upcast_attention: Whether attention should always be upcast.
        extract_ema: Whether to extract EMA weights during conversion.
        use_linear_projection: Optional override for the controlnet config.
        cross_attention_dim: Optional override for the controlnet config.

    Returns:
        The populated `ControlNetModel`.
    """
    ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True)
    ctrlnet_config["upcast_attention"] = upcast_attention

    # ControlNetModel does not accept `sample_size`.
    ctrlnet_config.pop("sample_size")

    # Apply the optional overrides only when explicitly provided.
    for option_name, option_value in (
        ("use_linear_projection", use_linear_projection),
        ("cross_attention_dim", cross_attention_dim),
    ):
        if option_value is not None:
            ctrlnet_config[option_name] = option_value

    ctx = init_empty_weights if is_accelerate_available() else nullcontext
    with ctx():
        controlnet = ControlNetModel(**ctrlnet_config)

    # Some controlnet ckpt files are distributed independently from the rest of the
    # model components i.e. https://huggingface.co/thibaud/controlnet-sd21/
    # Such standalone checkpoints already contain `time_embed.0.weight`.
    skip_extract_state_dict = "time_embed.0.weight" in checkpoint

    converted_ctrl_checkpoint = convert_ldm_unet_checkpoint(
        checkpoint,
        ctrlnet_config,
        path=checkpoint_path,
        extract_ema=extract_ema,
        controlnet=True,
        skip_extract_state_dict=skip_extract_state_dict,
    )

    if is_accelerate_available():
        for param_name, param in converted_ctrl_checkpoint.items():
            set_module_tensor_to_device(controlnet, param_name, "cpu", value=param)
    else:
        controlnet.load_state_dict(converted_ctrl_checkpoint)

    return controlnet
1136
+
1137
+
1138
+ def download_from_original_stable_diffusion_ckpt(
1139
+ checkpoint_path_or_dict: Union[str, Dict[str, torch.Tensor]],
1140
+ original_config_file: str = None,
1141
+ image_size: Optional[int] = None,
1142
+ prediction_type: str = None,
1143
+ model_type: str = None,
1144
+ extract_ema: bool = False,
1145
+ scheduler_type: str = "pndm",
1146
+ num_in_channels: Optional[int] = None,
1147
+ upcast_attention: Optional[bool] = None,
1148
+ device: str = None,
1149
+ from_safetensors: bool = False,
1150
+ stable_unclip: Optional[str] = None,
1151
+ stable_unclip_prior: Optional[str] = None,
1152
+ clip_stats_path: Optional[str] = None,
1153
+ controlnet: Optional[bool] = None,
1154
+ adapter: Optional[bool] = None,
1155
+ load_safety_checker: bool = True,
1156
+ safety_checker: Optional[StableDiffusionSafetyChecker] = None,
1157
+ feature_extractor: Optional[AutoFeatureExtractor] = None,
1158
+ pipeline_class: DiffusionPipeline = None,
1159
+ local_files_only=False,
1160
+ vae_path=None,
1161
+ vae=None,
1162
+ text_encoder=None,
1163
+ text_encoder_2=None,
1164
+ tokenizer=None,
1165
+ tokenizer_2=None,
1166
+ config_files=None,
1167
+ ) -> DiffusionPipeline:
1168
+ """
1169
+ Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
1170
+ config file.
1171
+
1172
+ Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the
1173
+ global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is
1174
+ recommended that you override the default values and/or supply an `original_config_file` wherever possible.
1175
+
1176
+ Args:
1177
+ checkpoint_path_or_dict (`str` or `dict`): Path to `.ckpt` file, or the state dict.
1178
+ original_config_file (`str`):
1179
+ Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically
1180
+ inferred by looking for a key that only exists in SD2.0 models.
1181
+ image_size (`int`, *optional*, defaults to 512):
1182
+ The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2
1183
+ Base. Use 768 for Stable Diffusion v2.
1184
+ prediction_type (`str`, *optional*):
1185
+ The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable
1186
+ Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2.
1187
+ num_in_channels (`int`, *optional*, defaults to None):
1188
+ The number of input channels. If `None`, it will be automatically inferred.
1189
+ scheduler_type (`str`, *optional*, defaults to 'pndm'):
1190
+ Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
1191
+ "ddim"]`.
1192
+ model_type (`str`, *optional*, defaults to `None`):
1193
+ The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder",
1194
+ "FrozenCLIPEmbedder", "PaintByExample"]`.
1195
+ is_img2img (`bool`, *optional*, defaults to `False`):
1196
+ Whether the model should be loaded as an img2img pipeline.
1197
+ extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for
1198
+ checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to
1199
+ `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for
1200
+ inference. Non-EMA weights are usually better to continue fine-tuning.
1201
+ upcast_attention (`bool`, *optional*, defaults to `None`):
1202
+ Whether the attention computation should always be upcasted. This is necessary when running stable
1203
+ diffusion 2.1.
1204
+ device (`str`, *optional*, defaults to `None`):
1205
+ The device to use. Pass `None` to determine automatically.
1206
+ from_safetensors (`str`, *optional*, defaults to `False`):
1207
+ If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.
1208
+ load_safety_checker (`bool`, *optional*, defaults to `True`):
1209
+ Whether to load the safety checker or not. Defaults to `True`.
1210
+ safety_checker (`StableDiffusionSafetyChecker`, *optional*, defaults to `None`):
1211
+ Safety checker to use. If this parameter is `None`, the function will load a new instance of
1212
+ [StableDiffusionSafetyChecker] by itself, if needed.
1213
+ feature_extractor (`AutoFeatureExtractor`, *optional*, defaults to `None`):
1214
+ Feature extractor to use. If this parameter is `None`, the function will load a new instance of
1215
+ [AutoFeatureExtractor] by itself, if needed.
1216
+ pipeline_class (`str`, *optional*, defaults to `None`):
1217
+ The pipeline class to use. Pass `None` to determine automatically.
1218
+ local_files_only (`bool`, *optional*, defaults to `False`):
1219
+ Whether or not to only look at local files (i.e., do not try to download the model).
1220
+ vae (`AutoencoderKL`, *optional*, defaults to `None`):
1221
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
1222
+ this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
1223
+ text_encoder (`CLIPTextModel`, *optional*, defaults to `None`):
1224
+ An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel)
1225
+ to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)
1226
+ variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
1227
+ tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`):
1228
+ An instance of
1229
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
1230
+ to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if
1231
+ needed.
1232
+ config_files (`Dict[str, str]`, *optional*, defaults to `None`):
1233
+ A dictionary mapping from config file names to their contents. If this parameter is `None`, the function
1234
+ will load the config files by itself, if needed. Valid keys are:
1235
+ - `v1`: Config file for Stable Diffusion v1
1236
+ - `v2`: Config file for Stable Diffusion v2
1237
+ - `xl`: Config file for Stable Diffusion XL
1238
+ - `xl_refiner`: Config file for Stable Diffusion XL Refiner
1239
+ return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
1240
+ """
1241
+
1242
+ # import pipelines here to avoid circular import error when using from_single_file method
1243
+ from diffusers import (
1244
+ LDMTextToImagePipeline,
1245
+ PaintByExamplePipeline,
1246
+ StableDiffusionControlNetPipeline,
1247
+ StableDiffusionInpaintPipeline,
1248
+ StableDiffusionPipeline,
1249
+ StableDiffusionUpscalePipeline,
1250
+ StableDiffusionXLControlNetInpaintPipeline,
1251
+ StableDiffusionXLImg2ImgPipeline,
1252
+ StableDiffusionXLInpaintPipeline,
1253
+ StableDiffusionXLPipeline,
1254
+ StableUnCLIPImg2ImgPipeline,
1255
+ StableUnCLIPPipeline,
1256
+ )
1257
+
1258
+ if prediction_type == "v-prediction":
1259
+ prediction_type = "v_prediction"
1260
+
1261
+ if isinstance(checkpoint_path_or_dict, str):
1262
+ if from_safetensors:
1263
+ from safetensors.torch import load_file as safe_load
1264
+
1265
+ checkpoint = safe_load(checkpoint_path_or_dict, device="cpu")
1266
+ else:
1267
+ if device is None:
1268
+ device = "cuda" if torch.cuda.is_available() else "cpu"
1269
+ checkpoint = torch.load(checkpoint_path_or_dict, map_location=device)
1270
+ else:
1271
+ checkpoint = torch.load(checkpoint_path_or_dict, map_location=device)
1272
+ elif isinstance(checkpoint_path_or_dict, dict):
1273
+ checkpoint = checkpoint_path_or_dict
1274
+
1275
+ # Sometimes models don't have the global_step item
1276
+ if "global_step" in checkpoint:
1277
+ global_step = checkpoint["global_step"]
1278
+ else:
1279
+ logger.debug("global_step key not found in model")
1280
+ global_step = None
1281
+
1282
+ # NOTE: this while loop isn't great but this controlnet checkpoint has one additional
1283
+ # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21
1284
+ while "state_dict" in checkpoint:
1285
+ checkpoint = checkpoint["state_dict"]
1286
+
1287
+ if original_config_file is None:
1288
+ key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
1289
+ key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias"
1290
+ key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
1291
+ is_upscale = pipeline_class == StableDiffusionUpscalePipeline
1292
+
1293
+ config_url = None
1294
+
1295
+ # model_type = "v1"
1296
+ if config_files is not None and "v1" in config_files:
1297
+ original_config_file = config_files["v1"]
1298
+ else:
1299
+ config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
1300
+
1301
+ if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024:
1302
+ # model_type = "v2"
1303
+ if config_files is not None and "v2" in config_files:
1304
+ original_config_file = config_files["v2"]
1305
+ else:
1306
+ config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
1307
+ if global_step == 110000:
1308
+ # v2.1 needs to upcast attention
1309
+ upcast_attention = True
1310
+ elif key_name_sd_xl_base in checkpoint:
1311
+ # only base xl has two text embedders
1312
+ if config_files is not None and "xl" in config_files:
1313
+ original_config_file = config_files["xl"]
1314
+ else:
1315
+ config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
1316
+ elif key_name_sd_xl_refiner in checkpoint:
1317
+ # only refiner xl has embedder and one text embedders
1318
+ if config_files is not None and "xl_refiner" in config_files:
1319
+ original_config_file = config_files["xl_refiner"]
1320
+ else:
1321
+ config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
1322
+
1323
+ if is_upscale:
1324
+ config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
1325
+
1326
+ if config_url is not None:
1327
+ original_config_file = BytesIO(requests.get(config_url).content)
1328
+ else:
1329
+ with open(original_config_file, "r") as f:
1330
+ original_config_file = f.read()
1331
+ else:
1332
+ with open(original_config_file, "r") as f:
1333
+ original_config_file = f.read()
1334
+
1335
+ original_config = yaml.safe_load(original_config_file)
1336
+
1337
+ # Convert the text model.
1338
+ if (
1339
+ model_type is None
1340
+ and "cond_stage_config" in original_config["model"]["params"]
1341
+ and original_config["model"]["params"]["cond_stage_config"] is not None
1342
+ ):
1343
+ model_type = original_config["model"]["params"]["cond_stage_config"]["target"].split(".")[-1]
1344
+ logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}")
1345
+ elif model_type is None and original_config["model"]["params"]["network_config"] is not None:
1346
+ if original_config["model"]["params"]["network_config"]["params"]["context_dim"] == 2048:
1347
+ model_type = "SDXL"
1348
+ else:
1349
+ model_type = "SDXL-Refiner"
1350
+ if image_size is None:
1351
+ image_size = 1024
1352
+
1353
+ if pipeline_class is None:
1354
+ # Check if we have a SDXL or SD model and initialize default pipeline
1355
+ if model_type not in ["SDXL", "SDXL-Refiner"]:
1356
+ pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline
1357
+ else:
1358
+ pipeline_class = StableDiffusionXLPipeline if model_type == "SDXL" else StableDiffusionXLImg2ImgPipeline
1359
+
1360
+ if num_in_channels is None and pipeline_class in [
1361
+ StableDiffusionInpaintPipeline,
1362
+ StableDiffusionXLInpaintPipeline,
1363
+ StableDiffusionXLControlNetInpaintPipeline,
1364
+ ]:
1365
+ num_in_channels = 9
1366
+ if num_in_channels is None and pipeline_class == StableDiffusionUpscalePipeline:
1367
+ num_in_channels = 7
1368
+ elif num_in_channels is None:
1369
+ num_in_channels = 4
1370
+
1371
+ if "unet_config" in original_config["model"]["params"]:
1372
+ original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
1373
+ elif "network_config" in original_config["model"]["params"]:
1374
+ original_config["model"]["params"]["network_config"]["params"]["in_channels"] = num_in_channels
1375
+
1376
+ if (
1377
+ "parameterization" in original_config["model"]["params"]
1378
+ and original_config["model"]["params"]["parameterization"] == "v"
1379
+ ):
1380
+ if prediction_type is None:
1381
+ # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"`
1382
+ # as it relies on a brittle global step parameter here
1383
+ prediction_type = "epsilon" if global_step == 875000 else "v_prediction"
1384
+ if image_size is None:
1385
+ # NOTE: For stable diffusion 2 base one has to pass `image_size==512`
1386
+ # as it relies on a brittle global step parameter here
1387
+ image_size = 512 if global_step == 875000 else 768
1388
+ else:
1389
+ if prediction_type is None:
1390
+ prediction_type = "epsilon"
1391
+ if image_size is None:
1392
+ image_size = 512
1393
+
1394
+ if controlnet is None and "control_stage_config" in original_config["model"]["params"]:
1395
+ path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else ""
1396
+ controlnet = convert_controlnet_checkpoint(
1397
+ checkpoint, original_config, path, image_size, upcast_attention, extract_ema
1398
+ )
1399
+
1400
+ if "timesteps" in original_config["model"]["params"]:
1401
+ num_train_timesteps = original_config["model"]["params"]["timesteps"]
1402
+ else:
1403
+ num_train_timesteps = 1000
1404
+
1405
+ if model_type in ["SDXL", "SDXL-Refiner"]:
1406
+ scheduler_dict = {
1407
+ "beta_schedule": "scaled_linear",
1408
+ "beta_start": 0.00085,
1409
+ "beta_end": 0.012,
1410
+ "interpolation_type": "linear",
1411
+ "num_train_timesteps": num_train_timesteps,
1412
+ "prediction_type": "epsilon",
1413
+ "sample_max_value": 1.0,
1414
+ "set_alpha_to_one": False,
1415
+ "skip_prk_steps": True,
1416
+ "steps_offset": 1,
1417
+ "timestep_spacing": "leading",
1418
+ }
1419
+ scheduler = EulerDiscreteScheduler.from_config(scheduler_dict)
1420
+ scheduler_type = "euler"
1421
+ else:
1422
+ if "linear_start" in original_config["model"]["params"]:
1423
+ beta_start = original_config["model"]["params"]["linear_start"]
1424
+ else:
1425
+ beta_start = 0.02
1426
+
1427
+ if "linear_end" in original_config["model"]["params"]:
1428
+ beta_end = original_config["model"]["params"]["linear_end"]
1429
+ else:
1430
+ beta_end = 0.085
1431
+ scheduler = DDIMScheduler(
1432
+ beta_end=beta_end,
1433
+ beta_schedule="scaled_linear",
1434
+ beta_start=beta_start,
1435
+ num_train_timesteps=num_train_timesteps,
1436
+ steps_offset=1,
1437
+ clip_sample=False,
1438
+ set_alpha_to_one=False,
1439
+ prediction_type=prediction_type,
1440
+ )
1441
+ # make sure scheduler works correctly with DDIM
1442
+ scheduler.register_to_config(clip_sample=False)
1443
+
1444
+ if scheduler_type == "pndm":
1445
+ config = dict(scheduler.config)
1446
+ config["skip_prk_steps"] = True
1447
+ scheduler = PNDMScheduler.from_config(config)
1448
+ elif scheduler_type == "lms":
1449
+ scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
1450
+ elif scheduler_type == "heun":
1451
+ scheduler = HeunDiscreteScheduler.from_config(scheduler.config)
1452
+ elif scheduler_type == "euler":
1453
+ scheduler = EulerDiscreteScheduler.from_config(scheduler.config)
1454
+ elif scheduler_type == "euler-ancestral":
1455
+ scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
1456
+ elif scheduler_type == "dpm":
1457
+ scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
1458
+ elif scheduler_type == "ddim":
1459
+ scheduler = scheduler
1460
+ else:
1461
+ raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!")
1462
+
1463
+ if pipeline_class == StableDiffusionUpscalePipeline:
1464
+ image_size = original_config["model"]["params"]["unet_config"]["params"]["image_size"]
1465
+
1466
+ # Convert the UNet2DConditionModel model.
1467
+ unet_config = create_unet_diffusers_config(original_config, image_size=image_size)
1468
+ unet_config["upcast_attention"] = upcast_attention
1469
+
1470
+ path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else ""
1471
+ converted_unet_checkpoint = convert_ldm_unet_checkpoint(
1472
+ checkpoint, unet_config, path=path, extract_ema=extract_ema
1473
+ )
1474
+
1475
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
1476
+ with ctx():
1477
+ unet = UNet2DConditionModel(**unet_config)
1478
+
1479
+ if is_accelerate_available():
1480
+ if model_type not in ["SDXL", "SDXL-Refiner"]: # SBM Delay this.
1481
+ for param_name, param in converted_unet_checkpoint.items():
1482
+ set_module_tensor_to_device(unet, param_name, "cpu", value=param)
1483
+ else:
1484
+ unet.load_state_dict(converted_unet_checkpoint)
1485
+
1486
+ # Convert the VAE model.
1487
+ if vae_path is None and vae is None:
1488
+ vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
1489
+ converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
1490
+
1491
+ if (
1492
+ "model" in original_config
1493
+ and "params" in original_config["model"]
1494
+ and "scale_factor" in original_config["model"]["params"]
1495
+ ):
1496
+ vae_scaling_factor = original_config["model"]["params"]["scale_factor"]
1497
+ else:
1498
+ vae_scaling_factor = 0.18215 # default SD scaling factor
1499
+
1500
+ vae_config["scaling_factor"] = vae_scaling_factor
1501
+
1502
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
1503
+ with ctx():
1504
+ vae = AutoencoderKL(**vae_config)
1505
+
1506
+ if is_accelerate_available():
1507
+ for param_name, param in converted_vae_checkpoint.items():
1508
+ set_module_tensor_to_device(vae, param_name, "cpu", value=param)
1509
+ else:
1510
+ vae.load_state_dict(converted_vae_checkpoint)
1511
+ elif vae is None:
1512
+ vae = AutoencoderKL.from_pretrained(vae_path, local_files_only=local_files_only)
1513
+
1514
+ if model_type == "FrozenOpenCLIPEmbedder":
1515
+ config_name = "stabilityai/stable-diffusion-2"
1516
+ config_kwargs = {"subfolder": "text_encoder"}
1517
+
1518
+ if text_encoder is None:
1519
+ text_model = convert_open_clip_checkpoint(
1520
+ checkpoint, config_name, local_files_only=local_files_only, **config_kwargs
1521
+ )
1522
+ else:
1523
+ text_model = text_encoder
1524
+
1525
+ try:
1526
+ tokenizer = CLIPTokenizer.from_pretrained(
1527
+ "stabilityai/stable-diffusion-2", subfolder="tokenizer", local_files_only=local_files_only
1528
+ )
1529
+ except Exception:
1530
+ raise ValueError(
1531
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'stabilityai/stable-diffusion-2'."
1532
+ )
1533
+
1534
+ if stable_unclip is None:
1535
+ if controlnet:
1536
+ pipe = pipeline_class(
1537
+ vae=vae,
1538
+ text_encoder=text_model,
1539
+ tokenizer=tokenizer,
1540
+ unet=unet,
1541
+ scheduler=scheduler,
1542
+ controlnet=controlnet,
1543
+ safety_checker=safety_checker,
1544
+ feature_extractor=feature_extractor,
1545
+ )
1546
+ if hasattr(pipe, "requires_safety_checker"):
1547
+ pipe.requires_safety_checker = False
1548
+
1549
+ elif pipeline_class == StableDiffusionUpscalePipeline:
1550
+ scheduler = DDIMScheduler.from_pretrained(
1551
+ "stabilityai/stable-diffusion-x4-upscaler", subfolder="scheduler"
1552
+ )
1553
+ low_res_scheduler = DDPMScheduler.from_pretrained(
1554
+ "stabilityai/stable-diffusion-x4-upscaler", subfolder="low_res_scheduler"
1555
+ )
1556
+
1557
+ pipe = pipeline_class(
1558
+ vae=vae,
1559
+ text_encoder=text_model,
1560
+ tokenizer=tokenizer,
1561
+ unet=unet,
1562
+ scheduler=scheduler,
1563
+ low_res_scheduler=low_res_scheduler,
1564
+ safety_checker=safety_checker,
1565
+ feature_extractor=feature_extractor,
1566
+ )
1567
+
1568
+ else:
1569
+ pipe = pipeline_class(
1570
+ vae=vae,
1571
+ text_encoder=text_model,
1572
+ tokenizer=tokenizer,
1573
+ unet=unet,
1574
+ scheduler=scheduler,
1575
+ safety_checker=safety_checker,
1576
+ feature_extractor=feature_extractor,
1577
+ )
1578
+ if hasattr(pipe, "requires_safety_checker"):
1579
+ pipe.requires_safety_checker = False
1580
+
1581
+ else:
1582
+ image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components(
1583
+ original_config, clip_stats_path=clip_stats_path, device=device
1584
+ )
1585
+
1586
+ if stable_unclip == "img2img":
1587
+ feature_extractor, image_encoder = stable_unclip_image_encoder(original_config)
1588
+
1589
+ pipe = StableUnCLIPImg2ImgPipeline(
1590
+ # image encoding components
1591
+ feature_extractor=feature_extractor,
1592
+ image_encoder=image_encoder,
1593
+ # image noising components
1594
+ image_normalizer=image_normalizer,
1595
+ image_noising_scheduler=image_noising_scheduler,
1596
+ # regular denoising components
1597
+ tokenizer=tokenizer,
1598
+ text_encoder=text_model,
1599
+ unet=unet,
1600
+ scheduler=scheduler,
1601
+ # vae
1602
+ vae=vae,
1603
+ )
1604
+ elif stable_unclip == "txt2img":
1605
+ if stable_unclip_prior is None or stable_unclip_prior == "karlo":
1606
+ karlo_model = "kakaobrain/karlo-v1-alpha"
1607
+ prior = PriorTransformer.from_pretrained(
1608
+ karlo_model, subfolder="prior", local_files_only=local_files_only
1609
+ )
1610
+
1611
+ try:
1612
+ prior_tokenizer = CLIPTokenizer.from_pretrained(
1613
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1614
+ )
1615
+ except Exception:
1616
+ raise ValueError(
1617
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1618
+ )
1619
+ prior_text_model = CLIPTextModelWithProjection.from_pretrained(
1620
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1621
+ )
1622
+
1623
+ prior_scheduler = UnCLIPScheduler.from_pretrained(
1624
+ karlo_model, subfolder="prior_scheduler", local_files_only=local_files_only
1625
+ )
1626
+ prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config)
1627
+ else:
1628
+ raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}")
1629
+
1630
+ pipe = StableUnCLIPPipeline(
1631
+ # prior components
1632
+ prior_tokenizer=prior_tokenizer,
1633
+ prior_text_encoder=prior_text_model,
1634
+ prior=prior,
1635
+ prior_scheduler=prior_scheduler,
1636
+ # image noising components
1637
+ image_normalizer=image_normalizer,
1638
+ image_noising_scheduler=image_noising_scheduler,
1639
+ # regular denoising components
1640
+ tokenizer=tokenizer,
1641
+ text_encoder=text_model,
1642
+ unet=unet,
1643
+ scheduler=scheduler,
1644
+ # vae
1645
+ vae=vae,
1646
+ )
1647
+ else:
1648
+ raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}")
1649
+ elif model_type == "PaintByExample":
1650
+ vision_model = convert_paint_by_example_checkpoint(checkpoint)
1651
+ try:
1652
+ tokenizer = CLIPTokenizer.from_pretrained(
1653
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1654
+ )
1655
+ except Exception:
1656
+ raise ValueError(
1657
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1658
+ )
1659
+ try:
1660
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
1661
+ "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only
1662
+ )
1663
+ except Exception:
1664
+ raise ValueError(
1665
+ f"With local_files_only set to {local_files_only}, you must first locally save the feature_extractor in the following path: 'CompVis/stable-diffusion-safety-checker'."
1666
+ )
1667
+ pipe = PaintByExamplePipeline(
1668
+ vae=vae,
1669
+ image_encoder=vision_model,
1670
+ unet=unet,
1671
+ scheduler=scheduler,
1672
+ safety_checker=None,
1673
+ feature_extractor=feature_extractor,
1674
+ )
1675
+ elif model_type == "FrozenCLIPEmbedder":
1676
+ text_model = convert_ldm_clip_checkpoint(
1677
+ checkpoint, local_files_only=local_files_only, text_encoder=text_encoder
1678
+ )
1679
+ try:
1680
+ tokenizer = (
1681
+ CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only)
1682
+ if tokenizer is None
1683
+ else tokenizer
1684
+ )
1685
+ except Exception:
1686
+ raise ValueError(
1687
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1688
+ )
1689
+
1690
+ if load_safety_checker:
1691
+ safety_checker = StableDiffusionSafetyChecker.from_pretrained(
1692
+ "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only
1693
+ )
1694
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
1695
+ "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only
1696
+ )
1697
+
1698
+ if controlnet:
1699
+ pipe = pipeline_class(
1700
+ vae=vae,
1701
+ text_encoder=text_model,
1702
+ tokenizer=tokenizer,
1703
+ unet=unet,
1704
+ controlnet=controlnet,
1705
+ scheduler=scheduler,
1706
+ safety_checker=safety_checker,
1707
+ feature_extractor=feature_extractor,
1708
+ )
1709
+ else:
1710
+ pipe = pipeline_class(
1711
+ vae=vae,
1712
+ text_encoder=text_model,
1713
+ tokenizer=tokenizer,
1714
+ unet=unet,
1715
+ scheduler=scheduler,
1716
+ safety_checker=safety_checker,
1717
+ feature_extractor=feature_extractor,
1718
+ )
1719
+ elif model_type in ["SDXL", "SDXL-Refiner"]:
1720
+ is_refiner = model_type == "SDXL-Refiner"
1721
+
1722
+ if (is_refiner is False) and (tokenizer is None):
1723
+ try:
1724
+ tokenizer = CLIPTokenizer.from_pretrained(
1725
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1726
+ )
1727
+ except Exception:
1728
+ raise ValueError(
1729
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1730
+ )
1731
+
1732
+ if (is_refiner is False) and (text_encoder is None):
1733
+ text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only)
1734
+
1735
+ if tokenizer_2 is None:
1736
+ try:
1737
+ tokenizer_2 = CLIPTokenizer.from_pretrained(
1738
+ "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!", local_files_only=local_files_only
1739
+ )
1740
+ except Exception:
1741
+ raise ValueError(
1742
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' with `pad_token` set to '!'."
1743
+ )
1744
+
1745
+ if text_encoder_2 is None:
1746
+ config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
1747
+ config_kwargs = {"projection_dim": 1280}
1748
+ prefix = "conditioner.embedders.0.model." if is_refiner else "conditioner.embedders.1.model."
1749
+
1750
+ text_encoder_2 = convert_open_clip_checkpoint(
1751
+ checkpoint,
1752
+ config_name,
1753
+ prefix=prefix,
1754
+ has_projection=True,
1755
+ local_files_only=local_files_only,
1756
+ **config_kwargs,
1757
+ )
1758
+
1759
+ if is_accelerate_available(): # SBM Now move model to cpu.
1760
+ for param_name, param in converted_unet_checkpoint.items():
1761
+ set_module_tensor_to_device(unet, param_name, "cpu", value=param)
1762
+
1763
+ if controlnet:
1764
+ pipe = pipeline_class(
1765
+ vae=vae,
1766
+ text_encoder=text_encoder,
1767
+ tokenizer=tokenizer,
1768
+ text_encoder_2=text_encoder_2,
1769
+ tokenizer_2=tokenizer_2,
1770
+ unet=unet,
1771
+ controlnet=controlnet,
1772
+ scheduler=scheduler,
1773
+ force_zeros_for_empty_prompt=True,
1774
+ )
1775
+ elif adapter:
1776
+ pipe = pipeline_class(
1777
+ vae=vae,
1778
+ text_encoder=text_encoder,
1779
+ tokenizer=tokenizer,
1780
+ text_encoder_2=text_encoder_2,
1781
+ tokenizer_2=tokenizer_2,
1782
+ unet=unet,
1783
+ adapter=adapter,
1784
+ scheduler=scheduler,
1785
+ force_zeros_for_empty_prompt=True,
1786
+ )
1787
+
1788
+ else:
1789
+ pipeline_kwargs = {
1790
+ "vae": vae,
1791
+ "text_encoder": text_encoder,
1792
+ "tokenizer": tokenizer,
1793
+ "text_encoder_2": text_encoder_2,
1794
+ "tokenizer_2": tokenizer_2,
1795
+ "unet": unet,
1796
+ "scheduler": scheduler,
1797
+ }
1798
+
1799
+ if (pipeline_class == StableDiffusionXLImg2ImgPipeline) or (
1800
+ pipeline_class == StableDiffusionXLInpaintPipeline
1801
+ ):
1802
+ pipeline_kwargs.update({"requires_aesthetics_score": is_refiner})
1803
+
1804
+ if is_refiner:
1805
+ pipeline_kwargs.update({"force_zeros_for_empty_prompt": False})
1806
+
1807
+ pipe = pipeline_class(**pipeline_kwargs)
1808
+ else:
1809
+ text_config = create_ldm_bert_config(original_config)
1810
+ text_model = convert_ldm_bert_checkpoint(checkpoint, text_config)
1811
+ tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased", local_files_only=local_files_only)
1812
+ pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
1813
+
1814
+ return pipe
1815
+
1816
+
1817
def download_controlnet_from_original_ckpt(
    checkpoint_path: str,
    original_config_file: str,
    image_size: int = 512,
    extract_ema: bool = False,
    num_in_channels: Optional[int] = None,
    upcast_attention: Optional[bool] = None,
    device: Optional[str] = None,
    from_safetensors: bool = False,
    use_linear_projection: Optional[bool] = None,
    cross_attention_dim: Optional[int] = None,
) -> DiffusionPipeline:
    # NOTE(review): despite the annotation, this returns the converted ControlNet
    # model produced by `convert_controlnet_checkpoint`, not a pipeline — confirm
    # whether the annotation should be `ControlNetModel` before changing it.
    """Convert an original-format (LDM/CompVis) ControlNet checkpoint.

    Args:
        checkpoint_path: Path to the `.ckpt` / `.safetensors` checkpoint file.
        original_config_file: Path to the original YAML config describing the model.
        image_size: Image size the model was trained on; forwarded to the converter.
        extract_ema: Whether to extract the EMA weights from the checkpoint.
        num_in_channels: If given, overrides the UNet `in_channels` in the config.
        upcast_attention: Whether attention should be upcast; forwarded to the converter.
        device: Map location for `torch.load`; defaults to CUDA when available.
        from_safetensors: Load the checkpoint with `safetensors` instead of `torch.load`.
        use_linear_projection: Forwarded to `convert_controlnet_checkpoint`.
        cross_attention_dim: Forwarded to `convert_controlnet_checkpoint`.

    Raises:
        ValueError: If the YAML config has no `control_stage_config` section.
    """
    if from_safetensors:
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        # Pick a default device only when the caller did not supply one; a single
        # load call replaces the previous duplicated torch.load in both branches.
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        checkpoint = torch.load(checkpoint_path, map_location=device)

    # NOTE: some checkpoints nest the weights under one or more "state_dict"
    # keys (e.g. https://huggingface.co/thibaud/controlnet-canny-sd21), so
    # unwrap repeatedly until the tensors are at the top level.
    while "state_dict" in checkpoint:
        checkpoint = checkpoint["state_dict"]

    # Read the YAML into a distinct local instead of shadowing the
    # `original_config_file` path parameter with the file's contents.
    with open(original_config_file, "r") as f:
        config_text = f.read()
    original_config = yaml.safe_load(config_text)

    if num_in_channels is not None:
        original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels

    if "control_stage_config" not in original_config["model"]["params"]:
        raise ValueError("`control_stage_config` not present in original config")

    controlnet = convert_controlnet_checkpoint(
        checkpoint,
        original_config,
        checkpoint_path,
        image_size,
        upcast_attention,
        extract_ema,
        use_linear_projection=use_linear_projection,
        cross_attention_dim=cross_attention_dim,
    )

    return controlnet