ZTWHHH committed on
Commit
05c087f
·
verified ·
1 Parent(s): 263080f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_captioning.py +51 -0
  2. evalkit_internvl/lib/python3.10/site-packages/transformers/tools/prompts.py +48 -0
  3. evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  4. evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_objects.cpython-310.pyc +0 -0
  5. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm/__init__.py +51 -0
  6. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm/__pycache__/__init__.cpython-310.pyc +0 -0
  7. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm/__pycache__/pipeline_audioldm.cpython-310.pyc +0 -0
  8. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/blip_diffusion/blip_image_processing.py +318 -0
  9. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/ddim/pipeline_ddim.py +154 -0
  10. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_consistency_models/__init__.py +50 -0
  11. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_consistency_models/__pycache__/pipeline_latent_consistency_text2img.cpython-310.pyc +0 -0
  12. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__init__.py +71 -0
  13. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/__init__.cpython-310.pyc +0 -0
  14. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/camera.cpython-310.pyc +0 -0
  15. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/pipeline_shap_e.cpython-310.pyc +0 -0
  16. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/pipeline_shap_e_img2img.cpython-310.pyc +0 -0
  17. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/renderer.cpython-310.pyc +0 -0
  18. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/camera.py +147 -0
  19. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/pipeline_shap_e.py +334 -0
  20. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +321 -0
  21. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/renderer.py +1050 -0
  22. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/__init__.cpython-310.pyc +0 -0
  23. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/pipeline_stable_cascade.cpython-310.pyc +0 -0
  24. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/pipeline_stable_cascade_combined.cpython-310.pyc +0 -0
  25. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/pipeline_stable_cascade_prior.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +496 -0
  27. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +311 -0
  28. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +638 -0
  29. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__init__.py +203 -0
  30. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +1860 -0
  31. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +473 -0
  32. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +532 -0
  33. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +1032 -0
  34. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +420 -0
  35. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +807 -0
  36. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +932 -0
  37. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/safety_checker_flax.py +112 -0
  38. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py +57 -0
  39. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +62 -0
  40. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
  41. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_ldm3d/__pycache__/__init__.cpython-310.pyc +0 -0
  42. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__init__.py +99 -0
  43. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/__init__.cpython-310.pyc +0 -0
  44. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/pipeline_output.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/pipeline_stable_diffusion_safe.cpython-310.pyc +0 -0
  46. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/safety_checker.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py +34 -0
  48. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +764 -0
  49. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +109 -0
  50. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
evalkit_internvl/lib/python3.10/site-packages/transformers/tools/image_captioning.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+
4
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ from typing import TYPE_CHECKING
18
+
19
+ from ..models.auto import AutoModelForVision2Seq
20
+ from ..utils import requires_backends
21
+ from .base import PipelineTool
22
+
23
+
24
+ if TYPE_CHECKING:
25
+ from PIL import Image
26
+
27
+
28
+ class ImageCaptioningTool(PipelineTool):
29
+ default_checkpoint = "Salesforce/blip-image-captioning-base"
30
+ description = (
31
+ "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
32
+ "image to caption, and returns a text that contains the description in English."
33
+ )
34
+ name = "image_captioner"
35
+ model_class = AutoModelForVision2Seq
36
+
37
+ inputs = ["image"]
38
+ outputs = ["text"]
39
+
40
+ def __init__(self, *args, **kwargs):
41
+ requires_backends(self, ["vision"])
42
+ super().__init__(*args, **kwargs)
43
+
44
+ def encode(self, image: "Image"):
45
+ return self.pre_processor(images=image, return_tensors="pt")
46
+
47
+ def forward(self, inputs):
48
+ return self.model.generate(**inputs)
49
+
50
+ def decode(self, outputs):
51
+ return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
evalkit_internvl/lib/python3.10/site-packages/transformers/tools/prompts.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+
4
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ import re
18
+
19
+ from ..utils import cached_file
20
+
21
+
22
+ # docstyle-ignore
23
+ CHAT_MESSAGE_PROMPT = """
24
+ Human: <<task>>
25
+
26
+ Assistant: """
27
+
28
+
29
+ DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
30
+ PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
31
+
32
+
33
+ def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
34
+ """
35
+ Downloads and caches the prompt from a repo and returns it contents (if necessary)
36
+ """
37
+ if prompt_or_repo_id is None:
38
+ prompt_or_repo_id = DEFAULT_PROMPTS_REPO
39
+
40
+ # prompt is considered a repo ID when it does not contain any kind of space
41
+ if re.search("\\s", prompt_or_repo_id) is not None:
42
+ return prompt_or_repo_id
43
+
44
+ prompt_file = cached_file(
45
+ prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
46
+ )
47
+ with open(prompt_file, "r", encoding="utf-8") as f:
48
+ return f.read()
evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.67 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_sentencepiece_objects.cpython-310.pyc ADDED
Binary file (8.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm/__init__.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ is_torch_available,
8
+ is_transformers_available,
9
+ is_transformers_version,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {}
15
+
16
+ try:
17
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
18
+ raise OptionalDependencyNotAvailable()
19
+ except OptionalDependencyNotAvailable:
20
+ from ...utils.dummy_torch_and_transformers_objects import (
21
+ AudioLDMPipeline,
22
+ )
23
+
24
+ _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline})
25
+ else:
26
+ _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"]
27
+
28
+
29
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
30
+ try:
31
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ from ...utils.dummy_torch_and_transformers_objects import (
35
+ AudioLDMPipeline,
36
+ )
37
+
38
+ else:
39
+ from .pipeline_audioldm import AudioLDMPipeline
40
+ else:
41
+ import sys
42
+
43
+ sys.modules[__name__] = _LazyModule(
44
+ __name__,
45
+ globals()["__file__"],
46
+ _import_structure,
47
+ module_spec=__spec__,
48
+ )
49
+
50
+ for name, value in _dummy_objects.items():
51
+ setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.02 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/audioldm/__pycache__/pipeline_audioldm.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/blip_diffusion/blip_image_processing.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for BLIP."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from transformers.image_transforms import convert_to_rgb, resize, to_channel_dimension_format
23
+ from transformers.image_utils import (
24
+ OPENAI_CLIP_MEAN,
25
+ OPENAI_CLIP_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ infer_channel_dimension_format,
30
+ is_scaled_image,
31
+ make_list_of_images,
32
+ to_numpy_array,
33
+ valid_images,
34
+ )
35
+ from transformers.utils import TensorType, is_vision_available, logging
36
+
37
+ from diffusers.utils import numpy_to_pil
38
+
39
+
40
+ if is_vision_available():
41
+ import PIL.Image
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ # We needed some extra functions on top of the ones in transformers.image_processing_utils.BaseImageProcessor, namely center crop
48
+ # Copy-pasted from transformers.models.blip.image_processing_blip.BlipImageProcessor
49
+ class BlipImageProcessor(BaseImageProcessor):
50
+ r"""
51
+ Constructs a BLIP image processor.
52
+
53
+ Args:
54
+ do_resize (`bool`, *optional*, defaults to `True`):
55
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
56
+ `do_resize` parameter in the `preprocess` method.
57
+ size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
58
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
59
+ method.
60
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
61
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
62
+ overridden by the `resample` parameter in the `preprocess` method.
63
+ do_rescale (`bool`, *optional*, defaults to `True`):
64
+ Wwhether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
65
+ `do_rescale` parameter in the `preprocess` method.
66
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
67
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
68
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
69
+ do_normalize (`bool`, *optional*, defaults to `True`):
70
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
71
+ method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
72
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
73
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
74
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
75
+ overridden by the `image_mean` parameter in the `preprocess` method.
76
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
77
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
78
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
79
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
80
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
81
+ Whether to convert the image to RGB.
82
+ """
83
+
84
+ model_input_names = ["pixel_values"]
85
+
86
+ def __init__(
87
+ self,
88
+ do_resize: bool = True,
89
+ size: Dict[str, int] = None,
90
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
91
+ do_rescale: bool = True,
92
+ rescale_factor: Union[int, float] = 1 / 255,
93
+ do_normalize: bool = True,
94
+ image_mean: Optional[Union[float, List[float]]] = None,
95
+ image_std: Optional[Union[float, List[float]]] = None,
96
+ do_convert_rgb: bool = True,
97
+ do_center_crop: bool = True,
98
+ **kwargs,
99
+ ) -> None:
100
+ super().__init__(**kwargs)
101
+ size = size if size is not None else {"height": 224, "width": 224}
102
+ size = get_size_dict(size, default_to_square=True)
103
+
104
+ self.do_resize = do_resize
105
+ self.size = size
106
+ self.resample = resample
107
+ self.do_rescale = do_rescale
108
+ self.rescale_factor = rescale_factor
109
+ self.do_normalize = do_normalize
110
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
111
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
112
+ self.do_convert_rgb = do_convert_rgb
113
+ self.do_center_crop = do_center_crop
114
+
115
+ # Copy-pasted from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
116
+ def resize(
117
+ self,
118
+ image: np.ndarray,
119
+ size: Dict[str, int],
120
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
121
+ data_format: Optional[Union[str, ChannelDimension]] = None,
122
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
123
+ **kwargs,
124
+ ) -> np.ndarray:
125
+ """
126
+ Resize an image to `(size["height"], size["width"])`.
127
+
128
+ Args:
129
+ image (`np.ndarray`):
130
+ Image to resize.
131
+ size (`Dict[str, int]`):
132
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
133
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
134
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
135
+ data_format (`ChannelDimension` or `str`, *optional*):
136
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
137
+ image is used. Can be one of:
138
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
139
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
140
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
141
+ input_data_format (`ChannelDimension` or `str`, *optional*):
142
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
143
+ from the input image. Can be one of:
144
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
145
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
146
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
147
+
148
+ Returns:
149
+ `np.ndarray`: The resized image.
150
+ """
151
+ size = get_size_dict(size)
152
+ if "height" not in size or "width" not in size:
153
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
154
+ output_size = (size["height"], size["width"])
155
+ return resize(
156
+ image,
157
+ size=output_size,
158
+ resample=resample,
159
+ data_format=data_format,
160
+ input_data_format=input_data_format,
161
+ **kwargs,
162
+ )
163
+
164
+ def preprocess(
165
+ self,
166
+ images: ImageInput,
167
+ do_resize: Optional[bool] = None,
168
+ size: Optional[Dict[str, int]] = None,
169
+ resample: PILImageResampling = None,
170
+ do_rescale: Optional[bool] = None,
171
+ do_center_crop: Optional[bool] = None,
172
+ rescale_factor: Optional[float] = None,
173
+ do_normalize: Optional[bool] = None,
174
+ image_mean: Optional[Union[float, List[float]]] = None,
175
+ image_std: Optional[Union[float, List[float]]] = None,
176
+ return_tensors: Optional[Union[str, TensorType]] = None,
177
+ do_convert_rgb: bool = None,
178
+ data_format: ChannelDimension = ChannelDimension.FIRST,
179
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
180
+ **kwargs,
181
+ ) -> PIL.Image.Image:
182
+ """
183
+ Preprocess an image or batch of images.
184
+
185
+ Args:
186
+ images (`ImageInput`):
187
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
188
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
189
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
190
+ Whether to resize the image.
191
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
192
+ Controls the size of the image after `resize`. The shortest edge of the image is resized to
193
+ `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
194
+ is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
195
+ edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
196
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
197
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
198
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
199
+ Whether to rescale the image values between [0 - 1].
200
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
201
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
202
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
203
+ Whether to normalize the image.
204
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
205
+ Image mean to normalize the image by if `do_normalize` is set to `True`.
206
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
207
+ Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
208
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
209
+ Whether to convert the image to RGB.
210
+ return_tensors (`str` or `TensorType`, *optional*):
211
+ The type of tensors to return. Can be one of:
212
+ - Unset: Return a list of `np.ndarray`.
213
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
214
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
215
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
216
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
217
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
218
+ The channel dimension format for the output image. Can be one of:
219
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
220
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
221
+ - Unset: Use the channel dimension format of the input image.
222
+ input_data_format (`ChannelDimension` or `str`, *optional*):
223
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
224
+ from the input image. Can be one of:
225
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
226
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
227
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
228
+ """
229
+ do_resize = do_resize if do_resize is not None else self.do_resize
230
+ resample = resample if resample is not None else self.resample
231
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
232
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
233
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
234
+ image_mean = image_mean if image_mean is not None else self.image_mean
235
+ image_std = image_std if image_std is not None else self.image_std
236
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
237
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
238
+
239
+ size = size if size is not None else self.size
240
+ size = get_size_dict(size, default_to_square=False)
241
+ images = make_list_of_images(images)
242
+
243
+ if not valid_images(images):
244
+ raise ValueError(
245
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
246
+ "torch.Tensor, tf.Tensor or jax.ndarray."
247
+ )
248
+
249
+ if do_resize and size is None or resample is None:
250
+ raise ValueError("Size and resample must be specified if do_resize is True.")
251
+
252
+ if do_rescale and rescale_factor is None:
253
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
254
+
255
+ if do_normalize and (image_mean is None or image_std is None):
256
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
257
+
258
+ # PIL RGBA images are converted to RGB
259
+ if do_convert_rgb:
260
+ images = [convert_to_rgb(image) for image in images]
261
+
262
+ # All transformations expect numpy arrays.
263
+ images = [to_numpy_array(image) for image in images]
264
+
265
+ if is_scaled_image(images[0]) and do_rescale:
266
+ logger.warning_once(
267
+ "It looks like you are trying to rescale already rescaled images. If the input"
268
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
269
+ )
270
+ if input_data_format is None:
271
+ # We assume that all images have the same channel dimension format.
272
+ input_data_format = infer_channel_dimension_format(images[0])
273
+
274
+ if do_resize:
275
+ images = [
276
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
277
+ for image in images
278
+ ]
279
+
280
+ if do_rescale:
281
+ images = [
282
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
283
+ for image in images
284
+ ]
285
+ if do_normalize:
286
+ images = [
287
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
288
+ for image in images
289
+ ]
290
+ if do_center_crop:
291
+ images = [self.center_crop(image, size, input_data_format=input_data_format) for image in images]
292
+
293
+ images = [
294
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
295
+ ]
296
+
297
+ encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
298
+ return encoded_outputs
299
+
300
+ # Follows diffusers.VaeImageProcessor.postprocess
301
+ def postprocess(self, sample: torch.FloatTensor, output_type: str = "pil"):
302
+ if output_type not in ["pt", "np", "pil"]:
303
+ raise ValueError(
304
+ f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']"
305
+ )
306
+
307
+ # Equivalent to diffusers.VaeImageProcessor.denormalize
308
+ sample = (sample / 2 + 0.5).clamp(0, 1)
309
+ if output_type == "pt":
310
+ return sample
311
+
312
+ # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy
313
+ sample = sample.cpu().permute(0, 2, 3, 1).numpy()
314
+ if output_type == "np":
315
+ return sample
316
+ # Output_type must be 'pil'
317
+ sample = numpy_to_pil(sample)
318
+ return sample
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/ddim/pipeline_ddim.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import torch
18
+
19
+ from ...schedulers import DDIMScheduler
20
+ from ...utils.torch_utils import randn_tensor
21
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
22
+
23
+
24
+ class DDIMPipeline(DiffusionPipeline):
25
+ r"""
26
+ Pipeline for image generation.
27
+
28
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
29
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
30
+
31
+ Parameters:
32
+ unet ([`UNet2DModel`]):
33
+ A `UNet2DModel` to denoise the encoded image latents.
34
+ scheduler ([`SchedulerMixin`]):
35
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
36
+ [`DDPMScheduler`], or [`DDIMScheduler`].
37
+ """
38
+
39
+ model_cpu_offload_seq = "unet"
40
+
41
+ def __init__(self, unet, scheduler):
42
+ super().__init__()
43
+
44
+ # make sure scheduler can always be converted to DDIM
45
+ scheduler = DDIMScheduler.from_config(scheduler.config)
46
+
47
+ self.register_modules(unet=unet, scheduler=scheduler)
48
+
49
+ @torch.no_grad()
50
+ def __call__(
51
+ self,
52
+ batch_size: int = 1,
53
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
54
+ eta: float = 0.0,
55
+ num_inference_steps: int = 50,
56
+ use_clipped_model_output: Optional[bool] = None,
57
+ output_type: Optional[str] = "pil",
58
+ return_dict: bool = True,
59
+ ) -> Union[ImagePipelineOutput, Tuple]:
60
+ r"""
61
+ The call function to the pipeline for generation.
62
+
63
+ Args:
64
+ batch_size (`int`, *optional*, defaults to 1):
65
+ The number of images to generate.
66
+ generator (`torch.Generator`, *optional*):
67
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
68
+ generation deterministic.
69
+ eta (`float`, *optional*, defaults to 0.0):
70
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
71
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. A value of `0` corresponds to
72
+ DDIM and `1` corresponds to DDPM.
73
+ num_inference_steps (`int`, *optional*, defaults to 50):
74
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
75
+ expense of slower inference.
76
+ use_clipped_model_output (`bool`, *optional*, defaults to `None`):
77
+ If `True` or `False`, see documentation for [`DDIMScheduler.step`]. If `None`, nothing is passed
78
+ downstream to the scheduler (use `None` for schedulers which don't support this argument).
79
+ output_type (`str`, *optional*, defaults to `"pil"`):
80
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
81
+ return_dict (`bool`, *optional*, defaults to `True`):
82
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
83
+
84
+ Example:
85
+
86
+ ```py
87
+ >>> from diffusers import DDIMPipeline
88
+ >>> import PIL.Image
89
+ >>> import numpy as np
90
+
91
+ >>> # load model and scheduler
92
+ >>> pipe = DDIMPipeline.from_pretrained("fusing/ddim-lsun-bedroom")
93
+
94
+ >>> # run pipeline in inference (sample random noise and denoise)
95
+ >>> image = pipe(eta=0.0, num_inference_steps=50)
96
+
97
+ >>> # process image to PIL
98
+ >>> image_processed = image.cpu().permute(0, 2, 3, 1)
99
+ >>> image_processed = (image_processed + 1.0) * 127.5
100
+ >>> image_processed = image_processed.numpy().astype(np.uint8)
101
+ >>> image_pil = PIL.Image.fromarray(image_processed[0])
102
+
103
+ >>> # save image
104
+ >>> image_pil.save("test.png")
105
+ ```
106
+
107
+ Returns:
108
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
109
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
110
+ returned where the first element is a list with the generated images
111
+ """
112
+
113
+ # Sample gaussian noise to begin loop
114
+ if isinstance(self.unet.config.sample_size, int):
115
+ image_shape = (
116
+ batch_size,
117
+ self.unet.config.in_channels,
118
+ self.unet.config.sample_size,
119
+ self.unet.config.sample_size,
120
+ )
121
+ else:
122
+ image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
123
+
124
+ if isinstance(generator, list) and len(generator) != batch_size:
125
+ raise ValueError(
126
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
127
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
128
+ )
129
+
130
+ image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype)
131
+
132
+ # set step values
133
+ self.scheduler.set_timesteps(num_inference_steps)
134
+
135
+ for t in self.progress_bar(self.scheduler.timesteps):
136
+ # 1. predict noise model_output
137
+ model_output = self.unet(image, t).sample
138
+
139
+ # 2. predict previous mean of image x_t-1 and add variance depending on eta
140
+ # eta corresponds to η in paper and should be between [0, 1]
141
+ # do x_t -> x_t-1
142
+ image = self.scheduler.step(
143
+ model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
144
+ ).prev_sample
145
+
146
+ image = (image / 2 + 0.5).clamp(0, 1)
147
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
148
+ if output_type == "pil":
149
+ image = self.numpy_to_pil(image)
150
+
151
+ if not return_dict:
152
+ return (image,)
153
+
154
+ return ImagePipelineOutput(images=image)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_consistency_models/__init__.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {}
15
+
16
+
17
+ try:
18
+ if not (is_transformers_available() and is_torch_available()):
19
+ raise OptionalDependencyNotAvailable()
20
+ except OptionalDependencyNotAvailable:
21
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
22
+
23
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
24
+ else:
25
+ _import_structure["pipeline_latent_consistency_img2img"] = ["LatentConsistencyModelImg2ImgPipeline"]
26
+ _import_structure["pipeline_latent_consistency_text2img"] = ["LatentConsistencyModelPipeline"]
27
+
28
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
29
+ try:
30
+ if not (is_transformers_available() and is_torch_available()):
31
+ raise OptionalDependencyNotAvailable()
32
+
33
+ except OptionalDependencyNotAvailable:
34
+ from ...utils.dummy_torch_and_transformers_objects import *
35
+ else:
36
+ from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline
37
+ from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline
38
+
39
+ else:
40
+ import sys
41
+
42
+ sys.modules[__name__] = _LazyModule(
43
+ __name__,
44
+ globals()["__file__"],
45
+ _import_structure,
46
+ module_spec=__spec__,
47
+ )
48
+
49
+ for name, value in _dummy_objects.items():
50
+ setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_consistency_models/__pycache__/pipeline_latent_consistency_text2img.cpython-310.pyc ADDED
Binary file (29.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__init__.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {}
15
+
16
+ try:
17
+ if not (is_transformers_available() and is_torch_available()):
18
+ raise OptionalDependencyNotAvailable()
19
+ except OptionalDependencyNotAvailable:
20
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
21
+
22
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
23
+ else:
24
+ _import_structure["camera"] = ["create_pan_cameras"]
25
+ _import_structure["pipeline_shap_e"] = ["ShapEPipeline"]
26
+ _import_structure["pipeline_shap_e_img2img"] = ["ShapEImg2ImgPipeline"]
27
+ _import_structure["renderer"] = [
28
+ "BoundingBoxVolume",
29
+ "ImportanceRaySampler",
30
+ "MLPNeRFModelOutput",
31
+ "MLPNeRSTFModel",
32
+ "ShapEParamsProjModel",
33
+ "ShapERenderer",
34
+ "StratifiedRaySampler",
35
+ "VoidNeRFModel",
36
+ ]
37
+
38
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
39
+ try:
40
+ if not (is_transformers_available() and is_torch_available()):
41
+ raise OptionalDependencyNotAvailable()
42
+
43
+ except OptionalDependencyNotAvailable:
44
+ from ...utils.dummy_torch_and_transformers_objects import *
45
+ else:
46
+ from .camera import create_pan_cameras
47
+ from .pipeline_shap_e import ShapEPipeline
48
+ from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
49
+ from .renderer import (
50
+ BoundingBoxVolume,
51
+ ImportanceRaySampler,
52
+ MLPNeRFModelOutput,
53
+ MLPNeRSTFModel,
54
+ ShapEParamsProjModel,
55
+ ShapERenderer,
56
+ StratifiedRaySampler,
57
+ VoidNeRFModel,
58
+ )
59
+
60
+ else:
61
+ import sys
62
+
63
+ sys.modules[__name__] = _LazyModule(
64
+ __name__,
65
+ globals()["__file__"],
66
+ _import_structure,
67
+ module_spec=__spec__,
68
+ )
69
+
70
+ for name, value in _dummy_objects.items():
71
+ setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/camera.cpython-310.pyc ADDED
Binary file (4.34 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/pipeline_shap_e.cpython-310.pyc ADDED
Binary file (9.9 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/pipeline_shap_e_img2img.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/__pycache__/renderer.cpython-310.pyc ADDED
Binary file (29.6 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/camera.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Open AI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Tuple
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+
22
+ @dataclass
23
+ class DifferentiableProjectiveCamera:
24
+ """
25
+ Implements a batch, differentiable, standard pinhole camera
26
+ """
27
+
28
+ origin: torch.Tensor # [batch_size x 3]
29
+ x: torch.Tensor # [batch_size x 3]
30
+ y: torch.Tensor # [batch_size x 3]
31
+ z: torch.Tensor # [batch_size x 3]
32
+ width: int
33
+ height: int
34
+ x_fov: float
35
+ y_fov: float
36
+ shape: Tuple[int]
37
+
38
+ def __post_init__(self):
39
+ assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
40
+ assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
41
+ assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
42
+
43
+ def resolution(self):
44
+ return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
45
+
46
+ def fov(self):
47
+ return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
48
+
49
+ def get_image_coords(self) -> torch.Tensor:
50
+ """
51
+ :return: coords of shape (width * height, 2)
52
+ """
53
+ pixel_indices = torch.arange(self.height * self.width)
54
+ coords = torch.stack(
55
+ [
56
+ pixel_indices % self.width,
57
+ torch.div(pixel_indices, self.width, rounding_mode="trunc"),
58
+ ],
59
+ axis=1,
60
+ )
61
+ return coords
62
+
63
+ @property
64
+ def camera_rays(self):
65
+ batch_size, *inner_shape = self.shape
66
+ inner_batch_size = int(np.prod(inner_shape))
67
+
68
+ coords = self.get_image_coords()
69
+ coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
70
+ rays = self.get_camera_rays(coords)
71
+
72
+ rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
73
+
74
+ return rays
75
+
76
+ def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
77
+ batch_size, *shape, n_coords = coords.shape
78
+ assert n_coords == 2
79
+ assert batch_size == self.origin.shape[0]
80
+
81
+ flat = coords.view(batch_size, -1, 2)
82
+
83
+ res = self.resolution()
84
+ fov = self.fov()
85
+
86
+ fracs = (flat.float() / (res - 1)) * 2 - 1
87
+ fracs = fracs * torch.tan(fov / 2)
88
+
89
+ fracs = fracs.view(batch_size, -1, 2)
90
+ directions = (
91
+ self.z.view(batch_size, 1, 3)
92
+ + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
93
+ + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
94
+ )
95
+ directions = directions / directions.norm(dim=-1, keepdim=True)
96
+ rays = torch.stack(
97
+ [
98
+ torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
99
+ directions,
100
+ ],
101
+ dim=2,
102
+ )
103
+ return rays.view(batch_size, *shape, 2, 3)
104
+
105
+ def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
106
+ """
107
+ Creates a new camera for the resized view assuming the aspect ratio does not change.
108
+ """
109
+ assert width * self.height == height * self.width, "The aspect ratio should not change."
110
+ return DifferentiableProjectiveCamera(
111
+ origin=self.origin,
112
+ x=self.x,
113
+ y=self.y,
114
+ z=self.z,
115
+ width=width,
116
+ height=height,
117
+ x_fov=self.x_fov,
118
+ y_fov=self.y_fov,
119
+ )
120
+
121
+
122
+ def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
123
+ origins = []
124
+ xs = []
125
+ ys = []
126
+ zs = []
127
+ for theta in np.linspace(0, 2 * np.pi, num=20):
128
+ z = np.array([np.sin(theta), np.cos(theta), -0.5])
129
+ z /= np.sqrt(np.sum(z**2))
130
+ origin = -z * 4
131
+ x = np.array([np.cos(theta), -np.sin(theta), 0.0])
132
+ y = np.cross(z, x)
133
+ origins.append(origin)
134
+ xs.append(x)
135
+ ys.append(y)
136
+ zs.append(z)
137
+ return DifferentiableProjectiveCamera(
138
+ origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
139
+ x=torch.from_numpy(np.stack(xs, axis=0)).float(),
140
+ y=torch.from_numpy(np.stack(ys, axis=0)).float(),
141
+ z=torch.from_numpy(np.stack(zs, axis=0)).float(),
142
+ width=size,
143
+ height=size,
144
+ x_fov=0.7,
145
+ y_fov=0.7,
146
+ shape=(1, len(xs)),
147
+ )
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/pipeline_shap_e.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Open AI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from dataclasses import dataclass
17
+ from typing import List, Optional, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ from transformers import CLIPTextModelWithProjection, CLIPTokenizer
23
+
24
+ from ...models import PriorTransformer
25
+ from ...schedulers import HeunDiscreteScheduler
26
+ from ...utils import (
27
+ BaseOutput,
28
+ logging,
29
+ replace_example_docstring,
30
+ )
31
+ from ...utils.torch_utils import randn_tensor
32
+ from ..pipeline_utils import DiffusionPipeline
33
+ from .renderer import ShapERenderer
34
+
35
+
36
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
+
38
+ EXAMPLE_DOC_STRING = """
39
+ Examples:
40
+ ```py
41
+ >>> import torch
42
+ >>> from diffusers import DiffusionPipeline
43
+ >>> from diffusers.utils import export_to_gif
44
+
45
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
46
+
47
+ >>> repo = "openai/shap-e"
48
+ >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
49
+ >>> pipe = pipe.to(device)
50
+
51
+ >>> guidance_scale = 15.0
52
+ >>> prompt = "a shark"
53
+
54
+ >>> images = pipe(
55
+ ... prompt,
56
+ ... guidance_scale=guidance_scale,
57
+ ... num_inference_steps=64,
58
+ ... frame_size=256,
59
+ ... ).images
60
+
61
+ >>> gif_path = export_to_gif(images[0], "shark_3d.gif")
62
+ ```
63
+ """
64
+
65
+
66
+ @dataclass
67
+ class ShapEPipelineOutput(BaseOutput):
68
+ """
69
+ Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`].
70
+
71
+ Args:
72
+ images (`torch.FloatTensor`)
73
+ A list of images for 3D rendering.
74
+ """
75
+
76
+ images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
77
+
78
+
79
+ class ShapEPipeline(DiffusionPipeline):
80
+ """
81
+ Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method.
82
+
83
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
84
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
85
+
86
+ Args:
87
+ prior ([`PriorTransformer`]):
88
+ The canonical unCLIP prior to approximate the image embedding from the text embedding.
89
+ text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
90
+ Frozen text-encoder.
91
+ tokenizer ([`~transformers.CLIPTokenizer`]):
92
+ A `CLIPTokenizer` to tokenize text.
93
+ scheduler ([`HeunDiscreteScheduler`]):
94
+ A scheduler to be used in combination with the `prior` model to generate image embedding.
95
+ shap_e_renderer ([`ShapERenderer`]):
96
+ Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF
97
+ rendering method.
98
+ """
99
+
100
+ model_cpu_offload_seq = "text_encoder->prior"
101
+ _exclude_from_cpu_offload = ["shap_e_renderer"]
102
+
103
+ def __init__(
104
+ self,
105
+ prior: PriorTransformer,
106
+ text_encoder: CLIPTextModelWithProjection,
107
+ tokenizer: CLIPTokenizer,
108
+ scheduler: HeunDiscreteScheduler,
109
+ shap_e_renderer: ShapERenderer,
110
+ ):
111
+ super().__init__()
112
+
113
+ self.register_modules(
114
+ prior=prior,
115
+ text_encoder=text_encoder,
116
+ tokenizer=tokenizer,
117
+ scheduler=scheduler,
118
+ shap_e_renderer=shap_e_renderer,
119
+ )
120
+
121
+ # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
122
+ def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
123
+ if latents is None:
124
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
125
+ else:
126
+ if latents.shape != shape:
127
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
128
+ latents = latents.to(device)
129
+
130
+ latents = latents * scheduler.init_noise_sigma
131
+ return latents
132
+
133
+ def _encode_prompt(
134
+ self,
135
+ prompt,
136
+ device,
137
+ num_images_per_prompt,
138
+ do_classifier_free_guidance,
139
+ ):
140
+ len(prompt) if isinstance(prompt, list) else 1
141
+
142
+ # YiYi Notes: set pad_token_id to be 0, not sure why I can't set in the config file
143
+ self.tokenizer.pad_token_id = 0
144
+ # get prompt text embeddings
145
+ text_inputs = self.tokenizer(
146
+ prompt,
147
+ padding="max_length",
148
+ max_length=self.tokenizer.model_max_length,
149
+ truncation=True,
150
+ return_tensors="pt",
151
+ )
152
+ text_input_ids = text_inputs.input_ids
153
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
154
+
155
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
156
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
157
+ logger.warning(
158
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
159
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
160
+ )
161
+
162
+ text_encoder_output = self.text_encoder(text_input_ids.to(device))
163
+ prompt_embeds = text_encoder_output.text_embeds
164
+
165
+ prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
166
+ # in Shap-E it normalize the prompt_embeds and then later rescale it
167
+ prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True)
168
+
169
+ if do_classifier_free_guidance:
170
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
171
+
172
+ # For classifier free guidance, we need to do two forward passes.
173
+ # Here we concatenate the unconditional and text embeddings into a single batch
174
+ # to avoid doing two forward passes
175
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
176
+
177
+ # Rescale the features to have unit variance
178
+ prompt_embeds = math.sqrt(prompt_embeds.shape[1]) * prompt_embeds
179
+
180
+ return prompt_embeds
181
+
182
+ @torch.no_grad()
183
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
184
+ def __call__(
185
+ self,
186
+ prompt: str,
187
+ num_images_per_prompt: int = 1,
188
+ num_inference_steps: int = 25,
189
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
190
+ latents: Optional[torch.FloatTensor] = None,
191
+ guidance_scale: float = 4.0,
192
+ frame_size: int = 64,
193
+ output_type: Optional[str] = "pil", # pil, np, latent, mesh
194
+ return_dict: bool = True,
195
+ ):
196
+ """
197
+ The call function to the pipeline for generation.
198
+
199
+ Args:
200
+ prompt (`str` or `List[str]`):
201
+ The prompt or prompts to guide the image generation.
202
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
203
+ The number of images to generate per prompt.
204
+ num_inference_steps (`int`, *optional*, defaults to 25):
205
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
206
+ expense of slower inference.
207
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
208
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
209
+ generation deterministic.
210
+ latents (`torch.FloatTensor`, *optional*):
211
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
212
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
213
+ tensor is generated by sampling using the supplied random `generator`.
214
+ guidance_scale (`float`, *optional*, defaults to 4.0):
215
+ A higher guidance scale value encourages the model to generate images closely linked to the text
216
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
217
+ frame_size (`int`, *optional*, default to 64):
218
+ The width and height of each image frame of the generated 3D output.
219
+ output_type (`str`, *optional*, defaults to `"pil"`):
220
+ The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"`
221
+ (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]).
222
+ return_dict (`bool`, *optional*, defaults to `True`):
223
+ Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain
224
+ tuple.
225
+
226
+ Examples:
227
+
228
+ Returns:
229
+ [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`:
230
+ If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned,
231
+ otherwise a `tuple` is returned where the first element is a list with the generated images.
232
+ """
233
+
234
+ if isinstance(prompt, str):
235
+ batch_size = 1
236
+ elif isinstance(prompt, list):
237
+ batch_size = len(prompt)
238
+ else:
239
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
240
+
241
+ device = self._execution_device
242
+
243
+ batch_size = batch_size * num_images_per_prompt
244
+
245
+ do_classifier_free_guidance = guidance_scale > 1.0
246
+ prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance)
247
+
248
+ # prior
249
+
250
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
251
+ timesteps = self.scheduler.timesteps
252
+
253
+ num_embeddings = self.prior.config.num_embeddings
254
+ embedding_dim = self.prior.config.embedding_dim
255
+
256
+ latents = self.prepare_latents(
257
+ (batch_size, num_embeddings * embedding_dim),
258
+ prompt_embeds.dtype,
259
+ device,
260
+ generator,
261
+ latents,
262
+ self.scheduler,
263
+ )
264
+
265
+ # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
266
+ latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
267
+
268
+ for i, t in enumerate(self.progress_bar(timesteps)):
269
+ # expand the latents if we are doing classifier free guidance
270
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
271
+ scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
272
+
273
+ noise_pred = self.prior(
274
+ scaled_model_input,
275
+ timestep=t,
276
+ proj_embedding=prompt_embeds,
277
+ ).predicted_image_embedding
278
+
279
+ # remove the variance
280
+ noise_pred, _ = noise_pred.split(
281
+ scaled_model_input.shape[2], dim=2
282
+ ) # batch_size, num_embeddings, embedding_dim
283
+
284
+ if do_classifier_free_guidance:
285
+ noise_pred_uncond, noise_pred = noise_pred.chunk(2)
286
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
287
+
288
+ latents = self.scheduler.step(
289
+ noise_pred,
290
+ timestep=t,
291
+ sample=latents,
292
+ ).prev_sample
293
+
294
+ # Offload all models
295
+ self.maybe_free_model_hooks()
296
+
297
+ if output_type not in ["np", "pil", "latent", "mesh"]:
298
+ raise ValueError(
299
+ f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}"
300
+ )
301
+
302
+ if output_type == "latent":
303
+ return ShapEPipelineOutput(images=latents)
304
+
305
+ images = []
306
+ if output_type == "mesh":
307
+ for i, latent in enumerate(latents):
308
+ mesh = self.shap_e_renderer.decode_to_mesh(
309
+ latent[None, :],
310
+ device,
311
+ )
312
+ images.append(mesh)
313
+
314
+ else:
315
+ # np, pil
316
+ for i, latent in enumerate(latents):
317
+ image = self.shap_e_renderer.decode_to_image(
318
+ latent[None, :],
319
+ device,
320
+ size=frame_size,
321
+ )
322
+ images.append(image)
323
+
324
+ images = torch.stack(images)
325
+
326
+ images = images.cpu().numpy()
327
+
328
+ if output_type == "pil":
329
+ images = [self.numpy_to_pil(image) for image in images]
330
+
331
+ if not return_dict:
332
+ return (images,)
333
+
334
+ return ShapEPipelineOutput(images=images)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Open AI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ from transformers import CLIPImageProcessor, CLIPVisionModel
22
+
23
+ from ...models import PriorTransformer
24
+ from ...schedulers import HeunDiscreteScheduler
25
+ from ...utils import (
26
+ BaseOutput,
27
+ logging,
28
+ replace_example_docstring,
29
+ )
30
+ from ...utils.torch_utils import randn_tensor
31
+ from ..pipeline_utils import DiffusionPipeline
32
+ from .renderer import ShapERenderer
33
+
34
+
35
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
+
37
+ EXAMPLE_DOC_STRING = """
38
+ Examples:
39
+ ```py
40
+ >>> from PIL import Image
41
+ >>> import torch
42
+ >>> from diffusers import DiffusionPipeline
43
+ >>> from diffusers.utils import export_to_gif, load_image
44
+
45
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
46
+
47
+ >>> repo = "openai/shap-e-img2img"
48
+ >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
49
+ >>> pipe = pipe.to(device)
50
+
51
+ >>> guidance_scale = 3.0
52
+ >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
53
+ >>> image = load_image(image_url).convert("RGB")
54
+
55
+ >>> images = pipe(
56
+ ... image,
57
+ ... guidance_scale=guidance_scale,
58
+ ... num_inference_steps=64,
59
+ ... frame_size=256,
60
+ ... ).images
61
+
62
+ >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
63
+ ```
64
+ """
65
+
66
+
67
+ @dataclass
68
+ class ShapEPipelineOutput(BaseOutput):
69
+ """
70
+ Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`].
71
+
72
+ Args:
73
+ images (`torch.FloatTensor`)
74
+ A list of images for 3D rendering.
75
+ """
76
+
77
+ images: Union[PIL.Image.Image, np.ndarray]
78
+
79
+
80
class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method from an image.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        image_encoder ([`~transformers.CLIPVisionModel`]):
            Frozen image-encoder.
        image_processor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to process images.
        scheduler ([`HeunDiscreteScheduler`]):
            A scheduler to be used in combination with the `prior` model to generate image embedding.
        shap_e_renderer ([`ShapERenderer`]):
            Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF
            rendering method.
    """

    # Offload order for sequential CPU offload; the renderer is excluded because
    # it is invoked per-latent after denoising finishes.
    model_cpu_offload_seq = "image_encoder->prior"
    _exclude_from_cpu_offload = ["shap_e_renderer"]

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        shap_e_renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            shap_e_renderer=shap_e_renderer,
        )

    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial noise and scale it by the scheduler's sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_image(
        self,
        image,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
    ):
        """Encode the conditioning image(s) into CLIP hidden states (dropping the CLS token)."""
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            # 4-D per-item tensors are already batched -> concatenate; 3-D -> stack into a batch.
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        # Duplicate embeddings for each requested generation per prompt.
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",  # pil, np, latent, mesh
        return_dict: bool = True,
    ):
        """
        The call function to the pipeline for generation.

        Args:
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
                latents as image, but if passing latents directly it is not encoded again.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            frame_size (`int`, *optional*, default to 64):
                The width and height of each image frame of the generated 3D output.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain
                tuple.

        Examples:

        Returns:
            [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images.
        """

        # Infer the batch size from the conditioning input type.
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        # CFG is active only for guidance_scale > 1 (embeddings are then doubled).
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type not in ["np", "pil", "latent", "mesh"]:
            raise ValueError(
                f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}"
            )

        # Offload all models
        self.maybe_free_model_hooks()

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        if output_type == "mesh":
            for i, latent in enumerate(latents):
                mesh = self.shap_e_renderer.decode_to_mesh(
                    latent[None, :],
                    device,
                )
                images.append(mesh)

        else:
            # np, pil
            for i, latent in enumerate(latents):
                image = self.shap_e_renderer.decode_to_image(
                    latent[None, :],
                    device,
                    size=frame_size,
                )
                images.append(image)

            images = torch.stack(images)

            images = images.cpu().numpy()

            if output_type == "pil":
                images = [self.numpy_to_pil(image) for image in images]

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/shap_e/renderer.py ADDED
@@ -0,0 +1,1050 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Open AI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from dataclasses import dataclass
17
+ from typing import Dict, Optional, Tuple
18
+
19
+ import numpy as np
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from torch import nn
23
+
24
+ from ...configuration_utils import ConfigMixin, register_to_config
25
+ from ...models import ModelMixin
26
+ from ...utils import BaseOutput
27
+ from .camera import create_pan_cameras
28
+
29
+
30
def sample_pmf(pmf: torch.Tensor, n_samples: int) -> torch.Tensor:
    r"""
    Draw ``n_samples`` indices, with replacement, from a discrete distribution.

    The i-th bin is assumed to have mass ``pmf[..., i, 0]``.

    Args:
        pmf: [batch_size, *shape, support_size, 1] where (pmf.sum(dim=-2) == 1).all()
        n_samples: number of samples to draw per distribution

    Return:
        indices sampled with replacement, shape [batch_size, *shape, n_samples, 1]
    """

    *leading_dims, support_size, trailing_dim = pmf.shape
    assert trailing_dim == 1

    # Inverse-CDF sampling: a uniform draw in [0, 1) falls into bin i with
    # probability pmf[i].
    flat_cdf = pmf.view(-1, support_size).cumsum(dim=1)
    uniforms = torch.rand(flat_cdf.shape[0], n_samples, device=flat_cdf.device)
    sampled = torch.searchsorted(flat_cdf, uniforms)

    # Clamp guards against round-off pushing an index past the last bin.
    return sampled.view(*leading_dims, n_samples, 1).clamp(0, support_size - 1)
51
+
52
+
53
def posenc_nerf(x: torch.Tensor, min_deg: int = 0, max_deg: int = 15) -> torch.Tensor:
    """
    Concatenate ``x`` with its NeRF-style sinusoidal positional encodings.

    Reference: https://arxiv.org/pdf/2210.04628.pdf
    """
    if min_deg == max_deg:
        # No frequency bands requested: the encoding is the identity.
        return x

    freqs = 2.0 ** torch.arange(min_deg, max_deg, dtype=x.dtype, device=x.device)
    *batch_shape, n_dims = x.shape
    scaled = (x.reshape(-1, 1, n_dims) * freqs.view(1, -1, 1)).reshape(*batch_shape, -1)
    assert scaled.shape[-1] == n_dims * (max_deg - min_deg)
    # sin(t) and sin(t + pi/2) == cos(t), computed in one fused call.
    sinusoids = torch.cat([scaled, scaled + math.pi / 2.0], axis=-1).sin()
    return torch.cat([x, sinusoids], dim=-1)
68
+
69
+
70
def encode_position(position):
    """Encode spatial positions with the full 15-band NeRF positional encoding."""
    min_deg, max_deg = 0, 15
    return posenc_nerf(position, min_deg=min_deg, max_deg=max_deg)
72
+
73
+
74
def encode_direction(position, direction=None):
    """
    Encode view directions with an 8-band NeRF positional encoding.

    When no direction is supplied, return a correctly-shaped all-zero encoding
    (shaped from `position`, which shares the same last dimension) so
    downstream layers keep a fixed input width.
    """
    if direction is not None:
        return posenc_nerf(direction, min_deg=0, max_deg=8)
    return torch.zeros_like(posenc_nerf(position, min_deg=0, max_deg=8))
79
+
80
+
81
+ def _sanitize_name(x: str) -> str:
82
+ return x.replace(".", "__")
83
+
84
+
85
def integrate_samples(volume_range, ts, density, channels):
    r"""
    Function integrating the model output.

    Args:
        volume_range: Specifies the integral range [t0, t1]
        ts: timesteps
        density: torch.Tensor [batch_size, *shape, n_samples, 1]
        channels: torch.Tensor [batch_size, *shape, n_samples, n_channels]
    returns:
        channels: integrated rgb output weights: torch.Tensor [batch_size, *shape, n_samples, 1] (density
        *transmittance)[i] weight for each rgb output at [..., i, :]. transmittance: transmittance of this volume
        )
    """

    # 1. Calculate the weights
    _, _, dt = volume_range.partition(ts)
    # Optical depth contributed by each interval: density * interval length.
    ddensity = density * dt

    # Cumulative optical depth along the ray; the last entry gives the
    # transmittance of the whole volume (Beer-Lambert).
    mass = torch.cumsum(ddensity, dim=-2)
    transmittance = torch.exp(-mass[..., -1, :])

    # alpha[i]: probability of absorption inside interval i.
    alphas = 1.0 - torch.exp(-ddensity)
    # Ts[i]: probability of reaching interval i unabsorbed (shifted cumulative mass).
    Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2))
    # This is the probability of light hitting and reflecting off of
    # something at depth [..., i, :].
    weights = alphas * Ts

    # 2. Integrate channels
    channels = torch.sum(channels * weights, dim=-2)

    return channels, weights, transmittance
117
+
118
+
119
def volume_query_points(volume, grid_size):
    """
    Return the ``grid_size**3`` points of a regular grid spanning ``volume``'s bounding box.

    Points are ordered with z varying fastest, then y, then x.
    """
    device = volume.bbox_min.device
    flat = torch.arange(grid_size**3, device=device)
    # Decompose the flat index into integer (x, y, z) grid coordinates.
    z_coords = flat % grid_size
    y_coords = torch.div(flat, grid_size, rounding_mode="trunc") % grid_size
    x_coords = torch.div(flat, grid_size**2, rounding_mode="trunc") % grid_size
    grid_coords = torch.stack([x_coords, y_coords, z_coords], dim=1)
    # Map [0, grid_size - 1] linearly onto [bbox_min, bbox_max] per axis.
    unit = grid_coords.float() / (grid_size - 1)
    return unit * (volume.bbox_max - volume.bbox_min) + volume.bbox_min
126
+
127
+
128
+ def _convert_srgb_to_linear(u: torch.Tensor):
129
+ return torch.where(u <= 0.04045, u / 12.92, ((u + 0.055) / 1.055) ** 2.4)
130
+
131
+
132
def _create_flat_edge_indices(
    flat_cube_indices: torch.Tensor,
    grid_size: Tuple[int, int, int],
):
    """
    Map each grid cube to the 12 global indices of its edges.

    Edges are enumerated axis-by-axis over the whole grid: all x-spanning edges
    first, then y-spanning edges (starting at ``y_offset``), then z-spanning
    edges (starting at ``z_offset``).

    Args:
        flat_cube_indices: (num_cubes, 3) integer tensor of per-cube (x, y, z) coordinates.
        grid_size: number of grid points along each axis.

    Returns:
        (num_cubes, 12) tensor of global edge indices: columns 0-3 are x-edges,
        4-7 are y-edges, 8-11 are z-edges.
    """
    num_xs = (grid_size[0] - 1) * grid_size[1] * grid_size[2]
    y_offset = num_xs
    num_ys = grid_size[0] * (grid_size[1] - 1) * grid_size[2]
    z_offset = num_xs + num_ys
    return torch.stack(
        [
            # Edges spanning x-axis.
            flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
            + flat_cube_indices[:, 1] * grid_size[2]
            + flat_cube_indices[:, 2],
            flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
            + (flat_cube_indices[:, 1] + 1) * grid_size[2]
            + flat_cube_indices[:, 2],
            flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
            + flat_cube_indices[:, 1] * grid_size[2]
            + flat_cube_indices[:, 2]
            + 1,
            flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
            + (flat_cube_indices[:, 1] + 1) * grid_size[2]
            + flat_cube_indices[:, 2]
            + 1,
            # Edges spanning y-axis.
            (
                y_offset
                + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2]
                + flat_cube_indices[:, 1] * grid_size[2]
                + flat_cube_indices[:, 2]
            ),
            (
                y_offset
                + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2]
                + flat_cube_indices[:, 1] * grid_size[2]
                + flat_cube_indices[:, 2]
            ),
            (
                y_offset
                + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2]
                + flat_cube_indices[:, 1] * grid_size[2]
                + flat_cube_indices[:, 2]
                + 1
            ),
            (
                y_offset
                + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2]
                + flat_cube_indices[:, 1] * grid_size[2]
                + flat_cube_indices[:, 2]
                + 1
            ),
            # Edges spanning z-axis.
            (
                z_offset
                + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1)
                + flat_cube_indices[:, 1] * (grid_size[2] - 1)
                + flat_cube_indices[:, 2]
            ),
            (
                z_offset
                + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1)
                + flat_cube_indices[:, 1] * (grid_size[2] - 1)
                + flat_cube_indices[:, 2]
            ),
            (
                z_offset
                + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1)
                + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1)
                + flat_cube_indices[:, 2]
            ),
            (
                z_offset
                + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1)
                + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1)
                + flat_cube_indices[:, 2]
            ),
        ],
        dim=-1,
    )
212
+
213
+
214
class VoidNeRFModel(nn.Module):
    """
    Default empty-space model: every query point is rendered as a constant background color.
    """

    def __init__(self, background, channel_scale=255.0):
        super().__init__()
        # Normalize raw channel values (e.g. 0-255 RGB) into [0, 1].
        bg = nn.Parameter(torch.from_numpy(np.array(background)).to(dtype=torch.float32) / channel_scale)
        self.register_buffer("background", bg)

    def forward(self, position):
        bg = self.background[None].to(position.device)

        batch_shape = position.shape[:-1]
        n_channels = bg.shape[-1]
        middle_ones = [1] * (len(batch_shape) - 1)
        # Broadcast the single background color across every query position.
        expanded = bg.view(bg.shape[0], *middle_ones, n_channels)
        return torch.broadcast_to(expanded, [*batch_shape, n_channels])
234
+
235
+
236
@dataclass
class VolumeRange:
    # Per-ray entry time, exit time, and hit flag; all three share one shape.
    t0: torch.Tensor
    t1: torch.Tensor
    intersected: torch.Tensor

    def __post_init__(self):
        assert self.t0.shape == self.t1.shape == self.intersected.shape

    def partition(self, ts):
        """
        Partitions t0 and t1 into n_samples intervals.

        Args:
            ts: [batch_size, *shape, n_samples, 1]

        Return:

            lower: [batch_size, *shape, n_samples, 1] upper: [batch_size, *shape, n_samples, 1] delta: [batch_size,
            *shape, n_samples, 1]

        where
            ts \\in [lower, upper] deltas = upper - lower
        """
        # Interval boundaries are the midpoints between consecutive samples,
        # capped by the volume's entry/exit times at the two ends.
        midpoints = 0.5 * (ts[..., :-1, :] + ts[..., 1:, :])
        lower = torch.cat([self.t0[..., None, :], midpoints], dim=-2)
        upper = torch.cat([midpoints, self.t1[..., None, :]], dim=-2)
        delta = upper - lower
        assert lower.shape == upper.shape == delta.shape == ts.shape
        return lower, upper, delta
267
+
268
+
269
class BoundingBoxVolume(nn.Module):
    """
    Axis-aligned bounding box defined by the two opposite corners.
    """

    def __init__(
        self,
        *,
        bbox_min,
        bbox_max,
        min_dist: float = 0.0,
        min_t_range: float = 1e-3,
    ):
        """
        Args:
            bbox_min: the left/bottommost corner of the bounding box
            bbox_max: the other corner of the bounding box
            min_dist: all rays should start at least this distance away from the origin.
        """
        super().__init__()

        self.min_dist = min_dist
        self.min_t_range = min_t_range

        self.bbox_min = torch.tensor(bbox_min)
        self.bbox_max = torch.tensor(bbox_max)
        # (2, 3): stacked min/max corners, used for the slab intersection test.
        self.bbox = torch.stack([self.bbox_min, self.bbox_max])
        assert self.bbox.shape == (2, 3)
        assert min_dist >= 0.0
        assert min_t_range > 0.0

    def intersect(
        self,
        origin: torch.Tensor,
        direction: torch.Tensor,
        t0_lower: Optional[torch.Tensor] = None,
        epsilon=1e-6,
    ):
        """
        Args:
            origin: [batch_size, *shape, 3]
            direction: [batch_size, *shape, 3]
            t0_lower: Optional [batch_size, *shape, 1] lower bound of t0 when intersecting this volume.
            params: Optional meta parameters in case Volume is parametric
            epsilon: to stabilize calculations

        Return:
            A tuple of (t0, t1, intersected) where each has a shape [batch_size, *shape, 1]. If a ray intersects with
            the volume, `o + td` is in the volume for all t in [t0, t1]. If the volume is bounded, t1 is guaranteed to
            be on the boundary of the volume.
        """

        batch_size, *shape, _ = origin.shape
        ones = [1] * len(shape)
        bbox = self.bbox.view(1, *ones, 2, 3).to(origin.device)

        def _safe_divide(a, b, epsilon=1e-6):
            # Nudge the denominator away from zero while preserving its sign.
            return a / torch.where(b < 0, b - epsilon, b + epsilon)

        # Slab method: per-axis parameter values where the ray crosses the
        # min and max planes of the box.
        ts = _safe_divide(bbox - origin[..., None, :], direction[..., None, :], epsilon=epsilon)

        # Cases to think about:
        #
        # 1. t1 <= t0: the ray does not pass through the AABB.
        # 2. t0 < t1 <= 0: the ray intersects but the BB is behind the origin.
        # 3. t0 <= 0 <= t1: the ray starts from inside the BB
        # 4. 0 <= t0 < t1: the ray is not inside and intersects with the BB twice.
        #
        # 1 and 4 are clearly handled from t0 < t1 below.
        # Making t0 at least min_dist (>= 0) takes care of 2 and 3.
        t0 = ts.min(dim=-2).values.max(dim=-1, keepdim=True).values.clamp(self.min_dist)
        t1 = ts.max(dim=-2).values.min(dim=-1, keepdim=True).values
        assert t0.shape == t1.shape == (batch_size, *shape, 1)
        if t0_lower is not None:
            assert t0.shape == t0_lower.shape
            t0 = torch.maximum(t0, t0_lower)

        # Require a minimum overlap so grazing hits don't count as intersections.
        intersected = t0 + self.min_t_range < t1
        # Missed rays get the harmless placeholder range [0, 1].
        t0 = torch.where(intersected, t0, torch.zeros_like(t0))
        t1 = torch.where(intersected, t1, torch.ones_like(t1))

        return VolumeRange(t0=t0, t1=t1, intersected=intersected)
351
+
352
+
353
class StratifiedRaySampler(nn.Module):
    """
    Instead of fixed intervals, a sample is drawn uniformly at random from each interval.
    """

    def __init__(self, depth_mode: str = "linear"):
        """
        :param depth_mode: linear samples ts linearly in depth. harmonic ensures
            closer points are sampled more densely.
        """
        # Fix: an nn.Module subclass must initialize the base class; the
        # original skipped super().__init__(), leaving the module without its
        # internal parameter/buffer/submodule registries.
        super().__init__()
        self.depth_mode = depth_mode
        assert self.depth_mode in ("linear", "geometric", "harmonic")

    def sample(
        self,
        t0: torch.Tensor,
        t1: torch.Tensor,
        n_samples: int,
        epsilon: float = 1e-3,
    ) -> torch.Tensor:
        """
        Args:
            t0: start time has shape [batch_size, *shape, 1]
            t1: finish time has shape [batch_size, *shape, 1]
            n_samples: number of ts to sample
            epsilon: lower clamp applied before log/reciprocal in the
                "geometric" and "harmonic" modes to avoid division by zero.
        Return:
            sampled ts of shape [batch_size, *shape, n_samples, 1]
        """
        ones = [1] * (len(t0.shape) - 1)
        ts = torch.linspace(0, 1, n_samples).view(*ones, n_samples).to(t0.dtype).to(t0.device)

        if self.depth_mode == "linear":
            ts = t0 * (1.0 - ts) + t1 * ts
        elif self.depth_mode == "geometric":
            # Interpolate in log-space so bins grow geometrically with depth.
            ts = (t0.clamp(epsilon).log() * (1.0 - ts) + t1.clamp(epsilon).log() * ts).exp()
        elif self.depth_mode == "harmonic":
            # The original NeRF recommends this interpolation scheme for
            # spherical scenes, but there could be some weird edge cases when
            # the observer crosses from the inner to outer volume.
            ts = 1.0 / (1.0 / t0.clamp(epsilon) * (1.0 - ts) + 1.0 / t1.clamp(epsilon) * ts)

        mids = 0.5 * (ts[..., 1:] + ts[..., :-1])
        upper = torch.cat([mids, t1], dim=-1)
        lower = torch.cat([t0, mids], dim=-1)
        # yiyi notes: add a random seed here for testing, don't forget to remove
        torch.manual_seed(0)
        t_rand = torch.rand_like(ts)

        # One uniform jitter per bin keeps stratification: sample i stays inside bin i.
        ts = lower + (upper - lower) * t_rand
        return ts.unsqueeze(-1)
403
+
404
+
405
class ImportanceRaySampler(nn.Module):
    """
    Given the initial estimate of densities, this samples more from regions/bins expected to have objects.
    """

    def __init__(
        self,
        volume_range: VolumeRange,
        ts: torch.Tensor,
        weights: torch.Tensor,
        blur_pool: bool = False,
        alpha: float = 1e-5,
    ):
        """
        Args:
            volume_range: the range in which a ray intersects the given volume.
            ts: earlier samples from the coarse rendering step
            weights: discretized version of density * transmittance
            blur_pool: if true, use 2-tap max + 2-tap blur filter from mip-NeRF.
            alpha: small value to add to weights.
        """
        # Fix: an nn.Module subclass must initialize the base class; the
        # original skipped super().__init__(), leaving the module without its
        # internal parameter/buffer/submodule registries.
        super().__init__()
        self.volume_range = volume_range
        # Detach the coarse-pass tensors so importance sampling never
        # backpropagates into the first rendering pass.
        self.ts = ts.clone().detach()
        self.weights = weights.clone().detach()
        self.blur_pool = blur_pool
        self.alpha = alpha

    @torch.no_grad()
    def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor:
        """
        Args:
            t0: start time has shape [batch_size, *shape, 1]
            t1: finish time has shape [batch_size, *shape, 1]
            n_samples: number of ts to sample
        Return:
            sampled ts of shape [batch_size, *shape, n_samples, 1]
        """
        lower, upper, _ = self.volume_range.partition(self.ts)

        batch_size, *shape, n_coarse_samples, _ = self.ts.shape

        weights = self.weights
        if self.blur_pool:
            # mip-NeRF smoothing: 2-tap max (after edge padding) then 2-tap blur.
            padded = torch.cat([weights[..., :1, :], weights, weights[..., -1:, :]], dim=-2)
            maxes = torch.maximum(padded[..., :-1, :], padded[..., 1:, :])
            weights = 0.5 * (maxes[..., :-1, :] + maxes[..., 1:, :])
        # alpha keeps every bin reachable even where the coarse weight is zero.
        weights = weights + self.alpha
        pmf = weights / weights.sum(dim=-2, keepdim=True)
        inds = sample_pmf(pmf, n_samples)
        assert inds.shape == (batch_size, *shape, n_samples, 1)
        assert (inds >= 0).all() and (inds < n_coarse_samples).all()

        # Jitter uniformly within each selected coarse bin, then sort along the ray.
        t_rand = torch.rand(inds.shape, device=inds.device)
        lower_ = torch.gather(lower, -2, inds)
        upper_ = torch.gather(upper, -2, inds)

        ts = lower_ + (upper_ - lower_) * t_rand
        ts = torch.sort(ts, dim=-2).values
        return ts
464
+
465
+
466
@dataclass
class MeshDecoderOutput(BaseOutput):
    """
    A 3D triangle mesh with optional data at the vertices and faces.

    Args:
        verts (`torch.Tensor` of shape `(N, 3)`):
            array of vertex coordinates
        faces (`torch.Tensor` of shape `(N, 3)`):
            array of triangles, pointing to indices in verts.
        vertex_channels (Dict):
            vertex coordinates for each color channel
    """

    verts: torch.Tensor
    faces: torch.Tensor
    vertex_channels: Dict[str, torch.Tensor]
483
+
484
+
485
class MeshDecoder(nn.Module):
    """
    Construct meshes from Signed distance functions (SDFs) using marching cubes method
    """

    def __init__(self):
        super().__init__()
        # Marching-cubes lookup tables. They are registered as zero-filled
        # buffers here; presumably the real case/mask tables are loaded from a
        # checkpoint into these buffers — TODO confirm against the loader.
        cases = torch.zeros(256, 5, 3, dtype=torch.long)
        masks = torch.zeros(256, 5, dtype=torch.bool)

        self.register_buffer("cases", cases)
        self.register_buffer("masks", masks)

    def forward(self, field: torch.Tensor, min_point: torch.Tensor, size: torch.Tensor):
        """
        For a signed distance field, produce a mesh using marching cubes.

        :param field: a 3D tensor of field values, where negative values correspond
            to the outside of the shape. The dimensions correspond to the x, y, and z directions, respectively.
        :param min_point: a tensor of shape [3] containing the point corresponding
            to (0, 0, 0) in the field.
        :param size: a tensor of shape [3] containing the per-axis distance from the
            (0, 0, 0) field corner and the (-1, -1, -1) field corner.
        """
        assert len(field.shape) == 3, "input must be a 3D scalar field"
        dev = field.device

        cases = self.cases.to(dev)
        masks = self.masks.to(dev)

        min_point = min_point.to(dev)
        size = size.to(dev)

        grid_size = field.shape
        grid_size_tensor = torch.tensor(grid_size).to(size)

        # Create bitmasks between 0 and 255 (inclusive) indicating the state
        # of the eight corners of each cube.
        bitmasks = (field > 0).to(torch.uint8)
        bitmasks = bitmasks[:-1, :, :] | (bitmasks[1:, :, :] << 1)
        bitmasks = bitmasks[:, :-1, :] | (bitmasks[:, 1:, :] << 2)
        bitmasks = bitmasks[:, :, :-1] | (bitmasks[:, :, 1:] << 4)

        # Compute corner coordinates across the entire grid.
        corner_coords = torch.empty(*grid_size, 3, device=dev, dtype=field.dtype)
        corner_coords[range(grid_size[0]), :, :, 0] = torch.arange(grid_size[0], device=dev, dtype=field.dtype)[
            :, None, None
        ]
        corner_coords[:, range(grid_size[1]), :, 1] = torch.arange(grid_size[1], device=dev, dtype=field.dtype)[
            :, None
        ]
        corner_coords[:, :, range(grid_size[2]), 2] = torch.arange(grid_size[2], device=dev, dtype=field.dtype)

        # Compute all vertices across all edges in the grid, even though we will
        # throw some out later. We have (X-1)*Y*Z + X*(Y-1)*Z + X*Y*(Z-1) vertices.
        # These are all midpoints, and don't account for interpolation (which is
        # done later based on the used edge midpoints).
        edge_midpoints = torch.cat(
            [
                ((corner_coords[:-1] + corner_coords[1:]) / 2).reshape(-1, 3),
                ((corner_coords[:, :-1] + corner_coords[:, 1:]) / 2).reshape(-1, 3),
                ((corner_coords[:, :, :-1] + corner_coords[:, :, 1:]) / 2).reshape(-1, 3),
            ],
            dim=0,
        )

        # Create a flat array of [X, Y, Z] indices for each cube.
        cube_indices = torch.zeros(
            grid_size[0] - 1, grid_size[1] - 1, grid_size[2] - 1, 3, device=dev, dtype=torch.long
        )
        cube_indices[range(grid_size[0] - 1), :, :, 0] = torch.arange(grid_size[0] - 1, device=dev)[:, None, None]
        cube_indices[:, range(grid_size[1] - 1), :, 1] = torch.arange(grid_size[1] - 1, device=dev)[:, None]
        cube_indices[:, :, range(grid_size[2] - 1), 2] = torch.arange(grid_size[2] - 1, device=dev)
        flat_cube_indices = cube_indices.reshape(-1, 3)

        # Create a flat array mapping each cube to 12 global edge indices.
        edge_indices = _create_flat_edge_indices(flat_cube_indices, grid_size)

        # Apply the LUT to figure out the triangles.
        flat_bitmasks = bitmasks.reshape(-1).long()  # must cast to long for indexing to believe this not a mask
        local_tris = cases[flat_bitmasks]
        local_masks = masks[flat_bitmasks]
        # Compute the global edge indices for the triangles.
        global_tris = torch.gather(edge_indices, 1, local_tris.reshape(local_tris.shape[0], -1)).reshape(
            local_tris.shape
        )
        # Select the used triangles for each cube.
        selected_tris = global_tris.reshape(-1, 3)[local_masks.reshape(-1)]

        # Now we have a bunch of indices into the full list of possible vertices,
        # but we want to reduce this list to only the used vertices.
        used_vertex_indices = torch.unique(selected_tris.view(-1))
        used_edge_midpoints = edge_midpoints[used_vertex_indices]
        old_index_to_new_index = torch.zeros(len(edge_midpoints), device=dev, dtype=torch.long)
        old_index_to_new_index[used_vertex_indices] = torch.arange(
            len(used_vertex_indices), device=dev, dtype=torch.long
        )

        # Rewrite the triangles to use the new indices
        faces = torch.gather(old_index_to_new_index, 0, selected_tris.view(-1)).reshape(selected_tris.shape)

        # Compute the actual interpolated coordinates corresponding to edge midpoints.
        # floor/ceil of a midpoint recover the edge's two endpoint corners.
        v1 = torch.floor(used_edge_midpoints).to(torch.long)
        v2 = torch.ceil(used_edge_midpoints).to(torch.long)
        s1 = field[v1[:, 0], v1[:, 1], v1[:, 2]]
        s2 = field[v2[:, 0], v2[:, 1], v2[:, 2]]
        # Map integer grid coordinates into world space.
        p1 = (v1.float() / (grid_size_tensor - 1)) * size + min_point
        p2 = (v2.float() / (grid_size_tensor - 1)) * size + min_point
        # The signs of s1 and s2 should be different. We want to find
        # t such that t*s2 + (1-t)*s1 = 0.
        t = (s1 / (s1 - s2))[:, None]
        verts = t * p2 + (1 - t) * p1

        return MeshDecoderOutput(verts=verts, faces=faces, vertex_channels=None)
599
+
600
+
601
@dataclass
class MLPNeRFModelOutput(BaseOutput):
    """Output container for `MLPNeRSTFModel.forward`."""

    # Activated (ReLU'd) per-sample volume densities.
    density: torch.Tensor
    # tanh-activated SDF values (consumed by mesh decoding, not by NeRF rendering).
    signed_distance: torch.Tensor
    # sigmoid-activated color channels for the selected rendering mode/level.
    channels: torch.Tensor
    # The ray timesteps the model was queried at, passed through unchanged.
    ts: torch.Tensor
607
+
608
+
609
class MLPNeRSTFModel(ModelMixin, ConfigMixin):
    """Joint NeRF + STF head used by the Shap-E renderer.

    A small MLP maps positionally-encoded query points (with view directions
    optionally injected at layer ``insert_direction_at``) to ``n_output``
    channels, which :meth:`map_indices_to_keys` slices into an SDF value,
    coarse/fine densities, STF texture colors, and coarse/fine NeRF colors.
    """

    @register_to_config
    def __init__(
        self,
        d_hidden: int = 256,
        n_output: int = 12,
        n_hidden_layers: int = 6,
        act_fn: str = "swish",
        insert_direction_at: int = 4,
    ):
        super().__init__()

        # Find out the dimension of the encoded position and direction by
        # running the encoders on a dummy 3D input.
        dummy = torch.eye(1, 3)
        d_posenc_pos = encode_position(position=dummy).shape[-1]
        d_posenc_dir = encode_direction(position=dummy).shape[-1]

        mlp_widths = [d_hidden] * n_hidden_layers
        input_widths = [d_posenc_pos] + mlp_widths
        output_widths = mlp_widths + [n_output]

        # The encoded view direction is concatenated onto the input of layer
        # `insert_direction_at`, so that layer's input must be widened.
        if insert_direction_at is not None:
            input_widths[insert_direction_at] += d_posenc_dir

        self.mlp = nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in zip(input_widths, output_widths)])

        if act_fn == "swish":
            # "swish" is SiLU; use torch's implementation directly rather than
            # wrapping it in a lambda.
            self.activation = F.silu
        else:
            raise ValueError(f"Unsupported activation function {act_fn}")

        self.sdf_activation = torch.tanh
        self.density_activation = torch.nn.functional.relu
        self.channel_activation = torch.sigmoid

    def map_indices_to_keys(self, output):
        """Slice the raw ``n_output``-channel MLP output into named heads."""
        h_map = {
            "sdf": (0, 1),
            "density_coarse": (1, 2),
            "density_fine": (2, 3),
            "stf": (3, 6),
            "nerf_coarse": (6, 9),
            "nerf_fine": (9, 12),
        }

        mapped_output = {k: output[..., start:end] for k, (start, end) in h_map.items()}

        return mapped_output

    def forward(self, *, position, direction, ts, nerf_level="coarse", rendering_mode="nerf"):
        """Evaluate the MLP at the given query points.

        Args:
            position: query positions; last dimension must be 3.
            direction: optional view directions, injected at layer
                ``insert_direction_at``.
            ts: ray timesteps, passed through unchanged to the output.
            nerf_level: ``"coarse"`` selects the coarse density/color heads;
                any other value selects the fine heads.
            rendering_mode: ``"nerf"`` or ``"stf"``; selects the color head.

        Returns:
            `MLPNeRFModelOutput` with activated density, SDF and color channels.

        Raises:
            ValueError: if `rendering_mode` is neither ``"nerf"`` nor ``"stf"``.
        """
        h = encode_position(position)

        h_preact = h
        h_directionless = None
        for i, layer in enumerate(self.mlp):
            if i == self.config.insert_direction_at:  # 4 in the default config
                h_directionless = h_preact
                h_direction = encode_direction(position, direction=direction)
                h = torch.cat([h, h_direction], dim=-1)

            h = layer(h)

            h_preact = h

            # No activation after the final (output) layer.
            if i < len(self.mlp) - 1:
                h = self.activation(h)

        h_final = h
        if h_directionless is None:
            h_directionless = h_preact

        activation = self.map_indices_to_keys(h_final)

        if nerf_level == "coarse":
            h_density = activation["density_coarse"]
        else:
            h_density = activation["density_fine"]

        if rendering_mode == "nerf":
            if nerf_level == "coarse":
                h_channels = activation["nerf_coarse"]
            else:
                h_channels = activation["nerf_fine"]
        elif rendering_mode == "stf":
            h_channels = activation["stf"]
        else:
            # Previously an unknown mode crashed later with UnboundLocalError;
            # fail fast with a clear message instead.
            raise ValueError(f"Unsupported rendering mode: {rendering_mode}")

        density = self.density_activation(h_density)
        signed_distance = self.sdf_activation(activation["sdf"])
        channels = self.channel_activation(h_channels)

        # NOTE: signed_distance is unused by NeRF rendering callers but is
        # consumed by mesh decoding (decode_to_mesh).
        return MLPNeRFModelOutput(density=density, signed_distance=signed_distance, channels=channels, ts=ts)
706
+
707
+
708
+ class ChannelsProj(nn.Module):
709
+ def __init__(
710
+ self,
711
+ *,
712
+ vectors: int,
713
+ channels: int,
714
+ d_latent: int,
715
+ ):
716
+ super().__init__()
717
+ self.proj = nn.Linear(d_latent, vectors * channels)
718
+ self.norm = nn.LayerNorm(channels)
719
+ self.d_latent = d_latent
720
+ self.vectors = vectors
721
+ self.channels = channels
722
+
723
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
724
+ x_bvd = x
725
+ w_vcd = self.proj.weight.view(self.vectors, self.channels, self.d_latent)
726
+ b_vc = self.proj.bias.view(1, self.vectors, self.channels)
727
+ h = torch.einsum("bvd,vcd->bvc", x_bvd, w_vcd)
728
+ h = self.norm(h)
729
+
730
+ h = h + b_vc
731
+ return h
732
+
733
+
734
class ShapEParamsProjModel(ModelMixin, ConfigMixin):
    """
    Project the latent representation of a 3D asset onto the weights of a
    multi-layer perceptron (MLP): one `ChannelsProj` head per named parameter.
    """

    @register_to_config
    def __init__(
        self,
        *,
        param_names: Tuple[str] = (
            "nerstf.mlp.0.weight",
            "nerstf.mlp.1.weight",
            "nerstf.mlp.2.weight",
            "nerstf.mlp.3.weight",
        ),
        param_shapes: Tuple[Tuple[int]] = (
            (256, 93),
            (256, 256),
            (256, 256),
            (256, 256),
        ),
        d_latent: int = 1024,
    ):
        super().__init__()

        # Every parameter name needs a matching target shape.
        if len(param_names) != len(param_shapes):
            raise ValueError("Must provide same number of `param_names` as `param_shapes`")
        self.projections = nn.ModuleDict({})
        for name, (vectors, channels) in zip(param_names, param_shapes):
            self.projections[_sanitize_name(name)] = ChannelsProj(
                vectors=vectors,
                channels=channels,
                d_latent=d_latent,
            )

    def forward(self, x: torch.Tensor):
        # Consecutive slices of `x` along dim 1 feed the per-parameter
        # projections; each slice is `rows` (= shape[0]) entries wide.
        out = {}
        offset = 0
        for name, shape in zip(self.config.param_names, self.config.param_shapes):
            rows = shape[0]
            chunk = x[:, offset : offset + rows]
            out[name] = self.projections[_sanitize_name(name)](chunk).reshape(len(x), *shape)
            offset += rows
        return out
782
+
783
+
784
class ShapERenderer(ModelMixin, ConfigMixin):
    """Neural renderer for Shap-E latents.

    Projects a generated latent onto the weights of an internal NeRF/STF MLP
    (``params_proj`` -> ``mlp``) and decodes it either to images (volumetric
    NeRF rendering in `decode_to_image`) or to a textured mesh (marching
    cubes + STF texture queries in `decode_to_mesh`).
    """

    @register_to_config
    def __init__(
        self,
        *,
        param_names: Tuple[str] = (
            "nerstf.mlp.0.weight",
            "nerstf.mlp.1.weight",
            "nerstf.mlp.2.weight",
            "nerstf.mlp.3.weight",
        ),
        param_shapes: Tuple[Tuple[int]] = (
            (256, 93),
            (256, 256),
            (256, 256),
            (256, 256),
        ),
        d_latent: int = 1024,
        d_hidden: int = 256,
        n_output: int = 12,
        n_hidden_layers: int = 6,
        act_fn: str = "swish",
        insert_direction_at: int = 4,
        background: Tuple[float] = (
            255.0,
            255.0,
            255.0,
        ),
    ):
        super().__init__()

        # Maps a latent onto per-layer MLP weights (keys like "nerstf.mlp.0.weight").
        self.params_proj = ShapEParamsProjModel(
            param_names=param_names,
            param_shapes=param_shapes,
            d_latent=d_latent,
        )
        self.mlp = MLPNeRSTFModel(d_hidden, n_output, n_hidden_layers, act_fn, insert_direction_at)
        # Constant-color background; channel_scale matches the 0-255 value range.
        self.void = VoidNeRFModel(background=background, channel_scale=255.0)
        # Rendering volume: the cube [-1, 1]^3.
        self.volume = BoundingBoxVolume(bbox_max=[1.0, 1.0, 1.0], bbox_min=[-1.0, -1.0, -1.0])
        self.mesh_decoder = MeshDecoder()

    @torch.no_grad()
    def render_rays(self, rays, sampler, n_samples, prev_model_out=None, render_with_direction=False):
        """
        Perform volumetric rendering over a partition of possible t's in the union of rendering volumes (written below
        with some abuse of notations)

        C(r) := sum(
            transmittance(t[i]) * integrate(
                lambda t: density(t) * channels(t) * transmittance(t), [t[i], t[i + 1]],
            ) for i in range(len(parts))
        ) + transmittance(t[-1]) * void_model(t[-1]).channels

        where

        1) transmittance(s) := exp(-integrate(density, [t[0], s])) calculates the probability of light passing through
        the volume specified by [t[0], s]. (transmittance of 1 means light can pass freely) 2) density and channels are
        obtained by evaluating the appropriate part.model at time t. 3) [t[i], t[i + 1]] is defined as the range of t
        where the ray intersects (parts[i].volume \\ union(part.volume for part in parts[:i])) at the surface of the
        shell (if bounded). If the ray does not intersect, the integral over this segment is evaluated as 0 and
        transmittance(t[i + 1]) := transmittance(t[i]). 4) The last term is integration to infinity (e.g. [t[-1],
        math.inf]) that is evaluated by the void_model (i.e. we consider this space to be empty).

        args:
            rays: [batch_size x ... x 2 x 3] origin and direction. sampler: disjoint volume integrals. n_samples:
            number of ts to sample. prev_model_out: model output from the previous (coarse) rendering step, whose
            ts are merged into this pass for fine sampling.

        :return: A tuple of
            - `channels`
            - An importance sampler for additional fine-grained rendering
            - raw model output
        """
        origin, direction = rays[..., 0, :], rays[..., 1, :]

        # Integrate over [t[i], t[i + 1]]

        # 1. Intersect the rays with the current volume and sample ts to integrate along.
        vrange = self.volume.intersect(origin, direction, t0_lower=None)
        ts = sampler.sample(vrange.t0, vrange.t1, n_samples)
        ts = ts.to(rays.dtype)

        if prev_model_out is not None:
            # Append the previous ts now before fprop because previous
            # rendering used a different model and we can't reuse the output.
            ts = torch.sort(torch.cat([ts, prev_model_out.ts], dim=-2), dim=-2).values

        batch_size, *_shape, _t0_dim = vrange.t0.shape
        _, *ts_shape, _ts_dim = ts.shape

        # 2. Get the points along the ray and query the model
        directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3])
        positions = origin.unsqueeze(-2) + ts * directions

        directions = directions.to(self.mlp.dtype)
        positions = positions.to(self.mlp.dtype)

        # View directions are only fed to the MLP when explicitly requested.
        optional_directions = directions if render_with_direction else None

        model_out = self.mlp(
            position=positions,
            direction=optional_directions,
            ts=ts,
            nerf_level="coarse" if prev_model_out is None else "fine",
        )

        # 3. Integrate the model results
        channels, weights, transmittance = integrate_samples(
            vrange, model_out.ts, model_out.density, model_out.channels
        )

        # 4. Clean up results that do not intersect with the volume.
        transmittance = torch.where(vrange.intersected, transmittance, torch.ones_like(transmittance))
        channels = torch.where(vrange.intersected, channels, torch.zeros_like(channels))
        # 5. integration to infinity (e.g. [t[-1], math.inf]) that is evaluated by the void_model (i.e. we consider this space to be empty).
        channels = channels + transmittance * self.void(origin)

        # The integration weights drive importance sampling for the fine pass.
        weighted_sampler = ImportanceRaySampler(vrange, ts=model_out.ts, weights=weights)

        return channels, weighted_sampler, model_out

    @torch.no_grad()
    def decode_to_image(
        self,
        latents,
        device,
        size: int = 64,
        ray_batch_size: int = 4096,
        n_coarse_samples=64,
        n_fine_samples=128,
    ):
        """Render a latent to a stack of view images via two-pass (coarse + fine) NeRF rendering."""
        # project the parameters from the generated latents
        projected_params = self.params_proj(latents)

        # update the mlp layers of the renderer (in-place copy into the live weights)
        for name, param in self.mlp.state_dict().items():
            if f"nerstf.{name}" in projected_params.keys():
                param.copy_(projected_params[f"nerstf.{name}"].squeeze(0))

        # create cameras object
        camera = create_pan_cameras(size)
        rays = camera.camera_rays
        rays = rays.to(device)
        # NOTE(review): integer division drops any remainder — assumes the ray
        # count is a multiple of ray_batch_size; confirm for non-default sizes.
        n_batches = rays.shape[1] // ray_batch_size

        coarse_sampler = StratifiedRaySampler()

        images = []

        for idx in range(n_batches):
            rays_batch = rays[:, idx * ray_batch_size : (idx + 1) * ray_batch_size]

            # render rays with coarse, stratified samples.
            _, fine_sampler, coarse_model_out = self.render_rays(rays_batch, coarse_sampler, n_coarse_samples)
            # Then, render with additional importance-weighted ray samples.
            channels, _, _ = self.render_rays(
                rays_batch, fine_sampler, n_fine_samples, prev_model_out=coarse_model_out
            )

            images.append(channels)

        images = torch.cat(images, dim=1)
        images = images.view(*camera.shape, camera.height, camera.width, -1).squeeze(0)

        return images

    @torch.no_grad()
    def decode_to_mesh(
        self,
        latents,
        device,
        grid_size: int = 128,
        query_batch_size: int = 4096,
        texture_channels: Tuple = ("R", "G", "B"),
    ):
        """Decode a latent to a textured mesh: SDF grid -> marching cubes -> per-vertex texture queries."""
        # 1. project the parameters from the generated latents
        projected_params = self.params_proj(latents)

        # 2. update the mlp layers of the renderer (in-place copy into the live weights)
        for name, param in self.mlp.state_dict().items():
            if f"nerstf.{name}" in projected_params.keys():
                param.copy_(projected_params[f"nerstf.{name}"].squeeze(0))

        # 3. decoding with STF rendering
        # 3.1 query the SDF values at vertices along a regular 128**3 grid

        query_points = volume_query_points(self.volume, grid_size)
        query_positions = query_points[None].repeat(1, 1, 1).to(device=device, dtype=self.mlp.dtype)

        fields = []

        # Query in batches to bound peak memory.
        for idx in range(0, query_positions.shape[1], query_batch_size):
            query_batch = query_positions[:, idx : idx + query_batch_size]

            model_out = self.mlp(
                position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf"
            )
            fields.append(model_out.signed_distance)

        # predicted SDF values
        fields = torch.cat(fields, dim=1)
        fields = fields.float()

        assert (
            len(fields.shape) == 3 and fields.shape[-1] == 1
        ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}"

        fields = fields.reshape(1, *([grid_size] * 3))

        # create grid 128 x 128 x 128
        # - force a negative border around the SDFs to close off all the models.
        full_grid = torch.zeros(
            1,
            grid_size + 2,
            grid_size + 2,
            grid_size + 2,
            device=fields.device,
            dtype=fields.dtype,
        )
        full_grid.fill_(-1.0)
        full_grid[:, 1:-1, 1:-1, 1:-1] = fields
        fields = full_grid

        # apply a differentiable implementation of Marching Cubes to construct meshs
        raw_meshes = []
        mesh_mask = []

        for field in fields:
            raw_mesh = self.mesh_decoder(field, self.volume.bbox_min, self.volume.bbox_max - self.volume.bbox_min)
            mesh_mask.append(True)
            raw_meshes.append(raw_mesh)

        mesh_mask = torch.tensor(mesh_mask, device=fields.device)
        max_vertices = max(len(m.verts) for m in raw_meshes)

        # 3.2. query the texture color head at each vertex of the resulting mesh.
        # Pad every mesh to max_vertices by wrapping indices (modulo), so the
        # batch is rectangular; the padding is sliced off again below.
        texture_query_positions = torch.stack(
            [m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes],
            dim=0,
        )
        texture_query_positions = texture_query_positions.to(device=device, dtype=self.mlp.dtype)

        textures = []

        for idx in range(0, texture_query_positions.shape[1], query_batch_size):
            query_batch = texture_query_positions[:, idx : idx + query_batch_size]

            texture_model_out = self.mlp(
                position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf"
            )
            textures.append(texture_model_out.channels)

        # predict texture color
        textures = torch.cat(textures, dim=1)

        textures = _convert_srgb_to_linear(textures)
        textures = textures.float()

        # 3.3 augument the mesh with texture data
        assert len(textures.shape) == 3 and textures.shape[-1] == len(
            texture_channels
        ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}"

        for m, texture in zip(raw_meshes, textures):
            texture = texture[: len(m.verts)]
            m.vertex_channels = dict(zip(texture_channels, texture.unbind(-1)))

        return raw_meshes[0]
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.27 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/pipeline_stable_cascade.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/pipeline_stable_cascade_combined.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/__pycache__/pipeline_stable_cascade_prior.cpython-310.pyc ADDED
Binary file (20.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Callable, Dict, List, Optional, Union
16
+
17
+ import torch
18
+ from transformers import CLIPTextModel, CLIPTokenizer
19
+
20
+ from ...models import StableCascadeUNet
21
+ from ...schedulers import DDPMWuerstchenScheduler
22
+ from ...utils import is_torch_version, logging, replace_example_docstring
23
+ from ...utils.torch_utils import randn_tensor
24
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
25
+ from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel
26
+
27
+
28
# Module-level logger following the diffusers convention.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
29
+
30
# Usage example injected into `__call__`'s docstring via @replace_example_docstring.
# Fixes: `from_pretrain` -> `from_pretrained` (the former does not exist) and
# `pipe(prompt)` -> `prior_pipe(prompt)` (`pipe` was undefined in the example).
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline

        >>> prior_pipe = StableCascadePriorPipeline.from_pretrained(
        ...     "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16
        ... ).to("cuda")
        >>> gen_pipe = StableCascadeDecoderPipeline.from_pretrained(
        ...     "stabilityai/stable-cascade", torch_dtype=torch.float16
        ... ).to("cuda")

        >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"
        >>> prior_output = prior_pipe(prompt)
        >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt)
        ```
"""
48
+
49
+
50
+ class StableCascadeDecoderPipeline(DiffusionPipeline):
51
+ """
52
+ Pipeline for generating images from the Stable Cascade model.
53
+
54
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
55
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
56
+
57
+ Args:
58
+ tokenizer (`CLIPTokenizer`):
59
+ The CLIP tokenizer.
60
+ text_encoder (`CLIPTextModel`):
61
+ The CLIP text encoder.
62
+ decoder ([`StableCascadeUNet`]):
63
+ The Stable Cascade decoder unet.
64
+ vqgan ([`PaellaVQModel`]):
65
+ The VQGAN model.
66
+ scheduler ([`DDPMWuerstchenScheduler`]):
67
+ A scheduler to be used in combination with `prior` to generate image embedding.
68
+ latent_dim_scale (float, `optional`, defaults to 10.67):
69
+ Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are
70
+ height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and
71
+ width=int(24*10.67)=256 in order to match the training conditions.
72
+ """
73
+
74
+ unet_name = "decoder"
75
+ text_encoder_name = "text_encoder"
76
+ model_cpu_offload_seq = "text_encoder->decoder->vqgan"
77
+ _callback_tensor_inputs = [
78
+ "latents",
79
+ "prompt_embeds_pooled",
80
+ "negative_prompt_embeds",
81
+ "image_embeddings",
82
+ ]
83
+
84
+ def __init__(
85
+ self,
86
+ decoder: StableCascadeUNet,
87
+ tokenizer: CLIPTokenizer,
88
+ text_encoder: CLIPTextModel,
89
+ scheduler: DDPMWuerstchenScheduler,
90
+ vqgan: PaellaVQModel,
91
+ latent_dim_scale: float = 10.67,
92
+ ) -> None:
93
+ super().__init__()
94
+ self.register_modules(
95
+ decoder=decoder,
96
+ tokenizer=tokenizer,
97
+ text_encoder=text_encoder,
98
+ scheduler=scheduler,
99
+ vqgan=vqgan,
100
+ )
101
+ self.register_to_config(latent_dim_scale=latent_dim_scale)
102
+
103
+ def prepare_latents(
104
+ self, batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, scheduler
105
+ ):
106
+ _, channels, height, width = image_embeddings.shape
107
+ latents_shape = (
108
+ batch_size * num_images_per_prompt,
109
+ 4,
110
+ int(height * self.config.latent_dim_scale),
111
+ int(width * self.config.latent_dim_scale),
112
+ )
113
+
114
+ if latents is None:
115
+ latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype)
116
+ else:
117
+ if latents.shape != latents_shape:
118
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
119
+ latents = latents.to(device)
120
+
121
+ latents = latents * scheduler.init_noise_sigma
122
+ return latents
123
+
124
    def encode_prompt(
        self,
        device,
        batch_size,
        num_images_per_prompt,
        do_classifier_free_guidance,
        prompt=None,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
    ):
        """Encode `prompt` (and optionally `negative_prompt`) into CLIP text embeddings.

        Computes per-token and pooled embeddings, repeats them for
        `num_images_per_prompt`, and — when classifier-free guidance is on —
        produces matching unconditional embeddings. Pre-computed embeddings
        are used as-is when passed.

        Returns:
            Tuple of (prompt_embeds, prompt_embeds_pooled,
            negative_prompt_embeds, negative_prompt_embeds_pooled); the
            negative entries may be None when guidance is off.
        """
        if prompt_embeds is None:
            # get prompt text embeddings
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            attention_mask = text_inputs.attention_mask

            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            # Warn when the prompt was truncated to the tokenizer's max length.
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )
                text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
                attention_mask = attention_mask[:, : self.tokenizer.model_max_length]

            text_encoder_output = self.text_encoder(
                text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True
            )
            # Per-token embeddings come from the last hidden state; pooled from text_embeds.
            prompt_embeds = text_encoder_output.hidden_states[-1]
            if prompt_embeds_pooled is None:
                prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1)

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
        prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device)
        # One copy of the embeddings per generated image.
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0)

        if negative_prompt_embeds is None and do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                # No negative prompt: use empty strings as the unconditional input.
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            negative_prompt_embeds_text_encoder_output = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=uncond_input.attention_mask.to(device),
                output_hidden_states=True,
            )

            negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1]
            negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            seq_len = negative_prompt_embeds_pooled.shape[1]
            negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to(
                dtype=self.text_encoder.dtype, device=device
            )
            negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            # done duplicates

        return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled
230
+
231
+ def check_inputs(
232
+ self,
233
+ prompt,
234
+ negative_prompt=None,
235
+ prompt_embeds=None,
236
+ negative_prompt_embeds=None,
237
+ callback_on_step_end_tensor_inputs=None,
238
+ ):
239
+ if callback_on_step_end_tensor_inputs is not None and not all(
240
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
241
+ ):
242
+ raise ValueError(
243
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
244
+ )
245
+
246
+ if prompt is not None and prompt_embeds is not None:
247
+ raise ValueError(
248
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
249
+ " only forward one of the two."
250
+ )
251
+ elif prompt is None and prompt_embeds is None:
252
+ raise ValueError(
253
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
254
+ )
255
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
256
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
257
+
258
+ if negative_prompt is not None and negative_prompt_embeds is not None:
259
+ raise ValueError(
260
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
261
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
262
+ )
263
+
264
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
265
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
266
+ raise ValueError(
267
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
268
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
269
+ f" {negative_prompt_embeds.shape}."
270
+ )
271
+
272
    @property
    def guidance_scale(self):
        """Classifier-free guidance weight set during `__call__`."""
        return self._guidance_scale
275
+
276
    @property
    def do_classifier_free_guidance(self):
        """Whether CFG is active (guidance scale strictly greater than 1)."""
        return self._guidance_scale > 1
279
+
280
    @property
    def num_timesteps(self):
        """Number of denoising timesteps recorded by the last `__call__`."""
        return self._num_timesteps
283
+
284
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeddings: Union[torch.FloatTensor, List[torch.FloatTensor]],
        prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 10,
        guidance_scale: float = 0.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image_embeddings (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                Image Embeddings either extracted from an image or generated by a Prior Model.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            num_inference_steps (`int`, *optional*, defaults to 10):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 0.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `decoder_guidance_scale` is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_embeds_pooled (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            negative_prompt_embeds_pooled (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` input
                argument.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True,
            otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image
            embeddings.
        """

        # 0. Define commonly used variables
        device = self._execution_device
        dtype = self.decoder.dtype
        self._guidance_scale = guidance_scale
        # bfloat16 support for the operations used below landed in torch 2.2.0.
        if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16:
            raise ValueError("`StableCascadeDecoderPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype.")

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )
        # Accept a list of per-prompt embeddings and fold it into one batch.
        if isinstance(image_embeddings, list):
            image_embeddings = torch.cat(image_embeddings, dim=0)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Compute the effective number of images per prompt
        # We must account for the fact that the image embeddings from the prior can be generated with num_images_per_prompt > 1
        # This results in a case where a single prompt is associated with multiple image embeddings
        # Divide the number of image embeddings by the batch size to determine if this is the case.
        num_images_per_prompt = num_images_per_prompt * (image_embeddings.shape[0] // batch_size)

        # 2. Encode caption
        # NOTE(review): if `prompt_embeds` is provided without `prompt_embeds_pooled`,
        # this branch is skipped and the `torch.cat` below would receive `None` for the
        # pooled embeds — callers supplying `prompt_embeds` must also supply the pooled
        # variants. TODO confirm against `check_inputs`.
        if prompt_embeds is None and negative_prompt_embeds is None:
            # The decoder only consumes the *pooled* text embeddings; the
            # per-token embeddings returned by `encode_prompt` are discarded.
            _, prompt_embeds_pooled, _, negative_prompt_embeds_pooled = self.encode_prompt(
                prompt=prompt,
                device=device,
                batch_size=batch_size,
                num_images_per_prompt=num_images_per_prompt,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                negative_prompt=negative_prompt,
                prompt_embeds=prompt_embeds,
                prompt_embeds_pooled=prompt_embeds_pooled,
                negative_prompt_embeds=negative_prompt_embeds,
                negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
            )

        # The pooled embeds from the prior are pooled again before being passed to the decoder
        # For CFG, conditional and unconditional inputs are stacked along the batch dim.
        prompt_embeds_pooled = (
            torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled])
            if self.do_classifier_free_guidance
            else prompt_embeds_pooled
        )
        # The unconditional effnet input is all-zeros with the same shape.
        effnet = (
            torch.cat([image_embeddings, torch.zeros_like(image_embeddings)])
            if self.do_classifier_free_guidance
            else image_embeddings
        )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latents
        latents = self.prepare_latents(
            batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, self.scheduler
        )

        # 6. Run denoising loop
        # The final timestep is excluded: it is only used as the `prev` target of
        # the last scheduler step.
        self._num_timesteps = len(timesteps[:-1])
        for i, t in enumerate(self.progress_bar(timesteps[:-1])):
            timestep_ratio = t.expand(latents.size(0)).to(dtype)

            # 7. Denoise latents
            predicted_latents = self.decoder(
                sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents,
                timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio,
                clip_text_pooled=prompt_embeds_pooled,
                effnet=effnet,
                return_dict=False,
            )[0]

            # 8. Check for classifier free guidance and apply it
            if self.do_classifier_free_guidance:
                predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2)
                # lerp(uncond, text, w) == uncond + w * (text - uncond), the standard CFG update.
                predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale)

            # 9. Renoise latents to next timestep
            latents = self.scheduler.step(
                model_output=predicted_latents,
                timestep=timestep_ratio,
                sample=latents,
                generator=generator,
            ).prev_sample

            if callback_on_step_end is not None:
                callback_kwargs = {}
                # `locals()` lookup lets users request any in-scope tensor by name
                # (restricted upstream to `_callback_tensor_inputs`).
                for k in callback_on_step_end_tensor_inputs:
                    callback_kwargs[k] = locals()[k]
                callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                # Callbacks may replace these tensors mid-generation.
                latents = callback_outputs.pop("latents", latents)
                prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

        if output_type not in ["pt", "np", "pil", "latent"]:
            raise ValueError(
                f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}"
            )

        if not output_type == "latent":
            # 10. Scale and decode the image latents with vq-vae
            latents = self.vqgan.config.scale_factor * latents
            images = self.vqgan.decode(latents).sample.clamp(0, 1)
            if output_type == "np":
                images = images.permute(0, 2, 3, 1).cpu().float().numpy()  # float() as bfloat16-> numpy doesnt work
            elif output_type == "pil":
                images = images.permute(0, 2, 3, 1).cpu().float().numpy()  # float() as bfloat16-> numpy doesnt work
                images = self.numpy_to_pil(images)
        else:
            images = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return images
        return ImagePipelineOutput(images)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Callable, Dict, List, Optional, Union
15
+
16
+ import PIL
17
+ import torch
18
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
19
+
20
+ from ...models import StableCascadeUNet
21
+ from ...schedulers import DDPMWuerstchenScheduler
22
+ from ...utils import is_torch_version, replace_example_docstring
23
+ from ..pipeline_utils import DiffusionPipeline
24
+ from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel
25
+ from .pipeline_stable_cascade import StableCascadeDecoderPipeline
26
+ from .pipeline_stable_cascade_prior import StableCascadePriorPipeline
27
+
28
+
29
+ TEXT2IMAGE_EXAMPLE_DOC_STRING = """
30
+ Examples:
31
+ ```py
32
+ >>> import torch
33
+ >>> from diffusers import StableCascadeCombinedPipeline
34
+ >>> pipe = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16)
35
+ >>> pipe.enable_model_cpu_offload()
36
+ >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"
37
+ >>> images = pipe(prompt=prompt)
38
+ ```
39
+ """
40
+
41
+
42
class StableCascadeCombinedPipeline(DiffusionPipeline):
    """
    Combined Pipeline for text-to-image generation using Stable Cascade.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        tokenizer (`CLIPTokenizer`):
            The decoder tokenizer to be used for text inputs.
        text_encoder (`CLIPTextModel`):
            The decoder text encoder to be used for text inputs.
        decoder (`StableCascadeUNet`):
            The decoder model to be used for decoder image generation pipeline.
        scheduler (`DDPMWuerstchenScheduler`):
            The scheduler to be used for decoder image generation pipeline.
        vqgan (`PaellaVQModel`):
            The VQGAN model to be used for decoder image generation pipeline.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `image_encoder`.
        image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        prior_prior (`StableCascadeUNet`):
            The prior model to be used for prior pipeline.
        prior_scheduler (`DDPMWuerstchenScheduler`):
            The scheduler to be used for prior pipeline.
    """

    # Tells the loader that this pipeline is assembled from two connected
    # sub-pipelines (prior + decoder).
    _load_connected_pipes = True

    def __init__(
        self,
        tokenizer: CLIPTokenizer,
        text_encoder: CLIPTextModel,
        decoder: StableCascadeUNet,
        scheduler: DDPMWuerstchenScheduler,
        vqgan: PaellaVQModel,
        prior_prior: StableCascadeUNet,
        prior_text_encoder: CLIPTextModel,
        prior_tokenizer: CLIPTokenizer,
        prior_scheduler: DDPMWuerstchenScheduler,
        prior_feature_extractor: Optional[CLIPImageProcessor] = None,
        prior_image_encoder: Optional[CLIPVisionModelWithProjection] = None,
    ):
        super().__init__()

        # Register every component so save/load round-trips the full pipeline.
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            decoder=decoder,
            scheduler=scheduler,
            vqgan=vqgan,
            prior_text_encoder=prior_text_encoder,
            prior_tokenizer=prior_tokenizer,
            prior_prior=prior_prior,
            prior_scheduler=prior_scheduler,
            prior_feature_extractor=prior_feature_extractor,
            prior_image_encoder=prior_image_encoder,
        )
        # The two stages share the registered modules; no weights are copied.
        self.prior_pipe = StableCascadePriorPipeline(
            prior=prior_prior,
            text_encoder=prior_text_encoder,
            tokenizer=prior_tokenizer,
            scheduler=prior_scheduler,
            image_encoder=prior_image_encoder,
            feature_extractor=prior_feature_extractor,
        )
        self.decoder_pipe = StableCascadeDecoderPipeline(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            decoder=decoder,
            scheduler=scheduler,
            vqgan=vqgan,
        )

    def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
        # NOTE(review): only the decoder sub-pipeline is switched to xformers
        # attention here; the prior sub-pipeline is left unchanged — confirm
        # whether this is intentional.
        self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id)
        self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id)

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗
        Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a
        GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis.
        Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower.
        """
        self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
        self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)

    def progress_bar(self, iterable=None, total=None):
        # Forwards the progress-bar call to both sub-pipelines; note that no
        # value is returned from this override.
        self.prior_pipe.progress_bar(iterable=iterable, total=total)
        self.decoder_pipe.progress_bar(iterable=iterable, total=total)

    def set_progress_bar_config(self, **kwargs):
        # Apply the same progress-bar configuration to both stages.
        self.prior_pipe.set_progress_bar_config(**kwargs)
        self.decoder_pipe.set_progress_bar_config(**kwargs)

    @torch.no_grad()
    @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = None,
        images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None,
        height: int = 512,
        width: int = 512,
        prior_num_inference_steps: int = 60,
        prior_guidance_scale: float = 4.0,
        num_inference_steps: int = 12,
        decoder_guidance_scale: float = 0.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation for the prior and decoder.
            images (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, *optional*):
                The images to guide the image generation for the prior.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, text embeddings will be generated from `prompt` input argument.
            prompt_embeds_pooled (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.*
                prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            negative_prompt_embeds_pooled (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.*
                prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `prior_guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `prior_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked
                to the text `prompt`, usually at the expense of lower image quality.
            prior_num_inference_steps (`Union[int, Dict[float, int]]`, *optional*, defaults to 60):
                The number of prior denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. For more specific timestep spacing, you can pass customized
                `prior_timesteps`
            num_inference_steps (`int`, *optional*, defaults to 12):
                The number of decoder denoising steps. More denoising steps usually lead to a higher quality image at
                the expense of slower inference. For more specific timestep spacing, you can pass customized
                `timesteps`
            decoder_guidance_scale (`float`, *optional*, defaults to 0.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            prior_callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep:
                int, callback_kwargs: Dict)`.
            prior_callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the
                list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True,
            otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        dtype = self.decoder_pipe.decoder.dtype
        # bfloat16 support for the operations used downstream landed in torch 2.2.0.
        if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16:
            raise ValueError(
                "`StableCascadeCombinedPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype."
            )

        # Stage C (prior): text/image -> image embeddings (kept as tensors).
        prior_outputs = self.prior_pipe(
            prompt=prompt if prompt_embeds is None else None,
            images=images,
            height=height,
            width=width,
            num_inference_steps=prior_num_inference_steps,
            guidance_scale=prior_guidance_scale,
            negative_prompt=negative_prompt if negative_prompt_embeds is None else None,
            prompt_embeds=prompt_embeds,
            prompt_embeds_pooled=prompt_embeds_pooled,
            negative_prompt_embeds=negative_prompt_embeds,
            negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
            num_images_per_prompt=num_images_per_prompt,
            generator=generator,
            latents=latents,
            output_type="pt",
            return_dict=True,
            callback_on_step_end=prior_callback_on_step_end,
            callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs,
        )
        image_embeddings = prior_outputs.image_embeddings
        # Reuse the text embeddings computed by the prior so the decoder does
        # not re-encode the prompt. Once these are set, `prompt` below is None.
        prompt_embeds = prior_outputs.get("prompt_embeds", None)
        prompt_embeds_pooled = prior_outputs.get("prompt_embeds_pooled", None)
        negative_prompt_embeds = prior_outputs.get("negative_prompt_embeds", None)
        negative_prompt_embeds_pooled = prior_outputs.get("negative_prompt_embeds_pooled", None)

        # Stage B + A (decoder + VQGAN): image embeddings -> final images.
        outputs = self.decoder_pipe(
            image_embeddings=image_embeddings,
            prompt=prompt if prompt_embeds is None else None,
            num_inference_steps=num_inference_steps,
            guidance_scale=decoder_guidance_scale,
            negative_prompt=negative_prompt if negative_prompt_embeds is None else None,
            prompt_embeds=prompt_embeds,
            prompt_embeds_pooled=prompt_embeds_pooled,
            negative_prompt_embeds=negative_prompt_embeds,
            negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
            generator=generator,
            output_type=output_type,
            return_dict=return_dict,
            callback_on_step_end=callback_on_step_end,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )

        return outputs
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py ADDED
@@ -0,0 +1,638 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from math import ceil
17
+ from typing import Callable, Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+ import PIL
21
+ import torch
22
+ from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection
23
+
24
+ from ...models import StableCascadeUNet
25
+ from ...schedulers import DDPMWuerstchenScheduler
26
+ from ...utils import BaseOutput, logging, replace_example_docstring
27
+ from ...utils.torch_utils import randn_tensor
28
+ from ..pipeline_utils import DiffusionPipeline
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
# Two-phase timestep schedule for Stage C: 20 steps from 1.0 down to 2/3,
# then 10 more from 2/3 down to 0.0 (the shared 2/3 endpoint is dropped
# from the second segment to avoid a duplicate step).
DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:]
34
+
35
+ EXAMPLE_DOC_STRING = """
36
+ Examples:
37
+ ```py
38
+ >>> import torch
39
+ >>> from diffusers import StableCascadePriorPipeline
40
+
41
+ >>> prior_pipe = StableCascadePriorPipeline.from_pretrained(
42
+ ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16
43
+ ... ).to("cuda")
44
+
45
+ >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"
46
+ >>> prior_output = pipe(prompt)
47
+ ```
48
+ """
49
+
50
+
51
@dataclass
class StableCascadePriorPipelineOutput(BaseOutput):
    """
    Output class for StableCascadePriorPipeline.

    Args:
        image_embeddings (`torch.FloatTensor` or `np.ndarray`)
            Prior image embeddings for text prompt
        prompt_embeds (`torch.FloatTensor`):
            Text embeddings for the prompt.
        prompt_embeds_pooled (`torch.FloatTensor`):
            Pooled text embeddings for the prompt.
        negative_prompt_embeds (`torch.FloatTensor`):
            Text embeddings for the negative prompt.
        negative_prompt_embeds_pooled (`torch.FloatTensor`):
            Pooled text embeddings for the negative prompt.
    """

    image_embeddings: Union[torch.FloatTensor, np.ndarray]
    prompt_embeds: Union[torch.FloatTensor, np.ndarray]
    prompt_embeds_pooled: Union[torch.FloatTensor, np.ndarray]
    negative_prompt_embeds: Union[torch.FloatTensor, np.ndarray]
    negative_prompt_embeds_pooled: Union[torch.FloatTensor, np.ndarray]
70
+
71
+
72
+ class StableCascadePriorPipeline(DiffusionPipeline):
73
+ """
74
+ Pipeline for generating image prior for Stable Cascade.
75
+
76
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
77
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
78
+
79
+ Args:
80
+ prior ([`StableCascadeUNet`]):
81
+ The Stable Cascade prior to approximate the image embedding from the text and/or image embedding.
82
+ text_encoder ([`CLIPTextModelWithProjection`]):
83
+ Frozen text-encoder ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
84
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
85
+ Model that extracts features from generated images to be used as inputs for the `image_encoder`.
86
+ image_encoder ([`CLIPVisionModelWithProjection`]):
87
+ Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
88
+ tokenizer (`CLIPTokenizer`):
89
+ Tokenizer of class
90
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
91
+ scheduler ([`DDPMWuerstchenScheduler`]):
92
+ A scheduler to be used in combination with `prior` to generate image embedding.
93
+ resolution_multiple ('float', *optional*, defaults to 42.67):
94
+ Default resolution for multiple images generated.
95
+ """
96
+
97
+ unet_name = "prior"
98
+ text_encoder_name = "text_encoder"
99
+ model_cpu_offload_seq = "image_encoder->text_encoder->prior"
100
+ _optional_components = ["image_encoder", "feature_extractor"]
101
+ _callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"]
102
+
103
+ def __init__(
104
+ self,
105
+ tokenizer: CLIPTokenizer,
106
+ text_encoder: CLIPTextModelWithProjection,
107
+ prior: StableCascadeUNet,
108
+ scheduler: DDPMWuerstchenScheduler,
109
+ resolution_multiple: float = 42.67,
110
+ feature_extractor: Optional[CLIPImageProcessor] = None,
111
+ image_encoder: Optional[CLIPVisionModelWithProjection] = None,
112
+ ) -> None:
113
+ super().__init__()
114
+ self.register_modules(
115
+ tokenizer=tokenizer,
116
+ text_encoder=text_encoder,
117
+ image_encoder=image_encoder,
118
+ feature_extractor=feature_extractor,
119
+ prior=prior,
120
+ scheduler=scheduler,
121
+ )
122
+ self.register_to_config(resolution_multiple=resolution_multiple)
123
+
124
+ def prepare_latents(
125
+ self, batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, scheduler
126
+ ):
127
+ latent_shape = (
128
+ num_images_per_prompt * batch_size,
129
+ self.prior.config.in_channels,
130
+ ceil(height / self.config.resolution_multiple),
131
+ ceil(width / self.config.resolution_multiple),
132
+ )
133
+
134
+ if latents is None:
135
+ latents = randn_tensor(latent_shape, generator=generator, device=device, dtype=dtype)
136
+ else:
137
+ if latents.shape != latent_shape:
138
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latent_shape}")
139
+ latents = latents.to(device)
140
+
141
+ latents = latents * scheduler.init_noise_sigma
142
+ return latents
143
+
144
+ def encode_prompt(
145
+ self,
146
+ device,
147
+ batch_size,
148
+ num_images_per_prompt,
149
+ do_classifier_free_guidance,
150
+ prompt=None,
151
+ negative_prompt=None,
152
+ prompt_embeds: Optional[torch.FloatTensor] = None,
153
+ prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
154
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
155
+ negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
156
+ ):
157
+ if prompt_embeds is None:
158
+ # get prompt text embeddings
159
+ text_inputs = self.tokenizer(
160
+ prompt,
161
+ padding="max_length",
162
+ max_length=self.tokenizer.model_max_length,
163
+ truncation=True,
164
+ return_tensors="pt",
165
+ )
166
+ text_input_ids = text_inputs.input_ids
167
+ attention_mask = text_inputs.attention_mask
168
+
169
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
170
+
171
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
172
+ text_input_ids, untruncated_ids
173
+ ):
174
+ removed_text = self.tokenizer.batch_decode(
175
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
176
+ )
177
+ logger.warning(
178
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
179
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
180
+ )
181
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
182
+ attention_mask = attention_mask[:, : self.tokenizer.model_max_length]
183
+
184
+ text_encoder_output = self.text_encoder(
185
+ text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True
186
+ )
187
+ prompt_embeds = text_encoder_output.hidden_states[-1]
188
+ if prompt_embeds_pooled is None:
189
+ prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1)
190
+
191
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
192
+ prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device)
193
+ prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
194
+ prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0)
195
+
196
+ if negative_prompt_embeds is None and do_classifier_free_guidance:
197
+ uncond_tokens: List[str]
198
+ if negative_prompt is None:
199
+ uncond_tokens = [""] * batch_size
200
+ elif type(prompt) is not type(negative_prompt):
201
+ raise TypeError(
202
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
203
+ f" {type(prompt)}."
204
+ )
205
+ elif isinstance(negative_prompt, str):
206
+ uncond_tokens = [negative_prompt]
207
+ elif batch_size != len(negative_prompt):
208
+ raise ValueError(
209
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
210
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
211
+ " the batch size of `prompt`."
212
+ )
213
+ else:
214
+ uncond_tokens = negative_prompt
215
+
216
+ uncond_input = self.tokenizer(
217
+ uncond_tokens,
218
+ padding="max_length",
219
+ max_length=self.tokenizer.model_max_length,
220
+ truncation=True,
221
+ return_tensors="pt",
222
+ )
223
+ negative_prompt_embeds_text_encoder_output = self.text_encoder(
224
+ uncond_input.input_ids.to(device),
225
+ attention_mask=uncond_input.attention_mask.to(device),
226
+ output_hidden_states=True,
227
+ )
228
+
229
+ negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1]
230
+ negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1)
231
+
232
+ if do_classifier_free_guidance:
233
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
234
+ seq_len = negative_prompt_embeds.shape[1]
235
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
236
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
237
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
238
+
239
+ seq_len = negative_prompt_embeds_pooled.shape[1]
240
+ negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to(
241
+ dtype=self.text_encoder.dtype, device=device
242
+ )
243
+ negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1)
244
+ negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view(
245
+ batch_size * num_images_per_prompt, seq_len, -1
246
+ )
247
+ # done duplicates
248
+
249
+ return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled
250
+
251
+ def encode_image(self, images, device, dtype, batch_size, num_images_per_prompt):
252
+ image_embeds = []
253
+ for image in images:
254
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
255
+ image = image.to(device=device, dtype=dtype)
256
+ image_embed = self.image_encoder(image).image_embeds.unsqueeze(1)
257
+ image_embeds.append(image_embed)
258
+ image_embeds = torch.cat(image_embeds, dim=1)
259
+
260
+ image_embeds = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1)
261
+ negative_image_embeds = torch.zeros_like(image_embeds)
262
+
263
+ return image_embeds, negative_image_embeds
264
+
265
+ def check_inputs(
266
+ self,
267
+ prompt,
268
+ images=None,
269
+ image_embeds=None,
270
+ negative_prompt=None,
271
+ prompt_embeds=None,
272
+ prompt_embeds_pooled=None,
273
+ negative_prompt_embeds=None,
274
+ negative_prompt_embeds_pooled=None,
275
+ callback_on_step_end_tensor_inputs=None,
276
+ ):
277
+ if callback_on_step_end_tensor_inputs is not None and not all(
278
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
279
+ ):
280
+ raise ValueError(
281
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
282
+ )
283
+
284
+ if prompt is not None and prompt_embeds is not None:
285
+ raise ValueError(
286
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
287
+ " only forward one of the two."
288
+ )
289
+ elif prompt is None and prompt_embeds is None:
290
+ raise ValueError(
291
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
292
+ )
293
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
294
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
295
+
296
+ if negative_prompt is not None and negative_prompt_embeds is not None:
297
+ raise ValueError(
298
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
299
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
300
+ )
301
+
302
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
303
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
304
+ raise ValueError(
305
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
306
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
307
+ f" {negative_prompt_embeds.shape}."
308
+ )
309
+
310
+ if prompt_embeds is not None and prompt_embeds_pooled is None:
311
+ raise ValueError(
312
+ "If `prompt_embeds` are provided, `prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`"
313
+ )
314
+
315
+ if negative_prompt_embeds is not None and negative_prompt_embeds_pooled is None:
316
+ raise ValueError(
317
+ "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`"
318
+ )
319
+
320
+ if prompt_embeds_pooled is not None and negative_prompt_embeds_pooled is not None:
321
+ if prompt_embeds_pooled.shape != negative_prompt_embeds_pooled.shape:
322
+ raise ValueError(
323
+ "`prompt_embeds_pooled` and `negative_prompt_embeds_pooled` must have the same shape when passed"
324
+ f"directly, but got: `prompt_embeds_pooled` {prompt_embeds_pooled.shape} !="
325
+ f"`negative_prompt_embeds_pooled` {negative_prompt_embeds_pooled.shape}."
326
+ )
327
+
328
+ if image_embeds is not None and images is not None:
329
+ raise ValueError(
330
+ f"Cannot forward both `images`: {images} and `image_embeds`: {image_embeds}. Please make sure to"
331
+ " only forward one of the two."
332
+ )
333
+
334
+ if images:
335
+ for i, image in enumerate(images):
336
+ if not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image):
337
+ raise TypeError(
338
+ f"'images' must contain images of type 'torch.Tensor' or 'PIL.Image.Image, but got"
339
+ f"{type(image)} for image number {i}."
340
+ )
341
+
342
+ @property
343
+ def guidance_scale(self):
344
+ return self._guidance_scale
345
+
346
+ @property
347
+ def do_classifier_free_guidance(self):
348
+ return self._guidance_scale > 1
349
+
350
+ @property
351
+ def num_timesteps(self):
352
+ return self._num_timesteps
353
+
354
+ def get_timestep_ratio_conditioning(self, t, alphas_cumprod):
355
+ s = torch.tensor([0.003])
356
+ clamp_range = [0, 1]
357
+ min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2
358
+ var = alphas_cumprod[t]
359
+ var = var.clamp(*clamp_range)
360
+ s, min_var = s.to(var.device), min_var.to(var.device)
361
+ ratio = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
362
+ return ratio
363
+
364
+ @torch.no_grad()
365
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
366
+ def __call__(
367
+ self,
368
+ prompt: Optional[Union[str, List[str]]] = None,
369
+ images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None,
370
+ height: int = 1024,
371
+ width: int = 1024,
372
+ num_inference_steps: int = 20,
373
+ timesteps: List[float] = None,
374
+ guidance_scale: float = 4.0,
375
+ negative_prompt: Optional[Union[str, List[str]]] = None,
376
+ prompt_embeds: Optional[torch.FloatTensor] = None,
377
+ prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
378
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
379
+ negative_prompt_embeds_pooled: Optional[torch.FloatTensor] = None,
380
+ image_embeds: Optional[torch.FloatTensor] = None,
381
+ num_images_per_prompt: Optional[int] = 1,
382
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
383
+ latents: Optional[torch.FloatTensor] = None,
384
+ output_type: Optional[str] = "pt",
385
+ return_dict: bool = True,
386
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
387
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
388
+ ):
389
+ """
390
+ Function invoked when calling the pipeline for generation.
391
+
392
+ Args:
393
+ prompt (`str` or `List[str]`):
394
+ The prompt or prompts to guide the image generation.
395
+ height (`int`, *optional*, defaults to 1024):
396
+ The height in pixels of the generated image.
397
+ width (`int`, *optional*, defaults to 1024):
398
+ The width in pixels of the generated image.
399
+ num_inference_steps (`int`, *optional*, defaults to 60):
400
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
401
+ expense of slower inference.
402
+ guidance_scale (`float`, *optional*, defaults to 8.0):
403
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
404
+ `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen
405
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
406
+ `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely
407
+ linked to the text `prompt`, usually at the expense of lower image quality.
408
+ negative_prompt (`str` or `List[str]`, *optional*):
409
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
410
+ if `decoder_guidance_scale` is less than `1`).
411
+ prompt_embeds (`torch.FloatTensor`, *optional*):
412
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
413
+ provided, text embeddings will be generated from `prompt` input argument.
414
+ prompt_embeds_pooled (`torch.FloatTensor`, *optional*):
415
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
416
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
417
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
418
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
419
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
420
+ argument.
421
+ negative_prompt_embeds_pooled (`torch.FloatTensor`, *optional*):
422
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
423
+ weighting. If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` input
424
+ argument.
425
+ image_embeds (`torch.FloatTensor`, *optional*):
426
+ Pre-generated image embeddings. Can be used to easily tweak image inputs, *e.g.* prompt weighting.
427
+ If not provided, image embeddings will be generated from `image` input argument if existing.
428
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
429
+ The number of images to generate per prompt.
430
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
431
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
432
+ to make generation deterministic.
433
+ latents (`torch.FloatTensor`, *optional*):
434
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
435
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
436
+ tensor will ge generated by sampling using the supplied random `generator`.
437
+ output_type (`str`, *optional*, defaults to `"pil"`):
438
+ The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
439
+ (`np.array`) or `"pt"` (`torch.Tensor`).
440
+ return_dict (`bool`, *optional*, defaults to `True`):
441
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
442
+ callback_on_step_end (`Callable`, *optional*):
443
+ A function that calls at the end of each denoising steps during the inference. The function is called
444
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
445
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
446
+ `callback_on_step_end_tensor_inputs`.
447
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
448
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
449
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
450
+ `._callback_tensor_inputs` attribute of your pipeline class.
451
+
452
+ Examples:
453
+
454
+ Returns:
455
+ [`StableCascadePriorPipelineOutput`] or `tuple` [`StableCascadePriorPipelineOutput`] if
456
+ `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
457
+ generated image embeddings.
458
+ """
459
+
460
+ # 0. Define commonly used variables
461
+ device = self._execution_device
462
+ dtype = next(self.prior.parameters()).dtype
463
+ self._guidance_scale = guidance_scale
464
+ if prompt is not None and isinstance(prompt, str):
465
+ batch_size = 1
466
+ elif prompt is not None and isinstance(prompt, list):
467
+ batch_size = len(prompt)
468
+ else:
469
+ batch_size = prompt_embeds.shape[0]
470
+
471
+ # 1. Check inputs. Raise error if not correct
472
+ self.check_inputs(
473
+ prompt,
474
+ images=images,
475
+ image_embeds=image_embeds,
476
+ negative_prompt=negative_prompt,
477
+ prompt_embeds=prompt_embeds,
478
+ prompt_embeds_pooled=prompt_embeds_pooled,
479
+ negative_prompt_embeds=negative_prompt_embeds,
480
+ negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
481
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
482
+ )
483
+
484
+ # 2. Encode caption + images
485
+ (
486
+ prompt_embeds,
487
+ prompt_embeds_pooled,
488
+ negative_prompt_embeds,
489
+ negative_prompt_embeds_pooled,
490
+ ) = self.encode_prompt(
491
+ prompt=prompt,
492
+ device=device,
493
+ batch_size=batch_size,
494
+ num_images_per_prompt=num_images_per_prompt,
495
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
496
+ negative_prompt=negative_prompt,
497
+ prompt_embeds=prompt_embeds,
498
+ prompt_embeds_pooled=prompt_embeds_pooled,
499
+ negative_prompt_embeds=negative_prompt_embeds,
500
+ negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
501
+ )
502
+
503
+ if images is not None:
504
+ image_embeds_pooled, uncond_image_embeds_pooled = self.encode_image(
505
+ images=images,
506
+ device=device,
507
+ dtype=dtype,
508
+ batch_size=batch_size,
509
+ num_images_per_prompt=num_images_per_prompt,
510
+ )
511
+ elif image_embeds is not None:
512
+ image_embeds_pooled = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1)
513
+ uncond_image_embeds_pooled = torch.zeros_like(image_embeds_pooled)
514
+ else:
515
+ image_embeds_pooled = torch.zeros(
516
+ batch_size * num_images_per_prompt,
517
+ 1,
518
+ self.prior.config.clip_image_in_channels,
519
+ device=device,
520
+ dtype=dtype,
521
+ )
522
+ uncond_image_embeds_pooled = torch.zeros(
523
+ batch_size * num_images_per_prompt,
524
+ 1,
525
+ self.prior.config.clip_image_in_channels,
526
+ device=device,
527
+ dtype=dtype,
528
+ )
529
+
530
+ if self.do_classifier_free_guidance:
531
+ image_embeds = torch.cat([image_embeds_pooled, uncond_image_embeds_pooled], dim=0)
532
+ else:
533
+ image_embeds = image_embeds_pooled
534
+
535
+ # For classifier free guidance, we need to do two forward passes.
536
+ # Here we concatenate the unconditional and text embeddings into a single batch
537
+ # to avoid doing two forward passes
538
+ text_encoder_hidden_states = (
539
+ torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds
540
+ )
541
+ text_encoder_pooled = (
542
+ torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled])
543
+ if negative_prompt_embeds is not None
544
+ else prompt_embeds_pooled
545
+ )
546
+
547
+ # 4. Prepare and set timesteps
548
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
549
+ timesteps = self.scheduler.timesteps
550
+
551
+ # 5. Prepare latents
552
+ latents = self.prepare_latents(
553
+ batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, self.scheduler
554
+ )
555
+
556
+ if isinstance(self.scheduler, DDPMWuerstchenScheduler):
557
+ timesteps = timesteps[:-1]
558
+ else:
559
+ if self.scheduler.config.clip_sample:
560
+ self.scheduler.config.clip_sample = False # disample sample clipping
561
+ logger.warning(" set `clip_sample` to be False")
562
+ # 6. Run denoising loop
563
+ if hasattr(self.scheduler, "betas"):
564
+ alphas = 1.0 - self.scheduler.betas
565
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
566
+ else:
567
+ alphas_cumprod = []
568
+
569
+ self._num_timesteps = len(timesteps)
570
+ for i, t in enumerate(self.progress_bar(timesteps)):
571
+ if not isinstance(self.scheduler, DDPMWuerstchenScheduler):
572
+ if len(alphas_cumprod) > 0:
573
+ timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod)
574
+ timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device)
575
+ else:
576
+ timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype)
577
+ else:
578
+ timestep_ratio = t.expand(latents.size(0)).to(dtype)
579
+ # 7. Denoise image embeddings
580
+ predicted_image_embedding = self.prior(
581
+ sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents,
582
+ timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio,
583
+ clip_text_pooled=text_encoder_pooled,
584
+ clip_text=text_encoder_hidden_states,
585
+ clip_img=image_embeds,
586
+ return_dict=False,
587
+ )[0]
588
+
589
+ # 8. Check for classifier free guidance and apply it
590
+ if self.do_classifier_free_guidance:
591
+ predicted_image_embedding_text, predicted_image_embedding_uncond = predicted_image_embedding.chunk(2)
592
+ predicted_image_embedding = torch.lerp(
593
+ predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale
594
+ )
595
+
596
+ # 9. Renoise latents to next timestep
597
+ if not isinstance(self.scheduler, DDPMWuerstchenScheduler):
598
+ timestep_ratio = t
599
+ latents = self.scheduler.step(
600
+ model_output=predicted_image_embedding, timestep=timestep_ratio, sample=latents, generator=generator
601
+ ).prev_sample
602
+
603
+ if callback_on_step_end is not None:
604
+ callback_kwargs = {}
605
+ for k in callback_on_step_end_tensor_inputs:
606
+ callback_kwargs[k] = locals()[k]
607
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
608
+
609
+ latents = callback_outputs.pop("latents", latents)
610
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
611
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
612
+
613
+ # Offload all models
614
+ self.maybe_free_model_hooks()
615
+
616
+ if output_type == "np":
617
+ latents = latents.cpu().float().numpy() # float() as bfloat16-> numpy doesnt work
618
+ prompt_embeds = prompt_embeds.cpu().float().numpy() # float() as bfloat16-> numpy doesnt work
619
+ negative_prompt_embeds = (
620
+ negative_prompt_embeds.cpu().float().numpy() if negative_prompt_embeds is not None else None
621
+ ) # float() as bfloat16-> numpy doesnt work
622
+
623
+ if not return_dict:
624
+ return (
625
+ latents,
626
+ prompt_embeds,
627
+ prompt_embeds_pooled,
628
+ negative_prompt_embeds,
629
+ negative_prompt_embeds_pooled,
630
+ )
631
+
632
+ return StableCascadePriorPipelineOutput(
633
+ image_embeddings=latents,
634
+ prompt_embeds=prompt_embeds,
635
+ prompt_embeds_pooled=prompt_embeds_pooled,
636
+ negative_prompt_embeds=negative_prompt_embeds,
637
+ negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
638
+ )
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/__init__.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_flax_available,
9
+ is_k_diffusion_available,
10
+ is_k_diffusion_version,
11
+ is_onnx_available,
12
+ is_torch_available,
13
+ is_transformers_available,
14
+ is_transformers_version,
15
+ )
16
+
17
+
18
+ _dummy_objects = {}
19
+ _additional_imports = {}
20
+ _import_structure = {"pipeline_output": ["StableDiffusionPipelineOutput"]}
21
+
22
+ if is_transformers_available() and is_flax_available():
23
+ _import_structure["pipeline_output"].extend(["FlaxStableDiffusionPipelineOutput"])
24
+ try:
25
+ if not (is_transformers_available() and is_torch_available()):
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
29
+
30
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
31
+ else:
32
+ _import_structure["clip_image_project_model"] = ["CLIPImageProjection"]
33
+ _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"]
34
+ _import_structure["pipeline_stable_diffusion"] = ["StableDiffusionPipeline"]
35
+ _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"]
36
+ _import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"]
37
+ _import_structure["pipeline_stable_diffusion_gligen_text_image"] = ["StableDiffusionGLIGENTextImagePipeline"]
38
+ _import_structure["pipeline_stable_diffusion_img2img"] = ["StableDiffusionImg2ImgPipeline"]
39
+ _import_structure["pipeline_stable_diffusion_inpaint"] = ["StableDiffusionInpaintPipeline"]
40
+ _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"]
41
+ _import_structure["pipeline_stable_diffusion_instruct_pix2pix"] = ["StableDiffusionInstructPix2PixPipeline"]
42
+ _import_structure["pipeline_stable_diffusion_latent_upscale"] = ["StableDiffusionLatentUpscalePipeline"]
43
+ _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"]
44
+ _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"]
45
+ _import_structure["pipeline_stable_diffusion_upscale"] = ["StableDiffusionUpscalePipeline"]
46
+ _import_structure["pipeline_stable_unclip"] = ["StableUnCLIPPipeline"]
47
+ _import_structure["pipeline_stable_unclip_img2img"] = ["StableUnCLIPImg2ImgPipeline"]
48
+ _import_structure["safety_checker"] = ["StableDiffusionSafetyChecker"]
49
+ _import_structure["stable_unclip_image_normalizer"] = ["StableUnCLIPImageNormalizer"]
50
+ try:
51
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ from ...utils.dummy_torch_and_transformers_objects import (
55
+ StableDiffusionImageVariationPipeline,
56
+ )
57
+
58
+ _dummy_objects.update({"StableDiffusionImageVariationPipeline": StableDiffusionImageVariationPipeline})
59
+ else:
60
+ _import_structure["pipeline_stable_diffusion_image_variation"] = ["StableDiffusionImageVariationPipeline"]
61
+ try:
62
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ from ...utils.dummy_torch_and_transformers_objects import (
66
+ StableDiffusionDepth2ImgPipeline,
67
+ )
68
+
69
+ _dummy_objects.update(
70
+ {
71
+ "StableDiffusionDepth2ImgPipeline": StableDiffusionDepth2ImgPipeline,
72
+ }
73
+ )
74
+ else:
75
+ _import_structure["pipeline_stable_diffusion_depth2img"] = ["StableDiffusionDepth2ImgPipeline"]
76
+
77
+ try:
78
+ if not (is_transformers_available() and is_onnx_available()):
79
+ raise OptionalDependencyNotAvailable()
80
+ except OptionalDependencyNotAvailable:
81
+ from ...utils import dummy_onnx_objects # noqa F403
82
+
83
+ _dummy_objects.update(get_objects_from_module(dummy_onnx_objects))
84
+ else:
85
+ _import_structure["pipeline_onnx_stable_diffusion"] = [
86
+ "OnnxStableDiffusionPipeline",
87
+ "StableDiffusionOnnxPipeline",
88
+ ]
89
+ _import_structure["pipeline_onnx_stable_diffusion_img2img"] = ["OnnxStableDiffusionImg2ImgPipeline"]
90
+ _import_structure["pipeline_onnx_stable_diffusion_inpaint"] = ["OnnxStableDiffusionInpaintPipeline"]
91
+ _import_structure["pipeline_onnx_stable_diffusion_inpaint_legacy"] = ["OnnxStableDiffusionInpaintPipelineLegacy"]
92
+ _import_structure["pipeline_onnx_stable_diffusion_upscale"] = ["OnnxStableDiffusionUpscalePipeline"]
93
+
94
+ if is_transformers_available() and is_flax_available():
95
+ from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
96
+
97
+ _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState})
98
+ _import_structure["pipeline_flax_stable_diffusion"] = ["FlaxStableDiffusionPipeline"]
99
+ _import_structure["pipeline_flax_stable_diffusion_img2img"] = ["FlaxStableDiffusionImg2ImgPipeline"]
100
+ _import_structure["pipeline_flax_stable_diffusion_inpaint"] = ["FlaxStableDiffusionInpaintPipeline"]
101
+ _import_structure["safety_checker_flax"] = ["FlaxStableDiffusionSafetyChecker"]
102
+
103
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
104
+ try:
105
+ if not (is_transformers_available() and is_torch_available()):
106
+ raise OptionalDependencyNotAvailable()
107
+
108
+ except OptionalDependencyNotAvailable:
109
+ from ...utils.dummy_torch_and_transformers_objects import *
110
+
111
+ else:
112
+ from .clip_image_project_model import CLIPImageProjection
113
+ from .pipeline_stable_diffusion import (
114
+ StableDiffusionPipeline,
115
+ StableDiffusionPipelineOutput,
116
+ StableDiffusionSafetyChecker,
117
+ )
118
+ from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
119
+ from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
120
+ from .pipeline_stable_diffusion_instruct_pix2pix import (
121
+ StableDiffusionInstructPix2PixPipeline,
122
+ )
123
+ from .pipeline_stable_diffusion_latent_upscale import (
124
+ StableDiffusionLatentUpscalePipeline,
125
+ )
126
+ from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
127
+ from .pipeline_stable_unclip import StableUnCLIPPipeline
128
+ from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
129
+ from .safety_checker import StableDiffusionSafetyChecker
130
+ from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
131
+
132
+ try:
133
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
134
+ raise OptionalDependencyNotAvailable()
135
+ except OptionalDependencyNotAvailable:
136
+ from ...utils.dummy_torch_and_transformers_objects import (
137
+ StableDiffusionImageVariationPipeline,
138
+ )
139
+ else:
140
+ from .pipeline_stable_diffusion_image_variation import (
141
+ StableDiffusionImageVariationPipeline,
142
+ )
143
+
144
+ try:
145
+ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
146
+ raise OptionalDependencyNotAvailable()
147
+ except OptionalDependencyNotAvailable:
148
+ from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline
149
+ else:
150
+ from .pipeline_stable_diffusion_depth2img import (
151
+ StableDiffusionDepth2ImgPipeline,
152
+ )
153
+
154
+ try:
155
+ if not (is_transformers_available() and is_onnx_available()):
156
+ raise OptionalDependencyNotAvailable()
157
+ except OptionalDependencyNotAvailable:
158
+ from ...utils.dummy_onnx_objects import *
159
+ else:
160
+ from .pipeline_onnx_stable_diffusion import (
161
+ OnnxStableDiffusionPipeline,
162
+ StableDiffusionOnnxPipeline,
163
+ )
164
+ from .pipeline_onnx_stable_diffusion_img2img import (
165
+ OnnxStableDiffusionImg2ImgPipeline,
166
+ )
167
+ from .pipeline_onnx_stable_diffusion_inpaint import (
168
+ OnnxStableDiffusionInpaintPipeline,
169
+ )
170
+ from .pipeline_onnx_stable_diffusion_upscale import (
171
+ OnnxStableDiffusionUpscalePipeline,
172
+ )
173
+
174
+ try:
175
+ if not (is_transformers_available() and is_flax_available()):
176
+ raise OptionalDependencyNotAvailable()
177
+ except OptionalDependencyNotAvailable:
178
+ from ...utils.dummy_flax_objects import *
179
+ else:
180
+ from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
181
+ from .pipeline_flax_stable_diffusion_img2img import (
182
+ FlaxStableDiffusionImg2ImgPipeline,
183
+ )
184
+ from .pipeline_flax_stable_diffusion_inpaint import (
185
+ FlaxStableDiffusionInpaintPipeline,
186
+ )
187
+ from .pipeline_output import FlaxStableDiffusionPipelineOutput
188
+ from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
189
+
190
+ else:
191
+ import sys
192
+
193
+ sys.modules[__name__] = _LazyModule(
194
+ __name__,
195
+ globals()["__file__"],
196
+ _import_structure,
197
+ module_spec=__spec__,
198
+ )
199
+
200
+ for name, value in _dummy_objects.items():
201
+ setattr(sys.modules[__name__], name, value)
202
+ for name, value in _additional_imports.items():
203
+ setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py ADDED
@@ -0,0 +1,1860 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Conversion script for the Stable Diffusion checkpoints."""
16
+
17
+ import re
18
+ from contextlib import nullcontext
19
+ from io import BytesIO
20
+ from typing import Dict, Optional, Union
21
+
22
+ import requests
23
+ import torch
24
+ import yaml
25
+ from transformers import (
26
+ AutoFeatureExtractor,
27
+ BertTokenizerFast,
28
+ CLIPImageProcessor,
29
+ CLIPTextConfig,
30
+ CLIPTextModel,
31
+ CLIPTextModelWithProjection,
32
+ CLIPTokenizer,
33
+ CLIPVisionConfig,
34
+ CLIPVisionModelWithProjection,
35
+ )
36
+
37
+ from ...models import (
38
+ AutoencoderKL,
39
+ ControlNetModel,
40
+ PriorTransformer,
41
+ UNet2DConditionModel,
42
+ )
43
+ from ...schedulers import (
44
+ DDIMScheduler,
45
+ DDPMScheduler,
46
+ DPMSolverMultistepScheduler,
47
+ EulerAncestralDiscreteScheduler,
48
+ EulerDiscreteScheduler,
49
+ HeunDiscreteScheduler,
50
+ LMSDiscreteScheduler,
51
+ PNDMScheduler,
52
+ UnCLIPScheduler,
53
+ )
54
+ from ...utils import is_accelerate_available, logging
55
+ from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
56
+ from ..paint_by_example import PaintByExampleImageEncoder
57
+ from ..pipeline_utils import DiffusionPipeline
58
+ from .safety_checker import StableDiffusionSafetyChecker
59
+ from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
60
+
61
+
62
+ if is_accelerate_available():
63
+ from accelerate import init_empty_weights
64
+ from accelerate.utils import set_module_tensor_to_device
65
+
66
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
67
+
68
+
69
def shave_segments(path, n_shave_prefix_segments=1):
    """Drop dot-separated segments from *path*.

    A non-negative count removes that many leading segments; a negative
    count removes ``abs(n)`` trailing segments.
    """
    segments = path.split(".")
    if n_shave_prefix_segments >= 0:
        kept = segments[n_shave_prefix_segments:]
    else:
        kept = segments[:n_shave_prefix_segments]
    return ".".join(kept)
77
+
78
+
79
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map LDM UNet resnet parameter names to the diffusers naming scheme.

    Returns one ``{"old": ..., "new": ...}`` dict per input key; the new key is
    optionally shortened with ``shave_segments``.
    """
    renames = (
        ("in_layers.0", "norm1"),
        ("in_layers.2", "conv1"),
        ("out_layers.0", "norm2"),
        ("out_layers.3", "conv2"),
        ("emb_layers.1", "time_emb_proj"),
        ("skip_connection", "conv_shortcut"),
    )
    mapping = []
    for old_item in old_list:
        new_item = old_item
        for src, dst in renames:
            new_item = new_item.replace(src, dst)
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
99
+
100
+
101
def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map LDM VAE resnet parameter names to the diffusers naming scheme.

    Only the ``nin_shortcut`` -> ``conv_shortcut`` rename applies; the new key
    is optionally shortened with ``shave_segments``.
    """
    return [
        {
            "old": old_item,
            "new": shave_segments(
                old_item.replace("nin_shortcut", "conv_shortcut"),
                n_shave_prefix_segments=n_shave_prefix_segments,
            ),
        }
        for old_item in old_list
    ]
115
+
116
+
117
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map LDM UNet attention parameter names to the diffusers naming scheme.

    The historical renames are disabled here, so every key currently maps to
    itself; the function is kept for interface symmetry with the other
    ``renew_*`` helpers (``n_shave_prefix_segments`` is accepted but unused).
    """
    return [{"old": old_item, "new": old_item} for old_item in old_list]
136
+
137
+
138
def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map LDM VAE attention parameter names to the diffusers naming scheme.

    Renames the group norm and the q/k/v/proj_out projections to their
    diffusers equivalents; the new key is optionally shortened with
    ``shave_segments``.
    """
    renames = (
        ("norm.weight", "group_norm.weight"),
        ("norm.bias", "group_norm.bias"),
        ("q.weight", "to_q.weight"),
        ("q.bias", "to_q.bias"),
        ("k.weight", "to_k.weight"),
        ("k.bias", "to_k.bias"),
        ("v.weight", "to_v.weight"),
        ("v.bias", "to_v.bias"),
        ("proj_out.weight", "to_out.0.weight"),
        ("proj_out.bias", "to_out.0.bias"),
    )
    mapping = []
    for old_item in old_list:
        new_item = old_item
        for src, dst in renames:
            new_item = new_item.replace(src, dst)
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
166
+
167
+
168
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy weights from ``old_checkpoint`` into ``checkpoint`` under renamed keys.

    ``paths`` is a list of ``{"old": ..., "new": ...}`` dicts produced by the
    ``renew_*`` helpers.  Entries listed in ``attention_paths_to_split`` are
    fused qkv tensors and are split into separate query/key/value entries
    first; ``additional_replacements`` are extra ``{"old": ..., "new": ...}``
    substring substitutions applied to every target key.  Mid-block keys are
    renamed globally, and attention projection weights stored as 1x1 convs are
    sliced down to linear weights.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # First pass: split fused qkv attention tensors into three entries.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            # 3-dim tensors keep a channel axis after the split; others are flattened.
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            parts = old_tensor.split(channels // num_heads, dim=1)
            for name, part in zip(("query", "key", "value"), parts):
                checkpoint[path_map[name]] = part.reshape(target_shape)

    # Second pass: straight copies with global renaming.
    for path in paths:
        new_path = path["new"]

        # Already assigned by the qkv split above.
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global mid-block renaming.
        for src, dst in (
            ("middle_block.0", "mid_block.resnets.0"),
            ("middle_block.1", "mid_block.attentions.0"),
            ("middle_block.2", "mid_block.resnets.1"),
        ):
            new_path = new_path.replace(src, dst)

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path)
        weight = old_checkpoint[path["old"]]
        if is_attn_weight and len(weight.shape) == 3:
            checkpoint[new_path] = weight[:, :, 0]
        elif is_attn_weight and len(weight.shape) == 4:
            checkpoint[new_path] = weight[:, :, 0, 0]
        else:
            checkpoint[new_path] = weight
221
+
222
+
223
def conv_attn_to_linear(checkpoint):
    """Slice 1x1-conv attention weights in ``checkpoint`` down to linear weights, in place.

    4-dim q/k/v projection weights lose their trailing spatial dims;
    3-dim ``proj_attn`` weights lose the trailing dim.  Everything else is
    left untouched.
    """
    qkv_suffixes = ("query.weight", "key.weight", "value.weight")
    for key in list(checkpoint.keys()):
        suffix = ".".join(key.split(".")[-2:])
        if suffix in qkv_suffixes:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0, 0]
        elif "proj_attn.weight" in key and checkpoint[key].ndim > 2:
            checkpoint[key] = checkpoint[key][:, :, 0]
233
+
234
+
235
def create_unet_diffusers_config(original_config, image_size: int, controlnet=False):
    """
    Creates a config for the diffusers based on the config of the LDM model.

    `original_config` is the (dict-like) parsed LDM yaml config. The returned dict holds
    keyword arguments for `UNet2DConditionModel` (or, with `controlnet=True`, for
    `ControlNetModel`, in which case conditioning channels replace the output-side keys).
    """
    if controlnet:
        unet_params = original_config["model"]["params"]["control_stage_config"]["params"]
    else:
        # Newer configs may store the UNet settings under "network_config" instead.
        if (
            "unet_config" in original_config["model"]["params"]
            and original_config["model"]["params"]["unet_config"] is not None
        ):
            unet_params = original_config["model"]["params"]["unet_config"]["params"]
        else:
            unet_params = original_config["model"]["params"]["network_config"]["params"]

    vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"]

    block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]]

    # Down blocks get cross-attention at the downsample factors listed in
    # "attention_resolutions"; the factor doubles after every block but the last.
    down_block_types = []
    resolution = 1
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D"
        down_block_types.append(block_type)
        if i != len(block_out_channels) - 1:
            resolution *= 2

    # Up blocks mirror the down path, walking the resolution back down.
    up_block_types = []
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D"
        up_block_types.append(block_type)
        resolution //= 2

    # "transformer_depth" may be a single int or one entry per block.
    if unet_params["transformer_depth"] is not None:
        transformer_layers_per_block = (
            unet_params["transformer_depth"]
            if isinstance(unet_params["transformer_depth"], int)
            else list(unet_params["transformer_depth"])
        )
    else:
        transformer_layers_per_block = 1

    vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1)

    head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None
    use_linear_projection = (
        unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False
    )
    if use_linear_projection:
        # stable diffusion 2-base-512 and 2-768
        if head_dim is None:
            head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"]
            head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])]

    class_embed_type = None
    addition_embed_type = None
    addition_time_embed_dim = None
    projection_class_embeddings_input_dim = None
    context_dim = None

    # "context_dim" may be a single int or a list; only the first entry is used.
    if unet_params["context_dim"] is not None:
        context_dim = (
            unet_params["context_dim"]
            if isinstance(unet_params["context_dim"], int)
            else unet_params["context_dim"][0]
        )

    if "num_classes" in unet_params:
        if unet_params["num_classes"] == "sequential":
            if context_dim in [2048, 1280]:
                # SDXL
                addition_embed_type = "text_time"
                addition_time_embed_dim = 256
            else:
                class_embed_type = "projection"
                assert "adm_in_channels" in unet_params
                projection_class_embeddings_input_dim = unet_params["adm_in_channels"]

    config = {
        "sample_size": image_size // vae_scale_factor,
        "in_channels": unet_params["in_channels"],
        "down_block_types": tuple(down_block_types),
        "block_out_channels": tuple(block_out_channels),
        "layers_per_block": unet_params["num_res_blocks"],
        "cross_attention_dim": context_dim,
        "attention_head_dim": head_dim,
        "use_linear_projection": use_linear_projection,
        "class_embed_type": class_embed_type,
        "addition_embed_type": addition_embed_type,
        "addition_time_embed_dim": addition_time_embed_dim,
        "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
        "transformer_layers_per_block": transformer_layers_per_block,
    }

    if "disable_self_attentions" in unet_params:
        config["only_cross_attention"] = unet_params["disable_self_attentions"]

    # An integer "num_classes" means plain class-conditional embeddings.
    if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int):
        config["num_class_embeds"] = unet_params["num_classes"]

    if controlnet:
        config["conditioning_channels"] = unet_params["hint_channels"]
    else:
        config["out_channels"] = unet_params["out_channels"]
        config["up_block_types"] = tuple(up_block_types)

    return config
342
+
343
+
344
def create_vae_diffusers_config(original_config, image_size: int):
    """Build a diffusers `AutoencoderKL` config dict from the parsed LDM yaml config."""
    first_stage_params = original_config["model"]["params"]["first_stage_config"]["params"]
    vae_params = first_stage_params["ddconfig"]
    _ = first_stage_params["embed_dim"]

    block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]]
    num_blocks = len(block_out_channels)

    return {
        "sample_size": image_size,
        "in_channels": vae_params["in_channels"],
        "out_channels": vae_params["out_ch"],
        "down_block_types": tuple(["DownEncoderBlock2D"] * num_blocks),
        "up_block_types": tuple(["UpDecoderBlock2D"] * num_blocks),
        "block_out_channels": tuple(block_out_channels),
        "latent_channels": vae_params["z_channels"],
        "layers_per_block": vae_params["num_res_blocks"],
    }
366
+
367
+
368
def create_diffusers_schedular(original_config):
    """Build a `DDIMScheduler` from the timestep/beta settings of the LDM yaml config.

    NOTE: the misspelled name ("schedular") is kept for backward compatibility
    with existing callers.
    """
    model_params = original_config["model"]["params"]
    return DDIMScheduler(
        num_train_timesteps=model_params["timesteps"],
        beta_start=model_params["linear_start"],
        beta_end=model_params["linear_end"],
        beta_schedule="scaled_linear",
    )
376
+
377
+
378
def create_ldm_bert_config(original_config):
    """Build an `LDMBertConfig` from the cond-stage settings of the LDM yaml config.

    Every other helper in this file indexes `original_config` as a plain dict
    (the result of `yaml` parsing), but this function used attribute access
    (`bert_params.n_embed`), which raises `AttributeError` on a dict.  Subscript
    access is used instead; OmegaConf `DictConfig` objects support both styles,
    so callers passing an OmegaConf config keep working.
    """
    bert_params = original_config["model"]["params"]["cond_stage_config"]["params"]
    config = LDMBertConfig(
        d_model=bert_params["n_embed"],
        encoder_layers=bert_params["n_layer"],
        # Standard transformer convention: FFN width is 4x the model width.
        encoder_ffn_dim=bert_params["n_embed"] * 4,
    )
    return config
386
+
387
+
388
def convert_ldm_unet_checkpoint(
    checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False
):
    """
    Takes a state dict and a config, and returns a converted checkpoint.

    `checkpoint` is the raw LDM state dict; `config` is the dict produced by
    `create_unet_diffusers_config`. With `extract_ema=True` the EMA weights are
    extracted when present (keys are popped from `checkpoint` as they are
    consumed). With `controlnet=True` the `control_model.` prefix and the
    ControlNet-specific hint/zero-conv layers are converted as well.
    """

    if skip_extract_state_dict:
        unet_state_dict = checkpoint
    else:
        # extract state_dict for UNet
        unet_state_dict = {}
        keys = list(checkpoint.keys())

        if controlnet:
            unet_key = "control_model."
        else:
            unet_key = "model.diffusion_model."

        # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
        if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema:
            logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.")
            logger.warning(
                "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
                " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
            )
            for key in keys:
                if key.startswith("model.diffusion_model"):
                    # EMA keys are stored flattened (dots stripped after the prefix).
                    flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
                    unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
        else:
            if sum(k.startswith("model_ema") for k in keys) > 100:
                logger.warning(
                    "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
                    " weights (usually better for inference), please make sure to add the `--extract_ema` flag."
                )

            for key in keys:
                if key.startswith(unet_key):
                    unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)

    new_checkpoint = {}

    # Timestep embedding MLP.
    new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]

    if config["class_embed_type"] is None:
        # No parameters to port
        ...
    elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
        new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
        new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
        new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
        new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
    else:
        raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")

    # SDXL-style additional (text_time) embedding reuses the label_emb weights.
    if config["addition_embed_type"] == "text_time":
        new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
        new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
        new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
        new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]

    # Relevant to StableDiffusionUpscalePipeline
    if "num_class_embeds" in config:
        if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict):
            new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]

    # ControlNets have no output head; skip the out-convolutions for them.
    if not controlnet:
        new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
        new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
        new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
        new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    # Down path: block 0 is conv_in, so start at 1. Each diffusers down block
    # holds `layers_per_block` resnets plus one downsampler slot.
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["layers_per_block"] + 1)
        layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)

        resnets = [
            key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
        ]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # ".op" keys are the downsampling convolution of this block.
        if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
                f"input_blocks.{i}.0.op.weight"
            )
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
                f"input_blocks.{i}.0.op.bias"
            )

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        assign_to_checkpoint(
            paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)

            meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
            )

    # Middle block: resnet / attention / resnet.
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)

    attentions_paths = renew_attention_paths(attentions)
    meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
    )

    # Up path.
    for i in range(num_output_blocks):
        block_id = i // (config["layers_per_block"] + 1)
        layer_in_block_id = i % (config["layers_per_block"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        # Group layer names inside this output block by their sub-layer index.
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
            )

            # A sub-layer containing exactly ["conv.bias", "conv.weight"] is the upsampler.
            output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
            if ["conv.bias", "conv.weight"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
                )
        else:
            # Single-sub-layer output block: copy the resnet keys directly.
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = unet_state_dict[old_path]

    if controlnet:
        # conditioning embedding

        orig_index = 0

        new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.weight"
        )
        new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.bias"
        )

        orig_index += 2

        diffusers_index = 0

        # The hint block interleaves convs with activations; only every other
        # index carries weights.
        while diffusers_index < 6:
            new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop(
                f"input_hint_block.{orig_index}.weight"
            )
            new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop(
                f"input_hint_block.{orig_index}.bias"
            )
            diffusers_index += 1
            orig_index += 2

        new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.weight"
        )
        new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop(
            f"input_hint_block.{orig_index}.bias"
        )

        # down blocks
        for i in range(num_input_blocks):
            new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight")
            new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias")

        # mid block
        new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight")
        new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias")

    return new_checkpoint
634
+
635
+
636
+ def convert_ldm_vae_checkpoint(checkpoint, config):
637
+ # extract state dict for VAE
638
+ vae_state_dict = {}
639
+ keys = list(checkpoint.keys())
640
+ vae_key = "first_stage_model." if any(k.startswith("first_stage_model.") for k in keys) else ""
641
+ for key in keys:
642
+ if key.startswith(vae_key):
643
+ vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
644
+
645
+ new_checkpoint = {}
646
+
647
+ new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
648
+ new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
649
+ new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
650
+ new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
651
+ new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
652
+ new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
653
+
654
+ new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
655
+ new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
656
+ new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
657
+ new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
658
+ new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
659
+ new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
660
+
661
+ new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
662
+ new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
663
+ new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
664
+ new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
665
+
666
+ # Retrieves the keys for the encoder down blocks only
667
+ num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
668
+ down_blocks = {
669
+ layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
670
+ }
671
+
672
+ # Retrieves the keys for the decoder up blocks only
673
+ num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
674
+ up_blocks = {
675
+ layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
676
+ }
677
+
678
+ for i in range(num_down_blocks):
679
+ resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
680
+
681
+ if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
682
+ new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
683
+ f"encoder.down.{i}.downsample.conv.weight"
684
+ )
685
+ new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
686
+ f"encoder.down.{i}.downsample.conv.bias"
687
+ )
688
+
689
+ paths = renew_vae_resnet_paths(resnets)
690
+ meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
691
+ assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
692
+
693
+ mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
694
+ num_mid_res_blocks = 2
695
+ for i in range(1, num_mid_res_blocks + 1):
696
+ resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
697
+
698
+ paths = renew_vae_resnet_paths(resnets)
699
+ meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
700
+ assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
701
+
702
+ mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
703
+ paths = renew_vae_attention_paths(mid_attentions)
704
+ meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
705
+ assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
706
+ conv_attn_to_linear(new_checkpoint)
707
+
708
+ for i in range(num_up_blocks):
709
+ block_id = num_up_blocks - 1 - i
710
+ resnets = [
711
+ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
712
+ ]
713
+
714
+ if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
715
+ new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
716
+ f"decoder.up.{block_id}.upsample.conv.weight"
717
+ ]
718
+ new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
719
+ f"decoder.up.{block_id}.upsample.conv.bias"
720
+ ]
721
+
722
+ paths = renew_vae_resnet_paths(resnets)
723
+ meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
724
+ assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
725
+
726
+ mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
727
+ num_mid_res_blocks = 2
728
+ for i in range(1, num_mid_res_blocks + 1):
729
+ resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
730
+
731
+ paths = renew_vae_resnet_paths(resnets)
732
+ meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
733
+ assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
734
+
735
+ mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
736
+ paths = renew_vae_attention_paths(mid_attentions)
737
+ meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
738
+ assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
739
+ conv_attn_to_linear(new_checkpoint)
740
+ return new_checkpoint
741
+
742
+
743
def convert_ldm_bert_checkpoint(checkpoint, config):
    """Convert an original LDM BERT text encoder into an `LDMBertModel`.

    Weights are transplanted module-by-module from ``checkpoint.transformer``
    (the original x-transformers-style module, passed as a live model object,
    not a state dict) into a freshly constructed HF-style ``LDMBertModel``.
    Returns the populated model in eval mode.
    """

    def _copy_attn_layer(hf_attn_layer, pt_attn_layer):
        # q/k/v live in three separate projections in the original model.
        # NOTE(review): q/k/v are copied via `.data =` while out_proj rebinds the
        # attribute directly (replacing the Parameter) — presumably intentional,
        # both end up sharing the source tensors.
        hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight
        hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight
        hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight

        hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight
        hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias

    def _copy_linear(hf_linear, pt_linear):
        # Rebind weight/bias Parameters so the HF module shares the source tensors.
        hf_linear.weight = pt_linear.weight
        hf_linear.bias = pt_linear.bias

    def _copy_layer(hf_layer, pt_layer):
        # `pt_layer` is a 2-element slice of the original layer list; each element
        # is a (layer_norm, module) pair: index 0 holds attention, index 1 the MLP.
        # copy layer norms
        _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0])
        _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0])

        # copy attn
        _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1])

        # copy MLP
        pt_mlp = pt_layer[1][1]
        _copy_linear(hf_layer.fc1, pt_mlp.net[0][0])
        _copy_linear(hf_layer.fc2, pt_mlp.net[2])

    def _copy_layers(hf_layers, pt_layers):
        # The original list holds two entries (attn pair, MLP pair) per HF layer,
        # so HF layer i maps to original slice [2*i : 2*i + 2].
        for i, hf_layer in enumerate(hf_layers):
            if i != 0:
                i += i  # doubles i, i.e. i *= 2 — stride of 2 through pt_layers
            pt_layer = pt_layers[i : i + 2]
            _copy_layer(hf_layer, pt_layer)

    hf_model = LDMBertModel(config).eval()

    # copy embeds
    hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight
    hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight

    # copy layer norm
    _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm)

    # copy hidden layers
    _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers)

    _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits)

    return hf_model
791
+
792
+
793
def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None):
    """Convert the CLIP text-encoder weights of an LDM/SD checkpoint.

    Args:
        checkpoint: full original state dict (flat key -> tensor mapping).
        local_files_only: forwarded to `from_pretrained` when a fresh
            `CLIPTextModel` must be instantiated.
        text_encoder: optional pre-built text encoder to load weights into;
            when `None` a `CLIPTextModel` is created from the
            `openai/clip-vit-large-patch14` config.

    Returns:
        The text model with the converted weights loaded.

    Raises:
        ValueError: if the CLIP config cannot be fetched (e.g. offline without
            a local copy).
    """
    if text_encoder is None:
        config_name = "openai/clip-vit-large-patch14"
        try:
            config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only)
        except Exception:
            raise ValueError(
                f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'."
            )

        # Build on the meta device when accelerate is available to avoid
        # allocating weights that are immediately overwritten.
        ctx = init_empty_weights if is_accelerate_available() else nullcontext
        with ctx():
            text_model = CLIPTextModel(config)
    else:
        text_model = text_encoder

    keys = list(checkpoint.keys())

    text_model_dict = {}

    # SD 1.x stores the encoder under `cond_stage_model.transformer`, SDXL-style
    # checkpoints under `conditioner.embedders.0.transformer`.
    remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"]

    for key in keys:
        for prefix in remove_prefixes:
            if key.startswith(prefix):
                text_model_dict[key[len(prefix + ".") :]] = checkpoint[key]

    if is_accelerate_available():
        for param_name, param in text_model_dict.items():
            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    else:
        # BUGFIX: `hasattr` takes (object, name); the previous single-argument
        # call `hasattr(text_model.embeddings.position_ids)` raised
        # `TypeError: hasattr expected 2 arguments` whenever this branch ran.
        # Newer transformers versions removed the `position_ids` buffer, so the
        # key must be dropped from the state dict in that case.
        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
            text_model_dict.pop("text_model.embeddings.position_ids", None)

        text_model.load_state_dict(text_model_dict)

    return text_model
830
+
831
+
832
# (original SD key, HF Diffusers key) pairs for the top-level OpenCLIP
# text-encoder tensors (embeddings, final layer norm, projection).
textenc_conversion_lst = [
    ("positional_embedding", "text_model.embeddings.position_embedding.weight"),
    ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"),
    ("ln_final.weight", "text_model.final_layer_norm.weight"),
    ("ln_final.bias", "text_model.final_layer_norm.bias"),
    ("text_projection", "text_projection.weight"),
]
textenc_conversion_map = dict(textenc_conversion_lst)

# Substring renames applied to keys inside the transformer sub-module.
textenc_transformer_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
# Escaped-pattern -> replacement map plus one alternation regex matching any of
# the source substrings; used with `textenc_pattern.sub(...)` during conversion.
protected = {re.escape(sd_key): hf_key for sd_key, hf_key in textenc_transformer_conversion_lst}
textenc_pattern = re.compile("|".join(protected))
855
+
856
+
857
def convert_paint_by_example_checkpoint(checkpoint, local_files_only=False):
    """Convert a Paint-by-Example checkpoint into a `PaintByExampleImageEncoder`.

    Loads, in order: the CLIP vision tower, the mapper transformer blocks
    (splitting fused qkv tensors), the final layer norm, the output projection,
    and the learned unconditional vector. Returns the populated model.
    """
    config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only)
    model = PaintByExampleImageEncoder(config)

    keys = list(checkpoint.keys())

    text_model_dict = {}

    # Vision-tower weights live under `cond_stage_model.transformer.*`.
    for key in keys:
        if key.startswith("cond_stage_model.transformer"):
            text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]

    # load clip vision
    model.model.load_state_dict(text_model_dict)

    # load mapper
    keys_mapper = {
        k[len("cond_stage_model.mapper.res") :]: v
        for k, v in checkpoint.items()
        if k.startswith("cond_stage_model.mapper")
    }

    # Original mapper sub-module name -> target Diffusers attention-block names.
    # A single fused source tensor (e.g. c_qkv) may fan out to several targets.
    MAPPING = {
        "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"],
        "attn.c_proj": ["attn1.to_out.0"],
        "ln_1": ["norm1"],
        "ln_2": ["norm3"],
        "mlp.c_fc": ["ff.net.0.proj"],
        "mlp.c_proj": ["ff.net.2"],
    }

    mapped_weights = {}
    for key, value in keys_mapper.items():
        # Keys look like `blocks.N.<name>.<suffix>`; `len("blocks.i")` == 8 grabs
        # the `blocks.N` prefix — assumes the block index is a single digit
        # (< 10 blocks). TODO(review): confirm this holds for all checkpoints.
        prefix = key[: len("blocks.i")]
        suffix = key.split(prefix)[-1].split(".")[-1]
        # Middle portion between prefix and suffix, with the surrounding dots
        # stripped, e.g. "attn.c_qkv".
        name = key.split(prefix)[-1].split(suffix)[0][1:-1]
        mapped_names = MAPPING[name]

        # Fused tensors are split evenly along dim 0 across the target names.
        num_splits = len(mapped_names)
        for i, mapped_name in enumerate(mapped_names):
            new_name = ".".join([prefix, mapped_name, suffix])
            shape = value.shape[0] // num_splits
            mapped_weights[new_name] = value[i * shape : (i + 1) * shape]

    model.mapper.load_state_dict(mapped_weights)

    # load final layer norm
    model.final_layer_norm.load_state_dict(
        {
            "bias": checkpoint["cond_stage_model.final_ln.bias"],
            "weight": checkpoint["cond_stage_model.final_ln.weight"],
        }
    )

    # load final proj
    model.proj_out.load_state_dict(
        {
            "bias": checkpoint["proj_out.bias"],
            "weight": checkpoint["proj_out.weight"],
        }
    )

    # load uncond vector
    model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"])
    return model
922
+
923
+
924
def convert_open_clip_checkpoint(
    checkpoint,
    config_name,
    prefix="cond_stage_model.model.",
    has_projection=False,
    local_files_only=False,
    **config_kwargs,
):
    """Convert OpenCLIP-style text-encoder weights into a transformers CLIP model.

    Args:
        checkpoint: full original state dict.
        config_name: HF hub id of the `CLIPTextConfig` to instantiate.
        prefix: key prefix of the text encoder inside `checkpoint`.
        has_projection: build a `CLIPTextModelWithProjection` instead of a
            plain `CLIPTextModel`.
        local_files_only: forwarded to `from_pretrained`.
        **config_kwargs: extra overrides for the CLIP config.

    Returns:
        The text model with converted weights loaded.

    Raises:
        ValueError: if the config cannot be fetched.
    """
    # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder")
    # text_model = CLIPTextModelWithProjection.from_pretrained(
    #     "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280
    # )
    try:
        config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only)
    except Exception:
        raise ValueError(
            f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'."
        )

    # Build on the meta device when accelerate is available to avoid
    # allocating weights that are immediately overwritten.
    ctx = init_empty_weights if is_accelerate_available() else nullcontext
    with ctx():
        text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config)

    keys = list(checkpoint.keys())

    keys_to_ignore = []
    if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23:
        # make sure to remove all keys > 22
        keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")]
        keys_to_ignore += ["cond_stage_model.model.text_projection"]

    text_model_dict = {}

    # The width of the model is needed to split the fused in_proj qkv tensors.
    if prefix + "text_projection" in checkpoint:
        d_model = int(checkpoint[prefix + "text_projection"].shape[0])
    else:
        d_model = 1024

    text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")

    for key in keys:
        if key in keys_to_ignore:
            continue
        # Top-level tensors (embeddings, final norm, projection).
        if key[len(prefix) :] in textenc_conversion_map:
            if key.endswith("text_projection"):
                # OpenCLIP stores the projection transposed relative to HF.
                value = checkpoint[key].T.contiguous()
            else:
                value = checkpoint[key]

            text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value

        # Transformer-block tensors: rename via the regex table, splitting the
        # fused in_proj weight/bias into separate q/k/v projections.
        if key.startswith(prefix + "transformer."):
            new_key = key[len(prefix + "transformer.") :]
            if new_key.endswith(".in_proj_weight"):
                new_key = new_key[: -len(".in_proj_weight")]
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
                text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :]
                text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :]
                text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :]
            elif new_key.endswith(".in_proj_bias"):
                new_key = new_key[: -len(".in_proj_bias")]
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
                text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model]
                text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2]
                text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :]
            else:
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)

                text_model_dict[new_key] = checkpoint[key]

    if is_accelerate_available():
        for param_name, param in text_model_dict.items():
            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    else:
        # BUGFIX: `hasattr` takes (object, name); the previous single-argument
        # call `hasattr(text_model.embeddings.position_ids)` raised a TypeError
        # whenever this branch ran. Newer transformers versions removed the
        # `position_ids` buffer, so the key must be dropped in that case.
        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
            text_model_dict.pop("text_model.embeddings.position_ids", None)

        text_model.load_state_dict(text_model_dict)

    return text_model
1004
+
1005
+
1006
def stable_unclip_image_encoder(original_config, local_files_only=False):
    """
    Returns the image processor and clip image encoder for the img2img unclip pipeline.

    We currently know of two types of stable unclip models which separately use the clip and the openclip image
    encoders.
    """

    image_embedder_config = original_config["model"]["params"]["embedder_config"]

    sd_clip_image_embedder_class = image_embedder_config["target"]
    sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1]

    if sd_clip_image_embedder_class == "ClipImageEmbedder":
        # BUGFIX: `original_config` comes from `yaml.safe_load` and is a plain
        # dict, so the former OmegaConf-style attribute access
        # (`image_embedder_config.params.model`) raised AttributeError here.
        clip_model_name = image_embedder_config["params"]["model"]

        if clip_model_name == "ViT-L/14":
            feature_extractor = CLIPImageProcessor()
            image_encoder = CLIPVisionModelWithProjection.from_pretrained(
                "openai/clip-vit-large-patch14", local_files_only=local_files_only
            )
        else:
            raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}")

    elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder":
        feature_extractor = CLIPImageProcessor()
        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
            "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", local_files_only=local_files_only
        )
    else:
        raise NotImplementedError(
            f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}"
        )

    return feature_extractor, image_encoder
1041
+
1042
+
1043
def stable_unclip_image_noising_components(
    original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None
):
    """
    Returns the noising components for the img2img and txt2img unclip pipelines.

    Converts the stability noise augmentor into
    1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats
    2. a `DDPMScheduler` for holding the noise schedule

    If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided.
    """
    noise_aug_config = original_config["model"]["params"]["noise_aug_config"]
    noise_aug_class = noise_aug_config["target"]
    noise_aug_class = noise_aug_class.split(".")[-1]

    if noise_aug_class == "CLIPEmbeddingNoiseAugmentation":
        # BUGFIX: `original_config` is a plain dict from `yaml.safe_load`; the
        # former OmegaConf-style attribute access (`noise_aug_config.params`,
        # `.timestep_dim`, ...) raised AttributeError at runtime. Use dict
        # indexing, matching the rest of this function.
        noise_aug_config = noise_aug_config["params"]
        embedding_dim = noise_aug_config["timestep_dim"]
        max_noise_level = noise_aug_config["noise_schedule_config"]["timesteps"]
        beta_schedule = noise_aug_config["noise_schedule_config"]["beta_schedule"]

        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim)
        image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule)

        if "clip_stats_path" in noise_aug_config:
            if clip_stats_path is None:
                raise ValueError("This stable unclip config requires a `clip_stats_path`")

            clip_mean, clip_std = torch.load(clip_stats_path, map_location=device)
            # Add a leading batch dimension expected by the normalizer buffers.
            clip_mean = clip_mean[None, :]
            clip_std = clip_std[None, :]

            clip_stats_state_dict = {
                "mean": clip_mean,
                "std": clip_std,
            }

            image_normalizer.load_state_dict(clip_stats_state_dict)
    else:
        raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}")

    return image_normalizer, image_noising_scheduler
1086
+
1087
+
1088
def convert_controlnet_checkpoint(
    checkpoint,
    original_config,
    checkpoint_path,
    image_size,
    upcast_attention,
    extract_ema,
    use_linear_projection=None,
    cross_attention_dim=None,
):
    """Build a `ControlNetModel` from an original-format checkpoint.

    Derives a controlnet config from the original unet config, converts the
    matching weights via `convert_ldm_unet_checkpoint`, and loads them into a
    freshly constructed `ControlNetModel`, which is returned.
    """
    cfg = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True)
    cfg["upcast_attention"] = upcast_attention
    cfg.pop("sample_size")

    for option, value in (
        ("use_linear_projection", use_linear_projection),
        ("cross_attention_dim", cross_attention_dim),
    ):
        if value is not None:
            cfg[option] = value

    # Instantiate on the meta device when accelerate is available.
    ctx = init_empty_weights if is_accelerate_available() else nullcontext
    with ctx():
        controlnet = ControlNetModel(**cfg)

    # Some controlnet ckpt files are distributed independently from the rest of
    # the model components i.e. https://huggingface.co/thibaud/controlnet-sd21/
    # — those already contain a bare state dict and need no extraction.
    skip_extract_state_dict = "time_embed.0.weight" in checkpoint

    converted_state_dict = convert_ldm_unet_checkpoint(
        checkpoint,
        cfg,
        path=checkpoint_path,
        extract_ema=extract_ema,
        controlnet=True,
        skip_extract_state_dict=skip_extract_state_dict,
    )

    if is_accelerate_available():
        for tensor_name, tensor in converted_state_dict.items():
            set_module_tensor_to_device(controlnet, tensor_name, "cpu", value=tensor)
    else:
        controlnet.load_state_dict(converted_state_dict)

    return controlnet
1136
+
1137
+
1138
+ def download_from_original_stable_diffusion_ckpt(
1139
+ checkpoint_path_or_dict: Union[str, Dict[str, torch.Tensor]],
1140
+ original_config_file: str = None,
1141
+ image_size: Optional[int] = None,
1142
+ prediction_type: str = None,
1143
+ model_type: str = None,
1144
+ extract_ema: bool = False,
1145
+ scheduler_type: str = "pndm",
1146
+ num_in_channels: Optional[int] = None,
1147
+ upcast_attention: Optional[bool] = None,
1148
+ device: str = None,
1149
+ from_safetensors: bool = False,
1150
+ stable_unclip: Optional[str] = None,
1151
+ stable_unclip_prior: Optional[str] = None,
1152
+ clip_stats_path: Optional[str] = None,
1153
+ controlnet: Optional[bool] = None,
1154
+ adapter: Optional[bool] = None,
1155
+ load_safety_checker: bool = True,
1156
+ pipeline_class: DiffusionPipeline = None,
1157
+ local_files_only=False,
1158
+ vae_path=None,
1159
+ vae=None,
1160
+ text_encoder=None,
1161
+ text_encoder_2=None,
1162
+ tokenizer=None,
1163
+ tokenizer_2=None,
1164
+ config_files=None,
1165
+ ) -> DiffusionPipeline:
1166
+ """
1167
+ Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
1168
+ config file.
1169
+
1170
+ Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the
1171
+ global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is
1172
+ recommended that you override the default values and/or supply an `original_config_file` wherever possible.
1173
+
1174
+ Args:
1175
+ checkpoint_path_or_dict (`str` or `dict`): Path to `.ckpt` file, or the state dict.
1176
+ original_config_file (`str`):
1177
+ Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically
1178
+ inferred by looking for a key that only exists in SD2.0 models.
1179
+ image_size (`int`, *optional*, defaults to 512):
1180
+ The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2
1181
+ Base. Use 768 for Stable Diffusion v2.
1182
+ prediction_type (`str`, *optional*):
1183
+ The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable
1184
+ Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2.
1185
+ num_in_channels (`int`, *optional*, defaults to None):
1186
+ The number of input channels. If `None`, it will be automatically inferred.
1187
+ scheduler_type (`str`, *optional*, defaults to 'pndm'):
1188
+ Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
1189
+ "ddim"]`.
1190
+ model_type (`str`, *optional*, defaults to `None`):
1191
+ The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder",
1192
+ "FrozenCLIPEmbedder", "PaintByExample"]`.
1193
+ is_img2img (`bool`, *optional*, defaults to `False`):
1194
+ Whether the model should be loaded as an img2img pipeline.
1195
+ extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for
1196
+ checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to
1197
+ `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for
1198
+ inference. Non-EMA weights are usually better to continue fine-tuning.
1199
+ upcast_attention (`bool`, *optional*, defaults to `None`):
1200
+ Whether the attention computation should always be upcasted. This is necessary when running stable
1201
+ diffusion 2.1.
1202
+ device (`str`, *optional*, defaults to `None`):
1203
+ The device to use. Pass `None` to determine automatically.
1204
+ from_safetensors (`str`, *optional*, defaults to `False`):
1205
+ If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.
1206
+ load_safety_checker (`bool`, *optional*, defaults to `True`):
1207
+ Whether to load the safety checker or not. Defaults to `True`.
1208
+ pipeline_class (`str`, *optional*, defaults to `None`):
1209
+ The pipeline class to use. Pass `None` to determine automatically.
1210
+ local_files_only (`bool`, *optional*, defaults to `False`):
1211
+ Whether or not to only look at local files (i.e., do not try to download the model).
1212
+ vae (`AutoencoderKL`, *optional*, defaults to `None`):
1213
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
1214
+ this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
1215
+ text_encoder (`CLIPTextModel`, *optional*, defaults to `None`):
1216
+ An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel)
1217
+ to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)
1218
+ variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
1219
+ tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`):
1220
+ An instance of
1221
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
1222
+ to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if
1223
+ needed.
1224
+ config_files (`Dict[str, str]`, *optional*, defaults to `None`):
1225
+ A dictionary mapping from config file names to their contents. If this parameter is `None`, the function
1226
+ will load the config files by itself, if needed. Valid keys are:
1227
+ - `v1`: Config file for Stable Diffusion v1
1228
+ - `v2`: Config file for Stable Diffusion v2
1229
+ - `xl`: Config file for Stable Diffusion XL
1230
+ - `xl_refiner`: Config file for Stable Diffusion XL Refiner
1231
+ return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
1232
+ """
1233
+
1234
+ # import pipelines here to avoid circular import error when using from_single_file method
1235
+ from diffusers import (
1236
+ LDMTextToImagePipeline,
1237
+ PaintByExamplePipeline,
1238
+ StableDiffusionControlNetPipeline,
1239
+ StableDiffusionInpaintPipeline,
1240
+ StableDiffusionPipeline,
1241
+ StableDiffusionUpscalePipeline,
1242
+ StableDiffusionXLControlNetInpaintPipeline,
1243
+ StableDiffusionXLImg2ImgPipeline,
1244
+ StableDiffusionXLInpaintPipeline,
1245
+ StableDiffusionXLPipeline,
1246
+ StableUnCLIPImg2ImgPipeline,
1247
+ StableUnCLIPPipeline,
1248
+ )
1249
+
1250
+ if prediction_type == "v-prediction":
1251
+ prediction_type = "v_prediction"
1252
+
1253
+ if isinstance(checkpoint_path_or_dict, str):
1254
+ if from_safetensors:
1255
+ from safetensors.torch import load_file as safe_load
1256
+
1257
+ checkpoint = safe_load(checkpoint_path_or_dict, device="cpu")
1258
+ else:
1259
+ if device is None:
1260
+ device = "cuda" if torch.cuda.is_available() else "cpu"
1261
+ checkpoint = torch.load(checkpoint_path_or_dict, map_location=device)
1262
+ else:
1263
+ checkpoint = torch.load(checkpoint_path_or_dict, map_location=device)
1264
+ elif isinstance(checkpoint_path_or_dict, dict):
1265
+ checkpoint = checkpoint_path_or_dict
1266
+
1267
+ # Sometimes models don't have the global_step item
1268
+ if "global_step" in checkpoint:
1269
+ global_step = checkpoint["global_step"]
1270
+ else:
1271
+ logger.debug("global_step key not found in model")
1272
+ global_step = None
1273
+
1274
+ # NOTE: this while loop isn't great but this controlnet checkpoint has one additional
1275
+ # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21
1276
+ while "state_dict" in checkpoint:
1277
+ checkpoint = checkpoint["state_dict"]
1278
+
1279
+ if original_config_file is None:
1280
+ key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
1281
+ key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias"
1282
+ key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
1283
+ is_upscale = pipeline_class == StableDiffusionUpscalePipeline
1284
+
1285
+ config_url = None
1286
+
1287
+ # model_type = "v1"
1288
+ if config_files is not None and "v1" in config_files:
1289
+ original_config_file = config_files["v1"]
1290
+ else:
1291
+ config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
1292
+
1293
+ if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024:
1294
+ # model_type = "v2"
1295
+ if config_files is not None and "v2" in config_files:
1296
+ original_config_file = config_files["v2"]
1297
+ else:
1298
+ config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
1299
+ if global_step == 110000:
1300
+ # v2.1 needs to upcast attention
1301
+ upcast_attention = True
1302
+ elif key_name_sd_xl_base in checkpoint:
1303
+ # only base xl has two text embedders
1304
+ if config_files is not None and "xl" in config_files:
1305
+ original_config_file = config_files["xl"]
1306
+ else:
1307
+ config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
1308
+ elif key_name_sd_xl_refiner in checkpoint:
1309
+ # only refiner xl has embedder and one text embedders
1310
+ if config_files is not None and "xl_refiner" in config_files:
1311
+ original_config_file = config_files["xl_refiner"]
1312
+ else:
1313
+ config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
1314
+
1315
+ if is_upscale:
1316
+ config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
1317
+
1318
+ if config_url is not None:
1319
+ original_config_file = BytesIO(requests.get(config_url).content)
1320
+ else:
1321
+ with open(original_config_file, "r") as f:
1322
+ original_config_file = f.read()
1323
+ else:
1324
+ with open(original_config_file, "r") as f:
1325
+ original_config_file = f.read()
1326
+
1327
+ original_config = yaml.safe_load(original_config_file)
1328
+
1329
+ # Convert the text model.
1330
+ if (
1331
+ model_type is None
1332
+ and "cond_stage_config" in original_config["model"]["params"]
1333
+ and original_config["model"]["params"]["cond_stage_config"] is not None
1334
+ ):
1335
+ model_type = original_config["model"]["params"]["cond_stage_config"]["target"].split(".")[-1]
1336
+ logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}")
1337
+ elif model_type is None and original_config["model"]["params"]["network_config"] is not None:
1338
+ if original_config["model"]["params"]["network_config"]["params"]["context_dim"] == 2048:
1339
+ model_type = "SDXL"
1340
+ else:
1341
+ model_type = "SDXL-Refiner"
1342
+ if image_size is None:
1343
+ image_size = 1024
1344
+
1345
+ if pipeline_class is None:
1346
+ # Check if we have a SDXL or SD model and initialize default pipeline
1347
+ if model_type not in ["SDXL", "SDXL-Refiner"]:
1348
+ pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline
1349
+ else:
1350
+ pipeline_class = StableDiffusionXLPipeline if model_type == "SDXL" else StableDiffusionXLImg2ImgPipeline
1351
+
1352
+ if num_in_channels is None and pipeline_class in [
1353
+ StableDiffusionInpaintPipeline,
1354
+ StableDiffusionXLInpaintPipeline,
1355
+ StableDiffusionXLControlNetInpaintPipeline,
1356
+ ]:
1357
+ num_in_channels = 9
1358
+ if num_in_channels is None and pipeline_class == StableDiffusionUpscalePipeline:
1359
+ num_in_channels = 7
1360
+ elif num_in_channels is None:
1361
+ num_in_channels = 4
1362
+
1363
+ if "unet_config" in original_config["model"]["params"]:
1364
+ original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
1365
+
1366
+ if (
1367
+ "parameterization" in original_config["model"]["params"]
1368
+ and original_config["model"]["params"]["parameterization"] == "v"
1369
+ ):
1370
+ if prediction_type is None:
1371
+ # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"`
1372
+ # as it relies on a brittle global step parameter here
1373
+ prediction_type = "epsilon" if global_step == 875000 else "v_prediction"
1374
+ if image_size is None:
1375
+ # NOTE: For stable diffusion 2 base one has to pass `image_size==512`
1376
+ # as it relies on a brittle global step parameter here
1377
+ image_size = 512 if global_step == 875000 else 768
1378
+ else:
1379
+ if prediction_type is None:
1380
+ prediction_type = "epsilon"
1381
+ if image_size is None:
1382
+ image_size = 512
1383
+
1384
+ if controlnet is None and "control_stage_config" in original_config["model"]["params"]:
1385
+ path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else ""
1386
+ controlnet = convert_controlnet_checkpoint(
1387
+ checkpoint, original_config, path, image_size, upcast_attention, extract_ema
1388
+ )
1389
+
1390
+ if "timesteps" in original_config["model"]["params"]:
1391
+ num_train_timesteps = original_config["model"]["params"]["timesteps"]
1392
+ else:
1393
+ num_train_timesteps = 1000
1394
+
1395
+ if model_type in ["SDXL", "SDXL-Refiner"]:
1396
+ scheduler_dict = {
1397
+ "beta_schedule": "scaled_linear",
1398
+ "beta_start": 0.00085,
1399
+ "beta_end": 0.012,
1400
+ "interpolation_type": "linear",
1401
+ "num_train_timesteps": num_train_timesteps,
1402
+ "prediction_type": "epsilon",
1403
+ "sample_max_value": 1.0,
1404
+ "set_alpha_to_one": False,
1405
+ "skip_prk_steps": True,
1406
+ "steps_offset": 1,
1407
+ "timestep_spacing": "leading",
1408
+ }
1409
+ scheduler = EulerDiscreteScheduler.from_config(scheduler_dict)
1410
+ scheduler_type = "euler"
1411
+ else:
1412
+ if "linear_start" in original_config["model"]["params"]:
1413
+ beta_start = original_config["model"]["params"]["linear_start"]
1414
+ else:
1415
+ beta_start = 0.02
1416
+
1417
+ if "linear_end" in original_config["model"]["params"]:
1418
+ beta_end = original_config["model"]["params"]["linear_end"]
1419
+ else:
1420
+ beta_end = 0.085
1421
+ scheduler = DDIMScheduler(
1422
+ beta_end=beta_end,
1423
+ beta_schedule="scaled_linear",
1424
+ beta_start=beta_start,
1425
+ num_train_timesteps=num_train_timesteps,
1426
+ steps_offset=1,
1427
+ clip_sample=False,
1428
+ set_alpha_to_one=False,
1429
+ prediction_type=prediction_type,
1430
+ )
1431
+ # make sure scheduler works correctly with DDIM
1432
+ scheduler.register_to_config(clip_sample=False)
1433
+
1434
+ if scheduler_type == "pndm":
1435
+ config = dict(scheduler.config)
1436
+ config["skip_prk_steps"] = True
1437
+ scheduler = PNDMScheduler.from_config(config)
1438
+ elif scheduler_type == "lms":
1439
+ scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
1440
+ elif scheduler_type == "heun":
1441
+ scheduler = HeunDiscreteScheduler.from_config(scheduler.config)
1442
+ elif scheduler_type == "euler":
1443
+ scheduler = EulerDiscreteScheduler.from_config(scheduler.config)
1444
+ elif scheduler_type == "euler-ancestral":
1445
+ scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
1446
+ elif scheduler_type == "dpm":
1447
+ scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
1448
+ elif scheduler_type == "ddim":
1449
+ scheduler = scheduler
1450
+ else:
1451
+ raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!")
1452
+
1453
+ if pipeline_class == StableDiffusionUpscalePipeline:
1454
+ image_size = original_config["model"]["params"]["unet_config"]["params"]["image_size"]
1455
+
1456
+ # Convert the UNet2DConditionModel model.
1457
+ unet_config = create_unet_diffusers_config(original_config, image_size=image_size)
1458
+ unet_config["upcast_attention"] = upcast_attention
1459
+
1460
+ path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else ""
1461
+ converted_unet_checkpoint = convert_ldm_unet_checkpoint(
1462
+ checkpoint, unet_config, path=path, extract_ema=extract_ema
1463
+ )
1464
+
1465
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
1466
+ with ctx():
1467
+ unet = UNet2DConditionModel(**unet_config)
1468
+
1469
+ if is_accelerate_available():
1470
+ if model_type not in ["SDXL", "SDXL-Refiner"]: # SBM Delay this.
1471
+ for param_name, param in converted_unet_checkpoint.items():
1472
+ set_module_tensor_to_device(unet, param_name, "cpu", value=param)
1473
+ else:
1474
+ unet.load_state_dict(converted_unet_checkpoint)
1475
+
1476
+ # Convert the VAE model.
1477
+ if vae_path is None and vae is None:
1478
+ vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
1479
+ converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
1480
+
1481
+ if (
1482
+ "model" in original_config
1483
+ and "params" in original_config["model"]
1484
+ and "scale_factor" in original_config["model"]["params"]
1485
+ ):
1486
+ vae_scaling_factor = original_config["model"]["params"]["scale_factor"]
1487
+ else:
1488
+ vae_scaling_factor = 0.18215 # default SD scaling factor
1489
+
1490
+ vae_config["scaling_factor"] = vae_scaling_factor
1491
+
1492
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
1493
+ with ctx():
1494
+ vae = AutoencoderKL(**vae_config)
1495
+
1496
+ if is_accelerate_available():
1497
+ for param_name, param in converted_vae_checkpoint.items():
1498
+ set_module_tensor_to_device(vae, param_name, "cpu", value=param)
1499
+ else:
1500
+ vae.load_state_dict(converted_vae_checkpoint)
1501
+ elif vae is None:
1502
+ vae = AutoencoderKL.from_pretrained(vae_path, local_files_only=local_files_only)
1503
+
1504
+ if model_type == "FrozenOpenCLIPEmbedder":
1505
+ config_name = "stabilityai/stable-diffusion-2"
1506
+ config_kwargs = {"subfolder": "text_encoder"}
1507
+
1508
+ if text_encoder is None:
1509
+ text_model = convert_open_clip_checkpoint(
1510
+ checkpoint, config_name, local_files_only=local_files_only, **config_kwargs
1511
+ )
1512
+ else:
1513
+ text_model = text_encoder
1514
+
1515
+ try:
1516
+ tokenizer = CLIPTokenizer.from_pretrained(
1517
+ "stabilityai/stable-diffusion-2", subfolder="tokenizer", local_files_only=local_files_only
1518
+ )
1519
+ except Exception:
1520
+ raise ValueError(
1521
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'stabilityai/stable-diffusion-2'."
1522
+ )
1523
+
1524
+ if stable_unclip is None:
1525
+ if controlnet:
1526
+ pipe = pipeline_class(
1527
+ vae=vae,
1528
+ text_encoder=text_model,
1529
+ tokenizer=tokenizer,
1530
+ unet=unet,
1531
+ scheduler=scheduler,
1532
+ controlnet=controlnet,
1533
+ safety_checker=None,
1534
+ feature_extractor=None,
1535
+ )
1536
+ if hasattr(pipe, "requires_safety_checker"):
1537
+ pipe.requires_safety_checker = False
1538
+
1539
+ elif pipeline_class == StableDiffusionUpscalePipeline:
1540
+ scheduler = DDIMScheduler.from_pretrained(
1541
+ "stabilityai/stable-diffusion-x4-upscaler", subfolder="scheduler"
1542
+ )
1543
+ low_res_scheduler = DDPMScheduler.from_pretrained(
1544
+ "stabilityai/stable-diffusion-x4-upscaler", subfolder="low_res_scheduler"
1545
+ )
1546
+
1547
+ pipe = pipeline_class(
1548
+ vae=vae,
1549
+ text_encoder=text_model,
1550
+ tokenizer=tokenizer,
1551
+ unet=unet,
1552
+ scheduler=scheduler,
1553
+ low_res_scheduler=low_res_scheduler,
1554
+ safety_checker=None,
1555
+ feature_extractor=None,
1556
+ )
1557
+
1558
+ else:
1559
+ pipe = pipeline_class(
1560
+ vae=vae,
1561
+ text_encoder=text_model,
1562
+ tokenizer=tokenizer,
1563
+ unet=unet,
1564
+ scheduler=scheduler,
1565
+ safety_checker=None,
1566
+ feature_extractor=None,
1567
+ )
1568
+ if hasattr(pipe, "requires_safety_checker"):
1569
+ pipe.requires_safety_checker = False
1570
+
1571
+ else:
1572
+ image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components(
1573
+ original_config, clip_stats_path=clip_stats_path, device=device
1574
+ )
1575
+
1576
+ if stable_unclip == "img2img":
1577
+ feature_extractor, image_encoder = stable_unclip_image_encoder(original_config)
1578
+
1579
+ pipe = StableUnCLIPImg2ImgPipeline(
1580
+ # image encoding components
1581
+ feature_extractor=feature_extractor,
1582
+ image_encoder=image_encoder,
1583
+ # image noising components
1584
+ image_normalizer=image_normalizer,
1585
+ image_noising_scheduler=image_noising_scheduler,
1586
+ # regular denoising components
1587
+ tokenizer=tokenizer,
1588
+ text_encoder=text_model,
1589
+ unet=unet,
1590
+ scheduler=scheduler,
1591
+ # vae
1592
+ vae=vae,
1593
+ )
1594
+ elif stable_unclip == "txt2img":
1595
+ if stable_unclip_prior is None or stable_unclip_prior == "karlo":
1596
+ karlo_model = "kakaobrain/karlo-v1-alpha"
1597
+ prior = PriorTransformer.from_pretrained(
1598
+ karlo_model, subfolder="prior", local_files_only=local_files_only
1599
+ )
1600
+
1601
+ try:
1602
+ prior_tokenizer = CLIPTokenizer.from_pretrained(
1603
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1604
+ )
1605
+ except Exception:
1606
+ raise ValueError(
1607
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1608
+ )
1609
+ prior_text_model = CLIPTextModelWithProjection.from_pretrained(
1610
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1611
+ )
1612
+
1613
+ prior_scheduler = UnCLIPScheduler.from_pretrained(
1614
+ karlo_model, subfolder="prior_scheduler", local_files_only=local_files_only
1615
+ )
1616
+ prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config)
1617
+ else:
1618
+ raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}")
1619
+
1620
+ pipe = StableUnCLIPPipeline(
1621
+ # prior components
1622
+ prior_tokenizer=prior_tokenizer,
1623
+ prior_text_encoder=prior_text_model,
1624
+ prior=prior,
1625
+ prior_scheduler=prior_scheduler,
1626
+ # image noising components
1627
+ image_normalizer=image_normalizer,
1628
+ image_noising_scheduler=image_noising_scheduler,
1629
+ # regular denoising components
1630
+ tokenizer=tokenizer,
1631
+ text_encoder=text_model,
1632
+ unet=unet,
1633
+ scheduler=scheduler,
1634
+ # vae
1635
+ vae=vae,
1636
+ )
1637
+ else:
1638
+ raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}")
1639
+ elif model_type == "PaintByExample":
1640
+ vision_model = convert_paint_by_example_checkpoint(checkpoint)
1641
+ try:
1642
+ tokenizer = CLIPTokenizer.from_pretrained(
1643
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1644
+ )
1645
+ except Exception:
1646
+ raise ValueError(
1647
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1648
+ )
1649
+ try:
1650
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
1651
+ "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only
1652
+ )
1653
+ except Exception:
1654
+ raise ValueError(
1655
+ f"With local_files_only set to {local_files_only}, you must first locally save the feature_extractor in the following path: 'CompVis/stable-diffusion-safety-checker'."
1656
+ )
1657
+ pipe = PaintByExamplePipeline(
1658
+ vae=vae,
1659
+ image_encoder=vision_model,
1660
+ unet=unet,
1661
+ scheduler=scheduler,
1662
+ safety_checker=None,
1663
+ feature_extractor=feature_extractor,
1664
+ )
1665
+ elif model_type == "FrozenCLIPEmbedder":
1666
+ text_model = convert_ldm_clip_checkpoint(
1667
+ checkpoint, local_files_only=local_files_only, text_encoder=text_encoder
1668
+ )
1669
+ try:
1670
+ tokenizer = (
1671
+ CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only)
1672
+ if tokenizer is None
1673
+ else tokenizer
1674
+ )
1675
+ except Exception:
1676
+ raise ValueError(
1677
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1678
+ )
1679
+
1680
+ if load_safety_checker:
1681
+ safety_checker = StableDiffusionSafetyChecker.from_pretrained(
1682
+ "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only
1683
+ )
1684
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
1685
+ "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only
1686
+ )
1687
+ else:
1688
+ safety_checker = None
1689
+ feature_extractor = None
1690
+
1691
+ if controlnet:
1692
+ pipe = pipeline_class(
1693
+ vae=vae,
1694
+ text_encoder=text_model,
1695
+ tokenizer=tokenizer,
1696
+ unet=unet,
1697
+ controlnet=controlnet,
1698
+ scheduler=scheduler,
1699
+ safety_checker=safety_checker,
1700
+ feature_extractor=feature_extractor,
1701
+ )
1702
+ else:
1703
+ pipe = pipeline_class(
1704
+ vae=vae,
1705
+ text_encoder=text_model,
1706
+ tokenizer=tokenizer,
1707
+ unet=unet,
1708
+ scheduler=scheduler,
1709
+ safety_checker=safety_checker,
1710
+ feature_extractor=feature_extractor,
1711
+ )
1712
+ elif model_type in ["SDXL", "SDXL-Refiner"]:
1713
+ is_refiner = model_type == "SDXL-Refiner"
1714
+
1715
+ if (is_refiner is False) and (tokenizer is None):
1716
+ try:
1717
+ tokenizer = CLIPTokenizer.from_pretrained(
1718
+ "openai/clip-vit-large-patch14", local_files_only=local_files_only
1719
+ )
1720
+ except Exception:
1721
+ raise ValueError(
1722
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'."
1723
+ )
1724
+
1725
+ if (is_refiner is False) and (text_encoder is None):
1726
+ text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only)
1727
+
1728
+ if tokenizer_2 is None:
1729
+ try:
1730
+ tokenizer_2 = CLIPTokenizer.from_pretrained(
1731
+ "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!", local_files_only=local_files_only
1732
+ )
1733
+ except Exception:
1734
+ raise ValueError(
1735
+ f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' with `pad_token` set to '!'."
1736
+ )
1737
+
1738
+ if text_encoder_2 is None:
1739
+ config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
1740
+ config_kwargs = {"projection_dim": 1280}
1741
+ prefix = "conditioner.embedders.0.model." if is_refiner else "conditioner.embedders.1.model."
1742
+
1743
+ text_encoder_2 = convert_open_clip_checkpoint(
1744
+ checkpoint,
1745
+ config_name,
1746
+ prefix=prefix,
1747
+ has_projection=True,
1748
+ local_files_only=local_files_only,
1749
+ **config_kwargs,
1750
+ )
1751
+
1752
+ if is_accelerate_available(): # SBM Now move model to cpu.
1753
+ for param_name, param in converted_unet_checkpoint.items():
1754
+ set_module_tensor_to_device(unet, param_name, "cpu", value=param)
1755
+
1756
+ if controlnet:
1757
+ pipe = pipeline_class(
1758
+ vae=vae,
1759
+ text_encoder=text_encoder,
1760
+ tokenizer=tokenizer,
1761
+ text_encoder_2=text_encoder_2,
1762
+ tokenizer_2=tokenizer_2,
1763
+ unet=unet,
1764
+ controlnet=controlnet,
1765
+ scheduler=scheduler,
1766
+ force_zeros_for_empty_prompt=True,
1767
+ )
1768
+ elif adapter:
1769
+ pipe = pipeline_class(
1770
+ vae=vae,
1771
+ text_encoder=text_encoder,
1772
+ tokenizer=tokenizer,
1773
+ text_encoder_2=text_encoder_2,
1774
+ tokenizer_2=tokenizer_2,
1775
+ unet=unet,
1776
+ adapter=adapter,
1777
+ scheduler=scheduler,
1778
+ force_zeros_for_empty_prompt=True,
1779
+ )
1780
+
1781
+ else:
1782
+ pipeline_kwargs = {
1783
+ "vae": vae,
1784
+ "text_encoder": text_encoder,
1785
+ "tokenizer": tokenizer,
1786
+ "text_encoder_2": text_encoder_2,
1787
+ "tokenizer_2": tokenizer_2,
1788
+ "unet": unet,
1789
+ "scheduler": scheduler,
1790
+ }
1791
+
1792
+ if (pipeline_class == StableDiffusionXLImg2ImgPipeline) or (
1793
+ pipeline_class == StableDiffusionXLInpaintPipeline
1794
+ ):
1795
+ pipeline_kwargs.update({"requires_aesthetics_score": is_refiner})
1796
+
1797
+ if is_refiner:
1798
+ pipeline_kwargs.update({"force_zeros_for_empty_prompt": False})
1799
+
1800
+ pipe = pipeline_class(**pipeline_kwargs)
1801
+ else:
1802
+ text_config = create_ldm_bert_config(original_config)
1803
+ text_model = convert_ldm_bert_checkpoint(checkpoint, text_config)
1804
+ tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased", local_files_only=local_files_only)
1805
+ pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
1806
+
1807
+ return pipe
1808
+
1809
+
1810
def download_controlnet_from_original_ckpt(
    checkpoint_path: str,
    original_config_file: str,
    image_size: int = 512,
    extract_ema: bool = False,
    num_in_channels: Optional[int] = None,
    upcast_attention: Optional[bool] = None,
    device: str = None,
    from_safetensors: bool = False,
    use_linear_projection: Optional[bool] = None,
    cross_attention_dim: Optional[bool] = None,
) -> DiffusionPipeline:
    """Convert an original (LDM-style) ControlNet checkpoint to a diffusers model.

    Args:
        checkpoint_path: Path to the `.ckpt` / `.safetensors` checkpoint file.
        original_config_file: Path to the original YAML config describing the
            model (a raw YAML string is also accepted for backward compatibility).
        image_size: Image size the model was trained on.
        extract_ema: Whether to extract the EMA weights when present.
        num_in_channels: Optional override for the UNet `in_channels`.
        upcast_attention: Whether attention computations should be upcast.
        device: Device for `torch.load` map_location (safetensors are always
            read on CPU here).
        from_safetensors: Whether the checkpoint is in safetensors format.
        use_linear_projection: Optional override forwarded to the converter.
        cross_attention_dim: Optional override forwarded to the converter.

    Returns:
        The converted ControlNet model (the `DiffusionPipeline` annotation is
        kept for backward compatibility with existing callers).

    Raises:
        ValueError: If the config lacks a `control_stage_config` section.
    """
    import os  # local import: only needed to distinguish path vs raw-YAML input

    if from_safetensors:
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        checkpoint = torch.load(checkpoint_path, map_location=device)

    # NOTE: this while loop isn't great but this controlnet checkpoint has one additional
    # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21
    while "state_dict" in checkpoint:
        checkpoint = checkpoint["state_dict"]

    # BUGFIX: `original_config_file` is documented as a *path*. Feeding the path
    # string directly to `yaml.safe_load` parses the path itself as a YAML scalar
    # and returns a plain string instead of the config dict, breaking every
    # subsequent `original_config[...]` access. Read the file when the argument
    # is an existing path; otherwise treat it as raw YAML text so callers that
    # already pass file contents keep working.
    if os.path.isfile(original_config_file):
        with open(original_config_file, "r") as f:
            original_config = yaml.safe_load(f)
    else:
        original_config = yaml.safe_load(original_config_file)

    if num_in_channels is not None:
        original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels

    if "control_stage_config" not in original_config["model"]["params"]:
        raise ValueError("`control_stage_config` not present in original config")

    controlnet = convert_controlnet_checkpoint(
        checkpoint,
        original_config,
        checkpoint_path,
        image_size,
        upcast_attention,
        extract_ema,
        use_linear_projection=use_linear_projection,
        cross_attention_dim=cross_attention_dim,
    )

    return controlnet
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from functools import partial
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict
23
+ from flax.jax_utils import unreplicate
24
+ from flax.training.common_utils import shard
25
+ from packaging import version
26
+ from PIL import Image
27
+ from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel
28
+
29
+ from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel
30
+ from ...schedulers import (
31
+ FlaxDDIMScheduler,
32
+ FlaxDPMSolverMultistepScheduler,
33
+ FlaxLMSDiscreteScheduler,
34
+ FlaxPNDMScheduler,
35
+ )
36
+ from ...utils import deprecate, logging, replace_example_docstring
37
+ from ..pipeline_flax_utils import FlaxDiffusionPipeline
38
+ from .pipeline_output import FlaxStableDiffusionPipelineOutput
39
+ from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
40
+
41
+
42
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
43
+
44
+ # Set to True to use python for loop instead of jax.fori_loop for easier debugging
45
+ DEBUG = False
46
+
47
+ EXAMPLE_DOC_STRING = """
48
+ Examples:
49
+ ```py
50
+ >>> import jax
51
+ >>> import numpy as np
52
+ >>> from flax.jax_utils import replicate
53
+ >>> from flax.training.common_utils import shard
54
+
55
+ >>> from diffusers import FlaxStableDiffusionPipeline
56
+
57
+ >>> pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
58
+ ... "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jax.numpy.bfloat16
59
+ ... )
60
+
61
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
62
+
63
+ >>> prng_seed = jax.random.PRNGKey(0)
64
+ >>> num_inference_steps = 50
65
+
66
+ >>> num_samples = jax.device_count()
67
+ >>> prompt = num_samples * [prompt]
68
+ >>> prompt_ids = pipeline.prepare_inputs(prompt)
69
+ # shard inputs and rng
70
+
71
+ >>> params = replicate(params)
72
+ >>> prng_seed = jax.random.split(prng_seed, jax.device_count())
73
+ >>> prompt_ids = shard(prompt_ids)
74
+
75
+ >>> images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
76
+ >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
77
+ ```
78
+ """
79
+
80
+
81
+ class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline):
82
+ r"""
83
+ Flax-based pipeline for text-to-image generation using Stable Diffusion.
84
+
85
+ This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
86
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
87
+
88
+ Args:
89
+ vae ([`FlaxAutoencoderKL`]):
90
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
91
+ text_encoder ([`~transformers.FlaxCLIPTextModel`]):
92
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
93
+ tokenizer ([`~transformers.CLIPTokenizer`]):
94
+ A `CLIPTokenizer` to tokenize text.
95
+ unet ([`FlaxUNet2DConditionModel`]):
96
+ A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
97
+ scheduler ([`SchedulerMixin`]):
98
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
99
+ [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
100
+ [`FlaxDPMSolverMultistepScheduler`].
101
+ safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
102
+ Classification module that estimates whether generated images could be considered offensive or harmful.
103
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
104
+ about a model's potential harms.
105
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
106
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
107
+ """
108
+
109
    def __init__(
        self,
        vae: FlaxAutoencoderKL,
        text_encoder: FlaxCLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: FlaxUNet2DConditionModel,
        scheduler: Union[
            FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
        ],
        safety_checker: FlaxStableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        dtype: jnp.dtype = jnp.float32,
    ):
        """Register all sub-models on the pipeline and patch known legacy configs.

        `dtype` is stored on the instance for later use by the pipeline.
        """
        super().__init__()
        self.dtype = dtype

        # Running without a safety checker is permitted but discouraged — warn loudly.
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        # Checkpoints produced before diffusers 0.9.0 sometimes shipped with
        # `sample_size < 64`; detect that combination and patch the unet config
        # to 64 after emitting a deprecation warning.
        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            # Configs are frozen; replace the whole internal dict to apply the patch.
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        # Spatial downscaling factor of the VAE, derived from its channel config
        # (e.g. 8 for standard Stable Diffusion VAEs).
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
166
+
167
+ def prepare_inputs(self, prompt: Union[str, List[str]]):
168
+ if not isinstance(prompt, (str, list)):
169
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
170
+
171
+ text_input = self.tokenizer(
172
+ prompt,
173
+ padding="max_length",
174
+ max_length=self.tokenizer.model_max_length,
175
+ truncation=True,
176
+ return_tensors="np",
177
+ )
178
+ return text_input.input_ids
179
+
180
+ def _get_has_nsfw_concepts(self, features, params):
181
+ has_nsfw_concepts = self.safety_checker(features, params)
182
+ return has_nsfw_concepts
183
+
184
    def _run_safety_checker(self, images, safety_model_params, jit=False):
        """Black out any generated images the safety checker flags as NSFW.

        Returns the (possibly copied) uint8 image batch and the per-image flags.
        """
        # safety_model_params should already be replicated when jit is True
        pil_images = [Image.fromarray(image) for image in images]
        features = self.feature_extractor(pil_images, return_tensors="np").pixel_values

        if jit:
            # Shard features across devices, run the pmapped checker, then
            # gather per-device results back into a single batch.
            features = shard(features)
            has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
            has_nsfw_concepts = unshard(has_nsfw_concepts)
            safety_model_params = unreplicate(safety_model_params)
        else:
            has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)

        # Copy the image array lazily — only on the first flagged image — so the
        # caller's input is never mutated when nothing is flagged.
        images_was_copied = False
        for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
            if has_nsfw_concept:
                if not images_was_copied:
                    images_was_copied = True
                    images = images.copy()

                images[idx] = np.zeros(images[idx].shape, dtype=np.uint8)  # black image

        if any(has_nsfw_concepts):
            warnings.warn(
                "Potential NSFW content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        return images, has_nsfw_concepts
213
+
214
    def _generate(
        self,
        prompt_ids: jnp.array,
        params: Union[Dict, FrozenDict],
        prng_seed: jax.Array,
        num_inference_steps: int,
        height: int,
        width: int,
        guidance_scale: float,
        latents: Optional[jnp.ndarray] = None,
        neg_prompt_ids: Optional[jnp.ndarray] = None,
    ):
        """Core generation: encode prompts, run the denoising loop, decode with the VAE.

        Returns decoded images scaled to [0, 1] in NHWC layout.
        """
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # get prompt text embeddings
        prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]

        # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
        # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
        batch_size = prompt_ids.shape[0]

        max_length = prompt_ids.shape[-1]

        # Unconditional ("negative") branch: caller-provided ids, or the empty
        # prompt padded to the same length as the positive prompt.
        if neg_prompt_ids is None:
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
            ).input_ids
        else:
            uncond_input = neg_prompt_ids
        negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
        context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])

        # Ensure model output will be `float32` before going into the scheduler
        guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32)

        latents_shape = (
            batch_size,
            self.unet.config.in_channels,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if latents is None:
            latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")

        def loop_body(step, args):
            # One denoising step; signature matches what jax.lax.fori_loop expects.
            latents, scheduler_state = args
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            latents_input = jnp.concatenate([latents] * 2)

            t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
            timestep = jnp.broadcast_to(t, latents_input.shape[0])

            latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)

            # predict the noise residual
            noise_pred = self.unet.apply(
                {"params": params["unet"]},
                jnp.array(latents_input),
                jnp.array(timestep, dtype=jnp.int32),
                encoder_hidden_states=context,
            ).sample
            # perform guidance
            noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
            return latents, scheduler_state

        scheduler_state = self.scheduler.set_timesteps(
            params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape
        )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * params["scheduler"].init_noise_sigma

        if DEBUG:
            # run with python for loop
            for i in range(num_inference_steps):
                latents, scheduler_state = loop_body(i, (latents, scheduler_state))
        else:
            latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state))

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample

        # Map from [-1, 1] to [0, 1] and NCHW -> NHWC for post-processing.
        image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
        return image
309
+
310
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt_ids: jnp.array,
        params: Union[Dict, FrozenDict],
        prng_seed: jax.Array,
        num_inference_steps: int = 50,
        height: Optional[int] = None,
        width: Optional[int] = None,
        guidance_scale: Union[float, jnp.ndarray] = 7.5,
        latents: jnp.ndarray = None,
        neg_prompt_ids: jnp.ndarray = None,
        return_dict: bool = True,
        jit: bool = False,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt_ids (`jnp.ndarray`):
                Tokenized prompt(s) to guide image generation (e.g. produced by the pipeline's tokenizer).
            params (`Dict` or `FrozenDict`):
                Dictionary containing the model parameters/weights (replicated across devices when `jit` is `True`).
            prng_seed (`jax.Array`):
                PRNG key (or a sharded batch of keys when `jit` is `True`) used to sample the initial latents.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            latents (`jnp.ndarray`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                array is generated by sampling using the supplied random `generator`.
            neg_prompt_ids (`jnp.ndarray`, *optional*):
                Tokenized negative prompt(s). When omitted, the empty string is tokenized and used instead.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
                a plain tuple.
            jit (`bool`, defaults to `False`):
                Whether to run `pmap` versions of the generation and safety scoring functions.

                <Tip warning={true}>

                This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
                future release.

                </Tip>

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated images
                and the second element is a list of `bool`s indicating whether the corresponding generated image
                contains "not-safe-for-work" (nsfw) content.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        if isinstance(guidance_scale, float):
            # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
            # shape information, as they may be sharded (when `jit` is `True`), or not.
            guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
            if len(prompt_ids.shape) > 2:
                # Assume sharded
                guidance_scale = guidance_scale[:, None]

        if jit:
            # pmap'd path: inputs are expected to be sharded over the leading device axis.
            images = _p_generate(
                self,
                prompt_ids,
                params,
                prng_seed,
                num_inference_steps,
                height,
                width,
                guidance_scale,
                latents,
                neg_prompt_ids,
            )
        else:
            images = self._generate(
                prompt_ids,
                params,
                prng_seed,
                num_inference_steps,
                height,
                width,
                guidance_scale,
                latents,
                neg_prompt_ids,
            )

        if self.safety_checker is not None:
            safety_params = params["safety_checker"]
            # Safety checker consumes uint8 HWC images.
            images_uint8_casted = (images * 255).round().astype("uint8")
            # NOTE(review): assumes `images` carries a leading (devices, batch) pair as in
            # the jit path — confirm the non-jit path produces the same layout.
            num_devices, batch_size = images.shape[:2]

            images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
            images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
            # Copy: np.asarray over a JAX array is read-only, and we assign into it below.
            images = np.asarray(images).copy()

            # block images
            if any(has_nsfw_concept):
                for i, is_nsfw in enumerate(has_nsfw_concept):
                    if is_nsfw:
                        # NOTE(review): indexes device i, batch 0 — verify this is the
                        # intended mapping from the flattened NSFW flag index.
                        images[i, 0] = np.asarray(images_uint8_casted[i])

            images = images.reshape(num_devices, batch_size, height, width, 3)
        else:
            images = np.asarray(images)
            has_nsfw_concept = False

        if not return_dict:
            return (images, has_nsfw_concept)

        return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
430
+
431
+
432
# Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation.
# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
@partial(
    jax.pmap,
    in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0),
    static_broadcasted_argnums=(0, 4, 5, 6),
)
def _p_generate(
    pipe,
    prompt_ids,
    params,
    prng_seed,
    num_inference_steps,
    height,
    width,
    guidance_scale,
    latents,
    neg_prompt_ids,
):
    # pmap wrapper: runs `pipe._generate` once per device on that device's shard of
    # the 0-mapped arguments; the pipeline object and image geometry are broadcast.
    return pipe._generate(
        prompt_ids,
        params,
        prng_seed,
        num_inference_steps,
        height,
        width,
        guidance_scale,
        latents,
        neg_prompt_ids,
    )
462
+
463
+
464
@partial(jax.pmap, static_broadcasted_argnums=(0,))
def _p_get_has_nsfw_concepts(pipe, features, params):
    # pmap wrapper: evaluates the safety checker on each device's shard of `features`.
    return pipe._get_has_nsfw_concepts(features, params)
467
+
468
+
469
def unshard(x: jnp.ndarray):
    """Merge the leading (device, batch) axes of a pmap output into one batch axis.

    Equivalent to ``einops.rearrange(x, 'd b ... -> (d b) ...')``.
    """
    devices, per_device_batch = x.shape[0], x.shape[1]
    return x.reshape((devices * per_device_batch,) + tuple(x.shape[2:]))
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py ADDED
@@ -0,0 +1,532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from functools import partial
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict
23
+ from flax.jax_utils import unreplicate
24
+ from flax.training.common_utils import shard
25
+ from PIL import Image
26
+ from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel
27
+
28
+ from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel
29
+ from ...schedulers import (
30
+ FlaxDDIMScheduler,
31
+ FlaxDPMSolverMultistepScheduler,
32
+ FlaxLMSDiscreteScheduler,
33
+ FlaxPNDMScheduler,
34
+ )
35
+ from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring
36
+ from ..pipeline_flax_utils import FlaxDiffusionPipeline
37
+ from .pipeline_output import FlaxStableDiffusionPipelineOutput
38
+ from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+ # Set to True to use python for loop instead of jax.fori_loop for easier debugging
44
+ DEBUG = False
45
+
46
+ EXAMPLE_DOC_STRING = """
47
+ Examples:
48
+ ```py
49
+ >>> import jax
50
+ >>> import numpy as np
51
+ >>> import jax.numpy as jnp
52
+ >>> from flax.jax_utils import replicate
53
+ >>> from flax.training.common_utils import shard
54
+ >>> import requests
55
+ >>> from io import BytesIO
56
+ >>> from PIL import Image
57
+ >>> from diffusers import FlaxStableDiffusionImg2ImgPipeline
58
+
59
+
60
+ >>> def create_key(seed=0):
61
+ ... return jax.random.PRNGKey(seed)
62
+
63
+
64
+ >>> rng = create_key(0)
65
+
66
+ >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
67
+ >>> response = requests.get(url)
68
+ >>> init_img = Image.open(BytesIO(response.content)).convert("RGB")
69
+ >>> init_img = init_img.resize((768, 512))
70
+
71
+ >>> prompts = "A fantasy landscape, trending on artstation"
72
+
73
+ >>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained(
74
+ ... "CompVis/stable-diffusion-v1-4",
75
+ ... revision="flax",
76
+ ... dtype=jnp.bfloat16,
77
+ ... )
78
+
79
+ >>> num_samples = jax.device_count()
80
+ >>> rng = jax.random.split(rng, jax.device_count())
81
+ >>> prompt_ids, processed_image = pipeline.prepare_inputs(
82
+ ... prompt=[prompts] * num_samples, image=[init_img] * num_samples
83
+ ... )
84
+ >>> p_params = replicate(params)
85
+ >>> prompt_ids = shard(prompt_ids)
86
+ >>> processed_image = shard(processed_image)
87
+
88
+ >>> output = pipeline(
89
+ ... prompt_ids=prompt_ids,
90
+ ... image=processed_image,
91
+ ... params=p_params,
92
+ ... prng_seed=rng,
93
+ ... strength=0.75,
94
+ ... num_inference_steps=50,
95
+ ... jit=True,
96
+ ... height=512,
97
+ ... width=768,
98
+ ... ).images
99
+
100
+ >>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
101
+ ```
102
+ """
103
+
104
+
105
class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline):
    r"""
    Flax-based pipeline for text-guided image-to-image generation using Stable Diffusion.

    This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Use [`~FlaxStableDiffusionImg2ImgPipeline.prepare_inputs`] to turn a prompt and a PIL image into the arrays
    expected by `__call__`.

    Args:
        vae ([`FlaxAutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.FlaxCLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`FlaxUNet2DConditionModel`]):
            A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
            [`FlaxDPMSolverMultistepScheduler`].
        safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """
132
+
133
    def __init__(
        self,
        vae: FlaxAutoencoderKL,
        text_encoder: FlaxCLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: FlaxUNet2DConditionModel,
        scheduler: Union[
            FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
        ],
        safety_checker: FlaxStableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        dtype: jnp.dtype = jnp.float32,
    ):
        """Register the pipeline's modules and compute the VAE downsampling factor.

        `dtype` is stored on the pipeline; passing `safety_checker=None` disables NSFW
        filtering and emits a warning.
        """
        super().__init__()
        self.dtype = dtype

        # Warn (but do not fail) when the safety checker is explicitly disabled.
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        # Spatial downsampling factor of the VAE: one factor of 2 per down block.
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
169
+
170
+ def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]):
171
+ if not isinstance(prompt, (str, list)):
172
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
173
+
174
+ if not isinstance(image, (Image.Image, list)):
175
+ raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")
176
+
177
+ if isinstance(image, Image.Image):
178
+ image = [image]
179
+
180
+ processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image])
181
+
182
+ text_input = self.tokenizer(
183
+ prompt,
184
+ padding="max_length",
185
+ max_length=self.tokenizer.model_max_length,
186
+ truncation=True,
187
+ return_tensors="np",
188
+ )
189
+ return text_input.input_ids, processed_images
190
+
191
+ def _get_has_nsfw_concepts(self, features, params):
192
+ has_nsfw_concepts = self.safety_checker(features, params)
193
+ return has_nsfw_concepts
194
+
195
    def _run_safety_checker(self, images, safety_model_params, jit=False):
        """Score a batch of uint8 HWC images for NSFW content and black out flagged ones.

        Args:
            images: uint8 array of shape (batch, height, width, 3).
            safety_model_params: Flax parameters for the safety checker; should already be
                replicated across devices when `jit` is `True`.
            jit: when `True`, shard features across devices and run the pmap'd checker.

        Returns:
            Tuple of (images with flagged entries zeroed, per-image NSFW flags).
        """
        # safety_model_params should already be replicated when jit is True
        pil_images = [Image.fromarray(image) for image in images]
        features = self.feature_extractor(pil_images, return_tensors="np").pixel_values

        if jit:
            # Shard the features, run the pmap'd checker, then undo the sharding/replication.
            features = shard(features)
            has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
            has_nsfw_concepts = unshard(has_nsfw_concepts)
            safety_model_params = unreplicate(safety_model_params)
        else:
            has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)

        images_was_copied = False
        for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
            if has_nsfw_concept:
                if not images_was_copied:
                    # Copy lazily: only pay for the copy when at least one image is flagged.
                    images_was_copied = True
                    images = images.copy()

                images[idx] = np.zeros(images[idx].shape, dtype=np.uint8)  # black image

        if any(has_nsfw_concepts):
            warnings.warn(
                "Potential NSFW content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        return images, has_nsfw_concepts
224
+
225
+ def get_timestep_start(self, num_inference_steps, strength):
226
+ # get the original timestep using init_timestep
227
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
228
+
229
+ t_start = max(num_inference_steps - init_timestep, 0)
230
+
231
+ return t_start
232
+
233
    def _generate(
        self,
        prompt_ids: jnp.ndarray,
        image: jnp.ndarray,
        params: Union[Dict, FrozenDict],
        prng_seed: jax.Array,
        start_timestep: int,
        num_inference_steps: int,
        height: int,
        width: int,
        guidance_scale: float,
        noise: Optional[jnp.ndarray] = None,
        neg_prompt_ids: Optional[jnp.ndarray] = None,
    ):
        """Single-device img2img denoising loop: encode `image`, noise it to
        `start_timestep`, run the UNet with classifier-free guidance from
        `start_timestep` to `num_inference_steps`, and decode the result.

        Returns a float image batch in [0, 1] with layout (batch, height, width, 3).
        """
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # get prompt text embeddings
        prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]

        # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
        # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
        batch_size = prompt_ids.shape[0]

        max_length = prompt_ids.shape[-1]

        # Unconditional embeddings: tokenize empty strings unless negative prompts were given.
        if neg_prompt_ids is None:
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
            ).input_ids
        else:
            uncond_input = neg_prompt_ids
        negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
        # [uncond; cond] so a single UNet forward pass covers both CFG branches.
        context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])

        latents_shape = (
            batch_size,
            self.unet.config.in_channels,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if noise is None:
            noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
        else:
            if noise.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {noise.shape}, expected {latents_shape}")

        # Create init_latents
        init_latent_dist = self.vae.apply({"params": params["vae"]}, image, method=self.vae.encode).latent_dist
        init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2))
        init_latents = self.vae.config.scaling_factor * init_latents

        def loop_body(step, args):
            latents, scheduler_state = args
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            latents_input = jnp.concatenate([latents] * 2)

            t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
            timestep = jnp.broadcast_to(t, latents_input.shape[0])

            latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)

            # predict the noise residual
            noise_pred = self.unet.apply(
                {"params": params["unet"]},
                jnp.array(latents_input),
                jnp.array(timestep, dtype=jnp.int32),
                encoder_hidden_states=context,
            ).sample
            # perform guidance
            noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
            return latents, scheduler_state

        scheduler_state = self.scheduler.set_timesteps(
            params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape
        )

        # Timestep at which the reference image enters the diffusion trajectory.
        latent_timestep = scheduler_state.timesteps[start_timestep : start_timestep + 1].repeat(batch_size)

        latents = self.scheduler.add_noise(params["scheduler"], init_latents, noise, latent_timestep)

        # scale the initial noise by the standard deviation required by the scheduler
        # NOTE(review): `add_noise` above already produced latents at `latent_timestep`;
        # multiplying again by `init_noise_sigma` may double-scale for schedulers where
        # it differs from 1.0 — confirm against the scheduler implementations.
        latents = latents * params["scheduler"].init_noise_sigma

        if DEBUG:
            # run with python for loop
            for i in range(start_timestep, num_inference_steps):
                latents, scheduler_state = loop_body(i, (latents, scheduler_state))
        else:
            latents, _ = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state))

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample

        image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
        return image
336
+
337
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
338
+ def __call__(
339
+ self,
340
+ prompt_ids: jnp.ndarray,
341
+ image: jnp.ndarray,
342
+ params: Union[Dict, FrozenDict],
343
+ prng_seed: jax.Array,
344
+ strength: float = 0.8,
345
+ num_inference_steps: int = 50,
346
+ height: Optional[int] = None,
347
+ width: Optional[int] = None,
348
+ guidance_scale: Union[float, jnp.ndarray] = 7.5,
349
+ noise: jnp.ndarray = None,
350
+ neg_prompt_ids: jnp.ndarray = None,
351
+ return_dict: bool = True,
352
+ jit: bool = False,
353
+ ):
354
+ r"""
355
+ The call function to the pipeline for generation.
356
+
357
+ Args:
358
+ prompt_ids (`jnp.ndarray`):
359
+ The prompt or prompts to guide image generation.
360
+ image (`jnp.ndarray`):
361
+ Array representing an image batch to be used as the starting point.
362
+ params (`Dict` or `FrozenDict`):
363
+ Dictionary containing the model parameters/weights.
364
+ prng_seed (`jax.Array` or `jax.Array`):
365
+ Array containing random number generator key.
366
+ strength (`float`, *optional*, defaults to 0.8):
367
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
368
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
369
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
370
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
371
+ essentially ignores `image`.
372
+ num_inference_steps (`int`, *optional*, defaults to 50):
373
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
374
+ expense of slower inference. This parameter is modulated by `strength`.
375
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
376
+ The height in pixels of the generated image.
377
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
378
+ The width in pixels of the generated image.
379
+ guidance_scale (`float`, *optional*, defaults to 7.5):
380
+ A higher guidance scale value encourages the model to generate images closely linked to the text
381
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
382
+ noise (`jnp.ndarray`, *optional*):
383
+ Pre-generated noisy latents sampled from a Gaussian distribution to be used as inputs for image
384
+ generation. Can be used to tweak the same generation with different prompts. The array is generated by
385
+ sampling using the supplied random `generator`.
386
+ return_dict (`bool`, *optional*, defaults to `True`):
387
+ Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
388
+ a plain tuple.
389
+ jit (`bool`, defaults to `False`):
390
+ Whether to run `pmap` versions of the generation and safety scoring functions.
391
+
392
+ <Tip warning={true}>
393
+
394
+ This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
395
+ future release.
396
+
397
+ </Tip>
398
+
399
+ Examples:
400
+
401
+ Returns:
402
+ [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
403
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
404
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated images
405
+ and the second element is a list of `bool`s indicating whether the corresponding generated image
406
+ contains "not-safe-for-work" (nsfw) content.
407
+ """
408
+ # 0. Default height and width to unet
409
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
410
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
411
+
412
+ if isinstance(guidance_scale, float):
413
+ # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
414
+ # shape information, as they may be sharded (when `jit` is `True`), or not.
415
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
416
+ if len(prompt_ids.shape) > 2:
417
+ # Assume sharded
418
+ guidance_scale = guidance_scale[:, None]
419
+
420
+ start_timestep = self.get_timestep_start(num_inference_steps, strength)
421
+
422
+ if jit:
423
+ images = _p_generate(
424
+ self,
425
+ prompt_ids,
426
+ image,
427
+ params,
428
+ prng_seed,
429
+ start_timestep,
430
+ num_inference_steps,
431
+ height,
432
+ width,
433
+ guidance_scale,
434
+ noise,
435
+ neg_prompt_ids,
436
+ )
437
+ else:
438
+ images = self._generate(
439
+ prompt_ids,
440
+ image,
441
+ params,
442
+ prng_seed,
443
+ start_timestep,
444
+ num_inference_steps,
445
+ height,
446
+ width,
447
+ guidance_scale,
448
+ noise,
449
+ neg_prompt_ids,
450
+ )
451
+
452
+ if self.safety_checker is not None:
453
+ safety_params = params["safety_checker"]
454
+ images_uint8_casted = (images * 255).round().astype("uint8")
455
+ num_devices, batch_size = images.shape[:2]
456
+
457
+ images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
458
+ images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
459
+ images = np.asarray(images)
460
+
461
+ # block images
462
+ if any(has_nsfw_concept):
463
+ for i, is_nsfw in enumerate(has_nsfw_concept):
464
+ if is_nsfw:
465
+ images[i] = np.asarray(images_uint8_casted[i])
466
+
467
+ images = images.reshape(num_devices, batch_size, height, width, 3)
468
+ else:
469
+ images = np.asarray(images)
470
+ has_nsfw_concept = False
471
+
472
+ if not return_dict:
473
+ return (images, has_nsfw_concept)
474
+
475
+ return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
476
+
477
+
478
# Static argnums are pipe, start_timestep, num_inference_steps, height, width. A change would trigger recompilation.
# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
@partial(
    jax.pmap,
    in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0),
    static_broadcasted_argnums=(0, 5, 6, 7, 8),
)
def _p_generate(
    pipe,
    prompt_ids,
    image,
    params,
    prng_seed,
    start_timestep,
    num_inference_steps,
    height,
    width,
    guidance_scale,
    noise,
    neg_prompt_ids,
):
    # pmap wrapper: runs `pipe._generate` once per device on that device's shard of
    # the 0-mapped arguments; the pipeline object and loop/geometry ints are broadcast.
    return pipe._generate(
        prompt_ids,
        image,
        params,
        prng_seed,
        start_timestep,
        num_inference_steps,
        height,
        width,
        guidance_scale,
        noise,
        neg_prompt_ids,
    )
512
+
513
+
514
@partial(jax.pmap, static_broadcasted_argnums=(0,))
def _p_get_has_nsfw_concepts(pipe, features, params):
    # pmap wrapper: evaluates the safety checker on each device's shard of `features`.
    return pipe._get_has_nsfw_concepts(features, params)
517
+
518
+
519
def unshard(x: jnp.ndarray):
    """Undo `shard`: fold the leading device axis into the batch axis.

    Equivalent to ``einops.rearrange(x, 'd b ... -> (d b) ...')``.
    """
    flat_batch = x.shape[0] * x.shape[1]
    return x.reshape((flat_batch,) + tuple(x.shape[2:]))
524
+
525
+
526
def preprocess(image, dtype):
    """Convert a PIL image into a normalized NCHW array in [-1, 1].

    The image is first snapped down to dimensions that are multiples of 32
    (as the VAE/UNet require), then scaled from [0, 255] to [-1, 1].
    """
    width, height = image.size
    # resize to integer multiple of 32
    width -= width % 32
    height -= height % 32
    resized = image.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
    pixels = jnp.array(resized).astype(dtype) / 255.0
    pixels = pixels[None].transpose(0, 3, 1, 2)  # HWC -> 1CHW
    return 2.0 * pixels - 1.0
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py ADDED
@@ -0,0 +1,1032 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ from packaging import version
20
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
21
+
22
+ from ...configuration_utils import FrozenDict
23
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
24
+ from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
25
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
26
+ from ...models.lora import adjust_lora_scale_text_encoder
27
+ from ...schedulers import KarrasDiffusionSchedulers
28
+ from ...utils import (
29
+ USE_PEFT_BACKEND,
30
+ deprecate,
31
+ logging,
32
+ replace_example_docstring,
33
+ scale_lora_layers,
34
+ unscale_lora_layers,
35
+ )
36
+ from ...utils.torch_utils import randn_tensor
37
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
38
+ from .pipeline_output import StableDiffusionPipelineOutput
39
+ from .safety_checker import StableDiffusionSafetyChecker
40
+
41
+
42
# Module-level logger following the diffusers convention.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example spliced into `__call__`'s docstring via `replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPipeline

        >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        ```
"""
57
+
58
+
59
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    # Per-sample standard deviation over all non-batch dimensions.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # Rescaling the guided prediction to the text prediction's std fixes overexposure.
    rescaled = noise_cfg * (std_text / std_cfg)
    # Blend the rescaled result with the raw guidance to avoid "plain looking" images.
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg
71
+
72
+
73
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    **kwargs,
):
    """
    Call `scheduler.set_timesteps` and return the resulting schedule.

    Supports either a step count (`num_inference_steps`) or an explicit custom schedule
    (`timesteps`); exactly one of the two should be provided. Extra kwargs are forwarded
    to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`): The scheduler to get timesteps from.
        num_inference_steps (`int`, *optional*): Number of diffusion steps; mutually
            exclusive with `timesteps`.
        device (`str` or `torch.device`, *optional*): Device the timesteps are moved to,
            if given.
        timesteps (`List[int]`, *optional*): Custom timestep schedule; only supported when
            the scheduler's `set_timesteps` accepts a `timesteps` argument.

    Returns:
        `Tuple[torch.Tensor, int]`: the timestep schedule and the number of inference steps.
    """
    # Default path: let the scheduler build its own spacing from the step count.
    if timesteps is None:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        return scheduler.timesteps, num_inference_steps

    # Custom schedule path: only valid for schedulers whose `set_timesteps` takes `timesteps`.
    if "timesteps" not in inspect.signature(scheduler.set_timesteps).parameters:
        raise ValueError(
            f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
            f" timestep schedules. Please check whether you are using the correct scheduler."
        )
    scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
    schedule = scheduler.timesteps
    return schedule, len(schedule)
115
+
116
+
117
+ class StableDiffusionPipeline(
118
+ DiffusionPipeline,
119
+ StableDiffusionMixin,
120
+ TextualInversionLoaderMixin,
121
+ LoraLoaderMixin,
122
+ IPAdapterMixin,
123
+ FromSingleFileMixin,
124
+ ):
125
+ r"""
126
+ Pipeline for text-to-image generation using Stable Diffusion.
127
+
128
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
129
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
130
+
131
+ The pipeline also inherits the following loading methods:
132
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
133
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
134
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
135
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
136
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
137
+
138
+ Args:
139
+ vae ([`AutoencoderKL`]):
140
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
141
+ text_encoder ([`~transformers.CLIPTextModel`]):
142
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
143
+ tokenizer ([`~transformers.CLIPTokenizer`]):
144
+ A `CLIPTokenizer` to tokenize text.
145
+ unet ([`UNet2DConditionModel`]):
146
+ A `UNet2DConditionModel` to denoise the encoded image latents.
147
+ scheduler ([`SchedulerMixin`]):
148
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
149
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
150
+ safety_checker ([`StableDiffusionSafetyChecker`]):
151
+ Classification module that estimates whether generated images could be considered offensive or harmful.
152
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
153
+ about a model's potential harms.
154
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
155
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
156
+ """
157
+
158
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
159
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
160
+ _exclude_from_cpu_offload = ["safety_checker"]
161
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
162
+
163
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModelWithProjection = None,
        requires_safety_checker: bool = True,
    ):
        """
        Validate, patch, and register all pipeline components.

        Known-bad legacy configurations are fixed in place with deprecation warnings
        (scheduler `steps_offset`/`clip_sample`, unet `sample_size`), a warning is emitted
        when the safety checker is disabled, and `vae_scale_factor` is derived for latent
        sizing.
        """
        super().__init__()

        # Legacy checkpoints shipped schedulers with `steps_offset != 1`; warn and patch the
        # frozen config in place so sampling indices line up.
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        # Likewise force `clip_sample=False` for schedulers that still enable it.
        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        # Running without a safety checker is allowed but loudly discouraged.
        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        # The safety checker consumes CLIP features, so it cannot run without a feature extractor.
        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        # Checkpoints saved before diffusers 0.9.0 with `sample_size < 64` are assumed to be
        # mis-configured SD v1 checkpoints; patch `sample_size` to 64.
        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        # Spatial downscale factor between pixel space and VAE latent space (8 for SD v1/v2 VAEs).
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)
254
+
255
+ def _encode_prompt(
256
+ self,
257
+ prompt,
258
+ device,
259
+ num_images_per_prompt,
260
+ do_classifier_free_guidance,
261
+ negative_prompt=None,
262
+ prompt_embeds: Optional[torch.FloatTensor] = None,
263
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
264
+ lora_scale: Optional[float] = None,
265
+ **kwargs,
266
+ ):
267
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
268
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
269
+
270
+ prompt_embeds_tuple = self.encode_prompt(
271
+ prompt=prompt,
272
+ device=device,
273
+ num_images_per_prompt=num_images_per_prompt,
274
+ do_classifier_free_guidance=do_classifier_free_guidance,
275
+ negative_prompt=negative_prompt,
276
+ prompt_embeds=prompt_embeds,
277
+ negative_prompt_embeds=negative_prompt_embeds,
278
+ lora_scale=lora_scale,
279
+ **kwargs,
280
+ )
281
+
282
+ # concatenate for backwards comp
283
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
284
+
285
+ return prompt_embeds
286
+
287
    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.

        Returns:
            Tuple of `(prompt_embeds, negative_prompt_embeds)`. The negative embeddings are only
            computed when classifier-free guidance is enabled; otherwise the value passed in (or
            `None`) is returned unchanged.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        # Derive the batch size from the prompt (str -> 1, list -> len) or from the
        # precomputed embeddings when no raw prompt was given.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            # Warn (once per call) about any prompt text dropped by CLIP's max-length truncation.
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        # Pick a target dtype: prefer the text encoder's, fall back to the unet's, then to
        # the embeddings' own dtype (both components are optional on this pipeline).
        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            # Pad the unconditional input to the same sequence length as the positive embeddings.
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds
467
+
468
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
469
+ dtype = next(self.image_encoder.parameters()).dtype
470
+
471
+ if not isinstance(image, torch.Tensor):
472
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
473
+
474
+ image = image.to(device=device, dtype=dtype)
475
+ if output_hidden_states:
476
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
477
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
478
+ uncond_image_enc_hidden_states = self.image_encoder(
479
+ torch.zeros_like(image), output_hidden_states=True
480
+ ).hidden_states[-2]
481
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
482
+ num_images_per_prompt, dim=0
483
+ )
484
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
485
+ else:
486
+ image_embeds = self.image_encoder(image).image_embeds
487
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
488
+ uncond_image_embeds = torch.zeros_like(image_embeds)
489
+
490
+ return image_embeds, uncond_image_embeds
491
+
492
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        """
        Build per-adapter image embeddings for IP-Adapter conditioning.

        Either encodes `ip_adapter_image` (one image per loaded IP Adapter) via
        `encode_image`, or reuses precomputed `ip_adapter_image_embeds`. When
        classifier-free guidance is enabled, each returned tensor has the negative
        embeddings concatenated in front of the positive ones along the batch dimension.
        Returns a list with one tensor per IP Adapter.
        """
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            # One conditioning image is required per image-projection layer (i.e. per adapter).
            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # Plain `ImageProjection` layers consume pooled embeds; other layers consume hidden states.
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                # Repeat for each image generated per prompt (adds a leading stack dimension).
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if do_classifier_free_guidance:
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            # Precomputed embeds: re-expand for `num_images_per_prompt`, preserving CFG layout.
            repeat_dims = [1]
            image_embeds = []
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    # Precomputed CFG embeds are stored as [negative, positive] halves.
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                    single_negative_image_embeds = single_negative_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
                    )
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                else:
                    single_image_embeds = single_image_embeds.repeat(
                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
                    )
                image_embeds.append(single_image_embeds)

        return image_embeds
542
+
543
+ def run_safety_checker(self, image, device, dtype):
544
+ if self.safety_checker is None:
545
+ has_nsfw_concept = None
546
+ else:
547
+ if torch.is_tensor(image):
548
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
549
+ else:
550
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
551
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
552
+ image, has_nsfw_concept = self.safety_checker(
553
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
554
+ )
555
+ return image, has_nsfw_concept
556
+
557
+ def decode_latents(self, latents):
558
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
559
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
560
+
561
+ latents = 1 / self.vae.config.scaling_factor * latents
562
+ image = self.vae.decode(latents, return_dict=False)[0]
563
+ image = (image / 2 + 0.5).clamp(0, 1)
564
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
565
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
566
+ return image
567
+
568
+ def prepare_extra_step_kwargs(self, generator, eta):
569
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
570
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
571
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
572
+ # and should be between [0, 1]
573
+
574
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
575
+ extra_step_kwargs = {}
576
+ if accepts_eta:
577
+ extra_step_kwargs["eta"] = eta
578
+
579
+ # check if the scheduler accepts generator
580
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
581
+ if accepts_generator:
582
+ extra_step_kwargs["generator"] = generator
583
+ return extra_step_kwargs
584
+
585
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        """
        Validate `__call__` arguments, raising `ValueError` on any inconsistency.

        Checks: dimensions divisible by 8; positive integer `callback_steps`;
        callback tensor names registered in `_callback_tensor_inputs`; exactly one of
        `prompt`/`prompt_embeds` (and at most one of `negative_prompt`/
        `negative_prompt_embeds`) with matching shapes; and at most one of
        `ip_adapter_image`/`ip_adapter_image_embeds` with well-formed embeds.
        """
        # Latent spatial dims are height/8 x width/8, so pixel dims must be multiples of 8.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # `prompt` and `prompt_embeds` are mutually exclusive, and one must be given.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        # Precomputed IP-Adapter embeds must be a list of 3D (no CFG) or 4D (with CFG) tensors.
        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )
654
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
655
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
656
+ if isinstance(generator, list) and len(generator) != batch_size:
657
+ raise ValueError(
658
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
659
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
660
+ )
661
+
662
+ if latents is None:
663
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
664
+ else:
665
+ latents = latents.to(device)
666
+
667
+ # scale the initial noise by the standard deviation required by the scheduler
668
+ latents = latents * self.scheduler.init_noise_sigma
669
+ return latents
670
+
671
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
672
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
673
+ """
674
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
675
+
676
+ Args:
677
+ timesteps (`torch.Tensor`):
678
+ generate embedding vectors at these timesteps
679
+ embedding_dim (`int`, *optional*, defaults to 512):
680
+ dimension of the embeddings to generate
681
+ dtype:
682
+ data type of the generated embeddings
683
+
684
+ Returns:
685
+ `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
686
+ """
687
+ assert len(w.shape) == 1
688
+ w = w * 1000.0
689
+
690
+ half_dim = embedding_dim // 2
691
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
692
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
693
+ emb = w.to(dtype)[:, None] * emb[None, :]
694
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
695
+ if embedding_dim % 2 == 1: # zero pad
696
+ emb = torch.nn.functional.pad(emb, (0, 1))
697
+ assert emb.shape == (w.shape[0], embedding_dim)
698
+ return emb
699
+
700
+ @property
701
+ def guidance_scale(self):
702
+ return self._guidance_scale
703
+
704
+ @property
705
+ def guidance_rescale(self):
706
+ return self._guidance_rescale
707
+
708
+ @property
709
+ def clip_skip(self):
710
+ return self._clip_skip
711
+
712
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
713
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
714
+ # corresponds to doing no classifier free guidance.
715
+ @property
716
+ def do_classifier_free_guidance(self):
717
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
718
+
719
+ @property
720
+ def cross_attention_kwargs(self):
721
+ return self._cross_attention_kwargs
722
+
723
+ @property
724
+ def num_timesteps(self):
725
+ return self._num_timesteps
726
+
727
+ @property
728
+ def interrupt(self):
729
+ return self._interrupt
730
+
731
+ @torch.no_grad()
732
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
733
+ def __call__(
734
+ self,
735
+ prompt: Union[str, List[str]] = None,
736
+ height: Optional[int] = None,
737
+ width: Optional[int] = None,
738
+ num_inference_steps: int = 50,
739
+ timesteps: List[int] = None,
740
+ guidance_scale: float = 7.5,
741
+ negative_prompt: Optional[Union[str, List[str]]] = None,
742
+ num_images_per_prompt: Optional[int] = 1,
743
+ eta: float = 0.0,
744
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
745
+ latents: Optional[torch.FloatTensor] = None,
746
+ prompt_embeds: Optional[torch.FloatTensor] = None,
747
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
748
+ ip_adapter_image: Optional[PipelineImageInput] = None,
749
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
750
+ output_type: Optional[str] = "pil",
751
+ return_dict: bool = True,
752
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
753
+ guidance_rescale: float = 0.0,
754
+ clip_skip: Optional[int] = None,
755
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
756
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
757
+ **kwargs,
758
+ ):
759
+ r"""
760
+ The call function to the pipeline for generation.
761
+
762
+ Args:
763
+ prompt (`str` or `List[str]`, *optional*):
764
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
765
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
766
+ The height in pixels of the generated image.
767
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
768
+ The width in pixels of the generated image.
769
+ num_inference_steps (`int`, *optional*, defaults to 50):
770
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
771
+ expense of slower inference.
772
+ timesteps (`List[int]`, *optional*):
773
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
774
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
775
+ passed will be used. Must be in descending order.
776
+ guidance_scale (`float`, *optional*, defaults to 7.5):
777
+ A higher guidance scale value encourages the model to generate images closely linked to the text
778
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
779
+ negative_prompt (`str` or `List[str]`, *optional*):
780
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
781
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
782
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
783
+ The number of images to generate per prompt.
784
+ eta (`float`, *optional*, defaults to 0.0):
785
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
786
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
787
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
788
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
789
+ generation deterministic.
790
+ latents (`torch.FloatTensor`, *optional*):
791
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
792
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
793
+ tensor is generated by sampling using the supplied random `generator`.
794
+ prompt_embeds (`torch.FloatTensor`, *optional*):
795
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
796
+ provided, text embeddings are generated from the `prompt` input argument.
797
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
798
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
799
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
800
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
801
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
802
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
803
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
804
+ if `do_classifier_free_guidance` is set to `True`.
805
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
806
+ output_type (`str`, *optional*, defaults to `"pil"`):
807
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
808
+ return_dict (`bool`, *optional*, defaults to `True`):
809
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
810
+ plain tuple.
811
+ cross_attention_kwargs (`dict`, *optional*):
812
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
813
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
814
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
815
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
816
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
817
+ using zero terminal SNR.
818
+ clip_skip (`int`, *optional*):
819
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
820
+ the output of the pre-final layer will be used for computing the prompt embeddings.
821
+ callback_on_step_end (`Callable`, *optional*):
822
+ A function that calls at the end of each denoising steps during the inference. The function is called
823
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
824
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
825
+ `callback_on_step_end_tensor_inputs`.
826
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
827
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
828
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
829
+ `._callback_tensor_inputs` attribute of your pipeline class.
830
+
831
+ Examples:
832
+
833
+ Returns:
834
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
835
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
836
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
837
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
838
+ "not-safe-for-work" (nsfw) content.
839
+ """
840
+
841
+ callback = kwargs.pop("callback", None)
842
+ callback_steps = kwargs.pop("callback_steps", None)
843
+
844
+ if callback is not None:
845
+ deprecate(
846
+ "callback",
847
+ "1.0.0",
848
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
849
+ )
850
+ if callback_steps is not None:
851
+ deprecate(
852
+ "callback_steps",
853
+ "1.0.0",
854
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
855
+ )
856
+
857
+ # 0. Default height and width to unet
858
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
859
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
860
+ # to deal with lora scaling and other possible forward hooks
861
+
862
+ # 1. Check inputs. Raise error if not correct
863
+ self.check_inputs(
864
+ prompt,
865
+ height,
866
+ width,
867
+ callback_steps,
868
+ negative_prompt,
869
+ prompt_embeds,
870
+ negative_prompt_embeds,
871
+ ip_adapter_image,
872
+ ip_adapter_image_embeds,
873
+ callback_on_step_end_tensor_inputs,
874
+ )
875
+
876
+ self._guidance_scale = guidance_scale
877
+ self._guidance_rescale = guidance_rescale
878
+ self._clip_skip = clip_skip
879
+ self._cross_attention_kwargs = cross_attention_kwargs
880
+ self._interrupt = False
881
+
882
+ # 2. Define call parameters
883
+ if prompt is not None and isinstance(prompt, str):
884
+ batch_size = 1
885
+ elif prompt is not None and isinstance(prompt, list):
886
+ batch_size = len(prompt)
887
+ else:
888
+ batch_size = prompt_embeds.shape[0]
889
+
890
+ device = self._execution_device
891
+
892
+ # 3. Encode input prompt
893
+ lora_scale = (
894
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
895
+ )
896
+
897
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
898
+ prompt,
899
+ device,
900
+ num_images_per_prompt,
901
+ self.do_classifier_free_guidance,
902
+ negative_prompt,
903
+ prompt_embeds=prompt_embeds,
904
+ negative_prompt_embeds=negative_prompt_embeds,
905
+ lora_scale=lora_scale,
906
+ clip_skip=self.clip_skip,
907
+ )
908
+
909
+ # For classifier free guidance, we need to do two forward passes.
910
+ # Here we concatenate the unconditional and text embeddings into a single batch
911
+ # to avoid doing two forward passes
912
+ if self.do_classifier_free_guidance:
913
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
914
+
915
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
916
+ image_embeds = self.prepare_ip_adapter_image_embeds(
917
+ ip_adapter_image,
918
+ ip_adapter_image_embeds,
919
+ device,
920
+ batch_size * num_images_per_prompt,
921
+ self.do_classifier_free_guidance,
922
+ )
923
+
924
+ # 4. Prepare timesteps
925
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
926
+
927
+ # 5. Prepare latent variables
928
+ num_channels_latents = self.unet.config.in_channels
929
+ latents = self.prepare_latents(
930
+ batch_size * num_images_per_prompt,
931
+ num_channels_latents,
932
+ height,
933
+ width,
934
+ prompt_embeds.dtype,
935
+ device,
936
+ generator,
937
+ latents,
938
+ )
939
+
940
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
941
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
942
+
943
+ # 6.1 Add image embeds for IP-Adapter
944
+ added_cond_kwargs = (
945
+ {"image_embeds": image_embeds}
946
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
947
+ else None
948
+ )
949
+
950
+ # 6.2 Optionally get Guidance Scale Embedding
951
+ timestep_cond = None
952
+ if self.unet.config.time_cond_proj_dim is not None:
953
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
954
+ timestep_cond = self.get_guidance_scale_embedding(
955
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
956
+ ).to(device=device, dtype=latents.dtype)
957
+
958
+ # 7. Denoising loop
959
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
960
+ self._num_timesteps = len(timesteps)
961
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
962
+ for i, t in enumerate(timesteps):
963
+ if self.interrupt:
964
+ continue
965
+
966
+ # expand the latents if we are doing classifier free guidance
967
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
968
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
969
+
970
+ # predict the noise residual
971
+ noise_pred = self.unet(
972
+ latent_model_input,
973
+ t,
974
+ encoder_hidden_states=prompt_embeds,
975
+ timestep_cond=timestep_cond,
976
+ cross_attention_kwargs=self.cross_attention_kwargs,
977
+ added_cond_kwargs=added_cond_kwargs,
978
+ return_dict=False,
979
+ )[0]
980
+
981
+ # perform guidance
982
+ if self.do_classifier_free_guidance:
983
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
984
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
985
+
986
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
987
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
988
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
989
+
990
+ # compute the previous noisy sample x_t -> x_t-1
991
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
992
+
993
+ if callback_on_step_end is not None:
994
+ callback_kwargs = {}
995
+ for k in callback_on_step_end_tensor_inputs:
996
+ callback_kwargs[k] = locals()[k]
997
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
998
+
999
+ latents = callback_outputs.pop("latents", latents)
1000
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1001
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1002
+
1003
+ # call the callback, if provided
1004
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1005
+ progress_bar.update()
1006
+ if callback is not None and i % callback_steps == 0:
1007
+ step_idx = i // getattr(self.scheduler, "order", 1)
1008
+ callback(step_idx, t, latents)
1009
+
1010
+ if not output_type == "latent":
1011
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1012
+ 0
1013
+ ]
1014
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1015
+ else:
1016
+ image = latents
1017
+ has_nsfw_concept = None
1018
+
1019
+ if has_nsfw_concept is None:
1020
+ do_denormalize = [True] * image.shape[0]
1021
+ else:
1022
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1023
+
1024
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1025
+
1026
+ # Offload all models
1027
+ self.maybe_free_model_hooks()
1028
+
1029
+ if not return_dict:
1030
+ return (image, has_nsfw_concept)
1031
+
1032
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Callable, List, Optional, Union
17
+
18
+ import PIL.Image
19
+ import torch
20
+ from packaging import version
21
+ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
22
+
23
+ from ...configuration_utils import FrozenDict
24
+ from ...image_processor import VaeImageProcessor
25
+ from ...models import AutoencoderKL, UNet2DConditionModel
26
+ from ...schedulers import KarrasDiffusionSchedulers
27
+ from ...utils import deprecate, logging
28
+ from ...utils.torch_utils import randn_tensor
29
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
30
+ from . import StableDiffusionPipelineOutput
31
+ from .safety_checker import StableDiffusionSafetyChecker
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ class StableDiffusionImageVariationPipeline(DiffusionPipeline, StableDiffusionMixin):
38
+ r"""
39
+ Pipeline to generate image variations from an input image using Stable Diffusion.
40
+
41
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
42
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
43
+
44
+ Args:
45
+ vae ([`AutoencoderKL`]):
46
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
47
+ image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):
48
+ Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
49
+ text_encoder ([`~transformers.CLIPTextModel`]):
50
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
51
+ tokenizer ([`~transformers.CLIPTokenizer`]):
52
+ A `CLIPTokenizer` to tokenize text.
53
+ unet ([`UNet2DConditionModel`]):
54
+ A `UNet2DConditionModel` to denoise the encoded image latents.
55
+ scheduler ([`SchedulerMixin`]):
56
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
57
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
58
+ safety_checker ([`StableDiffusionSafetyChecker`]):
59
+ Classification module that estimates whether generated images could be considered offensive or harmful.
60
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
61
+ about a model's potential harms.
62
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
63
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
64
+ """
65
+
66
+ # TODO: feature_extractor is required to encode images (if they are in PIL format),
67
+ # we should give a descriptive message if the pipeline doesn't have one.
68
+ _optional_components = ["safety_checker"]
69
+ model_cpu_offload_seq = "image_encoder->unet->vae"
70
+ _exclude_from_cpu_offload = ["safety_checker"]
71
+
72
+ def __init__(
73
+ self,
74
+ vae: AutoencoderKL,
75
+ image_encoder: CLIPVisionModelWithProjection,
76
+ unet: UNet2DConditionModel,
77
+ scheduler: KarrasDiffusionSchedulers,
78
+ safety_checker: StableDiffusionSafetyChecker,
79
+ feature_extractor: CLIPImageProcessor,
80
+ requires_safety_checker: bool = True,
81
+ ):
82
+ super().__init__()
83
+
84
+ if safety_checker is None and requires_safety_checker:
85
+ logger.warning(
86
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
87
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
88
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
89
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
90
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
91
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
92
+ )
93
+
94
+ if safety_checker is not None and feature_extractor is None:
95
+ raise ValueError(
96
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
97
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
98
+ )
99
+
100
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
101
+ version.parse(unet.config._diffusers_version).base_version
102
+ ) < version.parse("0.9.0.dev0")
103
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
104
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
105
+ deprecation_message = (
106
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
107
+ " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
108
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
109
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
110
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
111
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
112
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
113
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
114
+ " the `unet/config.json` file"
115
+ )
116
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
117
+ new_config = dict(unet.config)
118
+ new_config["sample_size"] = 64
119
+ unet._internal_dict = FrozenDict(new_config)
120
+
121
+ self.register_modules(
122
+ vae=vae,
123
+ image_encoder=image_encoder,
124
+ unet=unet,
125
+ scheduler=scheduler,
126
+ safety_checker=safety_checker,
127
+ feature_extractor=feature_extractor,
128
+ )
129
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
130
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
131
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
132
+
133
+ def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
134
+ dtype = next(self.image_encoder.parameters()).dtype
135
+
136
+ if not isinstance(image, torch.Tensor):
137
+ image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
138
+
139
+ image = image.to(device=device, dtype=dtype)
140
+ image_embeddings = self.image_encoder(image).image_embeds
141
+ image_embeddings = image_embeddings.unsqueeze(1)
142
+
143
+ # duplicate image embeddings for each generation per prompt, using mps friendly method
144
+ bs_embed, seq_len, _ = image_embeddings.shape
145
+ image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
146
+ image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
147
+
148
+ if do_classifier_free_guidance:
149
+ negative_prompt_embeds = torch.zeros_like(image_embeddings)
150
+
151
+ # For classifier free guidance, we need to do two forward passes.
152
+ # Here we concatenate the unconditional and text embeddings into a single batch
153
+ # to avoid doing two forward passes
154
+ image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
155
+
156
+ return image_embeddings
157
+
158
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
159
+ def run_safety_checker(self, image, device, dtype):
160
+ if self.safety_checker is None:
161
+ has_nsfw_concept = None
162
+ else:
163
+ if torch.is_tensor(image):
164
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
165
+ else:
166
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
167
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
168
+ image, has_nsfw_concept = self.safety_checker(
169
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
170
+ )
171
+ return image, has_nsfw_concept
172
+
173
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
174
+ def decode_latents(self, latents):
175
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
176
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
177
+
178
+ latents = 1 / self.vae.config.scaling_factor * latents
179
+ image = self.vae.decode(latents, return_dict=False)[0]
180
+ image = (image / 2 + 0.5).clamp(0, 1)
181
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
182
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
183
+ return image
184
+
185
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
186
+ def prepare_extra_step_kwargs(self, generator, eta):
187
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
188
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
189
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
190
+ # and should be between [0, 1]
191
+
192
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
193
+ extra_step_kwargs = {}
194
+ if accepts_eta:
195
+ extra_step_kwargs["eta"] = eta
196
+
197
+ # check if the scheduler accepts generator
198
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
199
+ if accepts_generator:
200
+ extra_step_kwargs["generator"] = generator
201
+ return extra_step_kwargs
202
+
203
+ def check_inputs(self, image, height, width, callback_steps):
204
+ if (
205
+ not isinstance(image, torch.Tensor)
206
+ and not isinstance(image, PIL.Image.Image)
207
+ and not isinstance(image, list)
208
+ ):
209
+ raise ValueError(
210
+ "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
211
+ f" {type(image)}"
212
+ )
213
+
214
+ if height % 8 != 0 or width % 8 != 0:
215
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
216
+
217
+ if (callback_steps is None) or (
218
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
219
+ ):
220
+ raise ValueError(
221
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
222
+ f" {type(callback_steps)}."
223
+ )
224
+
225
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
226
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
227
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
228
+ if isinstance(generator, list) and len(generator) != batch_size:
229
+ raise ValueError(
230
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
231
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
232
+ )
233
+
234
+ if latents is None:
235
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
236
+ else:
237
+ latents = latents.to(device)
238
+
239
+ # scale the initial noise by the standard deviation required by the scheduler
240
+ latents = latents * self.scheduler.init_noise_sigma
241
+ return latents
242
+
243
+ @torch.no_grad()
244
+ def __call__(
245
+ self,
246
+ image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor],
247
+ height: Optional[int] = None,
248
+ width: Optional[int] = None,
249
+ num_inference_steps: int = 50,
250
+ guidance_scale: float = 7.5,
251
+ num_images_per_prompt: Optional[int] = 1,
252
+ eta: float = 0.0,
253
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
254
+ latents: Optional[torch.FloatTensor] = None,
255
+ output_type: Optional[str] = "pil",
256
+ return_dict: bool = True,
257
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
258
+ callback_steps: int = 1,
259
+ ):
260
+ r"""
261
+ The call function to the pipeline for generation.
262
+
263
+ Args:
264
+ image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
265
+ Image or images to guide image generation. If you provide a tensor, it needs to be compatible with
266
+ [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).
267
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
268
+ The height in pixels of the generated image.
269
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
270
+ The width in pixels of the generated image.
271
+ num_inference_steps (`int`, *optional*, defaults to 50):
272
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
273
+ expense of slower inference. This parameter is modulated by `strength`.
274
+ guidance_scale (`float`, *optional*, defaults to 7.5):
275
+ A higher guidance scale value encourages the model to generate images closely linked to the text
276
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
277
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
278
+ The number of images to generate per prompt.
279
+ eta (`float`, *optional*, defaults to 0.0):
280
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
281
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
282
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
283
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
284
+ generation deterministic.
285
+ latents (`torch.FloatTensor`, *optional*):
286
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
287
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
288
+ tensor is generated by sampling using the supplied random `generator`.
289
+ output_type (`str`, *optional*, defaults to `"pil"`):
290
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
291
+ return_dict (`bool`, *optional*, defaults to `True`):
292
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
293
+ plain tuple.
294
+ callback (`Callable`, *optional*):
295
+ A function that calls every `callback_steps` steps during inference. The function is called with the
296
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
297
+ callback_steps (`int`, *optional*, defaults to 1):
298
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
299
+ every step.
300
+
301
+ Returns:
302
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
303
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
304
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
305
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
306
+ "not-safe-for-work" (nsfw) content.
307
+
308
+ Examples:
309
+
310
+ ```py
311
+ from diffusers import StableDiffusionImageVariationPipeline
312
+ from PIL import Image
313
+ from io import BytesIO
314
+ import requests
315
+
316
+ pipe = StableDiffusionImageVariationPipeline.from_pretrained(
317
+ "lambdalabs/sd-image-variations-diffusers", revision="v2.0"
318
+ )
319
+ pipe = pipe.to("cuda")
320
+
321
+ url = "https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200"
322
+
323
+ response = requests.get(url)
324
+ image = Image.open(BytesIO(response.content)).convert("RGB")
325
+
326
+ out = pipe(image, num_images_per_prompt=3, guidance_scale=15)
327
+ out["images"][0].save("result.jpg")
328
+ ```
329
+ """
330
+ # 0. Default height and width to unet
331
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
332
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
333
+
334
+ # 1. Check inputs. Raise error if not correct
335
+ self.check_inputs(image, height, width, callback_steps)
336
+
337
+ # 2. Define call parameters
338
+ if isinstance(image, PIL.Image.Image):
339
+ batch_size = 1
340
+ elif isinstance(image, list):
341
+ batch_size = len(image)
342
+ else:
343
+ batch_size = image.shape[0]
344
+ device = self._execution_device
345
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
346
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
347
+ # corresponds to doing no classifier free guidance.
348
+ do_classifier_free_guidance = guidance_scale > 1.0
349
+
350
+ # 3. Encode input image
351
+ image_embeddings = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
352
+
353
+ # 4. Prepare timesteps
354
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
355
+ timesteps = self.scheduler.timesteps
356
+
357
+ # 5. Prepare latent variables
358
+ num_channels_latents = self.unet.config.in_channels
359
+ latents = self.prepare_latents(
360
+ batch_size * num_images_per_prompt,
361
+ num_channels_latents,
362
+ height,
363
+ width,
364
+ image_embeddings.dtype,
365
+ device,
366
+ generator,
367
+ latents,
368
+ )
369
+
370
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
371
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
372
+
373
+ # 7. Denoising loop
374
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
375
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
376
+ for i, t in enumerate(timesteps):
377
+ # expand the latents if we are doing classifier free guidance
378
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
379
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
380
+
381
+ # predict the noise residual
382
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample
383
+
384
+ # perform guidance
385
+ if do_classifier_free_guidance:
386
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
387
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
388
+
389
+ # compute the previous noisy sample x_t -> x_t-1
390
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
391
+
392
+ # call the callback, if provided
393
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
394
+ progress_bar.update()
395
+ if callback is not None and i % callback_steps == 0:
396
+ step_idx = i // getattr(self.scheduler, "order", 1)
397
+ callback(step_idx, t, latents)
398
+
399
+ self.maybe_free_model_hooks()
400
+
401
+ if not output_type == "latent":
402
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
403
+ image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype)
404
+ else:
405
+ image = latents
406
+ has_nsfw_concept = None
407
+
408
+ if has_nsfw_concept is None:
409
+ do_denormalize = [True] * image.shape[0]
410
+ else:
411
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
412
+
413
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
414
+
415
+ self.maybe_free_model_hooks()
416
+
417
+ if not return_dict:
418
+ return (image, has_nsfw_concept)
419
+
420
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py ADDED
@@ -0,0 +1,807 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The InstructPix2Pix Authors and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
22
+
23
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
24
+ from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
25
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
26
+ from ...schedulers import KarrasDiffusionSchedulers
27
+ from ...utils import PIL_INTERPOLATION, deprecate, logging
28
+ from ...utils.torch_utils import randn_tensor
29
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
30
+ from . import StableDiffusionPipelineOutput
31
+ from .safety_checker import StableDiffusionSafetyChecker
32
+
33
+
34
# Module-level logger, one per module per the diffusers convention.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
35
+
36
+
37
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
def preprocess(image):
    """Deprecated image preprocessor kept for backward compatibility.

    Accepts a tensor (returned unchanged), a single PIL image, or a list of
    PIL images / tensors, and returns a batched ``torch`` tensor scaled to
    ``[-1, 1]`` in NCHW layout. Use ``VaeImageProcessor.preprocess`` instead.
    """
    deprecate(
        "preprocess",
        "1.0.0",
        "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead",
        standard_warn=False,
    )
    if isinstance(image, torch.Tensor):
        # Tensors are assumed to already be preprocessed.
        return image
    if isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        width, height = image[0].size
        # Round spatial dims down to a multiple of 8 (VAE downsampling requirement).
        width, height = width - width % 8, height - height % 8

        resized = [
            np.array(img.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
            for img in image
        ]
        batch = np.concatenate(resized, axis=0).astype(np.float32) / 255.0
        batch = batch.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        image = torch.from_numpy(2.0 * batch - 1.0)  # [0, 1] -> [-1, 1]
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
59
+
60
+
61
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    """Extract a latent tensor from a VAE encoder output.

    Prefers a ``latent_dist`` attribute (drawing a sample, or taking the mode
    when ``sample_mode == "argmax"``); otherwise falls back to a plain
    ``latents`` attribute. Raises ``AttributeError`` when neither is usable.
    """
    has_dist = hasattr(encoder_output, "latent_dist")
    if has_dist and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    if has_dist and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")
73
+
74
+
75
+ class StableDiffusionInstructPix2PixPipeline(
76
+ DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin
77
+ ):
78
+ r"""
79
+ Pipeline for pixel-level image editing by following text instructions (based on Stable Diffusion).
80
+
81
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
82
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
83
+
84
+ The pipeline also inherits the following loading methods:
85
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
86
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
87
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
88
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
89
+
90
+ Args:
91
+ vae ([`AutoencoderKL`]):
92
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
93
+ text_encoder ([`~transformers.CLIPTextModel`]):
94
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
95
+ tokenizer ([`~transformers.CLIPTokenizer`]):
96
+ A `CLIPTokenizer` to tokenize text.
97
+ unet ([`UNet2DConditionModel`]):
98
+ A `UNet2DConditionModel` to denoise the encoded image latents.
99
+ scheduler ([`SchedulerMixin`]):
100
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
101
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
102
+ safety_checker ([`StableDiffusionSafetyChecker`]):
103
+ Classification module that estimates whether generated images could be considered offensive or harmful.
104
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
105
+ about a model's potential harms.
106
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
107
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
108
+ """
109
+
110
+ model_cpu_offload_seq = "text_encoder->unet->vae"
111
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
112
+ _exclude_from_cpu_offload = ["safety_checker"]
113
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "image_latents"]
114
+
115
+ def __init__(
116
+ self,
117
+ vae: AutoencoderKL,
118
+ text_encoder: CLIPTextModel,
119
+ tokenizer: CLIPTokenizer,
120
+ unet: UNet2DConditionModel,
121
+ scheduler: KarrasDiffusionSchedulers,
122
+ safety_checker: StableDiffusionSafetyChecker,
123
+ feature_extractor: CLIPImageProcessor,
124
+ image_encoder: Optional[CLIPVisionModelWithProjection] = None,
125
+ requires_safety_checker: bool = True,
126
+ ):
127
+ super().__init__()
128
+
129
+ if safety_checker is None and requires_safety_checker:
130
+ logger.warning(
131
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
132
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
133
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
134
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
135
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
136
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
137
+ )
138
+
139
+ if safety_checker is not None and feature_extractor is None:
140
+ raise ValueError(
141
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
142
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
143
+ )
144
+
145
+ self.register_modules(
146
+ vae=vae,
147
+ text_encoder=text_encoder,
148
+ tokenizer=tokenizer,
149
+ unet=unet,
150
+ scheduler=scheduler,
151
+ safety_checker=safety_checker,
152
+ feature_extractor=feature_extractor,
153
+ image_encoder=image_encoder,
154
+ )
155
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
156
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
157
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
158
+
159
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        num_inference_steps: int = 100,
        guidance_scale: float = 7.5,
        image_guidance_scale: float = 1.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept
                image latents as `image`, but if passing latents directly it is not encoded again.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            image_guidance_scale (`float`, *optional*, defaults to 1.5):
                Push the generated image towards the initial `image`. Image guidance scale is enabled by setting
                `image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely
                linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a
                value of at least `1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        ```py
        >>> import PIL
        >>> import requests
        >>> import torch
        >>> from io import BytesIO

        >>> from diffusers import StableDiffusionInstructPix2PixPipeline


        >>> def download_image(url):
        ...     response = requests.get(url)
        ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


        >>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"

        >>> image = download_image(img_url).resize((512, 512))

        >>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        ...     "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> prompt = "make the mountains snowy"
        >>> image = pipe(prompt=prompt, image=image).images[0]
        ```

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """

        # Legacy per-step callback API, accepted via **kwargs only so new
        # callers use `callback_on_step_end` instead.
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
            )

        # 0. Check inputs
        self.check_inputs(
            prompt,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )
        # Stored on the instance so the `guidance_scale` / `image_guidance_scale`
        # properties (and `do_classifier_free_guidance`) can read them.
        self._guidance_scale = guidance_scale
        self._image_guidance_scale = image_guidance_scale

        device = self._execution_device

        if ip_adapter_image is not None:
            # ImageProjection adapters take pooled embeds; other projectors need hidden states.
            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
            image_embeds, negative_image_embeds = self.encode_image(
                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
            )
            if self.do_classifier_free_guidance:
                # Three copies to match the 3-way latent expansion below
                # (text-conditioned, image-conditioned, unconditional).
                image_embeds = torch.cat([image_embeds, negative_image_embeds, negative_image_embeds])

        if image is None:
            raise ValueError("`image` input cannot be undefined.")

        # 1. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # NOTE(review): redundant — `device` was already assigned above.
        device = self._execution_device

        # 2. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 3. Preprocess image
        image = self.image_processor.preprocess(image)

        # 4. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare Image latents (the edit-source conditioning channels)
        image_latents = self.prepare_image_latents(
            image,
            batch_size,
            num_images_per_prompt,
            prompt_embeds.dtype,
            device,
            self.do_classifier_free_guidance,
        )

        # Recover pixel-space height/width from the latent shape so the noise
        # latents match the (possibly resized) preprocessed image.
        height, width = image_latents.shape[-2:]
        height = height * self.vae_scale_factor
        width = width * self.vae_scale_factor

        # 6. Prepare latent variables
        num_channels_latents = self.vae.config.latent_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 7. Check that shapes of latents and image match the UNet channels
        # (the InstructPix2Pix UNet consumes noise latents and image latents
        # concatenated on the channel axis).
        num_channels_image = image_latents.shape[1]
        if num_channels_latents + num_channels_image != self.unet.config.in_channels:
            raise ValueError(
                f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
                f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
                f" `num_channels_image`: {num_channels_image} "
                f" = {num_channels_latents+num_channels_image}. Please verify the config of"
                " `pipeline.unet` or your `image` input."
            )

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8.1 Add image embeds for IP-Adapter
        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Expand the latents if we are doing classifier free guidance.
                # The latents are expanded 3 times because for pix2pix the
                # guidance is applied for both the text and the input image.
                latent_model_input = torch.cat([latents] * 3) if self.do_classifier_free_guidance else latents

                # concat latents, image_latents in the channel dimension
                scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1)

                # predict the noise residual
                noise_pred = self.unet(
                    scaled_latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    # Chunk order (text, image, uncond) must mirror how
                    # `_encode_prompt` / `prepare_image_latents` stack the
                    # three conditioning branches — confirm if those change.
                    noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
                    noise_pred = (
                        noise_pred_uncond
                        + self.guidance_scale * (noise_pred_text - noise_pred_image)
                        + self.image_guidance_scale * (noise_pred_image - noise_pred_uncond)
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    # `locals()` lookup requires every name listed in
                    # `callback_on_step_end_tensor_inputs` to be a local here
                    # (enforced via `_callback_tensor_inputs` in check_inputs).
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    # Allow the callback to overwrite the tensors it received.
                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                    image_latents = callback_outputs.pop("image_latents", image_latents)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            # Decode latents back to pixel space and run the NSFW check.
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            # Flagged images were blacked out by the safety checker; skip denormalization.
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
463
+
464
+ def _encode_prompt(
465
+ self,
466
+ prompt,
467
+ device,
468
+ num_images_per_prompt,
469
+ do_classifier_free_guidance,
470
+ negative_prompt=None,
471
+ prompt_embeds: Optional[torch.FloatTensor] = None,
472
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
473
+ ):
474
+ r"""
475
+ Encodes the prompt into text encoder hidden states.
476
+
477
+ Args:
478
+ prompt (`str` or `List[str]`, *optional*):
479
+ prompt to be encoded
480
+ device: (`torch.device`):
481
+ torch device
482
+ num_images_per_prompt (`int`):
483
+ number of images that should be generated per prompt
484
+ do_classifier_free_guidance (`bool`):
485
+ whether to use classifier free guidance or not
486
+ negative_prompt (`str` or `List[str]`, *optional*):
487
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
488
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
489
+ less than `1`).
490
+ prompt_embeds (`torch.FloatTensor`, *optional*):
491
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
492
+ provided, text embeddings will be generated from `prompt` input argument.
493
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
494
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
495
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
496
+ argument.
497
+ """
498
+ if prompt is not None and isinstance(prompt, str):
499
+ batch_size = 1
500
+ elif prompt is not None and isinstance(prompt, list):
501
+ batch_size = len(prompt)
502
+ else:
503
+ batch_size = prompt_embeds.shape[0]
504
+
505
+ if prompt_embeds is None:
506
+ # textual inversion: process multi-vector tokens if necessary
507
+ if isinstance(self, TextualInversionLoaderMixin):
508
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
509
+
510
+ text_inputs = self.tokenizer(
511
+ prompt,
512
+ padding="max_length",
513
+ max_length=self.tokenizer.model_max_length,
514
+ truncation=True,
515
+ return_tensors="pt",
516
+ )
517
+ text_input_ids = text_inputs.input_ids
518
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
519
+
520
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
521
+ text_input_ids, untruncated_ids
522
+ ):
523
+ removed_text = self.tokenizer.batch_decode(
524
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
525
+ )
526
+ logger.warning(
527
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
528
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
529
+ )
530
+
531
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
532
+ attention_mask = text_inputs.attention_mask.to(device)
533
+ else:
534
+ attention_mask = None
535
+
536
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
537
+ prompt_embeds = prompt_embeds[0]
538
+
539
+ if self.text_encoder is not None:
540
+ prompt_embeds_dtype = self.text_encoder.dtype
541
+ else:
542
+ prompt_embeds_dtype = self.unet.dtype
543
+
544
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
545
+
546
+ bs_embed, seq_len, _ = prompt_embeds.shape
547
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
548
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
549
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
550
+
551
+ # get unconditional embeddings for classifier free guidance
552
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
553
+ uncond_tokens: List[str]
554
+ if negative_prompt is None:
555
+ uncond_tokens = [""] * batch_size
556
+ elif type(prompt) is not type(negative_prompt):
557
+ raise TypeError(
558
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
559
+ f" {type(prompt)}."
560
+ )
561
+ elif isinstance(negative_prompt, str):
562
+ uncond_tokens = [negative_prompt]
563
+ elif batch_size != len(negative_prompt):
564
+ raise ValueError(
565
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
566
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
567
+ " the batch size of `prompt`."
568
+ )
569
+ else:
570
+ uncond_tokens = negative_prompt
571
+
572
+ # textual inversion: process multi-vector tokens if necessary
573
+ if isinstance(self, TextualInversionLoaderMixin):
574
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
575
+
576
+ max_length = prompt_embeds.shape[1]
577
+ uncond_input = self.tokenizer(
578
+ uncond_tokens,
579
+ padding="max_length",
580
+ max_length=max_length,
581
+ truncation=True,
582
+ return_tensors="pt",
583
+ )
584
+
585
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
586
+ attention_mask = uncond_input.attention_mask.to(device)
587
+ else:
588
+ attention_mask = None
589
+
590
+ negative_prompt_embeds = self.text_encoder(
591
+ uncond_input.input_ids.to(device),
592
+ attention_mask=attention_mask,
593
+ )
594
+ negative_prompt_embeds = negative_prompt_embeds[0]
595
+
596
+ if do_classifier_free_guidance:
597
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
598
+ seq_len = negative_prompt_embeds.shape[1]
599
+
600
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
601
+
602
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
603
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
604
+
605
+ # For classifier free guidance, we need to do two forward passes.
606
+ # Here we concatenate the unconditional and text embeddings into a single batch
607
+ # to avoid doing two forward passes
608
+ # pix2pix has two negative embeddings, and unlike in other pipelines latents are ordered [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]
609
+ prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds])
610
+
611
+ return prompt_embeds
612
+
613
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
614
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
615
+ dtype = next(self.image_encoder.parameters()).dtype
616
+
617
+ if not isinstance(image, torch.Tensor):
618
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
619
+
620
+ image = image.to(device=device, dtype=dtype)
621
+ if output_hidden_states:
622
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
623
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
624
+ uncond_image_enc_hidden_states = self.image_encoder(
625
+ torch.zeros_like(image), output_hidden_states=True
626
+ ).hidden_states[-2]
627
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
628
+ num_images_per_prompt, dim=0
629
+ )
630
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
631
+ else:
632
+ image_embeds = self.image_encoder(image).image_embeds
633
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
634
+ uncond_image_embeds = torch.zeros_like(image_embeds)
635
+
636
+ return image_embeds, uncond_image_embeds
637
+
638
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
639
+ def run_safety_checker(self, image, device, dtype):
640
+ if self.safety_checker is None:
641
+ has_nsfw_concept = None
642
+ else:
643
+ if torch.is_tensor(image):
644
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
645
+ else:
646
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
647
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
648
+ image, has_nsfw_concept = self.safety_checker(
649
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
650
+ )
651
+ return image, has_nsfw_concept
652
+
653
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
654
+ def prepare_extra_step_kwargs(self, generator, eta):
655
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
656
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
657
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
658
+ # and should be between [0, 1]
659
+
660
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
661
+ extra_step_kwargs = {}
662
+ if accepts_eta:
663
+ extra_step_kwargs["eta"] = eta
664
+
665
+ # check if the scheduler accepts generator
666
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
667
+ if accepts_generator:
668
+ extra_step_kwargs["generator"] = generator
669
+ return extra_step_kwargs
670
+
671
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
672
+ def decode_latents(self, latents):
673
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
674
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
675
+
676
+ latents = 1 / self.vae.config.scaling_factor * latents
677
+ image = self.vae.decode(latents, return_dict=False)[0]
678
+ image = (image / 2 + 0.5).clamp(0, 1)
679
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
680
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
681
+ return image
682
+
683
+ def check_inputs(
684
+ self,
685
+ prompt,
686
+ callback_steps,
687
+ negative_prompt=None,
688
+ prompt_embeds=None,
689
+ negative_prompt_embeds=None,
690
+ callback_on_step_end_tensor_inputs=None,
691
+ ):
692
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
693
+ raise ValueError(
694
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
695
+ f" {type(callback_steps)}."
696
+ )
697
+
698
+ if callback_on_step_end_tensor_inputs is not None and not all(
699
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
700
+ ):
701
+ raise ValueError(
702
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
703
+ )
704
+
705
+ if prompt is not None and prompt_embeds is not None:
706
+ raise ValueError(
707
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
708
+ " only forward one of the two."
709
+ )
710
+ elif prompt is None and prompt_embeds is None:
711
+ raise ValueError(
712
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
713
+ )
714
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
715
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
716
+
717
+ if negative_prompt is not None and negative_prompt_embeds is not None:
718
+ raise ValueError(
719
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
720
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
721
+ )
722
+
723
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
724
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
725
+ raise ValueError(
726
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
727
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
728
+ f" {negative_prompt_embeds.shape}."
729
+ )
730
+
731
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
732
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
733
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
734
+ if isinstance(generator, list) and len(generator) != batch_size:
735
+ raise ValueError(
736
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
737
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
738
+ )
739
+
740
+ if latents is None:
741
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
742
+ else:
743
+ latents = latents.to(device)
744
+
745
+ # scale the initial noise by the standard deviation required by the scheduler
746
+ latents = latents * self.scheduler.init_noise_sigma
747
+ return latents
748
+
749
+ def prepare_image_latents(
750
+ self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None
751
+ ):
752
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
753
+ raise ValueError(
754
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
755
+ )
756
+
757
+ image = image.to(device=device, dtype=dtype)
758
+
759
+ batch_size = batch_size * num_images_per_prompt
760
+
761
+ if image.shape[1] == 4:
762
+ image_latents = image
763
+ else:
764
+ image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax")
765
+
766
+ if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
767
+ # expand image_latents for batch_size
768
+ deprecation_message = (
769
+ f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial"
770
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
771
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
772
+ " your script to pass as many initial images as text prompts to suppress this warning."
773
+ )
774
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
775
+ additional_image_per_prompt = batch_size // image_latents.shape[0]
776
+ image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
777
+ elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
778
+ raise ValueError(
779
+ f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
780
+ )
781
+ else:
782
+ image_latents = torch.cat([image_latents], dim=0)
783
+
784
+ if do_classifier_free_guidance:
785
+ uncond_image_latents = torch.zeros_like(image_latents)
786
+ image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0)
787
+
788
+ return image_latents
789
+
790
    @property
    def guidance_scale(self):
        # Text classifier-free guidance weight stored for the current call.
        return self._guidance_scale
+
794
    @property
    def image_guidance_scale(self):
        # Image-conditioning guidance weight stored for the current call.
        return self._image_guidance_scale
+
798
    @property
    def num_timesteps(self):
        # Number of denoising steps recorded for the current call.
        return self._num_timesteps
+
802
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        # CFG is active only when BOTH the text and the image guidance weights enable it.
        return self.guidance_scale > 1.0 and self.image_guidance_scale >= 1.0
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py ADDED
@@ -0,0 +1,932 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
20
+ from transformers.models.clip.modeling_clip import CLIPTextModelOutput
21
+
22
+ from ...image_processor import VaeImageProcessor
23
+ from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
24
+ from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel
25
+ from ...models.embeddings import get_timestep_embedding
26
+ from ...models.lora import adjust_lora_scale_text_encoder
27
+ from ...schedulers import KarrasDiffusionSchedulers
28
+ from ...utils import (
29
+ USE_PEFT_BACKEND,
30
+ deprecate,
31
+ logging,
32
+ replace_example_docstring,
33
+ scale_lora_layers,
34
+ unscale_lora_layers,
35
+ )
36
+ from ...utils.torch_utils import randn_tensor
37
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin
38
+ from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
39
+
40
+
41
# Module-level logger, one per module per diffusers convention.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example spliced into __call__'s docstring via @replace_example_docstring.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableUnCLIPPipeline

        >>> pipe = StableUnCLIPPipeline.from_pretrained(
        ...     "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
        ... )  # TODO update model path
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> images = pipe(prompt).images
        >>> images[0].save("astronaut_horse.png")
        ```
"""
59
+
60
+
61
+ class StableUnCLIPPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
62
+ """
63
+ Pipeline for text-to-image generation using stable unCLIP.
64
+
65
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
66
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
67
+
68
+ The pipeline also inherits the following loading methods:
69
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
70
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
71
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
72
+
73
+ Args:
74
+ prior_tokenizer ([`CLIPTokenizer`]):
75
+ A [`CLIPTokenizer`].
76
+ prior_text_encoder ([`CLIPTextModelWithProjection`]):
77
+ Frozen [`CLIPTextModelWithProjection`] text-encoder.
78
+ prior ([`PriorTransformer`]):
79
+ The canonical unCLIP prior to approximate the image embedding from the text embedding.
80
+ prior_scheduler ([`KarrasDiffusionSchedulers`]):
81
+ Scheduler used in the prior denoising process.
82
+ image_normalizer ([`StableUnCLIPImageNormalizer`]):
83
+ Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image
84
+ embeddings after the noise has been applied.
85
+ image_noising_scheduler ([`KarrasDiffusionSchedulers`]):
86
+ Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined
87
+ by the `noise_level`.
88
+ tokenizer ([`CLIPTokenizer`]):
89
+ A [`CLIPTokenizer`].
90
+ text_encoder ([`CLIPTextModel`]):
91
+ Frozen [`CLIPTextModel`] text-encoder.
92
+ unet ([`UNet2DConditionModel`]):
93
+ A [`UNet2DConditionModel`] to denoise the encoded image latents.
94
+ scheduler ([`KarrasDiffusionSchedulers`]):
95
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents.
96
+ vae ([`AutoencoderKL`]):
97
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
98
+ """
99
+
100
+ _exclude_from_cpu_offload = ["prior", "image_normalizer"]
101
+ model_cpu_offload_seq = "text_encoder->prior_text_encoder->unet->vae"
102
+
103
+ # prior components
104
+ prior_tokenizer: CLIPTokenizer
105
+ prior_text_encoder: CLIPTextModelWithProjection
106
+ prior: PriorTransformer
107
+ prior_scheduler: KarrasDiffusionSchedulers
108
+
109
+ # image noising components
110
+ image_normalizer: StableUnCLIPImageNormalizer
111
+ image_noising_scheduler: KarrasDiffusionSchedulers
112
+
113
+ # regular denoising components
114
+ tokenizer: CLIPTokenizer
115
+ text_encoder: CLIPTextModel
116
+ unet: UNet2DConditionModel
117
+ scheduler: KarrasDiffusionSchedulers
118
+
119
+ vae: AutoencoderKL
120
+
121
    def __init__(
        self,
        # prior components
        prior_tokenizer: CLIPTokenizer,
        prior_text_encoder: CLIPTextModelWithProjection,
        prior: PriorTransformer,
        prior_scheduler: KarrasDiffusionSchedulers,
        # image noising components
        image_normalizer: StableUnCLIPImageNormalizer,
        image_noising_scheduler: KarrasDiffusionSchedulers,
        # regular denoising components
        tokenizer: CLIPTokenizer,
        # NOTE(review): annotated CLIPTextModelWithProjection here but the class-level
        # attribute declares CLIPTextModel — confirm which encoder is actually expected.
        text_encoder: CLIPTextModelWithProjection,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        # vae
        vae: AutoencoderKL,
    ):
        """Register all sub-models with the pipeline and derive VAE-related helpers."""
        super().__init__()

        # register_modules stores each component on `self` and in the model index.
        self.register_modules(
            prior_tokenizer=prior_tokenizer,
            prior_text_encoder=prior_text_encoder,
            prior=prior,
            prior_scheduler=prior_scheduler,
            image_normalizer=image_normalizer,
            image_noising_scheduler=image_noising_scheduler,
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            unet=unet,
            scheduler=scheduler,
            vae=vae,
        )

        # Each VAE down-block halves spatial resolution; latents are smaller by this factor.
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
158
    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder
    def _encode_prior_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
        text_attention_mask: Optional[torch.Tensor] = None,
    ):
        """Encode `prompt` with the prior's CLIP text encoder.

        Returns `(prompt_embeds, text_enc_hid_states, text_mask)`, each repeated
        `num_images_per_prompt` times along the batch dim and, under classifier-free
        guidance, prefixed with unconditional ("") embeddings.
        """
        if text_model_output is None:
            batch_size = len(prompt) if isinstance(prompt, list) else 1
            # get prompt text embeddings
            text_inputs = self.prior_tokenizer(
                prompt,
                padding="max_length",
                max_length=self.prior_tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            text_mask = text_inputs.attention_mask.bool().to(device)

            untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            # Warn (and truncate) when the prompt exceeds CLIP's context window.
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.prior_tokenizer.batch_decode(
                    untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.prior_tokenizer.model_max_length} tokens: {removed_text}"
                )
                text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length]

            prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device))

            # Pooled projection feeds the prior; per-token states feed cross-attention.
            prompt_embeds = prior_text_encoder_output.text_embeds
            text_enc_hid_states = prior_text_encoder_output.last_hidden_state

        else:
            # Caller supplied pre-computed text-encoder outputs; reuse them directly.
            batch_size = text_model_output[0].shape[0]
            prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1]
            text_mask = text_attention_mask

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens = [""] * batch_size

            uncond_input = self.prior_tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=self.prior_tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_text_mask = uncond_input.attention_mask.bool().to(device)
            negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder(
                uncond_input.input_ids.to(device)
            )

            negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds
            uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method

            # NOTE(review): `negative_prompt_embeds` appears to be 2-D (batch, embed_dim),
            # so `seq_len` here is the embedding dim, not a sequence length — confirm.
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_enc_hid_states.shape[1]
            uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_enc_hid_states = uncond_text_enc_hid_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states])

            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_enc_hid_states, text_mask
251
+
252
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
253
+ def _encode_prompt(
254
+ self,
255
+ prompt,
256
+ device,
257
+ num_images_per_prompt,
258
+ do_classifier_free_guidance,
259
+ negative_prompt=None,
260
+ prompt_embeds: Optional[torch.FloatTensor] = None,
261
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
262
+ lora_scale: Optional[float] = None,
263
+ **kwargs,
264
+ ):
265
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
266
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
267
+
268
+ prompt_embeds_tuple = self.encode_prompt(
269
+ prompt=prompt,
270
+ device=device,
271
+ num_images_per_prompt=num_images_per_prompt,
272
+ do_classifier_free_guidance=do_classifier_free_guidance,
273
+ negative_prompt=negative_prompt,
274
+ prompt_embeds=prompt_embeds,
275
+ negative_prompt_embeds=negative_prompt_embeds,
276
+ lora_scale=lora_scale,
277
+ **kwargs,
278
+ )
279
+
280
+ # concatenate for backwards comp
281
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
282
+
283
+ return prompt_embeds
284
+
285
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
286
+ def encode_prompt(
287
+ self,
288
+ prompt,
289
+ device,
290
+ num_images_per_prompt,
291
+ do_classifier_free_guidance,
292
+ negative_prompt=None,
293
+ prompt_embeds: Optional[torch.FloatTensor] = None,
294
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
295
+ lora_scale: Optional[float] = None,
296
+ clip_skip: Optional[int] = None,
297
+ ):
298
+ r"""
299
+ Encodes the prompt into text encoder hidden states.
300
+
301
+ Args:
302
+ prompt (`str` or `List[str]`, *optional*):
303
+ prompt to be encoded
304
+ device: (`torch.device`):
305
+ torch device
306
+ num_images_per_prompt (`int`):
307
+ number of images that should be generated per prompt
308
+ do_classifier_free_guidance (`bool`):
309
+ whether to use classifier free guidance or not
310
+ negative_prompt (`str` or `List[str]`, *optional*):
311
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
312
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
313
+ less than `1`).
314
+ prompt_embeds (`torch.FloatTensor`, *optional*):
315
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
316
+ provided, text embeddings will be generated from `prompt` input argument.
317
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
318
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
319
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
320
+ argument.
321
+ lora_scale (`float`, *optional*):
322
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
323
+ clip_skip (`int`, *optional*):
324
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
325
+ the output of the pre-final layer will be used for computing the prompt embeddings.
326
+ """
327
+ # set lora scale so that monkey patched LoRA
328
+ # function of text encoder can correctly access it
329
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
330
+ self._lora_scale = lora_scale
331
+
332
+ # dynamically adjust the LoRA scale
333
+ if not USE_PEFT_BACKEND:
334
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
335
+ else:
336
+ scale_lora_layers(self.text_encoder, lora_scale)
337
+
338
+ if prompt is not None and isinstance(prompt, str):
339
+ batch_size = 1
340
+ elif prompt is not None and isinstance(prompt, list):
341
+ batch_size = len(prompt)
342
+ else:
343
+ batch_size = prompt_embeds.shape[0]
344
+
345
+ if prompt_embeds is None:
346
+ # textual inversion: process multi-vector tokens if necessary
347
+ if isinstance(self, TextualInversionLoaderMixin):
348
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
349
+
350
+ text_inputs = self.tokenizer(
351
+ prompt,
352
+ padding="max_length",
353
+ max_length=self.tokenizer.model_max_length,
354
+ truncation=True,
355
+ return_tensors="pt",
356
+ )
357
+ text_input_ids = text_inputs.input_ids
358
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
359
+
360
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
361
+ text_input_ids, untruncated_ids
362
+ ):
363
+ removed_text = self.tokenizer.batch_decode(
364
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
365
+ )
366
+ logger.warning(
367
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
368
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
369
+ )
370
+
371
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
372
+ attention_mask = text_inputs.attention_mask.to(device)
373
+ else:
374
+ attention_mask = None
375
+
376
+ if clip_skip is None:
377
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
378
+ prompt_embeds = prompt_embeds[0]
379
+ else:
380
+ prompt_embeds = self.text_encoder(
381
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
382
+ )
383
+ # Access the `hidden_states` first, that contains a tuple of
384
+ # all the hidden states from the encoder layers. Then index into
385
+ # the tuple to access the hidden states from the desired layer.
386
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
387
+ # We also need to apply the final LayerNorm here to not mess with the
388
+ # representations. The `last_hidden_states` that we typically use for
389
+ # obtaining the final prompt representations passes through the LayerNorm
390
+ # layer.
391
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
392
+
393
+ if self.text_encoder is not None:
394
+ prompt_embeds_dtype = self.text_encoder.dtype
395
+ elif self.unet is not None:
396
+ prompt_embeds_dtype = self.unet.dtype
397
+ else:
398
+ prompt_embeds_dtype = prompt_embeds.dtype
399
+
400
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
401
+
402
+ bs_embed, seq_len, _ = prompt_embeds.shape
403
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
404
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
405
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
406
+
407
+ # get unconditional embeddings for classifier free guidance
408
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
409
+ uncond_tokens: List[str]
410
+ if negative_prompt is None:
411
+ uncond_tokens = [""] * batch_size
412
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
413
+ raise TypeError(
414
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
415
+ f" {type(prompt)}."
416
+ )
417
+ elif isinstance(negative_prompt, str):
418
+ uncond_tokens = [negative_prompt]
419
+ elif batch_size != len(negative_prompt):
420
+ raise ValueError(
421
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
422
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
423
+ " the batch size of `prompt`."
424
+ )
425
+ else:
426
+ uncond_tokens = negative_prompt
427
+
428
+ # textual inversion: process multi-vector tokens if necessary
429
+ if isinstance(self, TextualInversionLoaderMixin):
430
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
431
+
432
+ max_length = prompt_embeds.shape[1]
433
+ uncond_input = self.tokenizer(
434
+ uncond_tokens,
435
+ padding="max_length",
436
+ max_length=max_length,
437
+ truncation=True,
438
+ return_tensors="pt",
439
+ )
440
+
441
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
442
+ attention_mask = uncond_input.attention_mask.to(device)
443
+ else:
444
+ attention_mask = None
445
+
446
+ negative_prompt_embeds = self.text_encoder(
447
+ uncond_input.input_ids.to(device),
448
+ attention_mask=attention_mask,
449
+ )
450
+ negative_prompt_embeds = negative_prompt_embeds[0]
451
+
452
+ if do_classifier_free_guidance:
453
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
454
+ seq_len = negative_prompt_embeds.shape[1]
455
+
456
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
457
+
458
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
459
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
460
+
461
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
462
+ # Retrieve the original scale by scaling back the LoRA layers
463
+ unscale_lora_layers(self.text_encoder, lora_scale)
464
+
465
+ return prompt_embeds, negative_prompt_embeds
466
+
467
def decode_latents(self, latents):
    """Decode VAE latents into a NumPy image batch (deprecated helper).

    Deprecated in favor of `VaeImageProcessor.postprocess(...)`; kept only for
    backward compatibility.

    Args:
        latents: latent tensor produced by the diffusion process.

    Returns:
        `np.ndarray` of shape (batch, height, width, channels) with values in [0, 1].
    """
    deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
    deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

    # Undo the scaling applied when the latents were produced, then decode.
    scaled = 1 / self.vae.config.scaling_factor * latents
    decoded = self.vae.decode(scaled, return_dict=False)[0]
    # Map from [-1, 1] to [0, 1], clamping out-of-range values.
    decoded = (decoded / 2 + 0.5).clamp(0, 1)
    # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
    return decoded.cpu().permute(0, 2, 3, 1).float().numpy()
478
+
479
def prepare_prior_extra_step_kwargs(self, generator, eta):
    """Build the optional kwargs for `self.prior_scheduler.step`.

    Not every prior scheduler has the same `step` signature, so `eta` and
    `generator` are forwarded only when the scheduler accepts them. `eta`
    corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502);
    it is only used by `DDIMScheduler` and should be in [0, 1].
    """
    # Inspect the step signature once and reuse it for both checks.
    step_params = set(inspect.signature(self.prior_scheduler.step).parameters.keys())

    extra_step_kwargs = {}
    if "eta" in step_params:
        extra_step_kwargs["eta"] = eta
    if "generator" in step_params:
        extra_step_kwargs["generator"] = generator
    return extra_step_kwargs
496
+
497
def prepare_extra_step_kwargs(self, generator, eta):
    """Build the optional kwargs for `self.scheduler.step`.

    Schedulers differ in their `step` signatures, so `eta` and `generator`
    are forwarded only when accepted. `eta` corresponds to η in the DDIM
    paper (https://arxiv.org/abs/2010.02502); it is only used by
    `DDIMScheduler` and should be in [0, 1].
    """
    # Inspect the step signature once and reuse it for both checks.
    step_params = set(inspect.signature(self.scheduler.step).parameters.keys())

    extra_step_kwargs = {}
    if "eta" in step_params:
        extra_step_kwargs["eta"] = eta
    if "generator" in step_params:
        extra_step_kwargs["generator"] = generator
    return extra_step_kwargs
514
+
515
def check_inputs(
    self,
    prompt,
    height,
    width,
    callback_steps,
    noise_level,
    negative_prompt=None,
    prompt_embeds=None,
    negative_prompt_embeds=None,
):
    """Validate the user-facing arguments of `__call__` before running.

    Raises:
        ValueError: if `height`/`width` are not multiples of 8, `callback_steps`
            is not a positive integer, the `prompt`/`prompt_embeds` (or
            `negative_prompt`/`negative_prompt_embeds`) pair is over- or
            under-specified, the embedding shapes disagree, or `noise_level`
            is out of range for `self.image_noising_scheduler`.
        TypeError: if `negative_prompt` is not the same type as `prompt`.
    """
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

    if (callback_steps is None) or (
        callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
    ):
        raise ValueError(
            f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
            f" {type(callback_steps)}."
        )

    if prompt is not None and prompt_embeds is not None:
        raise ValueError(
            "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two."
        )

    if prompt is None and prompt_embeds is None:
        raise ValueError(
            "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
        )

    if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
        raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

    # Bug fix: this branch fires when BOTH values are defined, but the original
    # message claimed both were left undefined. Use the same wording as the
    # `prompt`/`prompt_embeds` over-specification error above.
    if negative_prompt is not None and negative_prompt_embeds is not None:
        raise ValueError(
            "Provide either `negative_prompt` or `negative_prompt_embeds`. Please make sure to define only one of the two."
        )

    if prompt is not None and negative_prompt is not None:
        if type(prompt) is not type(negative_prompt):
            raise TypeError(
                f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                f" {type(prompt)}."
            )

    if prompt_embeds is not None and negative_prompt_embeds is not None:
        if prompt_embeds.shape != negative_prompt_embeds.shape:
            raise ValueError(
                "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                f" {negative_prompt_embeds.shape}."
            )

    # `noise_level` indexes into the image noising scheduler's timesteps, so it
    # must be a valid timestep index.
    if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps:
        raise ValueError(
            f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive."
        )
574
+
575
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
    """Return initial latents of `shape`, scaled by the scheduler's init sigma.

    If `latents` is not supplied, fresh Gaussian noise is drawn with
    `generator`; otherwise the provided tensor is validated against `shape`
    and moved to `device`.

    Raises:
        ValueError: if a provided `latents` tensor has the wrong shape.
    """
    if latents is not None:
        # Caller-supplied latents: validate and relocate instead of sampling.
        if latents.shape != shape:
            raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
        latents = latents.to(device)
    else:
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

    # Scale by the scheduler's expected initial noise magnitude.
    return latents * scheduler.init_noise_sigma
586
+
587
def noise_image_embeddings(
    self,
    image_embeds: torch.Tensor,
    noise_level: int,
    noise: Optional[torch.FloatTensor] = None,
    generator: Optional[torch.Generator] = None,
):
    """
    Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher
    `noise_level` increases the variance in the final un-noised images.

    The noise is applied in two ways:
    1. A noise schedule is applied directly to the embeddings.
    2. A vector of sinusoidal time embeddings are appended to the output.

    In both cases, the amount of noise is controlled by the same `noise_level`.

    The embeddings are normalized before the noise is applied and un-normalized after the noise is applied.

    Note: the returned tensor has twice the feature width of the input, since the
    sinusoidal noise-level embedding is concatenated along dim 1.
    """
    # Draw fresh Gaussian noise matching the embeddings unless the caller supplied it.
    if noise is None:
        noise = randn_tensor(
            image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype
        )

    # Broadcast the scalar noise level to one timestep per batch element.
    noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device)

    # Normalize with the CLIP embedding statistics before noising.
    self.image_normalizer.to(image_embeds.device)
    image_embeds = self.image_normalizer.scale(image_embeds)

    image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise)

    # Undo the normalization so downstream consumers see the original scale.
    image_embeds = self.image_normalizer.unscale(image_embeds)

    # Sinusoidal embedding of the noise level, appended as extra conditioning.
    noise_level = get_timestep_embedding(
        timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0
    )

    # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors,
    # but we might actually be running in fp16. so we need to cast here.
    # there might be better ways to encapsulate this.
    noise_level = noise_level.to(image_embeds.dtype)

    image_embeds = torch.cat((image_embeds, noise_level), 1)

    return image_embeds
632
+
633
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    # regular denoising process args
    prompt: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 20,
    guidance_scale: float = 10.0,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    num_images_per_prompt: Optional[int] = 1,
    eta: float = 0.0,
    generator: Optional[torch.Generator] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
    callback_steps: int = 1,
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    noise_level: int = 0,
    # prior args
    prior_num_inference_steps: int = 25,
    prior_guidance_scale: float = 4.0,
    prior_latents: Optional[torch.FloatTensor] = None,
    clip_skip: Optional[int] = None,
):
    """
    The call function to the pipeline for generation.

    Args:
        prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
        height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
            The height in pixels of the generated image.
        width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
            The width in pixels of the generated image.
        num_inference_steps (`int`, *optional*, defaults to 20):
            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
            expense of slower inference.
        guidance_scale (`float`, *optional*, defaults to 10.0):
            A higher guidance scale value encourages the model to generate images closely linked to the text
            `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
        negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide what to not include in image generation. If not defined, you need to
            pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
        num_images_per_prompt (`int`, *optional*, defaults to 1):
            The number of images to generate per prompt.
        eta (`float`, *optional*, defaults to 0.0):
            Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
            to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
        generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        latents (`torch.FloatTensor`, *optional*):
            Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
            generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
            tensor is generated by sampling using the supplied random `generator`.
        prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
            provided, text embeddings are generated from the `prompt` input argument.
        negative_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
            not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
        output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
        callback (`Callable`, *optional*):
            A function that calls every `callback_steps` steps during inference. The function is called with the
            following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
        callback_steps (`int`, *optional*, defaults to 1):
            The frequency at which the `callback` function is called. If not specified, the callback is called at
            every step.
        cross_attention_kwargs (`dict`, *optional*):
            A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
            [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
        noise_level (`int`, *optional*, defaults to `0`):
            The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in
            the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details.
        prior_num_inference_steps (`int`, *optional*, defaults to 25):
            The number of denoising steps in the prior denoising process. More denoising steps usually lead to a
            higher quality image at the expense of slower inference.
        prior_guidance_scale (`float`, *optional*, defaults to 4.0):
            A higher guidance scale value encourages the model to generate images closely linked to the text
            `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
        prior_latents (`torch.FloatTensor`, *optional*):
            Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
            embedding generation in the prior denoising process. Can be used to tweak the same generation with
            different prompts. If not provided, a latents tensor is generated by sampling using the supplied random
            `generator`.
        clip_skip (`int`, *optional*):
            Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
            the output of the pre-final layer will be used for computing the prompt embeddings.
    Examples:

    Returns:
        [`~pipelines.ImagePipelineOutput`] or `tuple`:
            [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning
            a tuple, the first element is a list with the generated images.
    """
    # 0. Default height and width to unet
    height = height or self.unet.config.sample_size * self.vae_scale_factor
    width = width or self.unet.config.sample_size * self.vae_scale_factor

    # 1. Check inputs. Raise error if not correct
    self.check_inputs(
        prompt=prompt,
        height=height,
        width=width,
        callback_steps=callback_steps,
        noise_level=noise_level,
        negative_prompt=negative_prompt,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
    )

    # 2. Define call parameters
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    # NOTE: from here on `batch_size` counts total images, not prompts.
    batch_size = batch_size * num_images_per_prompt

    device = self._execution_device

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    prior_do_classifier_free_guidance = prior_guidance_scale > 1.0

    # 3. Encode input prompt
    prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt(
        prompt=prompt,
        device=device,
        num_images_per_prompt=num_images_per_prompt,
        do_classifier_free_guidance=prior_do_classifier_free_guidance,
    )

    # 4. Prepare prior timesteps
    self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
    prior_timesteps_tensor = self.prior_scheduler.timesteps

    # 5. Prepare prior latent variables
    embedding_dim = self.prior.config.embedding_dim
    prior_latents = self.prepare_latents(
        (batch_size, embedding_dim),
        prior_prompt_embeds.dtype,
        device,
        generator,
        prior_latents,
        self.prior_scheduler,
    )

    # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
    prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta)

    # 7. Prior denoising loop
    for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents
        latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t)

        predicted_image_embedding = self.prior(
            latent_model_input,
            timestep=t,
            proj_embedding=prior_prompt_embeds,
            encoder_hidden_states=prior_text_encoder_hidden_states,
            attention_mask=prior_text_mask,
        ).predicted_image_embedding

        if prior_do_classifier_free_guidance:
            predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
            predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
                predicted_image_embedding_text - predicted_image_embedding_uncond
            )

        prior_latents = self.prior_scheduler.step(
            predicted_image_embedding,
            timestep=t,
            sample=prior_latents,
            **prior_extra_step_kwargs,
            return_dict=False,
        )[0]

        # NOTE(review): the callback is also invoked during the main denoising
        # loop below, so a user callback sees both phases — confirm desired.
        if callback is not None and i % callback_steps == 0:
            callback(i, t, prior_latents)

    prior_latents = self.prior.post_process_latents(prior_latents)

    image_embeds = prior_latents

    # done prior

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 8. Encode input prompt
    text_encoder_lora_scale = (
        cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
    )
    prompt_embeds, negative_prompt_embeds = self.encode_prompt(
        prompt=prompt,
        device=device,
        num_images_per_prompt=num_images_per_prompt,
        do_classifier_free_guidance=do_classifier_free_guidance,
        negative_prompt=negative_prompt,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        lora_scale=text_encoder_lora_scale,
        clip_skip=clip_skip,
    )
    # For classifier free guidance, we need to do two forward passes.
    # Here we concatenate the unconditional and text embeddings into a single batch
    # to avoid doing two forward passes
    if do_classifier_free_guidance:
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

    # 9. Prepare image embeddings
    image_embeds = self.noise_image_embeddings(
        image_embeds=image_embeds,
        noise_level=noise_level,
        generator=generator,
    )

    if do_classifier_free_guidance:
        # The unconditional image embedding is all zeros.
        negative_prompt_embeds = torch.zeros_like(image_embeds)

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        image_embeds = torch.cat([negative_prompt_embeds, image_embeds])

    # 10. Prepare timesteps
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = self.scheduler.timesteps

    # 11. Prepare latent variables
    num_channels_latents = self.unet.config.in_channels
    shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
    latents = self.prepare_latents(
        shape=shape,
        dtype=prompt_embeds.dtype,
        device=device,
        generator=generator,
        latents=latents,
        scheduler=self.scheduler,
    )

    # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
    extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

    # 13. Denoising loop
    for i, t in enumerate(self.progress_bar(timesteps)):
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

        # predict the noise residual; the noised image embeddings condition the
        # UNet through its `class_labels` input
        noise_pred = self.unet(
            latent_model_input,
            t,
            encoder_hidden_states=prompt_embeds,
            class_labels=image_embeds,
            cross_attention_kwargs=cross_attention_kwargs,
            return_dict=False,
        )[0]

        # perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

        if callback is not None and i % callback_steps == 0:
            # report the scheduler-order-adjusted step index to the callback
            step_idx = i // getattr(self.scheduler, "order", 1)
            callback(step_idx, t, latents)

    if not output_type == "latent":
        image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
    else:
        image = latents

    image = self.image_processor.postprocess(image, output_type=output_type)

    # Offload all models
    self.maybe_free_model_hooks()

    if not return_dict:
        return (image,)

    return ImagePipelineOutput(images=image)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/safety_checker_flax.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Optional, Tuple
16
+
17
+ import jax
18
+ import jax.numpy as jnp
19
+ from flax import linen as nn
20
+ from flax.core.frozen_dict import FrozenDict
21
+ from transformers import CLIPConfig, FlaxPreTrainedModel
22
+ from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
23
+
24
+
25
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Pairwise cosine similarity between two batches of embeddings.

    Args:
        emb_1: array of shape (n, d).
        emb_2: array of shape (m, d).
        eps: lower bound applied to the row norms to avoid division by zero.

    Returns:
        (n, m) matrix of cosine similarities.
    """
    # Pass the lower bound positionally: the `a_min` keyword was deprecated in
    # JAX 0.4.27 and removed in later releases (renamed to `min`), while the
    # positional form works across all versions.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
29
+
30
+
31
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """Flax module that flags NSFW images.

    CLIP image embeddings are compared (cosine similarity) against learned
    "concept" and "special care" embeddings; an image is flagged when any
    similarity exceeds its per-concept threshold.
    """

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        # 17 NSFW concepts and 3 "special care" concepts. The `ones`
        # initializers are placeholders — real values come from the pretrained
        # checkpoint (the param names are checkpoint keys; do not rename).
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        # Per-concept similarity thresholds.
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        # Pooled CLIP vision output projected into the shared embedding space.
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        # One boolean per image in the batch.
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
69
+
70
+
71
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """Flax pretrained-model wrapper around `FlaxStableDiffusionSafetyCheckerModule`.

    Handles parameter initialization/loading via the `FlaxPreTrainedModel`
    machinery and exposes a `__call__` that runs the safety-check module.
    """

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Default to a single 224x224 RGB image in NHWC layout.
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize module parameters from a random-normal dummy input of `input_shape`."""
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        # NCHW -> NHWC, the layout expected by the Flax CLIP vision model.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Optional, Union
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+ from ...configuration_utils import ConfigMixin, register_to_config
21
+ from ...models.modeling_utils import ModelMixin
22
+
23
+
24
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable
    unCLIP, and normalizes image embeddings before noise is applied /
    un-normalizes the noised embeddings afterwards.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        # Learned statistics; real values are loaded from the checkpoint.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        # Rebuild both parameters on the requested device/dtype and return
        # self so calls can be chained.
        for attr in ("mean", "std"):
            moved = getattr(self, attr).to(torch_device).to(torch_dtype)
            setattr(self, attr, nn.Parameter(moved))
        return self

    def scale(self, embeds):
        """Normalize embeddings with the stored mean and standard deviation."""
        return (embeds - self.mean) * 1.0 / self.std

    def unscale(self, embeds):
        """Invert `scale`, restoring the original embedding statistics."""
        return (embeds * self.std) + self.mean
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}


def _k_diffusion_dependencies_available() -> bool:
    # transformers, torch and a recent enough k-diffusion are all required.
    return (
        is_transformers_available()
        and is_torch_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    )


try:
    if not _k_diffusion_dependencies_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise a helpful error on use.
    from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects))
else:
    _import_structure["pipeline_stable_diffusion_k_diffusion"] = ["StableDiffusionKDiffusionPipeline"]
    _import_structure["pipeline_stable_diffusion_xl_k_diffusion"] = ["StableDiffusionXLKDiffusionPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager imports: used by static type checkers and when slow imports are requested.
    try:
        if not _k_diffusion_dependencies_available():
            raise OptionalDependencyNotAvailable()

    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *
    else:
        from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
        from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline

else:
    # Defer all submodule imports until an attribute is actually accessed.
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}


try:
    # Both transformers and torch must be installed for the real pipeline.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise a helpful error on use.
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure.update({"pipeline_stable_diffusion_ldm3d": ["StableDiffusionLDM3DPipeline"]})

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager imports: used by static type checkers and when slow imports are requested.
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()

    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline

else:
    # Defer all submodule imports until an attribute is actually accessed.
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_ldm3d/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__init__.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from enum import Enum
3
+ from typing import TYPE_CHECKING, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL
7
+ from PIL import Image
8
+
9
+ from ...utils import (
10
+ DIFFUSERS_SLOW_IMPORT,
11
+ BaseOutput,
12
+ OptionalDependencyNotAvailable,
13
+ _LazyModule,
14
+ get_objects_from_module,
15
+ is_torch_available,
16
+ is_transformers_available,
17
+ )
18
+
19
+
20
@dataclass
class SafetyConfig(object):
    """Preset parameter sets for Safe Latent Diffusion (SLD) guidance.

    Each preset is a dict whose keys map 1:1 onto the ``sld_*`` keyword
    arguments of the safe pipeline call. The presets are ordered by
    increasing strength of the safety intervention (WEAK < MEDIUM < STRONG
    < MAX), as reflected by the growing ``sld_guidance_scale`` values and
    shrinking warmup periods.
    """

    WEAK = {
        "sld_warmup_steps": 15,
        "sld_guidance_scale": 20,
        "sld_threshold": 0.0,
        "sld_momentum_scale": 0.0,
        "sld_mom_beta": 0.0,
    }
    MEDIUM = {
        "sld_warmup_steps": 10,
        "sld_guidance_scale": 1000,
        "sld_threshold": 0.01,
        "sld_momentum_scale": 0.3,
        "sld_mom_beta": 0.4,
    }
    STRONG = {
        "sld_warmup_steps": 7,
        "sld_guidance_scale": 2000,
        "sld_threshold": 0.025,
        "sld_momentum_scale": 0.5,
        "sld_mom_beta": 0.7,
    }
    # MAX applies guidance from step 0 with the largest scale and threshold.
    MAX = {
        "sld_warmup_steps": 0,
        "sld_guidance_scale": 5000,
        "sld_threshold": 1.0,
        "sld_momentum_scale": 0.5,
        "sld_mom_beta": 0.7,
    }
50
+
51
+
52
_dummy_objects = {}
_additional_imports = {}
_import_structure = {}

# `SafetyConfig` is defined in this module (not a submodule), so it is attached to
# the lazy module separately via `_additional_imports`.
_additional_imports.update({"SafetyConfig": SafetyConfig})

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure.update(
        {
            "pipeline_output": ["StableDiffusionSafePipelineOutput"],
            "pipeline_stable_diffusion_safe": ["StableDiffusionPipelineSafe"],
            # Fixed: `safety_checker.py` exports `SafeStableDiffusionSafetyChecker`
            # (see the eager import below); the previous entry
            # `StableDiffusionSafetyChecker` made lazy attribute access raise
            # AttributeError because no such name exists in that submodule.
            "safety_checker": ["SafeStableDiffusionSafetyChecker"],
        }
    )


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager imports: used by static type checkers and when slow imports are requested.
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_output import StableDiffusionSafePipelineOutput
        from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe
        from .safety_checker import SafeStableDiffusionSafetyChecker

else:
    # Defer all submodule imports until an attribute is actually accessed.
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
    for name, value in _additional_imports.items():
        setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.09 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/pipeline_output.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/pipeline_stable_diffusion_safe.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/__pycache__/safety_checker.cpython-310.pyc ADDED
Binary file (3.25 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+
7
+ from ...utils import (
8
+ BaseOutput,
9
+ )
10
+
11
+
12
@dataclass
class StableDiffusionSafePipelineOutput(BaseOutput):
    """
    Output class for Safe Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
        nsfw_content_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, or `None` if safety checking could not be performed.
        unsafe_images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images that were flagged by the safety checker and may contain "not-safe-for-work"
            (nsfw) content, or `None` if no safety check was performed or no images were flagged.
        applied_safety_concept (`str`)
            The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
    unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]]
    applied_safety_concept: Optional[str]
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py ADDED
@@ -0,0 +1,764 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import warnings
3
+ from typing import Callable, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+ from packaging import version
8
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
9
+
10
+ from ...configuration_utils import FrozenDict
11
+ from ...image_processor import PipelineImageInput
12
+ from ...loaders import IPAdapterMixin
13
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
14
+ from ...schedulers import KarrasDiffusionSchedulers
15
+ from ...utils import deprecate, logging
16
+ from ...utils.torch_utils import randn_tensor
17
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
18
+ from . import StableDiffusionSafePipelineOutput
19
+ from .safety_checker import SafeStableDiffusionSafetyChecker
20
+
21
+
22
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
+
24
+
25
+ class StableDiffusionPipelineSafe(DiffusionPipeline, StableDiffusionMixin, IPAdapterMixin):
26
+ r"""
27
+ Pipeline based on the [`StableDiffusionPipeline`] for text-to-image generation using Safe Latent Diffusion.
28
+
29
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
30
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
31
+
32
+ The pipeline also inherits the following loading methods:
33
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
34
+
35
+ Args:
36
+ vae ([`AutoencoderKL`]):
37
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
38
+ text_encoder ([`~transformers.CLIPTextModel`]):
39
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
40
+ tokenizer ([`~transformers.CLIPTokenizer`]):
41
+ A `CLIPTokenizer` to tokenize text.
42
+ unet ([`UNet2DConditionModel`]):
43
+ A `UNet2DConditionModel` to denoise the encoded image latents.
44
+ scheduler ([`SchedulerMixin`]):
45
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
46
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
47
+ safety_checker ([`StableDiffusionSafetyChecker`]):
48
+ Classification module that estimates whether generated images could be considered offensive or harmful.
49
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
50
+ about a model's potential harms.
51
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
52
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
53
+ """
54
+
55
+ model_cpu_offload_seq = "text_encoder->unet->vae"
56
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
57
+
58
+ def __init__(
59
+ self,
60
+ vae: AutoencoderKL,
61
+ text_encoder: CLIPTextModel,
62
+ tokenizer: CLIPTokenizer,
63
+ unet: UNet2DConditionModel,
64
+ scheduler: KarrasDiffusionSchedulers,
65
+ safety_checker: SafeStableDiffusionSafetyChecker,
66
+ feature_extractor: CLIPImageProcessor,
67
+ image_encoder: Optional[CLIPVisionModelWithProjection] = None,
68
+ requires_safety_checker: bool = True,
69
+ ):
70
+ super().__init__()
71
+ safety_concept: Optional[str] = (
72
+ "an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity,"
73
+ " bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child"
74
+ " abuse, brutality, cruelty"
75
+ )
76
+
77
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
78
+ deprecation_message = (
79
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
80
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
81
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
82
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
83
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
84
+ " file"
85
+ )
86
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
87
+ new_config = dict(scheduler.config)
88
+ new_config["steps_offset"] = 1
89
+ scheduler._internal_dict = FrozenDict(new_config)
90
+
91
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
92
+ deprecation_message = (
93
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
94
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
95
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
96
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
97
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
98
+ )
99
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
100
+ new_config = dict(scheduler.config)
101
+ new_config["clip_sample"] = False
102
+ scheduler._internal_dict = FrozenDict(new_config)
103
+
104
+ if safety_checker is None and requires_safety_checker:
105
+ logger.warning(
106
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
107
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
108
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
109
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
110
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
111
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
112
+ )
113
+
114
+ if safety_checker is not None and feature_extractor is None:
115
+ raise ValueError(
116
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
117
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
118
+ )
119
+
120
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
121
+ version.parse(unet.config._diffusers_version).base_version
122
+ ) < version.parse("0.9.0.dev0")
123
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
124
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
125
+ deprecation_message = (
126
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
127
+ " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
128
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
129
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
130
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
131
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
132
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
133
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
134
+ " the `unet/config.json` file"
135
+ )
136
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
137
+ new_config = dict(unet.config)
138
+ new_config["sample_size"] = 64
139
+ unet._internal_dict = FrozenDict(new_config)
140
+
141
+ self.register_modules(
142
+ vae=vae,
143
+ text_encoder=text_encoder,
144
+ tokenizer=tokenizer,
145
+ unet=unet,
146
+ scheduler=scheduler,
147
+ safety_checker=safety_checker,
148
+ feature_extractor=feature_extractor,
149
+ image_encoder=image_encoder,
150
+ )
151
+ self._safety_text_concept = safety_concept
152
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
153
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
154
+
155
    @property
    def safety_concept(self):
        r"""
        Getter method for the safety concept used with SLD

        Returns:
            `str`: The text describing the safety concept
        """
        return self._safety_text_concept

    @safety_concept.setter
    def safety_concept(self, concept):
        r"""
        Setter method for the safety concept used with SLD

        The concept text is re-encoded by `_encode_prompt` on each pipeline
        invocation, so a new value takes effect on the next call.

        Args:
            concept (`str`):
                The text of the new safety concept
        """
        self._safety_text_concept = concept
175
+
176
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
        enable_safety_guidance,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            enable_safety_guidance (`bool`):
                whether to additionally encode the safety concept (`self._safety_text_concept`) so SLD can steer
                away from it.

        Returns:
            `torch.Tensor`: the prompt embeddings. With classifier-free guidance the batch layout is
            `[negative, prompt]`; with safety guidance also enabled it is `[negative, prompt, safety_concept]`.
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids

        # Warn when the prompt was silently truncated to the tokenizer's maximum length.
        if not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        prompt_embeds = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        prompt_embeds = prompt_embeds[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # Pad the unconditional input to the same length as the prompt input.
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # Encode the safety concept text
            if enable_safety_guidance:
                safety_concept_input = self.tokenizer(
                    [self._safety_text_concept],
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0]

                # duplicate safety embeddings for each generation per prompt, using mps friendly method
                seq_len = safety_embeddings.shape[1]
                safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1)
                safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

                # For classifier free guidance + sld, we need to do three forward passes.
                # Here we concatenate the unconditional and text embeddings into a single batch
                # to avoid doing three forward passes
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, safety_embeddings])

            else:
                # For classifier free guidance, we need to do two forward passes.
                # Here we concatenate the unconditional and text embeddings into a single batch
                # to avoid doing two forward passes
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
310
+
311
+ def run_safety_checker(self, image, device, dtype, enable_safety_guidance):
312
+ if self.safety_checker is not None:
313
+ images = image.copy()
314
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
315
+ image, has_nsfw_concept = self.safety_checker(
316
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
317
+ )
318
+ flagged_images = np.zeros((2, *image.shape[1:]))
319
+ if any(has_nsfw_concept):
320
+ logger.warning(
321
+ "Potential NSFW content was detected in one or more images. A black image will be returned"
322
+ " instead."
323
+ f"{'You may look at this images in the `unsafe_images` variable of the output at your own discretion.' if enable_safety_guidance else 'Try again with a different prompt and/or seed.'}"
324
+ )
325
+ for idx, has_nsfw_concept in enumerate(has_nsfw_concept):
326
+ if has_nsfw_concept:
327
+ flagged_images[idx] = images[idx]
328
+ image[idx] = np.zeros(image[idx].shape) # black image
329
+ else:
330
+ has_nsfw_concept = None
331
+ flagged_images = None
332
+ return image, has_nsfw_concept, flagged_images
333
+
334
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
335
    def decode_latents(self, latents):
        """Decode VAE latents into a float32 numpy image batch in [0, 1], NHWC layout. Deprecated."""
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        # Undo the scaling that was applied when the latents were produced.
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        # Map the decoder output from [-1, 1] to [0, 1].
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image
345
+
346
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
347
    def prepare_extra_step_kwargs(self, generator, eta):
        """Build the kwargs dict for `scheduler.step`, forwarding only arguments the scheduler accepts."""
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs
363
+
364
+ # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs
365
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        """Validate user-facing call arguments, raising ValueError/assertion-style errors early.

        Checks: dimensions divisible by 8, positive `callback_steps`, allowed callback tensor
        names, mutually exclusive `prompt`/`prompt_embeds` (and their negative counterparts),
        and matching embedding shapes.
        """
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Exactly one of `prompt` / `prompt_embeds` must be provided.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
416
+
417
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
418
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        """Create (or reuse) the initial latents, scaled by the scheduler's init noise sigma.

        `height`/`width` are pixel dimensions; the latent grid is smaller by
        `self.vae_scale_factor` in each spatial dimension.
        """
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            # Caller-provided latents are only moved to the target device, not re-sampled.
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents
434
+
435
+ def perform_safety_guidance(
436
+ self,
437
+ enable_safety_guidance,
438
+ safety_momentum,
439
+ noise_guidance,
440
+ noise_pred_out,
441
+ i,
442
+ sld_guidance_scale,
443
+ sld_warmup_steps,
444
+ sld_threshold,
445
+ sld_momentum_scale,
446
+ sld_mom_beta,
447
+ ):
448
+ # Perform SLD guidance
449
+ if enable_safety_guidance:
450
+ if safety_momentum is None:
451
+ safety_momentum = torch.zeros_like(noise_guidance)
452
+ noise_pred_text, noise_pred_uncond = noise_pred_out[0], noise_pred_out[1]
453
+ noise_pred_safety_concept = noise_pred_out[2]
454
+
455
+ # Equation 6
456
+ scale = torch.clamp(torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0)
457
+
458
+ # Equation 6
459
+ safety_concept_scale = torch.where(
460
+ (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, torch.zeros_like(scale), scale
461
+ )
462
+
463
+ # Equation 4
464
+ noise_guidance_safety = torch.mul((noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)
465
+
466
+ # Equation 7
467
+ noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum
468
+
469
+ # Equation 8
470
+ safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety
471
+
472
+ if i >= sld_warmup_steps: # Warmup
473
+ # Equation 3
474
+ noise_guidance = noise_guidance - noise_guidance_safety
475
+ return noise_guidance, safety_momentum
476
+
477
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
478
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
479
+ dtype = next(self.image_encoder.parameters()).dtype
480
+
481
+ if not isinstance(image, torch.Tensor):
482
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
483
+
484
+ image = image.to(device=device, dtype=dtype)
485
+ if output_hidden_states:
486
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
487
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
488
+ uncond_image_enc_hidden_states = self.image_encoder(
489
+ torch.zeros_like(image), output_hidden_states=True
490
+ ).hidden_states[-2]
491
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
492
+ num_images_per_prompt, dim=0
493
+ )
494
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
495
+ else:
496
+ image_embeds = self.image_encoder(image).image_embeds
497
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
498
+ uncond_image_embeds = torch.zeros_like(image_embeds)
499
+
500
+ return image_embeds, uncond_image_embeds
501
+
502
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        sld_guidance_scale: Optional[float] = 1000,
        sld_warmup_steps: Optional[int] = 10,
        sld_threshold: Optional[float] = 0.01,
        sld_momentum_scale: Optional[float] = 0.3,
        sld_mom_beta: Optional[float] = 0.4,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            sld_guidance_scale (`float`, *optional*, defaults to 1000):
                If `sld_guidance_scale < 1`, safety guidance is disabled.
            sld_warmup_steps (`int`, *optional*, defaults to 10):
                Number of warmup steps for safety guidance. SLD is only applied for diffusion steps greater than
                `sld_warmup_steps`.
            sld_threshold (`float`, *optional*, defaults to 0.01):
                Threshold that separates the hyperplane between appropriate and inappropriate images.
            sld_momentum_scale (`float`, *optional*, defaults to 0.3):
                Scale of the SLD momentum to be added to the safety guidance at each diffusion step. If set to 0.0,
                momentum is disabled. Momentum is built up during warmup for diffusion steps smaller than
                `sld_warmup_steps`.
            sld_mom_beta (`float`, *optional*, defaults to 0.4):
                Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous
                momentum is kept. Momentum is built up during warmup for diffusion steps smaller than
                `sld_warmup_steps`.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.

        Examples:

        ```py
        import torch
        from diffusers import StableDiffusionPipelineSafe
        from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

        pipeline = StableDiffusionPipelineSafe.from_pretrained(
            "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
        ).to("cuda")
        prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker"
        image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0]
        ```
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # SLD needs a third ("safety concept") prediction, which only exists alongside CFG,
        # so safety guidance is silently disabled when CFG is off or sld_guidance_scale <= 1.
        enable_safety_guidance = sld_guidance_scale > 1.0 and do_classifier_free_guidance
        if not enable_safety_guidance:
            warnings.warn("Safety checker disabled!")

        if ip_adapter_image is not None:
            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
            image_embeds, negative_image_embeds = self.encode_image(
                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
            )
            if do_classifier_free_guidance:
                # Match the prompt-embedding batch layout used below: (uncond, text[, safety]).
                if enable_safety_guidance:
                    image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds])
                else:
                    image_embeds = torch.cat([negative_image_embeds, image_embeds])

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Add image embeds for IP-Adapter
        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None

        # Running SLD momentum (Equations 7/8); lazily initialized on the first CFG step.
        safety_momentum = None

        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                # (3 copies with SLD: uncond, text, safety; otherwise the usual 2)
                latent_model_input = (
                    torch.cat([latents] * (3 if enable_safety_guidance else 2))
                    if do_classifier_free_guidance
                    else latents
                )
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    # chunk order follows the embedding concatenation: (uncond, text[, safety])
                    noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))
                    noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]

                    # default classifier free guidance
                    noise_guidance = noise_pred_text - noise_pred_uncond

                    # Perform SLD guidance
                    if enable_safety_guidance:
                        if safety_momentum is None:
                            safety_momentum = torch.zeros_like(noise_guidance)
                        noise_pred_safety_concept = noise_pred_out[2]

                        # Equation 6
                        scale = torch.clamp(
                            torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0
                        )

                        # Equation 6
                        safety_concept_scale = torch.where(
                            (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,
                            torch.zeros_like(scale),
                            scale,
                        )

                        # Equation 4
                        noise_guidance_safety = torch.mul(
                            (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale
                        )

                        # Equation 7
                        noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum

                        # Equation 8
                        safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety

                        if i >= sld_warmup_steps:  # Warmup
                            # Equation 3
                            noise_guidance = noise_guidance - noise_guidance_safety

                    noise_pred = noise_pred_uncond + guidance_scale * noise_guidance

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        # report the scheduler-step index, not the raw timestep index
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        # 8. Post-processing
        image = self.decode_latents(latents)

        # 9. Run safety checker
        image, has_nsfw_concept, flagged_images = self.run_safety_checker(
            image, device, prompt_embeds.dtype, enable_safety_guidance
        )

        # 10. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)
            if flagged_images is not None:
                flagged_images = self.numpy_to_pil(flagged_images)

        if not return_dict:
            return (
                image,
                has_nsfw_concept,
                self._safety_text_concept if enable_safety_guidance else None,
                flagged_images,
            )

        return StableDiffusionSafePipelineOutput(
            images=image,
            nsfw_content_detected=has_nsfw_concept,
            applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None,
            unsafe_images=flagged_images,
        )
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_safe/safety_checker.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
18
+
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between two batches of embeddings.

    Both inputs are L2-normalized along their last dimension, so the matrix
    product yields the cosine similarity of every image/text pair, with shape
    `(image_embeds.shape[0], text_embeds.shape[0])`.
    """
    image_norm = nn.functional.normalize(image_embeds)
    text_norm = nn.functional.normalize(text_embeds)
    return image_norm @ text_norm.t()
29
+
30
+
31
class SafeStableDiffusionSafetyChecker(PreTrainedModel):
    """CLIP-based NSFW classifier used by the safe Stable Diffusion pipeline.

    Projects images into CLIP embedding space and compares them against fixed
    "concept" (17 entries) and "special care" (3 entries) embeddings; an image
    is flagged when any concept similarity exceeds its per-concept threshold.
    """

    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Placeholder (all-ones) values only — the real concept embeddings and
        # per-concept thresholds come from the pretrained checkpoint. All frozen.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Score `images` (via their preprocessed CLIP `clip_input`) for NSFW content.

        Returns:
            Tuple of (`images`, unchanged) and a list of per-image booleans that is
            `True` when at least one concept score exceeds its threshold.
        """
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    # NOTE(review): this appends a *set* literal {idx, score}; a tuple was
                    # probably intended, but the value is never read downstream.
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # Once any "special care" concept fires, tighten all concept thresholds.
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """ONNX-exportable variant of `forward`: pure tensor ops, no Python loops.

        Behaves like `forward` except scores are not rounded to 3 decimals.
        """
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_sag/__init__.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Lazy-import shim for the Stable Diffusion SAG pipeline sub-package.
# Real imports only happen when a symbol is first accessed, unless type checking
# or DIFFUSERS_SLOW_IMPORT forces eager imports.
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


# Dummy placeholders (raising helpful errors) are exposed when torch/transformers
# are missing; otherwise the pipeline module is registered for lazy loading.
_dummy_objects = {}
_import_structure = {}


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    # Eager path: import the real pipeline (or its dummy stand-ins) immediately.
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()

    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline

else:
    # Lazy path: replace this module with a _LazyModule proxy that resolves
    # attributes on first access, then attach any dummy objects to it.
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)