ZTWHHH committed on
Commit
d5f8d81
·
verified ·
1 Parent(s): 222ac2b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_tf449/lib/python3.10/site-packages/diffusers/experimental/__init__.py +1 -0
  2. evalkit_tf449/lib/python3.10/site-packages/diffusers/experimental/rl/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_tf449/lib/python3.10/site-packages/diffusers/experimental/rl/value_guided_sampling.py +153 -0
  4. evalkit_tf449/lib/python3.10/site-packages/diffusers/loaders/__pycache__/textual_inversion.cpython-310.pyc +0 -0
  5. evalkit_tf449/lib/python3.10/site-packages/diffusers/loaders/__pycache__/unet.cpython-310.pyc +0 -0
  6. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/__init__.py +103 -0
  7. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/__pycache__/upsampling.cpython-310.pyc +0 -0
  8. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/activations.py +123 -0
  9. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/adapter.py +584 -0
  10. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/attention.py +665 -0
  11. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/attention_flax.py +494 -0
  12. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/attention_processor.py +0 -0
  13. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__init__.py +5 -0
  14. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/__init__.cpython-310.pyc +0 -0
  15. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_asym_kl.cpython-310.pyc +0 -0
  16. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_kl.cpython-310.pyc +0 -0
  17. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_kl_temporal_decoder.cpython-310.pyc +0 -0
  18. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_tiny.cpython-310.pyc +0 -0
  19. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/consistency_decoder_vae.cpython-310.pyc +0 -0
  20. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_asym_kl.py +186 -0
  21. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl.py +489 -0
  22. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +399 -0
  23. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_tiny.py +347 -0
  24. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/consistency_decoder_vae.py +435 -0
  25. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/vae.py +983 -0
  26. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/controlnet.py +868 -0
  27. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/controlnet_flax.py +395 -0
  28. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/downsampling.py +334 -0
  29. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/dual_transformer_2d.py +20 -0
  30. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/embeddings.py +914 -0
  31. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/embeddings_flax.py +97 -0
  32. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/lora.py +457 -0
  33. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_flax_pytorch_utils.py +134 -0
  34. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_flax_utils.py +566 -0
  35. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_outputs.py +17 -0
  36. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_pytorch_flax_utils.py +161 -0
  37. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_utils.py +1021 -0
  38. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/normalization.py +254 -0
  39. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/prior_transformer.py +12 -0
  40. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/resnet.py +802 -0
  41. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/resnet_flax.py +124 -0
  42. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/t5_film_transformer.py +70 -0
  43. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformer_2d.py +25 -0
  44. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformer_temporal.py +34 -0
  45. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformers/__init__.py +9 -0
  46. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformers/__pycache__/dual_transformer_2d.cpython-310.pyc +0 -0
  47. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformers/dual_transformer_2d.py +155 -0
  48. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/unet_1d.py +26 -0
  49. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/unet_1d_blocks.py +203 -0
  50. evalkit_tf449/lib/python3.10/site-packages/diffusers/models/unet_2d.py +27 -0
evalkit_tf449/lib/python3.10/site-packages/diffusers/experimental/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .rl import ValueGuidedRLPipeline
evalkit_tf449/lib/python3.10/site-packages/diffusers/experimental/rl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (251 Bytes). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/experimental/rl/value_guided_sampling.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import numpy as np
16
+ import torch
17
+ import tqdm
18
+
19
+ from ...models.unets.unet_1d import UNet1DModel
20
+ from ...pipelines import DiffusionPipeline
21
+ from ...utils.dummy_pt_objects import DDPMScheduler
22
+ from ...utils.torch_utils import randn_tensor
23
+
24
+
25
class ValueGuidedRLPipeline(DiffusionPipeline):
    r"""
    Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        value_function ([`UNet1DModel`]):
            A specialized UNet for fine-tuning trajectories base on reward.
        unet ([`UNet1DModel`]):
            UNet architecture to denoise the encoded trajectories.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
            application is [`DDPMScheduler`].
        env ():
            An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
    """

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()

        self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env)

        self.data = env.get_dataset()
        # Per-key dataset statistics used by normalize()/de_normalize(). Keys whose
        # values do not support .mean()/.std() (non-numeric entries) are skipped.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        """Standardize `x_in` with the dataset mean/std stored for `key`."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        """Invert `normalize` for `key`."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively move `x_in` (dict / tensor / array-like) to the unet's device as a tensor."""
        if isinstance(x_in, dict):
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        """Overwrite the state portion of the trajectory at each conditioned timestep.

        `cond` maps a timestep index to the state to pin; the first `act_dim`
        channels (the actions) are left untouched.
        """
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        """Denoise trajectories `x` while nudging them up the value function's gradient.

        Returns the denoised trajectories and the last value estimates `y`
        (`None` when `n_guide_steps` is 0, i.e. no value guidance was applied).
        """
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    # NOTE(review): relies on the scheduler's private `_get_variance`
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # do not guide the last (low-noise) steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        """Plan a batch of trajectories from observation `obs` and return the first action."""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # Fix: the original called `y.argsort` unconditionally, so the
        # `y is None` branch (n_guide_steps == 0 -> pick a random trajectory)
        # crashed before it could be reached. Sort only when value guiding ran.
        if y is not None:
            # sort output trajectories by value, best first
            sorted_idx = y.argsort(0, descending=True).squeeze()
            sorted_values = x[sorted_idx]
            # select the action with the highest value
            selected_index = 0
        else:
            sorted_values = x
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # first action of the selected trajectory
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
evalkit_tf449/lib/python3.10/site-packages/diffusers/loaders/__pycache__/textual_inversion.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/loaders/__pycache__/unet.cpython-310.pyc ADDED
Binary file (32.1 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/__init__.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import (
18
+ DIFFUSERS_SLOW_IMPORT,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
# Maps "submodule.path" -> list of public names it provides. `_LazyModule`
# consumes this so that importing `diffusers.models` does not eagerly import
# every (heavy) torch/flax submodule; the real import happens on first
# attribute access.
_import_structure = {}

if is_torch_available():
    _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"]
    _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"]
    _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"]
    _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"]
    _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"]
    _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
    _import_structure["controlnet"] = ["ControlNetModel"]
    _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
    _import_structure["embeddings"] = ["ImageProjection"]
    _import_structure["modeling_utils"] = ["ModelMixin"]
    _import_structure["transformers.prior_transformer"] = ["PriorTransformer"]
    _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"]
    _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"]
    _import_structure["transformers.transformer_temporal"] = ["TransformerTemporalModel"]
    _import_structure["unets.unet_1d"] = ["UNet1DModel"]
    _import_structure["unets.unet_2d"] = ["UNet2DModel"]
    _import_structure["unets.unet_2d_condition"] = ["UNet2DConditionModel"]
    _import_structure["unets.unet_3d_condition"] = ["UNet3DConditionModel"]
    _import_structure["unets.unet_i2vgen_xl"] = ["I2VGenXLUNet"]
    _import_structure["unets.unet_kandinsky3"] = ["Kandinsky3UNet"]
    _import_structure["unets.unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"]
    _import_structure["unets.unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"]
    _import_structure["unets.unet_stable_cascade"] = ["StableCascadeUNet"]
    _import_structure["unets.uvit_2d"] = ["UVit2DModel"]
    _import_structure["vq_model"] = ["VQModel"]

if is_flax_available():
    _import_structure["controlnet_flax"] = ["FlaxControlNetModel"]
    _import_structure["unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
    _import_structure["vae_flax"] = ["FlaxAutoencoderKL"]


# For static type checkers (and when DIFFUSERS_SLOW_IMPORT is set) perform
# the imports eagerly so the names resolve; otherwise install the lazy proxy.
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    if is_torch_available():
        from .adapter import MultiAdapter, T2IAdapter
        from .autoencoders import (
            AsymmetricAutoencoderKL,
            AutoencoderKL,
            AutoencoderKLTemporalDecoder,
            AutoencoderTiny,
            ConsistencyDecoderVAE,
        )
        from .controlnet import ControlNetModel
        from .embeddings import ImageProjection
        from .modeling_utils import ModelMixin
        from .transformers import (
            DualTransformer2DModel,
            PriorTransformer,
            T5FilmDecoder,
            Transformer2DModel,
            TransformerTemporalModel,
        )
        from .unets import (
            I2VGenXLUNet,
            Kandinsky3UNet,
            MotionAdapter,
            StableCascadeUNet,
            UNet1DModel,
            UNet2DConditionModel,
            UNet2DModel,
            UNet3DConditionModel,
            UNetMotionModel,
            UNetSpatioTemporalConditionModel,
            UVit2DModel,
        )
        from .vq_model import VQModel

    if is_flax_available():
        from .controlnet_flax import FlaxControlNetModel
        from .unets import FlaxUNet2DConditionModel
        from .vae_flax import FlaxAutoencoderKL

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on
    # first attribute access, using the registry built above.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/__pycache__/upsampling.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/activations.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from ..utils import deprecate
21
+
22
+
23
# Registry of supported activation modules, keyed by lowercase name.
# "swish" and "silu" are synonyms and share the same module type.
ACTIVATION_FUNCTIONS = {
    "swish": nn.SiLU(),
    "silu": nn.SiLU(),
    "mish": nn.Mish(),
    "gelu": nn.GELU(),
    "relu": nn.ReLU(),
}


def get_activation(act_fn: str) -> nn.Module:
    """Look up an activation module by (case-insensitive) name.

    Args:
        act_fn (str): Name of activation function.

    Returns:
        nn.Module: Activation function.

    Raises:
        ValueError: If `act_fn` is not one of the supported names.
    """
    key = act_fn.lower()
    if key not in ACTIVATION_FUNCTIONS:
        raise ValueError(f"Unsupported activation function: {key}")
    return ACTIVATION_FUNCTIONS[key]
47
+
48
+
49
class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support with `approximate="tanh"`.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out, bias=bias)
        self.approximate = approximate

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        # mps does not implement gelu for float16: compute in float32 there
        # and cast back to the input dtype.
        if gate.device.type == "mps":
            return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
        return F.gelu(gate, approximate=self.approximate)

    def forward(self, hidden_states):
        # Linear projection followed by (optionally tanh-approximated) GELU.
        return self.gelu(self.proj(hidden_states))
75
+
76
+
77
class GEGLU(nn.Module):
    r"""
    A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()
        # Projects to 2 * dim_out: one half is the value, the other the gate.
        self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias)

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        # mps does not implement gelu for float16: compute in float32 there
        # and cast back to the input dtype.
        if gate.device.type == "mps":
            return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
        return F.gelu(gate)

    def forward(self, hidden_states, *args, **kwargs):
        # `scale` used to be accepted here; warn callers that still pass it.
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)

        value, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return value * self.gelu(gate)
104
+
105
+
106
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
    [paper](https://arxiv.org/abs/1606.08415).

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out, bias=bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x * sigmoid(1.702 * x) is the sigmoid approximation of GELU from the paper.
        projected = self.proj(x)
        return projected * torch.sigmoid(1.702 * projected)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/adapter.py ADDED
@@ -0,0 +1,584 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Callable, List, Optional, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from ..configuration_utils import ConfigMixin, register_to_config
21
+ from ..utils import logging
22
+ from .modeling_utils import ModelMixin
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
class MultiAdapter(ModelMixin):
    r"""
    MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
    user-assigned weighting.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all the model (such as downloading or saving, etc.)

    Parameters:
        adapters (`List[T2IAdapter]`, *optional*, defaults to None):
            A list of `T2IAdapter` model instances.
    """

    def __init__(self, adapters: List["T2IAdapter"]):
        super(MultiAdapter, self).__init__()

        self.num_adapter = len(adapters)
        self.adapters = nn.ModuleList(adapters)

        if len(adapters) == 0:
            raise ValueError("Expecting at least one adapter")

        if len(adapters) == 1:
            raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`")

        # The outputs from each adapter are added together with a weight.
        # This means that the change in dimensions from downsampling must
        # be the same for all adapters. Inductively, it also means the
        # downscale_factor and total_downscale_factor must be the same for all
        # adapters.
        first_adapter_total_downscale_factor = adapters[0].total_downscale_factor
        first_adapter_downscale_factor = adapters[0].downscale_factor
        for idx in range(1, len(adapters)):
            if (
                adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor
                or adapters[idx].downscale_factor != first_adapter_downscale_factor
            ):
                raise ValueError(
                    f"Expecting all adapters to have the same downscaling behavior, but got:\n"
                    f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n"
                    f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n"
                    f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n"
                    f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}"
                )

        self.total_downscale_factor = first_adapter_total_downscale_factor
        self.downscale_factor = first_adapter_downscale_factor

    def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]:
        r"""
        Args:
            xs (`torch.Tensor`):
                (batch, channel, height, width) input images for multiple adapter models concated along dimension 1,
                `channel` should equal to `num_adapter` * "number of channel of image".
            adapter_weights (`List[float]`, *optional*, defaults to None):
                List of floats representing the weight which will be multiply to each adapter's output before adding
                them together.
        """
        # Default to a uniform weighting across adapters.
        if adapter_weights is None:
            adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
        else:
            adapter_weights = torch.tensor(adapter_weights)

        # Weighted sum of each adapter's feature-map list, accumulated in place.
        # NOTE(review): zipping `xs` directly iterates its first (batch) dimension
        # rather than chunking along the channel dimension described in the
        # docstring — confirm against callers.
        accume_state = None
        for x, w, adapter in zip(xs, adapter_weights, self.adapters):
            features = adapter(x)
            if accume_state is None:
                # First adapter: initialize the accumulator with its weighted features.
                accume_state = features
                for i in range(len(accume_state)):
                    accume_state[i] = w * accume_state[i]
            else:
                for i in range(len(features)):
                    accume_state[i] += w * features[i]
        return accume_state

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = True,
        variant: Optional[str] = None,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        `[`~models.adapter.MultiAdapter.from_pretrained`]` class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful when in distributed training like
                TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
                the main process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
                need to replace `torch.save` by another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
            variant (`str`, *optional*):
                If specified, weights are saved in the format pytorch_model.<variant>.bin.
        """
        # Adapters are saved to `save_directory`, `save_directory_1`,
        # `save_directory_2`, ... — the layout `from_pretrained` walks back.
        idx = 0
        model_path_to_save = save_directory
        for adapter in self.adapters:
            adapter.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you should first set it back in training mode with `model.train()`.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_path (`os.PathLike`):
                A path to a *directory* containing model weights saved using
                [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
                will be automatically derived from the model's weights.
            output_loading_info(`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device.

                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
                GPU and the available CPU RAM if unset.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
                setting this argument to `True` will raise an error.
            variant (`str`, *optional*):
                If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
                ignored when using `from_flax`.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
                `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
                `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
        """
        idx = 0
        adapters = []

        # load adapter and append to list until no adapter directory exists anymore
        # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs)
            adapters.append(adapter)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.")

        if len(adapters) == 0:
            raise ValueError(
                f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(adapters)
215
+
216
+
217
class T2IAdapter(ModelMixin, ConfigMixin):
    r"""
    A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
    generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
    architecture follows the original implementation of
    [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
    and
    [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all the model (such as downloading or saving, etc.)

    Parameters:
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels of Adapter's input (*control image*). Set this parameter to 1 if you're using gray
            scale image as *control image*.
        channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The number of channel of each downsample block's output hidden state. The `len(block_out_channels)` will
            also determine the number of downsample blocks in the Adapter.
        num_res_blocks (`int`, *optional*, defaults to 2):
            Number of ResNet blocks in each downsample block.
        downscale_factor (`int`, *optional*, defaults to 8):
            A factor that determines the total downscale factor of the Adapter.
        adapter_type (`str`, *optional*, defaults to `full_adapter`):
            The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 8,
        adapter_type: str = "full_adapter",
    ):
        super().__init__()

        # Dispatch on `adapter_type` to one of the three concrete implementations below;
        # this class is a thin config-registered wrapper around the chosen backbone.
        if adapter_type == "full_adapter":
            self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
        elif adapter_type == "full_adapter_xl":
            self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor)
        elif adapter_type == "light_adapter":
            self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
        else:
            raise ValueError(
                f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or "
                "'full_adapter_xl' or 'light_adapter'."
            )

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This function processes the input tensor `x` through the adapter model and returns a list of feature tensors,
        each representing information extracted at a different scale from the input. The length of the list is
        determined by the number of downsample blocks in the Adapter, as specified by the `channels` and
        `num_res_blocks` parameters during initialization.
        """
        return self.adapter(x)

    @property
    def total_downscale_factor(self):
        # Ratio between input spatial size and the smallest feature map produced.
        return self.adapter.total_downscale_factor

    @property
    def downscale_factor(self):
        """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are
        not evenly divisible by the downscale_factor then an exception will be raised.
        """
        return self.adapter.unshuffle.downscale_factor
286
+
287
+
288
+ # full adapter
289
+
290
+
291
class FullAdapter(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 8,
    ):
        super().__init__()

        # Pixel unshuffle folds each `downscale_factor` x `downscale_factor` patch into the
        # channel dimension, multiplying the channel count by `downscale_factor**2`.
        unshuffled_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)
        self.conv_in = nn.Conv2d(unshuffled_channels, channels[0], kernel_size=3, padding=1)

        # First block keeps the resolution; every following block halves it once (`down=True`).
        blocks = [AdapterBlock(channels[0], channels[0], num_res_blocks)]
        for idx in range(1, len(channels)):
            blocks.append(AdapterBlock(channels[idx - 1], channels[idx], num_res_blocks, down=True))
        self.body = nn.ModuleList(blocks)

        # Unshuffle factor times one halving per block after the first.
        self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        Unshuffle `x`, project it with `conv_in`, and run it through each `AdapterBlock` in turn,
        collecting the hidden state after every block. Returns one feature tensor per block.
        """
        hidden = self.conv_in(self.unshuffle(x))

        features = []
        for block in self.body:
            hidden = block(hidden)
            features.append(hidden)

        return features
339
+
340
+
341
class FullAdapterXL(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 16,
    ):
        super().__init__()

        # Pixel unshuffle folds each `downscale_factor` x `downscale_factor` patch into channels.
        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)
        self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)

        self.body = []
        # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32]
        for i in range(len(channels)):
            if i == 1:
                # Channel change (channels[0] -> channels[1]) without downsampling.
                self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks))
            elif i == 2:
                # The single downsampling block of the XL variant (also changes channels).
                self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True))
            else:
                # i == 0 and i == 3: keep both channel count and resolution unchanged.
                self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks))

        self.body = nn.ModuleList(self.body)
        # XL has only one downsampling AdapterBlock.
        self.total_downscale_factor = downscale_factor * 2

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method takes the tensor x as input and processes it through FullAdapterXL model. It consists of operations
        including unshuffling pixels, applying convolution layer and appending each block into list of feature tensors.
        """
        x = self.unshuffle(x)
        x = self.conv_in(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features
389
+
390
+
391
class AdapterBlock(nn.Module):
    r"""
    An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
    `FullAdapterXL` models. It chains an optional 2x average-pool downsample, an optional 1x1 channel projection,
    and a stack of `AdapterResnetBlock`s.

    Parameters:
        in_channels (`int`):
            Number of channels of AdapterBlock's input.
        out_channels (`int`):
            Number of channels of AdapterBlock's output.
        num_res_blocks (`int`):
            Number of ResNet blocks in the AdapterBlock.
        down (`bool`, *optional*, defaults to `False`):
            Whether to perform downsampling on AdapterBlock's input.
    """

    def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
        super().__init__()

        # Optional 2x spatial downsampling; ceil_mode keeps odd sizes from losing a row/column.
        self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) if down else None

        # 1x1 projection is only needed when the channel count changes.
        if in_channels == out_channels:
            self.in_conv = None
        else:
            self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

        self.resnets = nn.Sequential(*(AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        Apply the optional downsample and channel projection, then run the residual stack.
        """
        for stage in (self.downsample, self.in_conv):
            if stage is not None:
                x = stage(x)

        return self.resnets(x)
437
+
438
+
439
class AdapterResnetBlock(nn.Module):
    r"""
    An `AdapterResnetBlock` is a helper model that implements a ResNet-like block:
    conv3x3 -> ReLU -> conv1x1, added back onto the input via a skip connection.

    Parameters:
        channels (`int`):
            Number of channels of AdapterResnetBlock's input and output.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        Run conv3x3 -> ReLU -> conv1x1 over `x` and return the result plus the original input
        (residual connection). Spatial size and channel count are unchanged.
        """
        residual = x
        out = self.block1(x)
        out = self.act(out)
        out = self.block2(out)
        return out + residual
464
+
465
+
466
+ # light adapter
467
+
468
+
469
class LightAdapter(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280],
        num_res_blocks: int = 4,
        downscale_factor: int = 8,
    ):
        super().__init__()

        # Pixel unshuffle multiplies the channel count by `downscale_factor**2`.
        unshuffled_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)

        # First block projects the unshuffled input (no downsampling); the middle blocks each
        # downsample while widening channels; a final block downsamples once more at constant width.
        blocks = [LightAdapterBlock(unshuffled_channels, channels[0], num_res_blocks)]
        for idx in range(len(channels) - 1):
            blocks.append(LightAdapterBlock(channels[idx], channels[idx + 1], num_res_blocks, down=True))
        blocks.append(LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True))
        self.body = nn.ModuleList(blocks)

        # `len(channels)` blocks downsample, each halving the resolution.
        self.total_downscale_factor = downscale_factor * (2 ** len(channels))

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        Unshuffle `x` and run it through each `LightAdapterBlock`, collecting the hidden state
        after every block. Returns one feature tensor per block.
        """
        hidden = self.unshuffle(x)

        features = []
        for block in self.body:
            hidden = block(hidden)
            features.append(hidden)

        return features
514
+
515
+
516
class LightAdapterBlock(nn.Module):
    r"""
    A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
    `LightAdapter` model. It applies an optional 2x average-pool downsample, projects into a bottleneck width of
    `out_channels // 4`, runs the residual stack there, and projects back out.

    Parameters:
        in_channels (`int`):
            Number of channels of LightAdapterBlock's input.
        out_channels (`int`):
            Number of channels of LightAdapterBlock's output.
        num_res_blocks (`int`):
            Number of LightAdapterResnetBlocks in the LightAdapterBlock.
        down (`bool`, *optional*, defaults to `False`):
            Whether to perform downsampling on LightAdapterBlock's input.
    """

    def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
        super().__init__()
        # Bottleneck width used by the residual stack.
        mid_channels = out_channels // 4

        self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) if down else None

        self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
        self.resnets = nn.Sequential(*(LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)))
        self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        Optionally downsample `x`, then apply the in-projection, the residual stack, and the
        out-projection in sequence.
        """
        if self.downsample is not None:
            x = self.downsample(x)

        return self.out_conv(self.resnets(self.in_conv(x)))
557
+
558
+
559
class LightAdapterResnetBlock(nn.Module):
    """
    A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
    architecture than `AdapterResnetBlock`: both convolutions are 3x3 (the full variant uses a 1x1 second conv).

    Parameters:
        channels (`int`):
            Number of channels of LightAdapterResnetBlock's input and output.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        Run conv3x3 -> ReLU -> conv3x3 over `x` and add the result to the input (residual
        connection). Spatial size and channel count are unchanged.
        """
        residual = x
        out = self.block1(x)
        out = self.act(out)
        out = self.block2(out)
        return out + residual
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/attention.py ADDED
@@ -0,0 +1,665 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from ..utils import deprecate, logging
21
+ from ..utils.torch_utils import maybe_allow_in_graph
22
+ from .activations import GEGLU, GELU, ApproximateGELU
23
+ from .attention_processor import Attention
24
+ from .embeddings import SinusoidalPositionalEmbedding
25
+ from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int):
32
+ # "feed_forward_chunk_size" can be used to save memory
33
+ if hidden_states.shape[chunk_dim] % chunk_size != 0:
34
+ raise ValueError(
35
+ f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
36
+ )
37
+
38
+ num_chunks = hidden_states.shape[chunk_dim] // chunk_size
39
+ ff_output = torch.cat(
40
+ [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
41
+ dim=chunk_dim,
42
+ )
43
+ return ff_output
44
+
45
+
46
@maybe_allow_in_graph
class GatedSelfAttentionDense(nn.Module):
    r"""
    A gated self-attention dense layer that combines visual features and object features.

    Parameters:
        query_dim (`int`): The number of channels in the query.
        context_dim (`int`): The number of channels in the context.
        n_heads (`int`): The number of heads to use for attention.
        d_head (`int`): The number of channels in each head.
    """

    def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
        super().__init__()

        # we need a linear projection since we need cat visual feature and obj feature
        self.linear = nn.Linear(context_dim, query_dim)

        self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
        self.ff = FeedForward(query_dim, activation_fn="geglu")

        self.norm1 = nn.LayerNorm(query_dim)
        self.norm2 = nn.LayerNorm(query_dim)

        # Learnable scalar gates. Initialized to 0, so tanh(alpha) = 0 and the whole layer
        # starts out as an identity mapping; the gates open during training.
        self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
        self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))

        # When set to False externally, forward() becomes a no-op pass-through.
        self.enabled = True

    def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
        # Bypass the whole layer when disabled.
        if not self.enabled:
            return x

        # Number of visual tokens; object tokens are concatenated after them.
        n_visual = x.shape[1]
        # Project object features into the visual feature dimension so they can be concatenated.
        objs = self.linear(objs)

        # Self-attend over [visual; object] tokens, keep only the visual slice of the output,
        # and add it back through the tanh-gated residual.
        x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
        # Gated feed-forward residual over the visual tokens only.
        x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

        return x
86
+
87
+
88
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block: self-attention, optional cross-attention, and a feed-forward
    network, each preceded by a normalization layer whose flavor is selected by `norm_type`.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (:
            obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (:
            obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
        final_dropout (`bool` *optional*, defaults to False):
            Whether to apply a final dropout after the last feed-forward layer.
        attention_type (`str`, *optional*, defaults to `"default"`):
            The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
        positional_embeddings (`str`, *optional*, defaults to `None`):
            The type of positional embeddings to apply to.
        num_positional_embeddings (`int`, *optional*, defaults to `None`):
            The maximum number of positional embeddings to apply.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",  # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
        norm_eps: float = 1e-5,
        final_dropout: bool = False,
        attention_type: str = "default",
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
        ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
        ada_norm_bias: Optional[int] = None,
        ff_inner_dim: Optional[int] = None,
        ff_bias: bool = True,
        attention_out_bias: bool = True,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        # We keep these boolean flags for backward-compatibility.
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
        self.use_layer_norm = norm_type == "layer_norm"
        self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        self.norm_type = norm_type
        self.num_embeds_ada_norm = num_embeds_ada_norm

        if positional_embeddings and (num_positional_embeddings is None):
            raise ValueError(
                "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
            )

        if positional_embeddings == "sinusoidal":
            self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
        else:
            self.pos_embed = None

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if norm_type == "ada_norm":
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif norm_type == "ada_norm_zero":
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        elif norm_type == "ada_norm_continuous":
            self.norm1 = AdaLayerNormContinuous(
                dim,
                ada_norm_continous_conditioning_embedding_dim,
                norm_elementwise_affine,
                norm_eps,
                ada_norm_bias,
                "rms_norm",
            )
        else:
            # Plain LayerNorm covers "layer_norm", "ada_norm_single" and "layer_norm_i2vgen";
            # for "ada_norm_single" the scale/shift is applied manually in forward().
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
            out_bias=attention_out_bias,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            if norm_type == "ada_norm":
                self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
            elif norm_type == "ada_norm_continuous":
                self.norm2 = AdaLayerNormContinuous(
                    dim,
                    ada_norm_continous_conditioning_embedding_dim,
                    norm_elementwise_affine,
                    norm_eps,
                    ada_norm_bias,
                    "rms_norm",
                )
            else:
                self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)

            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
                out_bias=attention_out_bias,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        if norm_type == "ada_norm_continuous":
            self.norm3 = AdaLayerNormContinuous(
                dim,
                ada_norm_continous_conditioning_embedding_dim,
                norm_elementwise_affine,
                norm_eps,
                ada_norm_bias,
                "layer_norm",
            )

        elif norm_type in ["ada_norm_zero", "ada_norm", "layer_norm", "ada_norm_continuous"]:
            self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
        elif norm_type == "layer_norm_i2vgen":
            # i2vgen variant has no norm before the feed-forward.
            self.norm3 = None

        self.ff = FeedForward(
            dim,
            dropout=dropout,
            activation_fn=activation_fn,
            final_dropout=final_dropout,
            inner_dim=ff_inner_dim,
            bias=ff_bias,
        )

        # 4. Fuser
        if attention_type == "gated" or attention_type == "gated-text-image":
            # GLIGEN-style gated self-attention, invoked in forward() when `gligen` kwargs are passed.
            self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)

        # 5. Scale-shift for PixArt-Alpha.
        if norm_type == "ada_norm_single":
            # Table of 6 learned modulation vectors: shift/scale/gate for attention and for the MLP.
            self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        # Sets chunk feed-forward: when `chunk_size` is not None, forward() runs the FF in
        # chunks along `dim` via `_chunked_feed_forward` to lower peak memory.
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.FloatTensor:
        if cross_attention_kwargs is not None:
            if cross_attention_kwargs.get("scale", None) is not None:
                logger.warning("Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored.")

        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        batch_size = hidden_states.shape[0]

        if self.norm_type == "ada_norm":
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.norm_type == "ada_norm_zero":
            # AdaLayerNormZero also returns gate/shift/scale chunks consumed further below.
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]:
            norm_hidden_states = self.norm1(hidden_states)
        elif self.norm_type == "ada_norm_continuous":
            norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
        elif self.norm_type == "ada_norm_single":
            # PixArt-Alpha: combine the learned table with the timestep embedding, then apply
            # shift/scale manually around a plain LayerNorm.
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
            ).chunk(6, dim=1)
            norm_hidden_states = self.norm1(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
            norm_hidden_states = norm_hidden_states.squeeze(1)
        else:
            raise ValueError("Incorrect norm used")

        if self.pos_embed is not None:
            norm_hidden_states = self.pos_embed(norm_hidden_states)

        # 1. Prepare GLIGEN inputs
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.norm_type == "ada_norm_zero":
            attn_output = gate_msa.unsqueeze(1) * attn_output
        elif self.norm_type == "ada_norm_single":
            attn_output = gate_msa * attn_output

        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            # Drop a singleton dim possibly introduced by the modulation broadcasting above.
            hidden_states = hidden_states.squeeze(1)

        # 1.2 GLIGEN Control
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])

        # 3. Cross-Attention
        if self.attn2 is not None:
            if self.norm_type == "ada_norm":
                norm_hidden_states = self.norm2(hidden_states, timestep)
            elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]:
                norm_hidden_states = self.norm2(hidden_states)
            elif self.norm_type == "ada_norm_single":
                # For PixArt norm2 isn't applied here:
                # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
                norm_hidden_states = hidden_states
            elif self.norm_type == "ada_norm_continuous":
                norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
            else:
                raise ValueError("Incorrect norm")

            if self.pos_embed is not None and self.norm_type != "ada_norm_single":
                norm_hidden_states = self.pos_embed(norm_hidden_states)

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        # i2vgen doesn't have this norm 🤷‍♂️
        if self.norm_type == "ada_norm_continuous":
            norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
        elif not self.norm_type == "ada_norm_single":
            norm_hidden_states = self.norm3(hidden_states)

        if self.norm_type == "ada_norm_zero":
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self.norm_type == "ada_norm_single":
            # NOTE(review): PixArt deliberately reuses norm2 before the FF here (norm3 is a plain
            # LayerNorm built for the other branches) — matches the upstream PixArt implementation.
            norm_hidden_states = self.norm2(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.norm_type == "ada_norm_zero":
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        elif self.norm_type == "ada_norm_single":
            ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states
404
+
405
+
406
@maybe_allow_in_graph
class TemporalBasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block for video like data.

    The input is expected flattened as `(batch * frames, seq_len, channels)`; the block
    transposes it to `(batch * seq_len, frames, channels)` so attention mixes information
    across frames for each spatial position, then restores the original layout.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        time_mix_inner_dim (`int`): The number of channels for temporal attention.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
    """

    def __init__(
        self,
        dim: int,
        time_mix_inner_dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        cross_attention_dim: Optional[int] = None,
    ):
        super().__init__()
        # Residual connections around the in/out feed-forwards are only valid when the
        # temporal projection keeps the channel count unchanged.
        self.is_res = dim == time_mix_inner_dim

        self.norm_in = nn.LayerNorm(dim)

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        self.ff_in = FeedForward(
            dim,
            dim_out=time_mix_inner_dim,
            activation_fn="geglu",
        )

        self.norm1 = nn.LayerNorm(time_mix_inner_dim)
        self.attn1 = Attention(
            query_dim=time_mix_inner_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            cross_attention_dim=None,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = nn.LayerNorm(time_mix_inner_dim)
            self.attn2 = Attention(
                query_dim=time_mix_inner_dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(time_mix_inner_dim)
        self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = None

    def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
        """Enable chunked feed-forward to trade a little speed for lower peak memory."""
        self._chunk_size = chunk_size
        # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
        self._chunk_dim = 1

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        num_frames: int,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Reshape (batch * frames, seq, channels) -> (batch * seq, frames, channels)
        # so that attention runs along the temporal (frame) axis.
        # (A redundant `batch_size = hidden_states.shape[0]` assignment that was
        # immediately overwritten has been removed.)
        batch_frames, seq_length, channels = hidden_states.shape
        batch_size = batch_frames // num_frames

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)

        residual = hidden_states
        hidden_states = self.norm_in(hidden_states)

        if self._chunk_size is not None:
            hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
        else:
            hidden_states = self.ff_in(hidden_states)

        if self.is_res:
            hidden_states = hidden_states + residual

        # 1. Temporal self-attention
        norm_hidden_states = self.norm1(hidden_states)
        attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
        hidden_states = attn_output + hidden_states

        # 2. Cross-attention (only when the block was built with a cross_attention_dim)
        if self.attn2 is not None:
            norm_hidden_states = self.norm2(hidden_states)
            attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self._chunk_size is not None:
            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.is_res:
            hidden_states = ff_output + hidden_states
        else:
            hidden_states = ff_output

        # Restore the (batch * frames, seq, channels) layout expected by callers.
        hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)

        return hidden_states
534
+
535
+
536
class SkipFFTransformerBlock(nn.Module):
    """Transformer block with two attention layers and no feed-forward stage.

    Applies a self/cross attention pair over RMS-normalized hidden states. When the
    conditioning width differs from the block width, an optional linear `kv_mapper`
    (preceded by SiLU) projects `encoder_hidden_states` into `dim` channels first.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        kv_input_dim: int,
        kv_input_dim_proj_use_bias: bool,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        attention_out_bias: bool = True,
    ):
        super().__init__()

        # Only project the conditioning when the channel widths differ.
        if kv_input_dim != dim:
            self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
        else:
            self.kv_mapper = None

        self.norm1 = RMSNorm(dim, 1e-06)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim,
            out_bias=attention_out_bias,
        )

        self.norm2 = RMSNorm(dim, 1e-06)
        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            out_bias=attention_out_bias,
        )

    def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
        # Copy so that popping/mutation by attention processors cannot leak back
        # to the caller's dict.
        attn_kwargs = {} if cross_attention_kwargs is None else cross_attention_kwargs.copy()

        if self.kv_mapper is not None:
            encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))

        # Pre-norm residual attention, twice; both layers attend to the
        # (possibly projected) conditioning.
        hidden_states = hidden_states + self.attn1(
            self.norm1(hidden_states),
            encoder_hidden_states=encoder_hidden_states,
            **attn_kwargs,
        )

        hidden_states = hidden_states + self.attn2(
            self.norm2(hidden_states),
            encoder_hidden_states=encoder_hidden_states,
            **attn_kwargs,
        )

        return hidden_states
606
+
607
+
608
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
            One of `"gelu"`, `"gelu-approximate"`, `"geglu"`, `"geglu-approximate"`.
        final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
        inner_dim (`int`, *optional*): Hidden width; defaults to `int(dim * mult)`.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.

    Raises:
        ValueError: If `activation_fn` is not one of the supported names.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
        inner_dim=None,
        bias: bool = True,
    ):
        super().__init__()
        if inner_dim is None:
            inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        # Fix: the original used `if`/`if`/`elif`, so "gelu" relied on the following
        # chain not matching, and an unknown name fell through to an
        # UnboundLocalError on `act_fn`. Use a single chain and fail loudly.
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim, bias=bias)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim, bias=bias)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
        else:
            raise ValueError(f"Unsupported activation_fn: {activation_fn!r}")

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out, bias=bias))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        # `scale` used to be forwarded to LoRA-aware layers; it is deprecated here.
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/attention_flax.py ADDED
@@ -0,0 +1,494 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import math
17
+
18
+ import flax.linen as nn
19
+ import jax
20
+ import jax.numpy as jnp
21
+
22
+
23
def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096):
    """Attend one block of queries to the full key/value arrays.

    Keys and values are consumed in chunks of `key_chunk_size` and combined with a
    numerically stable streaming softmax (per-chunk max plus rescaled partial sums),
    so the full attention matrix is never materialized.
    """
    num_kv, num_heads, k_features = key.shape[-3:]
    v_features = value.shape[-1]
    key_chunk_size = min(key_chunk_size, num_kv)
    query = query / jnp.sqrt(k_features)

    # Checkpointed so the backward pass recomputes the chunk statistics instead of
    # keeping every chunk's attention weights alive.
    @functools.partial(jax.checkpoint, prevent_cse=False)
    def _chunk_stats(q, k, v):
        scores = jnp.einsum("...qhd,...khd->...qhk", q, k, precision=precision)

        # Stabilize the exponentials with the per-query max; gradients do not flow
        # through the subtracted max.
        score_max = jnp.max(scores, axis=-1, keepdims=True)
        score_max = jax.lax.stop_gradient(score_max)
        unnorm_weights = jnp.exp(scores - score_max)

        weighted_values = jnp.einsum("...vhf,...qhv->...qhf", v, unnorm_weights, precision=precision)
        score_max = jnp.einsum("...qhk->...qh", score_max)

        return (weighted_values, unnorm_weights.sum(axis=-1), score_max)

    def _scan_kv_chunk(start):
        # slice out one key chunk: [..., k, h, d]
        k_chunk = jax.lax.dynamic_slice(
            operand=key,
            start_indices=[0] * (key.ndim - 3) + [start, 0, 0],
            slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features],
        )

        # slice out the matching value chunk: [..., v, h, d]
        v_chunk = jax.lax.dynamic_slice(
            operand=value,
            start_indices=[0] * (value.ndim - 3) + [start, 0, 0],
            slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features],
        )

        return _chunk_stats(query, k_chunk, v_chunk)

    values, weights, maxima = jax.lax.map(f=_scan_kv_chunk, xs=jnp.arange(0, num_kv, key_chunk_size))

    # Re-normalize every chunk's partial sums under a single global max, then merge.
    global_max = jnp.max(maxima, axis=0, keepdims=True)
    scale = jnp.exp(maxima - global_max)

    values *= jnp.expand_dims(scale, axis=-1)
    weights *= scale

    return values.sum(axis=0) / jnp.expand_dims(weights, -1).sum(axis=0)


def jax_memory_efficient_attention(
    query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int = 1024, key_chunk_size: int = 4096
):
    r"""
    Flax Memory-efficient multi-head dot product attention. https://arxiv.org/abs/2112.05682v2
    https://github.com/AminRezaei0x443/memory-efficient-attention

    Args:
        query (`jnp.ndarray`): (batch..., query_length, head, query_key_depth_per_head)
        key (`jnp.ndarray`): (batch..., key_value_length, head, query_key_depth_per_head)
        value (`jnp.ndarray`): (batch..., key_value_length, head, value_depth_per_head)
        precision (`jax.lax.Precision`, *optional*, defaults to `jax.lax.Precision.HIGHEST`):
            numerical precision for computation
        query_chunk_size (`int`, *optional*, defaults to 1024):
            chunk size to divide query array value must divide query_length equally without remainder
        key_chunk_size (`int`, *optional*, defaults to 4096):
            chunk size to divide key and value array value must divide key_value_length equally without remainder

    Returns:
        (`jnp.ndarray`) with shape of (batch..., query_length, head, value_depth_per_head)
    """
    num_q, num_heads, q_features = query.shape[-3:]

    def _scan_query_chunk(offset, _):
        # slice out one query chunk: [..., q, h, d]
        q_chunk = jax.lax.dynamic_slice(
            operand=query,
            start_indices=([0] * (query.ndim - 3)) + [offset, 0, 0],
            slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features],
        )

        # The carry is just a running offset; only the per-chunk output matters.
        return (
            offset + query_chunk_size,
            _query_chunk_attention(
                query=q_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size
            ),
        )

    _, chunks = jax.lax.scan(
        f=_scan_query_chunk,
        init=0,
        xs=None,
        length=math.ceil(num_q / query_chunk_size),
    )

    # Stitch the query chunks back together along the query axis.
    return jnp.concatenate(chunks, axis=-3)
120
+
121
+
122
class FlaxAttention(nn.Module):
    r"""
    A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762

    Parameters:
        query_dim (:obj:`int`):
            Input hidden states dimension
        heads (:obj:`int`, *optional*, defaults to 8):
            Number of heads
        dim_head (:obj:`int`, *optional*, defaults to 64):
            Hidden states dimension inside each head
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
            enable memory efficient attention https://arxiv.org/abs/2112.05682
        split_head_dim (`bool`, *optional*, defaults to `False`):
            Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
            enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`

    """

    query_dim: int
    heads: int = 8
    dim_head: int = 64
    dropout: float = 0.0
    use_memory_efficient_attention: bool = False
    split_head_dim: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        inner_dim = self.dim_head * self.heads
        # 1/sqrt(d) scaling applied to raw attention scores before softmax.
        self.scale = self.dim_head**-0.5

        # Weights were exported with old names {to_q, to_k, to_v, to_out}
        self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
        self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
        self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")

        self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")
        self.dropout_layer = nn.Dropout(rate=self.dropout)

    def reshape_heads_to_batch_dim(self, tensor):
        # (batch, seq, heads*dim_head) -> (batch*heads, seq, dim_head):
        # folds the head axis into the batch axis for per-head matmuls.
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
        tensor = jnp.transpose(tensor, (0, 2, 1, 3))
        tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
        return tensor

    def reshape_batch_dim_to_heads(self, tensor):
        # Inverse of reshape_heads_to_batch_dim:
        # (batch*heads, seq, dim_head) -> (batch, seq, heads*dim_head).
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
        tensor = jnp.transpose(tensor, (0, 2, 1, 3))
        tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
        return tensor

    def __call__(self, hidden_states, context=None, deterministic=True):
        # Self-attention when no context is given; cross-attention otherwise.
        context = hidden_states if context is None else context

        query_proj = self.query(hidden_states)
        key_proj = self.key(context)
        value_proj = self.value(context)

        if self.split_head_dim:
            # Keep heads as a separate axis: (batch, seq, heads, dim_head).
            b = hidden_states.shape[0]
            query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head))
            key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head))
            value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head))
        else:
            # Fold heads into the batch axis: (batch*heads, seq, dim_head).
            query_states = self.reshape_heads_to_batch_dim(query_proj)
            key_states = self.reshape_heads_to_batch_dim(key_proj)
            value_states = self.reshape_heads_to_batch_dim(value_proj)

        if self.use_memory_efficient_attention:
            # jax_memory_efficient_attention expects (..., length, head, dim_head),
            # so move the sequence axis in front of the folded batch axis.
            query_states = query_states.transpose(1, 0, 2)
            key_states = key_states.transpose(1, 0, 2)
            value_states = value_states.transpose(1, 0, 2)

            # this if statement create a chunk size for each layer of the unet
            # the chunk size is equal to the query_length dimension of the deepest layer of the unet

            flatten_latent_dim = query_states.shape[-3]
            if flatten_latent_dim % 64 == 0:
                query_chunk_size = int(flatten_latent_dim / 64)
            elif flatten_latent_dim % 16 == 0:
                query_chunk_size = int(flatten_latent_dim / 16)
            elif flatten_latent_dim % 4 == 0:
                query_chunk_size = int(flatten_latent_dim / 4)
            else:
                query_chunk_size = int(flatten_latent_dim)

            hidden_states = jax_memory_efficient_attention(
                query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4
            )

            # Undo the axis swap back to (batch*heads, seq, dim_head).
            hidden_states = hidden_states.transpose(1, 0, 2)
        else:
            # compute attentions
            if self.split_head_dim:
                # NOTE: no explicit self.scale factor visible in this einsum path's
                # score computation line itself; scaling happens just below.
                attention_scores = jnp.einsum("b t n h, b f n h -> b n f t", key_states, query_states)
            else:
                attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)

            attention_scores = attention_scores * self.scale
            # For split_head_dim the key axis is last (-1); otherwise it is axis 2.
            attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2)

            # attend to values
            if self.split_head_dim:
                hidden_states = jnp.einsum("b n f t, b t n h -> b f n h", attention_probs, value_states)
                b = hidden_states.shape[0]
                hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * self.dim_head))
            else:
                hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
                hidden_states = self.reshape_batch_dim_to_heads(hidden_states)

        hidden_states = self.proj_attn(hidden_states)
        return self.dropout_layer(hidden_states, deterministic=deterministic)
242
+
243
+
244
class FlaxBasicTransformerBlock(nn.Module):
    r"""
    A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
    https://arxiv.org/abs/1706.03762


    Parameters:
        dim (:obj:`int`):
            Inner hidden states dimension
        n_heads (:obj:`int`):
            Number of heads
        d_head (:obj:`int`):
            Hidden states dimension inside each head
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        only_cross_attention (`bool`, defaults to `False`):
            Whether to only apply cross attention.
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
            enable memory efficient attention https://arxiv.org/abs/2112.05682
        split_head_dim (`bool`, *optional*, defaults to `False`):
            Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
            enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
    """

    dim: int
    n_heads: int
    d_head: int
    dropout: float = 0.0
    only_cross_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    use_memory_efficient_attention: bool = False
    split_head_dim: bool = False

    def setup(self):
        # attn1 is self attention unless only_cross_attention is set, in which
        # case both attention layers attend to the external context.
        self.attn1 = FlaxAttention(
            self.dim,
            self.n_heads,
            self.d_head,
            self.dropout,
            self.use_memory_efficient_attention,
            self.split_head_dim,
            dtype=self.dtype,
        )
        # attn2 is always cross attention.
        self.attn2 = FlaxAttention(
            self.dim,
            self.n_heads,
            self.d_head,
            self.dropout,
            self.use_memory_efficient_attention,
            self.split_head_dim,
            dtype=self.dtype,
        )
        self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype)
        self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
        self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
        self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
        self.dropout_layer = nn.Dropout(rate=self.dropout)

    def __call__(self, hidden_states, context, deterministic=True):
        # 1. Self attention (or a second cross attention when only_cross_attention),
        #    pre-norm with a residual connection.
        residual = hidden_states
        normed = self.norm1(hidden_states)
        if self.only_cross_attention:
            hidden_states = self.attn1(normed, context, deterministic=deterministic) + residual
        else:
            hidden_states = self.attn1(normed, deterministic=deterministic) + residual

        # 2. Cross attention against the context.
        residual = hidden_states
        hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic) + residual

        # 3. Gated feed-forward.
        residual = hidden_states
        hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic) + residual

        return self.dropout_layer(hidden_states, deterministic=deterministic)
326
+
327
+
328
class FlaxTransformer2DModel(nn.Module):
    r"""
    A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in:
    https://arxiv.org/pdf/1506.02025.pdf


    Parameters:
        in_channels (:obj:`int`):
            Input number of channels
        n_heads (:obj:`int`):
            Number of heads
        d_head (:obj:`int`):
            Hidden states dimension inside each head
        depth (:obj:`int`, *optional*, defaults to 1):
            Number of transformers block
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        use_linear_projection (`bool`, defaults to `False`): tbd
        only_cross_attention (`bool`, defaults to `False`): tbd
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
        use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
            enable memory efficient attention https://arxiv.org/abs/2112.05682
        split_head_dim (`bool`, *optional*, defaults to `False`):
            Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
            enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
    """

    in_channels: int
    n_heads: int
    d_head: int
    depth: int = 1
    dropout: float = 0.0
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    use_memory_efficient_attention: bool = False
    split_head_dim: bool = False

    def setup(self):
        self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5)

        inner_dim = self.n_heads * self.d_head

        # In/out projections share the same construction: a dense layer when
        # use_linear_projection, otherwise a 1x1 convolution. (Flax names the
        # submodules after the attribute they are bound to, so a helper is safe.)
        def _make_projection():
            if self.use_linear_projection:
                return nn.Dense(inner_dim, dtype=self.dtype)
            return nn.Conv(
                inner_dim,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

        self.proj_in = _make_projection()

        self.transformer_blocks = [
            FlaxBasicTransformerBlock(
                inner_dim,
                self.n_heads,
                self.d_head,
                dropout=self.dropout,
                only_cross_attention=self.only_cross_attention,
                dtype=self.dtype,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                split_head_dim=self.split_head_dim,
            )
            for _ in range(self.depth)
        ]

        self.proj_out = _make_projection()

        self.dropout_layer = nn.Dropout(rate=self.dropout)

    def __call__(self, hidden_states, context, deterministic=True):
        batch, height, width, channels = hidden_states.shape
        residual = hidden_states
        hidden_states = self.norm(hidden_states)

        if self.use_linear_projection:
            # flatten the spatial grid first, then project with the dense layer
            hidden_states = hidden_states.reshape(batch, height * width, channels)
            hidden_states = self.proj_in(hidden_states)
        else:
            # project with the 1x1 conv while still spatial, then flatten
            hidden_states = self.proj_in(hidden_states)
            hidden_states = hidden_states.reshape(batch, height * width, channels)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, context, deterministic=deterministic)

        if self.use_linear_projection:
            hidden_states = self.proj_out(hidden_states)
            hidden_states = hidden_states.reshape(batch, height, width, channels)
        else:
            hidden_states = hidden_states.reshape(batch, height, width, channels)
            hidden_states = self.proj_out(hidden_states)

        hidden_states = hidden_states + residual
        return self.dropout_layer(hidden_states, deterministic=deterministic)
432
+
433
+
434
class FlaxFeedForward(nn.Module):
    r"""
    Flax counterpart of PyTorch's [`FeedForward`]: two Linear layers separated by a
    non-linearity, with these simplifications:
    - The activation function is hardcoded to a gated linear unit
      (https://arxiv.org/abs/2002.05202).
    - `dim_out` is equal to `dim`.
    - The hidden width is hardcoded to `dim * 4` inside [`FlaxGEGLU`].

    Parameters:
        dim (:obj:`int`):
            Inner hidden states dimension
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    dim: int
    dropout: float = 0.0
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # The output projection is named `net_2` to match the index it has in the
        # PyTorch Sequential implementation (checkpoint-compatibility).
        self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype)
        self.net_2 = nn.Dense(self.dim, dtype=self.dtype)

    def __call__(self, hidden_states, deterministic=True):
        gated = self.net_0(hidden_states, deterministic=deterministic)
        return self.net_2(gated)
466
+
467
+
468
class FlaxGEGLU(nn.Module):
    r"""
    Flax implementation of a Linear layer followed by the gated-linear-unit variant
    from https://arxiv.org/abs/2002.05202: the projection doubles the hidden width,
    one half gates the other through GELU.

    Parameters:
        dim (:obj:`int`):
            Input hidden states dimension
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    dim: int
    dropout: float = 0.0
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        inner_dim = self.dim * 4
        # Single projection producing both the linear half and the gate half.
        self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype)
        self.dropout_layer = nn.Dropout(rate=self.dropout)

    def __call__(self, hidden_states, deterministic=True):
        projected = self.proj(hidden_states)
        linear_part, gate = jnp.split(projected, 2, axis=2)
        gated = linear_part * nn.gelu(gate)
        return self.dropout_layer(gated, deterministic=deterministic)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/attention_processor.py ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .autoencoder_asym_kl import AsymmetricAutoencoderKL
2
+ from .autoencoder_kl import AutoencoderKL
3
+ from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
4
+ from .autoencoder_tiny import AutoencoderTiny
5
+ from .consistency_decoder_vae import ConsistencyDecoderVAE
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (516 Bytes). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_asym_kl.cpython-310.pyc ADDED
Binary file (6.63 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_kl.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_kl_temporal_decoder.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/autoencoder_tiny.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/__pycache__/consistency_decoder_vae.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_asym_kl.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple, Union
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+ from ...configuration_utils import ConfigMixin, register_to_config
20
+ from ...utils.accelerate_utils import apply_forward_hook
21
+ from ..modeling_outputs import AutoencoderKLOutput
22
+ from ..modeling_utils import ModelMixin
23
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder, MaskConditionDecoder
24
+
25
+
26
class AsymmetricAutoencoderKL(ModelMixin, ConfigMixin):
    r"""
    Designing a Better Asymmetric VQGAN for StableDiffusion https://arxiv.org/abs/2306.04632 . A VAE model with KL loss
    for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        down_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of down block output channels.
        layers_per_down_block (`int`, *optional*, defaults to `1`):
            Number layers for down block.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        up_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of up block output channels.
        layers_per_up_block (`int`, *optional*, defaults to `1`):
            Number layers for up block.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        norm_num_groups (`int`, *optional*, defaults to `32`):
            Number of groups to use for the first normalization layer in ResNet blocks.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        down_block_out_channels: Tuple[int, ...] = (64,),
        layers_per_down_block: int = 1,
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        up_block_out_channels: Tuple[int, ...] = (64,),
        layers_per_up_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=down_block_out_channels,
            layers_per_block=layers_per_down_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder; unlike the symmetric VAE, the decoder is a
        # MaskConditionDecoder that can additionally be conditioned on the
        # original image and an inpainting mask (see `_decode`)
        self.decoder = MaskConditionDecoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=up_block_out_channels,
            layers_per_block=layers_per_up_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
        )

        # 1x1 convolutions mapping between the encoder/decoder feature space and
        # the latent parameterization (encoder output carries mean + logvar,
        # hence 2 * latent_channels on the quant side)
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        # Kept for API parity with AutoencoderKL; no slicing/tiling code paths
        # exist in this class, so these flags stay False.
        self.use_slicing = False
        self.use_tiling = False

        self.register_to_config(block_out_channels=up_block_out_channels)
        self.register_to_config(force_upcast=False)

    @apply_forward_hook
    def encode(
        self, x: torch.FloatTensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[torch.FloatTensor]]:
        """
        Encode a batch of images into a diagonal Gaussian posterior over latents.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return an [`AutoencoderKLOutput`] instead of a plain tuple.
        """
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(
        self,
        z: torch.FloatTensor,
        image: Optional[torch.FloatTensor] = None,
        mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
        # Internal decode: optionally conditions the MaskConditionDecoder on the
        # original image and the inpainting mask.
        z = self.post_quant_conv(z)
        dec = self.decoder(z, image, mask)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(
        self,
        z: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        image: Optional[torch.FloatTensor] = None,
        mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
        """
        Decode a batch of latents, optionally conditioned on the source image and an
        inpainting mask.

        Args:
            z (`torch.FloatTensor`): Input batch of latent vectors.
            generator (`torch.Generator`, *optional*):
                Accepted for API compatibility with other VAE `decode` signatures;
                not used by this implementation.
            image (`torch.FloatTensor`, *optional*): Original image the decoder is conditioned on.
            mask (`torch.FloatTensor`, *optional*): Inpainting mask the decoder is conditioned on.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`DecoderOutput`] instead of a plain tuple.
        """
        decoded = self._decode(z, image, mask).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def forward(
        self,
        sample: torch.FloatTensor,
        mask: Optional[torch.FloatTensor] = None,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            mask (`torch.FloatTensor`, *optional*, defaults to `None`): Optional inpainting mask.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        # Either draw a sample from the posterior (training-style) or take its
        # mode (deterministic reconstruction).
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        # The decoder is conditioned on the original input and the mask.
        dec = self.decode(z, sample, mask).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl.py ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, Optional, Tuple, Union
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+ from ...configuration_utils import ConfigMixin, register_to_config
20
+ from ...loaders import FromOriginalVAEMixin
21
+ from ...utils.accelerate_utils import apply_forward_hook
22
+ from ..attention_processor import (
23
+ ADDED_KV_ATTENTION_PROCESSORS,
24
+ CROSS_ATTENTION_PROCESSORS,
25
+ Attention,
26
+ AttentionProcessor,
27
+ AttnAddedKVProcessor,
28
+ AttnProcessor,
29
+ )
30
+ from ..modeling_outputs import AutoencoderKLOutput
31
+ from ..modeling_utils import ModelMixin
32
+ from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
33
+
34
+
35
class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        force_upcast (`bool`, *optional*, default to `True`):
            If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
            can be fine-tuned / trained to a lower range without loosing too much precision in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
        latents_mean: Optional[Tuple[float, ...]] = None,
        latents_std: Optional[Tuple[float, ...]] = None,
        force_upcast: bool = True,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        # 1x1 convolutions mapping between the encoder/decoder feature space and
        # the latent parameterization (encoder output carries mean + logvar,
        # hence 2 * latent_channels on the quant side)
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        # Latent-space tile size: each down block halves the resolution except
        # the last, hence the 2 ** (num_blocks - 1) factor.
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the encoder/decoder submodules only.
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        self.use_tiling = use_tiling

    def disable_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.enable_tiling(False)

    def enable_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True

    def disable_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    # dict keys are dotted module paths; each entry is consumed
                    # exactly once as the recursion reaches its module
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    @apply_forward_hook
    def encode(
        self, x: torch.FloatTensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            The latent representations of the encoded images. If `return_dict` is True, a
            [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        # Tiling kicks in only when the input exceeds the configured tile size.
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        # Slicing processes the batch one sample at a time to reduce peak memory.
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # Internal decode for a single (possibly sliced) latent batch; tiling is
        # dispatched here rather than in the public `decode`.
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(
        self, z: torch.FloatTensor, return_dict: bool = True, generator=None
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """
        Decode a batch of images.

        Args:
            z (`torch.FloatTensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
            generator (`torch.Generator`, *optional*):
                Accepted for API compatibility with other VAE `decode` signatures; not used here.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.

        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        """Linearly blend the bottom edge of tile `a` into the top edge of tile `b` (in place on `b`)."""
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        """Linearly blend the right edge of tile `a` into the left edge of tile `b` (in place on `b`)."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
                If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain
                `tuple` is returned.
        """
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles of size tile_sample_min_size
        # and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Decode a batch of images using a tiled decoder.

        Args:
            z (`torch.FloatTensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles of size tile_latent_min_size and decode
        # them separately. The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
            generator (`torch.Generator`, *optional*):
                Generator used when sampling from the posterior.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        # Either draw a sample from the posterior (training-style) or take its
        # mode (deterministic reconstruction).
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
        key, value) are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        # Keep the unfused processors so `unfuse_qkv_projections` can restore them.
        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, Optional, Tuple, Union
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+ from ...configuration_utils import ConfigMixin, register_to_config
20
+ from ...utils import is_torch_version
21
+ from ...utils.accelerate_utils import apply_forward_hook
22
+ from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor
23
+ from ..modeling_outputs import AutoencoderKLOutput
24
+ from ..modeling_utils import ModelMixin
25
+ from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder
26
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
27
+
28
+
29
+ class TemporalDecoder(nn.Module):
30
+ def __init__(
31
+ self,
32
+ in_channels: int = 4,
33
+ out_channels: int = 3,
34
+ block_out_channels: Tuple[int] = (128, 256, 512, 512),
35
+ layers_per_block: int = 2,
36
+ ):
37
+ super().__init__()
38
+ self.layers_per_block = layers_per_block
39
+
40
+ self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
41
+ self.mid_block = MidBlockTemporalDecoder(
42
+ num_layers=self.layers_per_block,
43
+ in_channels=block_out_channels[-1],
44
+ out_channels=block_out_channels[-1],
45
+ attention_head_dim=block_out_channels[-1],
46
+ )
47
+
48
+ # up
49
+ self.up_blocks = nn.ModuleList([])
50
+ reversed_block_out_channels = list(reversed(block_out_channels))
51
+ output_channel = reversed_block_out_channels[0]
52
+ for i in range(len(block_out_channels)):
53
+ prev_output_channel = output_channel
54
+ output_channel = reversed_block_out_channels[i]
55
+
56
+ is_final_block = i == len(block_out_channels) - 1
57
+ up_block = UpBlockTemporalDecoder(
58
+ num_layers=self.layers_per_block + 1,
59
+ in_channels=prev_output_channel,
60
+ out_channels=output_channel,
61
+ add_upsample=not is_final_block,
62
+ )
63
+ self.up_blocks.append(up_block)
64
+ prev_output_channel = output_channel
65
+
66
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6)
67
+
68
+ self.conv_act = nn.SiLU()
69
+ self.conv_out = torch.nn.Conv2d(
70
+ in_channels=block_out_channels[0],
71
+ out_channels=out_channels,
72
+ kernel_size=3,
73
+ padding=1,
74
+ )
75
+
76
+ conv_out_kernel_size = (3, 1, 1)
77
+ padding = [int(k // 2) for k in conv_out_kernel_size]
78
+ self.time_conv_out = torch.nn.Conv3d(
79
+ in_channels=out_channels,
80
+ out_channels=out_channels,
81
+ kernel_size=conv_out_kernel_size,
82
+ padding=padding,
83
+ )
84
+
85
+ self.gradient_checkpointing = False
86
+
87
+ def forward(
88
+ self,
89
+ sample: torch.FloatTensor,
90
+ image_only_indicator: torch.FloatTensor,
91
+ num_frames: int = 1,
92
+ ) -> torch.FloatTensor:
93
+ r"""The forward method of the `Decoder` class."""
94
+
95
+ sample = self.conv_in(sample)
96
+
97
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
98
+ if self.training and self.gradient_checkpointing:
99
+
100
+ def create_custom_forward(module):
101
+ def custom_forward(*inputs):
102
+ return module(*inputs)
103
+
104
+ return custom_forward
105
+
106
+ if is_torch_version(">=", "1.11.0"):
107
+ # middle
108
+ sample = torch.utils.checkpoint.checkpoint(
109
+ create_custom_forward(self.mid_block),
110
+ sample,
111
+ image_only_indicator,
112
+ use_reentrant=False,
113
+ )
114
+ sample = sample.to(upscale_dtype)
115
+
116
+ # up
117
+ for up_block in self.up_blocks:
118
+ sample = torch.utils.checkpoint.checkpoint(
119
+ create_custom_forward(up_block),
120
+ sample,
121
+ image_only_indicator,
122
+ use_reentrant=False,
123
+ )
124
+ else:
125
+ # middle
126
+ sample = torch.utils.checkpoint.checkpoint(
127
+ create_custom_forward(self.mid_block),
128
+ sample,
129
+ image_only_indicator,
130
+ )
131
+ sample = sample.to(upscale_dtype)
132
+
133
+ # up
134
+ for up_block in self.up_blocks:
135
+ sample = torch.utils.checkpoint.checkpoint(
136
+ create_custom_forward(up_block),
137
+ sample,
138
+ image_only_indicator,
139
+ )
140
+ else:
141
+ # middle
142
+ sample = self.mid_block(sample, image_only_indicator=image_only_indicator)
143
+ sample = sample.to(upscale_dtype)
144
+
145
+ # up
146
+ for up_block in self.up_blocks:
147
+ sample = up_block(sample, image_only_indicator=image_only_indicator)
148
+
149
+ # post-process
150
+ sample = self.conv_norm_out(sample)
151
+ sample = self.conv_act(sample)
152
+ sample = self.conv_out(sample)
153
+
154
+ batch_frames, channels, height, width = sample.shape
155
+ batch_size = batch_frames // num_frames
156
+ sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
157
+ sample = self.time_conv_out(sample)
158
+
159
+ sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width)
160
+
161
+ return sample
162
+
163
+
164
+ class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin):
165
+ r"""
166
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images.
167
+
168
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
169
+ for all models (such as downloading or saving).
170
+
171
+ Parameters:
172
+ in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
173
+ out_channels (int, *optional*, defaults to 3): Number of channels in the output.
174
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
175
+ Tuple of downsample block types.
176
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
177
+ Tuple of block output channels.
178
+ layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block.
179
+ latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
180
+ sample_size (`int`, *optional*, defaults to `32`): Sample input size.
181
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
182
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
183
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
184
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
185
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
186
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
187
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
188
+ force_upcast (`bool`, *optional*, default to `True`):
189
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
190
+ can be fine-tuned / trained to a lower range without loosing too much precision in which case
191
+ `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
192
+ """
193
+
194
+ _supports_gradient_checkpointing = True
195
+
196
+ @register_to_config
197
+ def __init__(
198
+ self,
199
+ in_channels: int = 3,
200
+ out_channels: int = 3,
201
+ down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
202
+ block_out_channels: Tuple[int] = (64,),
203
+ layers_per_block: int = 1,
204
+ latent_channels: int = 4,
205
+ sample_size: int = 32,
206
+ scaling_factor: float = 0.18215,
207
+ force_upcast: float = True,
208
+ ):
209
+ super().__init__()
210
+
211
+ # pass init params to Encoder
212
+ self.encoder = Encoder(
213
+ in_channels=in_channels,
214
+ out_channels=latent_channels,
215
+ down_block_types=down_block_types,
216
+ block_out_channels=block_out_channels,
217
+ layers_per_block=layers_per_block,
218
+ double_z=True,
219
+ )
220
+
221
+ # pass init params to Decoder
222
+ self.decoder = TemporalDecoder(
223
+ in_channels=latent_channels,
224
+ out_channels=out_channels,
225
+ block_out_channels=block_out_channels,
226
+ layers_per_block=layers_per_block,
227
+ )
228
+
229
+ self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
230
+
231
+ sample_size = (
232
+ self.config.sample_size[0]
233
+ if isinstance(self.config.sample_size, (list, tuple))
234
+ else self.config.sample_size
235
+ )
236
+ self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
237
+ self.tile_overlap_factor = 0.25
238
+
239
+ def _set_gradient_checkpointing(self, module, value=False):
240
+ if isinstance(module, (Encoder, TemporalDecoder)):
241
+ module.gradient_checkpointing = value
242
+
243
+ @property
244
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
245
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
246
+ r"""
247
+ Returns:
248
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
249
+ indexed by its weight name.
250
+ """
251
+ # set recursively
252
+ processors = {}
253
+
254
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
255
+ if hasattr(module, "get_processor"):
256
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
257
+
258
+ for sub_name, child in module.named_children():
259
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
260
+
261
+ return processors
262
+
263
+ for name, module in self.named_children():
264
+ fn_recursive_add_processors(name, module, processors)
265
+
266
+ return processors
267
+
268
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
269
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
270
+ r"""
271
+ Sets the attention processor to use to compute attention.
272
+
273
+ Parameters:
274
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
275
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
276
+ for **all** `Attention` layers.
277
+
278
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
279
+ processor. This is strongly recommended when setting trainable attention processors.
280
+
281
+ """
282
+ count = len(self.attn_processors.keys())
283
+
284
+ if isinstance(processor, dict) and len(processor) != count:
285
+ raise ValueError(
286
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
287
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
288
+ )
289
+
290
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
291
+ if hasattr(module, "set_processor"):
292
+ if not isinstance(processor, dict):
293
+ module.set_processor(processor)
294
+ else:
295
+ module.set_processor(processor.pop(f"{name}.processor"))
296
+
297
+ for sub_name, child in module.named_children():
298
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
299
+
300
+ for name, module in self.named_children():
301
+ fn_recursive_attn_processor(name, module, processor)
302
+
303
+ def set_default_attn_processor(self):
304
+ """
305
+ Disables custom attention processors and sets the default attention implementation.
306
+ """
307
+ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
308
+ processor = AttnProcessor()
309
+ else:
310
+ raise ValueError(
311
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
312
+ )
313
+
314
+ self.set_attn_processor(processor)
315
+
316
+ @apply_forward_hook
317
+ def encode(
318
+ self, x: torch.FloatTensor, return_dict: bool = True
319
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
320
+ """
321
+ Encode a batch of images into latents.
322
+
323
+ Args:
324
+ x (`torch.FloatTensor`): Input batch of images.
325
+ return_dict (`bool`, *optional*, defaults to `True`):
326
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
327
+
328
+ Returns:
329
+ The latent representations of the encoded images. If `return_dict` is True, a
330
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
331
+ """
332
+ h = self.encoder(x)
333
+ moments = self.quant_conv(h)
334
+ posterior = DiagonalGaussianDistribution(moments)
335
+
336
+ if not return_dict:
337
+ return (posterior,)
338
+
339
+ return AutoencoderKLOutput(latent_dist=posterior)
340
+
341
+ @apply_forward_hook
342
+ def decode(
343
+ self,
344
+ z: torch.FloatTensor,
345
+ num_frames: int,
346
+ return_dict: bool = True,
347
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
348
+ """
349
+ Decode a batch of images.
350
+
351
+ Args:
352
+ z (`torch.FloatTensor`): Input batch of latent vectors.
353
+ return_dict (`bool`, *optional*, defaults to `True`):
354
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
355
+
356
+ Returns:
357
+ [`~models.vae.DecoderOutput`] or `tuple`:
358
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
359
+ returned.
360
+
361
+ """
362
+ batch_size = z.shape[0] // num_frames
363
+ image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device)
364
+ decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator)
365
+
366
+ if not return_dict:
367
+ return (decoded,)
368
+
369
+ return DecoderOutput(sample=decoded)
370
+
371
+ def forward(
372
+ self,
373
+ sample: torch.FloatTensor,
374
+ sample_posterior: bool = False,
375
+ return_dict: bool = True,
376
+ generator: Optional[torch.Generator] = None,
377
+ num_frames: int = 1,
378
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
379
+ r"""
380
+ Args:
381
+ sample (`torch.FloatTensor`): Input sample.
382
+ sample_posterior (`bool`, *optional*, defaults to `False`):
383
+ Whether to sample from the posterior.
384
+ return_dict (`bool`, *optional*, defaults to `True`):
385
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
386
+ """
387
+ x = sample
388
+ posterior = self.encode(x).latent_dist
389
+ if sample_posterior:
390
+ z = posterior.sample(generator=generator)
391
+ else:
392
+ z = posterior.mode()
393
+
394
+ dec = self.decode(z, num_frames=num_frames).sample
395
+
396
+ if not return_dict:
397
+ return (dec,)
398
+
399
+ return DecoderOutput(sample=dec)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_tiny.py ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from dataclasses import dataclass
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...utils import BaseOutput
23
+ from ...utils.accelerate_utils import apply_forward_hook
24
+ from ..modeling_utils import ModelMixin
25
+ from .vae import DecoderOutput, DecoderTiny, EncoderTiny
26
+
27
+
28
+ @dataclass
29
+ class AutoencoderTinyOutput(BaseOutput):
30
+ """
31
+ Output of AutoencoderTiny encoding method.
32
+
33
+ Args:
34
+ latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
35
+
36
+ """
37
+
38
+ latents: torch.Tensor
39
+
40
+
41
+ class AutoencoderTiny(ModelMixin, ConfigMixin):
42
+ r"""
43
+ A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.
44
+
45
+ [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.
46
+
47
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
48
+ all models (such as downloading or saving).
49
+
50
+ Parameters:
51
+ in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
52
+ out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
53
+ encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
54
+ Tuple of integers representing the number of output channels for each encoder block. The length of the
55
+ tuple should be equal to the number of encoder blocks.
56
+ decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
57
+ Tuple of integers representing the number of output channels for each decoder block. The length of the
58
+ tuple should be equal to the number of decoder blocks.
59
+ act_fn (`str`, *optional*, defaults to `"relu"`):
60
+ Activation function to be used throughout the model.
61
+ latent_channels (`int`, *optional*, defaults to 4):
62
+ Number of channels in the latent representation. The latent space acts as a compressed representation of
63
+ the input image.
64
+ upsampling_scaling_factor (`int`, *optional*, defaults to 2):
65
+ Scaling factor for upsampling in the decoder. It determines the size of the output image during the
66
+ upsampling process.
67
+ num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
68
+ Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
69
+ length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
70
+ number of encoder blocks.
71
+ num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
72
+ Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
73
+ length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
74
+ number of decoder blocks.
75
+ latent_magnitude (`float`, *optional*, defaults to 3.0):
76
+ Magnitude of the latent representation. This parameter scales the latent representation values to control
77
+ the extent of information preservation.
78
+ latent_shift (float, *optional*, defaults to 0.5):
79
+ Shift applied to the latent representation. This parameter controls the center of the latent space.
80
+ scaling_factor (`float`, *optional*, defaults to 1.0):
81
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
82
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
83
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
84
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
85
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
86
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder,
87
+ however, no such scaling factor was used, hence the value of 1.0 as the default.
88
+ force_upcast (`bool`, *optional*, default to `False`):
89
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
90
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case
91
+ `force_upcast` can be set to `False` (see this fp16-friendly
92
+ [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
93
+ """
94
+
95
+ _supports_gradient_checkpointing = True
96
+
97
+ @register_to_config
98
+ def __init__(
99
+ self,
100
+ in_channels: int = 3,
101
+ out_channels: int = 3,
102
+ encoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
103
+ decoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
104
+ act_fn: str = "relu",
105
+ latent_channels: int = 4,
106
+ upsampling_scaling_factor: int = 2,
107
+ num_encoder_blocks: Tuple[int, ...] = (1, 3, 3, 3),
108
+ num_decoder_blocks: Tuple[int, ...] = (3, 3, 3, 1),
109
+ latent_magnitude: int = 3,
110
+ latent_shift: float = 0.5,
111
+ force_upcast: bool = False,
112
+ scaling_factor: float = 1.0,
113
+ ):
114
+ super().__init__()
115
+
116
+ if len(encoder_block_out_channels) != len(num_encoder_blocks):
117
+ raise ValueError("`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.")
118
+ if len(decoder_block_out_channels) != len(num_decoder_blocks):
119
+ raise ValueError("`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.")
120
+
121
+ self.encoder = EncoderTiny(
122
+ in_channels=in_channels,
123
+ out_channels=latent_channels,
124
+ num_blocks=num_encoder_blocks,
125
+ block_out_channels=encoder_block_out_channels,
126
+ act_fn=act_fn,
127
+ )
128
+
129
+ self.decoder = DecoderTiny(
130
+ in_channels=latent_channels,
131
+ out_channels=out_channels,
132
+ num_blocks=num_decoder_blocks,
133
+ block_out_channels=decoder_block_out_channels,
134
+ upsampling_scaling_factor=upsampling_scaling_factor,
135
+ act_fn=act_fn,
136
+ )
137
+
138
+ self.latent_magnitude = latent_magnitude
139
+ self.latent_shift = latent_shift
140
+ self.scaling_factor = scaling_factor
141
+
142
+ self.use_slicing = False
143
+ self.use_tiling = False
144
+
145
+ # only relevant if vae tiling is enabled
146
+ self.spatial_scale_factor = 2**out_channels
147
+ self.tile_overlap_factor = 0.125
148
+ self.tile_sample_min_size = 512
149
+ self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor
150
+
151
+ self.register_to_config(block_out_channels=decoder_block_out_channels)
152
+ self.register_to_config(force_upcast=False)
153
+
154
+ def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
155
+ if isinstance(module, (EncoderTiny, DecoderTiny)):
156
+ module.gradient_checkpointing = value
157
+
158
+ def scale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor:
159
+ """raw latents -> [0, 1]"""
160
+ return x.div(2 * self.latent_magnitude).add(self.latent_shift).clamp(0, 1)
161
+
162
+ def unscale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor:
163
+ """[0, 1] -> raw latents"""
164
+ return x.sub(self.latent_shift).mul(2 * self.latent_magnitude)
165
+
166
+ def enable_slicing(self) -> None:
167
+ r"""
168
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
169
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
170
+ """
171
+ self.use_slicing = True
172
+
173
+ def disable_slicing(self) -> None:
174
+ r"""
175
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
176
+ decoding in one step.
177
+ """
178
+ self.use_slicing = False
179
+
180
+ def enable_tiling(self, use_tiling: bool = True) -> None:
181
+ r"""
182
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
183
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
184
+ processing larger images.
185
+ """
186
+ self.use_tiling = use_tiling
187
+
188
+ def disable_tiling(self) -> None:
189
+ r"""
190
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
191
+ decoding in one step.
192
+ """
193
+ self.enable_tiling(False)
194
+
195
+ def _tiled_encode(self, x: torch.FloatTensor) -> torch.FloatTensor:
196
+ r"""Encode a batch of images using a tiled encoder.
197
+
198
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
199
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
200
+ tiles overlap and are blended together to form a smooth output.
201
+
202
+ Args:
203
+ x (`torch.FloatTensor`): Input batch of images.
204
+
205
+ Returns:
206
+ `torch.FloatTensor`: Encoded batch of images.
207
+ """
208
+ # scale of encoder output relative to input
209
+ sf = self.spatial_scale_factor
210
+ tile_size = self.tile_sample_min_size
211
+
212
+ # number of pixels to blend and to traverse between tile
213
+ blend_size = int(tile_size * self.tile_overlap_factor)
214
+ traverse_size = tile_size - blend_size
215
+
216
+ # tiles index (up/left)
217
+ ti = range(0, x.shape[-2], traverse_size)
218
+ tj = range(0, x.shape[-1], traverse_size)
219
+
220
+ # mask for blending
221
+ blend_masks = torch.stack(
222
+ torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
223
+ )
224
+ blend_masks = blend_masks.clamp(0, 1).to(x.device)
225
+
226
+ # output array
227
+ out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device)
228
+ for i in ti:
229
+ for j in tj:
230
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
231
+ # tile result
232
+ tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
233
+ tile = self.encoder(tile_in)
234
+ h, w = tile.shape[-2], tile.shape[-1]
235
+ # blend tile result into output
236
+ blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
237
+ blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
238
+ blend_mask = blend_mask_i * blend_mask_j
239
+ tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
240
+ tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
241
+ return out
242
+
243
+ def _tiled_decode(self, x: torch.FloatTensor) -> torch.FloatTensor:
244
+ r"""Encode a batch of images using a tiled encoder.
245
+
246
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
247
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
248
+ tiles overlap and are blended together to form a smooth output.
249
+
250
+ Args:
251
+ x (`torch.FloatTensor`): Input batch of images.
252
+
253
+ Returns:
254
+ `torch.FloatTensor`: Encoded batch of images.
255
+ """
256
+ # scale of decoder output relative to input
257
+ sf = self.spatial_scale_factor
258
+ tile_size = self.tile_latent_min_size
259
+
260
+ # number of pixels to blend and to traverse between tiles
261
+ blend_size = int(tile_size * self.tile_overlap_factor)
262
+ traverse_size = tile_size - blend_size
263
+
264
+ # tiles index (up/left)
265
+ ti = range(0, x.shape[-2], traverse_size)
266
+ tj = range(0, x.shape[-1], traverse_size)
267
+
268
+ # mask for blending
269
+ blend_masks = torch.stack(
270
+ torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
271
+ )
272
+ blend_masks = blend_masks.clamp(0, 1).to(x.device)
273
+
274
+ # output array
275
+ out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device)
276
+ for i in ti:
277
+ for j in tj:
278
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
279
+ # tile result
280
+ tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
281
+ tile = self.decoder(tile_in)
282
+ h, w = tile.shape[-2], tile.shape[-1]
283
+ # blend tile result into output
284
+ blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
285
+ blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
286
+ blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
287
+ tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
288
+ return out
289
+
290
+ @apply_forward_hook
291
+ def encode(
292
+ self, x: torch.FloatTensor, return_dict: bool = True
293
+ ) -> Union[AutoencoderTinyOutput, Tuple[torch.FloatTensor]]:
294
+ if self.use_slicing and x.shape[0] > 1:
295
+ output = [
296
+ self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.split(1)
297
+ ]
298
+ output = torch.cat(output)
299
+ else:
300
+ output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
301
+
302
+ if not return_dict:
303
+ return (output,)
304
+
305
+ return AutoencoderTinyOutput(latents=output)
306
+
307
+ @apply_forward_hook
308
+ def decode(
309
+ self, x: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True
310
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
311
+ if self.use_slicing and x.shape[0] > 1:
312
+ output = [self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x) for x_slice in x.split(1)]
313
+ output = torch.cat(output)
314
+ else:
315
+ output = self._tiled_decode(x) if self.use_tiling else self.decoder(x)
316
+
317
+ if not return_dict:
318
+ return (output,)
319
+
320
+ return DecoderOutput(sample=output)
321
+
322
+ def forward(
323
+ self,
324
+ sample: torch.FloatTensor,
325
+ return_dict: bool = True,
326
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
327
+ r"""
328
+ Args:
329
+ sample (`torch.FloatTensor`): Input sample.
330
+ return_dict (`bool`, *optional*, defaults to `True`):
331
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
332
+ """
333
+ enc = self.encode(sample).latents
334
+
335
+ # scale latents to be in [0, 1], then quantize latents to a byte tensor,
336
+ # as if we were storing the latents in an RGBA uint8 image.
337
+ scaled_enc = self.scale_latents(enc).mul_(255).round_().byte()
338
+
339
+ # unquantize latents back into [0, 1], then unscale latents back to their original range,
340
+ # as if we were loading the latents from an RGBA uint8 image.
341
+ unscaled_enc = self.unscale_latents(scaled_enc / 255.0)
342
+
343
+ dec = self.decode(unscaled_enc)
344
+
345
+ if not return_dict:
346
+ return (dec,)
347
+ return DecoderOutput(sample=dec)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/consistency_decoder_vae.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Dict, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn.functional as F
19
+ from torch import nn
20
+
21
+ from ...configuration_utils import ConfigMixin, register_to_config
22
+ from ...schedulers import ConsistencyDecoderScheduler
23
+ from ...utils import BaseOutput
24
+ from ...utils.accelerate_utils import apply_forward_hook
25
+ from ...utils.torch_utils import randn_tensor
26
+ from ..attention_processor import (
27
+ ADDED_KV_ATTENTION_PROCESSORS,
28
+ CROSS_ATTENTION_PROCESSORS,
29
+ AttentionProcessor,
30
+ AttnAddedKVProcessor,
31
+ AttnProcessor,
32
+ )
33
+ from ..modeling_utils import ModelMixin
34
+ from ..unets.unet_2d import UNet2DModel
35
+ from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
36
+
37
+
38
@dataclass
class ConsistencyDecoderVAEOutput(BaseOutput):
    """
    Output of encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
            `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
    """

    # Posterior over the latents; draw a latent with `latent_dist.sample()` or take `latent_dist.mode()`.
    latent_dist: "DiagonalGaussianDistribution"
50
+
51
+
52
+ class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
53
+ r"""
54
+ The consistency decoder used with DALL-E 3.
55
+
56
+ Examples:
57
+ ```py
58
+ >>> import torch
59
+ >>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE
60
+
61
+ >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
62
+ >>> pipe = StableDiffusionPipeline.from_pretrained(
63
+ ... "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
64
+ ... ).to("cuda")
65
+
66
+ >>> pipe("horse", generator=torch.manual_seed(0)).images
67
+ ```
68
+ """
69
+
70
    @register_to_config
    def __init__(
        self,
        scaling_factor: float = 0.18215,
        latent_channels: int = 4,
        encoder_act_fn: str = "silu",
        encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
        encoder_double_z: bool = True,
        encoder_down_block_types: Tuple[str, ...] = (
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
        ),
        encoder_in_channels: int = 3,
        encoder_layers_per_block: int = 2,
        encoder_norm_num_groups: int = 32,
        encoder_out_channels: int = 4,
        decoder_add_attention: bool = False,
        decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024),
        decoder_down_block_types: Tuple[str, ...] = (
            "ResnetDownsampleBlock2D",
            "ResnetDownsampleBlock2D",
            "ResnetDownsampleBlock2D",
            "ResnetDownsampleBlock2D",
        ),
        decoder_downsample_padding: int = 1,
        decoder_in_channels: int = 7,
        decoder_layers_per_block: int = 3,
        decoder_norm_eps: float = 1e-05,
        decoder_norm_num_groups: int = 32,
        decoder_num_train_timesteps: int = 1024,
        decoder_out_channels: int = 6,
        decoder_resnet_time_scale_shift: str = "scale_shift",
        decoder_time_embedding_type: str = "learned",
        decoder_up_block_types: Tuple[str, ...] = (
            "ResnetUpsampleBlock2D",
            "ResnetUpsampleBlock2D",
            "ResnetUpsampleBlock2D",
            "ResnetUpsampleBlock2D",
        ),
    ):
        """
        Build the VAE: a standard KL-style `Encoder` (`encoder_*` kwargs) paired with a
        consistency-model `UNet2DModel` decoder (`decoder_*` kwargs) and its scheduler.
        All arguments are persisted via `@register_to_config`.
        """
        super().__init__()
        self.encoder = Encoder(
            act_fn=encoder_act_fn,
            block_out_channels=encoder_block_out_channels,
            double_z=encoder_double_z,
            down_block_types=encoder_down_block_types,
            in_channels=encoder_in_channels,
            layers_per_block=encoder_layers_per_block,
            norm_num_groups=encoder_norm_num_groups,
            out_channels=encoder_out_channels,
        )

        self.decoder_unet = UNet2DModel(
            add_attention=decoder_add_attention,
            block_out_channels=decoder_block_out_channels,
            down_block_types=decoder_down_block_types,
            downsample_padding=decoder_downsample_padding,
            in_channels=decoder_in_channels,
            layers_per_block=decoder_layers_per_block,
            norm_eps=decoder_norm_eps,
            norm_num_groups=decoder_norm_num_groups,
            num_train_timesteps=decoder_num_train_timesteps,
            out_channels=decoder_out_channels,
            resnet_time_scale_shift=decoder_resnet_time_scale_shift,
            time_embedding_type=decoder_time_embedding_type,
            up_block_types=decoder_up_block_types,
        )
        self.decoder_scheduler = ConsistencyDecoderScheduler()
        # Exposed under the generic names that downstream pipelines expect from a VAE config.
        self.register_to_config(block_out_channels=encoder_block_out_channels)
        self.register_to_config(force_upcast=False)
        # Per-channel latent statistics used by `decode` to normalize latents;
        # non-persistent so they are not written to checkpoints.
        self.register_buffer(
            "means",
            torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None],
            persistent=False,
        )
        self.register_buffer(
            "stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)

        # Memory-saving modes, toggled via enable_slicing/enable_tiling.
        self.use_slicing = False
        self.use_tiling = False
155
+
156
    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling
    def enable_tiling(self, use_tiling: bool = True):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.

        Args:
            use_tiling (`bool`, *optional*, defaults to `True`): Pass `False` to disable tiling instead.
        """
        self.use_tiling = use_tiling
164
+
165
    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling
    def disable_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        # Delegates so the toggle logic lives in one place.
        self.enable_tiling(False)
172
+
173
    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing
    def enable_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True
180
+
181
    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing
    def disable_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False
188
+
189
    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            # Any submodule exposing `get_processor` is treated as an attention layer.
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
213
+
214
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        Raises:
            ValueError: If a dict is passed whose size does not match the number of attention layers.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    # Single processor instance: shared by every attention layer.
                    module.set_processor(processor)
                else:
                    # Dict form: pop the entry matching this layer's qualified name.
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
248
+
249
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.

        Raises:
            ValueError: If the model mixes added-KV and cross-attention processor families.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)
264
+
265
    @apply_forward_hook
    def encode(
        self, x: torch.FloatTensor, return_dict: bool = True
    ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a plain
                tuple.

        Returns:
            The latent representations of the encoded images. If `return_dict` is True, a
            [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple`
            is returned.
        """
        # Large inputs fall back to tiled encoding when tiling is enabled.
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            # Encode one sample at a time to reduce peak memory.
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        # quant_conv maps encoder features to (mean, logvar) moments.
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return ConsistencyDecoderVAEOutput(latent_dist=posterior)
299
+
300
    @apply_forward_hook
    def decode(
        self,
        z: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
        num_inference_steps: int = 2,
    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
        """
        Decode latents `z` into images by running the consistency-model UNet for `num_inference_steps`
        scheduler steps.

        Args:
            z (`torch.FloatTensor`): Batch of latents to decode.
            generator (`torch.Generator`, *optional*): RNG for the initial noise and the scheduler steps.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
            num_inference_steps (`int`, *optional*, defaults to 2):
                Number of consistency-decoder denoising steps.
        """
        # Undo the SD latent scaling, then normalize with the channel statistics registered in __init__.
        z = (z * self.config.scaling_factor - self.means) / self.stds

        # Upsample latents to pixel resolution: one factor of 2 per encoder downsample.
        scale_factor = 2 ** (len(self.config.block_out_channels) - 1)
        z = F.interpolate(z, mode="nearest", scale_factor=scale_factor)

        batch_size, _, height, width = z.shape

        self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device)

        # Start from pure noise in image space, scaled to the scheduler's initial sigma.
        x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor(
            (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device
        )

        for t in self.decoder_scheduler.timesteps:
            # Condition the UNet on the noisy image concatenated with the upsampled latents.
            model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1)
            # Only the first 3 output channels are the predicted image; the rest are discarded.
            model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :]
            prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample
            x_t = prev_sample

        x_0 = x_t

        if not return_dict:
            return (x_0,)

        return DecoderOutput(sample=x_0)
333
+
334
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v
335
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
336
+ blend_extent = min(a.shape[2], b.shape[2], blend_extent)
337
+ for y in range(blend_extent):
338
+ b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
339
+ return b
340
+
341
+ # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h
342
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
343
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
344
+ for x in range(blend_extent):
345
+ b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
346
+ return b
347
+
348
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> ConsistencyDecoderVAEOutput:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a
                plain tuple.

        Returns:
            [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
                If return_dict is True, a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned,
                otherwise a plain `tuple` is returned.
        """
        # NOTE(review): `tile_sample_min_size`, `tile_latent_min_size` and `tile_overlap_factor` are not
        # assigned in this class's `__init__` — confirm they are provided elsewhere before enabling tiling.
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                # Crop away the blended overlap region before stitching.
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return ConsistencyDecoderVAEOutput(latent_dist=posterior)
402
+
403
+ def forward(
404
+ self,
405
+ sample: torch.FloatTensor,
406
+ sample_posterior: bool = False,
407
+ return_dict: bool = True,
408
+ generator: Optional[torch.Generator] = None,
409
+ ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
410
+ r"""
411
+ Args:
412
+ sample (`torch.FloatTensor`): Input sample.
413
+ sample_posterior (`bool`, *optional*, defaults to `False`):
414
+ Whether to sample from the posterior.
415
+ return_dict (`bool`, *optional*, defaults to `True`):
416
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
417
+ generator (`torch.Generator`, *optional*, defaults to `None`):
418
+ Generator to use for sampling.
419
+
420
+ Returns:
421
+ [`DecoderOutput`] or `tuple`:
422
+ If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
423
+ """
424
+ x = sample
425
+ posterior = self.encode(x).latent_dist
426
+ if sample_posterior:
427
+ z = posterior.sample(generator=generator)
428
+ else:
429
+ z = posterior.mode()
430
+ dec = self.decode(z, generator=generator).sample
431
+
432
+ if not return_dict:
433
+ return (dec,)
434
+
435
+ return DecoderOutput(sample=dec)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/autoencoders/vae.py ADDED
@@ -0,0 +1,983 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Optional, Tuple
16
+
17
+ import numpy as np
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from ...utils import BaseOutput, is_torch_version
22
+ from ...utils.torch_utils import randn_tensor
23
+ from ..activations import get_activation
24
+ from ..attention_processor import SpatialNorm
25
+ from ..unets.unet_2d_blocks import (
26
+ AutoencoderTinyBlock,
27
+ UNetMidBlock2D,
28
+ get_down_block,
29
+ get_up_block,
30
+ )
31
+
32
+
33
@dataclass
class DecoderOutput(BaseOutput):
    r"""
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The decoded output sample from the last layer of the model.
    """

    # Decoded image batch.
    sample: torch.FloatTensor
44
+
45
+
46
class Encoder(nn.Module):
    r"""
    The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available
            options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        double_z (`bool`, *optional*, defaults to `True`):
            Whether to double the number of output channels for the last block.
        mid_block_add_attention (`bool`, *optional*, defaults to `True`):
            Whether to add a self-attention layer to the mid block.
    """

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 2,
        norm_num_groups: int = 32,
        act_fn: str = "silu",
        double_z: bool = True,
        mid_block_add_attention: bool = True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down: one block per entry in down_block_types; all but the last downsample spatially.
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
            add_attention=mid_block_add_attention,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        # double_z doubles the channels so the output can carry (mean, logvar) moments.
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        r"""The forward method of the `Encoder` class."""

        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                # use_reentrant=False is only supported on torch >= 1.11.
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
183
+
184
+
185
class Decoder(nn.Module):
    r"""
    The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        norm_type (`str`, *optional*, defaults to `"group"`):
            The normalization type to use. Can be either `"group"` or `"spatial"`.
        mid_block_add_attention (`bool`, *optional*, defaults to `True`):
            Whether to add a self-attention layer to the mid block.
    """

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 2,
        norm_num_groups: int = 32,
        act_fn: str = "silu",
        norm_type: str = "group",  # group, spatial
        mid_block_add_attention: bool = True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # Spatial norm conditions on the latent embedding, so it needs temb channels.
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
            add_attention=mid_block_add_attention,
        )

        # up: channels are walked in reverse of the encoder; all but the last block upsample.
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(
        self,
        sample: torch.FloatTensor,
        latent_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        r"""The forward method of the `Decoder` class.

        Args:
            sample (`torch.FloatTensor`): Latent batch to decode.
            latent_embeds (`torch.FloatTensor`, *optional*):
                Conditioning embedding passed to mid/up blocks; only meaningful with `norm_type="spatial"`.
        """

        sample = self.conv_in(sample)

        # Up blocks may run in a different dtype (e.g. after upcasting); match it after the mid block.
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    latent_embeds,
                    use_reentrant=False,
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        latent_embeds,
                        use_reentrant=False,
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
349
+
350
+
351
class UpSample(nn.Module):
    r"""
    Variational-autoencoder upsampling layer: a ReLU followed by a stride-2
    transposed convolution that doubles the spatial resolution.

    Args:
        in_channels (`int`):
            Number of channels in the input feature map.
        out_channels (`int`):
            Number of channels produced by the layer.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # kernel 4 / stride 2 / padding 1 exactly doubles height and width
        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        r"""Apply ReLU then the transposed convolution to upsample `x` by 2x."""
        return self.deconv(torch.relu(x))
377
+
378
+
379
class MaskConditionEncoder(nn.Module):
    """
    Mask/masked-image conditioning encoder used in `AsymmetricAutoencoderKL`.

    Builds a small convolutional pyramid; `forward` returns the intermediate
    feature map at every resolution, keyed by `str(tuple(shape))`, for later
    consumption by the mask-conditioned decoder.
    """

    def __init__(
        self,
        in_ch: int,
        out_ch: int = 192,
        res_ch: int = 768,
        stride: int = 16,
    ) -> None:
        super().__init__()

        # Work out the (in, out) channel pair for each halving of `stride`,
        # capping the width at `res_ch`.
        channels = []
        while stride > 1:
            stride = stride // 2
            in_ch_ = out_ch * 2
            if out_ch > res_ch:
                out_ch = res_ch
            if stride == 1:
                in_ch_ = res_ch
            channels.append((in_ch_, out_ch))
            out_ch *= 2

        out_channels = [pair[1] for pair in channels]
        out_channels.append(channels[-1][0])

        layers = []
        in_ch_ = in_ch
        for idx, out_ch_ in enumerate(out_channels):
            # the first two convolutions keep resolution; the rest downsample by 2
            if idx < 2:
                layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=3, stride=1, padding=1))
            else:
                layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=4, stride=2, padding=1))
            in_ch_ = out_ch_

        self.layers = nn.Sequential(*layers)

    def forward(self, x: torch.FloatTensor, mask=None) -> torch.FloatTensor:
        r"""Return a dict of intermediate feature maps keyed by `str(tuple(shape))`.

        `mask` is accepted for call-site compatibility but not used here.
        """
        out = {}
        for layer in self.layers:
            x = layer(x)
            # record the pre-activation feature map at this resolution
            out[str(tuple(x.shape))] = x
            x = torch.relu(x)
        return out
430
+
431
+
432
class MaskConditionDecoder(nn.Module):
    r"""The `MaskConditionDecoder` should be used in combination with [`AsymmetricAutoencoderKL`] to enhance the model's
    decoder with a conditioner on the mask and masked image.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        norm_type (`str`, *optional*, defaults to `"group"`):
            The normalization type to use. Can be either `"group"` or `"spatial"`.
    """

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 2,
        norm_num_groups: int = 32,
        act_fn: str = "silu",
        norm_type: str = "group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # spatial norm is conditioned on the latent embeddings, which carry `in_channels` channels
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # condition encoder: produces per-resolution feature maps of the masked
        # image, keyed by feature-map shape, blended in during upsampling below
        self.condition_encoder = MaskConditionEncoder(
            in_ch=out_channels,
            out_ch=block_out_channels[0],
            res_ch=block_out_channels[-1],
        )

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(
        self,
        z: torch.FloatTensor,
        image: Optional[torch.FloatTensor] = None,
        mask: Optional[torch.FloatTensor] = None,
        latent_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        r"""The forward method of the `MaskConditionDecoder` class.

        Args:
            z: Latent sample to decode.
            image: Image the mask refers to; only used together with `mask`.
            mask: Inpainting mask; only used together with `image`.
            latent_embeds: Optional conditioning embeddings for spatial normalization.
        """
        sample = z
        sample = self.conv_in(sample)

        # cast mid-block output to the up blocks' parameter dtype before upsampling
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:
            # wrapper so torch.utils.checkpoint can forward positional args
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    latent_embeds,
                    use_reentrant=False,
                )
                sample = sample.to(upscale_dtype)

                # condition encoder
                if image is not None and mask is not None:
                    masked_image = (1 - mask) * image
                    im_x = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(self.condition_encoder),
                        masked_image,
                        mask,
                        use_reentrant=False,
                    )

                # up
                for up_block in self.up_blocks:
                    if image is not None and mask is not None:
                        # blend decoder features with encoder features of the same
                        # resolution: decoder output inside the mask, encoder
                        # (known-image) features outside it
                        sample_ = im_x[str(tuple(sample.shape))]
                        mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
                        sample = sample * mask_ + sample_ * (1 - mask_)
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        latent_embeds,
                        use_reentrant=False,
                    )
                if image is not None and mask is not None:
                    # final blend at full resolution
                    sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
            else:
                # older torch has no `use_reentrant` keyword
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # condition encoder
                if image is not None and mask is not None:
                    masked_image = (1 - mask) * image
                    im_x = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(self.condition_encoder),
                        masked_image,
                        mask,
                    )

                # up
                for up_block in self.up_blocks:
                    if image is not None and mask is not None:
                        sample_ = im_x[str(tuple(sample.shape))]
                        mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
                        sample = sample * mask_ + sample_ * (1 - mask_)
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
                if image is not None and mask is not None:
                    sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # condition encoder
            if image is not None and mask is not None:
                masked_image = (1 - mask) * image
                im_x = self.condition_encoder(masked_image, mask)

            # up
            for up_block in self.up_blocks:
                if image is not None and mask is not None:
                    # per-resolution mask blend (see checkpointed branch above)
                    sample_ = im_x[str(tuple(sample.shape))]
                    mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
                    sample = sample * mask_ + sample_ * (1 - mask_)
                sample = up_block(sample, latent_embeds)
            if image is not None and mask is not None:
                sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
645
+
646
+
647
class VectorQuantizer(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
    multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(
        self,
        n_e: int,
        vq_embed_dim: int,
        beta: float,
        remap=None,
        unknown_index: str = "random",
        sane_index_shape: bool = False,
        legacy: bool = True,
    ):
        super().__init__()
        self.n_e = n_e  # codebook size
        self.vq_embed_dim = vq_embed_dim  # dimensionality of each codebook vector
        self.beta = beta  # commitment-loss weight
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        # optional post-hoc remapping of indices to a reduced set loaded from a .npy file
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.used: torch.Tensor
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # reserve one extra index for codes not present in `used`
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds: torch.LongTensor) -> torch.LongTensor:
        """Map full-codebook indices to positions in the reduced `used` set."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1  # indices with no counterpart in `used`
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds: torch.LongTensor) -> torch.LongTensor:
        """Inverse of `remap_to_used`: map reduced-set positions back to full-codebook indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, Tuple]:
        """Quantize `z` (NCHW) to its nearest codebook entries.

        Returns `(z_q, loss, (perplexity, min_encodings, min_encoding_indices))`;
        `z_q` has the same shape as `z` and carries straight-through gradients.
        """
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            # legacy (buggy) formulation: beta weights the codebook term instead
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q: torch.FloatTensor = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices: torch.LongTensor, shape: Tuple[int, ...]) -> torch.FloatTensor:
        """Look up codebook vectors for `indices`, reshaping to NCHW when `shape` is given."""
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q: torch.FloatTensor = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
767
+
768
+
769
class DiagonalGaussianDistribution(object):
    """Gaussian posterior with diagonal covariance.

    `parameters` concatenates the mean and log-variance along dim=1. When
    `deterministic` is True, the variance is forced to zero so that
    `sample()` always returns the mean.
    """

    def __init__(self, parameters: torch.Tensor, deterministic: bool = False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # clamp log-variance for numerical stability of exp()
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        """Draw a reparameterized sample: mean + std * eps."""
        # noise is drawn on the same device/dtype as the parameters
        noise = randn_tensor(
            self.mean.shape,
            generator=generator,
            device=self.parameters.device,
            dtype=self.parameters.dtype,
        )
        return self.mean + self.std * noise

    def kl(self, other: "DiagonalGaussianDistribution" = None) -> torch.Tensor:
        """KL divergence to `other`, or to a standard normal when `other` is None."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            # KL against N(0, I)
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3],
            )
        # KL between two diagonal Gaussians
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3],
        )

    def nll(self, sample: torch.Tensor, dims: Tuple[int, ...] = [1, 2, 3]) -> torch.Tensor:
        """Negative log-likelihood of `sample` under this distribution."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self) -> torch.Tensor:
        """Return the distribution mode (equal to the mean for a Gaussian)."""
        return self.mean
823
+
824
+
825
class EncoderTiny(nn.Module):
    r"""
    Lightweight encoder used by the tiny autoencoder (TAESD-style models).

    Args:
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`):
            Number of output channels.
        num_blocks (`Tuple[int, ...]`):
            For each stage, the number of `AutoencoderTinyBlock`s following the
            stage's convolution.
        block_out_channels (`Tuple[int, ...]`):
            Channel width of each stage.
        act_fn (`str`):
            The activation function to use. See
            `~diffusers.models.activations.get_activation` for available options.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_blocks: Tuple[int, ...],
        block_out_channels: Tuple[int, ...],
        act_fn: str,
    ):
        super().__init__()

        layers = []
        for stage, block_count in enumerate(num_blocks):
            width = block_out_channels[stage]

            if stage == 0:
                # first stage keeps resolution and maps from the input channels
                layers.append(nn.Conv2d(in_channels, width, kernel_size=3, padding=1))
            else:
                # later stages downsample by 2 with a bias-free strided conv
                layers.append(
                    nn.Conv2d(
                        width,
                        width,
                        kernel_size=3,
                        padding=1,
                        stride=2,
                        bias=False,
                    )
                )

            layers.extend(AutoencoderTinyBlock(width, width, act_fn) for _ in range(block_count))

        layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1))

        self.layers = nn.Sequential(*layers)
        self.gradient_checkpointing = False

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        r"""Encode `x`; inputs are expected in [-1, 1] (diffusers convention)."""
        if self.training and self.gradient_checkpointing:
            # wrapper so torch.utils.checkpoint can forward positional args
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # NOTE(review): this branch skips the [-1, 1] -> [0, 1] rescaling done
            # in the non-checkpointed path below — confirm this asymmetry is intended.
            if is_torch_version(">=", "1.11.0"):
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)
            else:
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)

        else:
            # scale image from [-1, 1] to [0, 1] to match TAESD convention
            x = self.layers(x.add(1).div(2))

        return x
899
+
900
+
901
class DecoderTiny(nn.Module):
    r"""
    Lightweight decoder used by the tiny autoencoder (TAESD-style models).

    Args:
        in_channels (`int`):
            Number of input (latent) channels.
        out_channels (`int`):
            Number of output image channels.
        num_blocks (`Tuple[int, ...]`):
            For each stage, the number of `AutoencoderTinyBlock`s in that stage.
        block_out_channels (`Tuple[int, ...]`):
            Channel width of each stage.
        upsampling_scaling_factor (`int`):
            Spatial scale factor applied between stages.
        act_fn (`str`):
            The activation function to use. See
            `~diffusers.models.activations.get_activation` for available options.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_blocks: Tuple[int, ...],
        block_out_channels: Tuple[int, ...],
        upsampling_scaling_factor: int,
        act_fn: str,
    ):
        super().__init__()

        layers = [
            nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1),
            get_activation(act_fn),
        ]

        last_stage = len(num_blocks) - 1
        for stage, block_count in enumerate(num_blocks):
            width = block_out_channels[stage]

            layers.extend(AutoencoderTinyBlock(width, width, act_fn) for _ in range(block_count))

            if stage != last_stage:
                layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor))

            # intermediate stage convs are bias-free and keep the stage width;
            # the final conv maps to the image channels and carries a bias
            conv_width = out_channels if stage == last_stage else width
            layers.append(
                nn.Conv2d(
                    width,
                    conv_width,
                    kernel_size=3,
                    padding=1,
                    bias=(stage == last_stage),
                )
            )

        self.layers = nn.Sequential(*layers)
        self.gradient_checkpointing = False

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        r"""Decode latents `x`; output is scaled to [-1, 1]."""
        # soft-clamp latents to roughly [-3, 3]
        x = torch.tanh(x / 3) * 3

        if self.training and self.gradient_checkpointing:
            # wrapper so torch.utils.checkpoint can forward positional args
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)
            else:
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)

        else:
            x = self.layers(x)

        # scale image from [0, 1] to [-1, 1] to match diffusers convention
        return x.mul(2).sub(1)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/controlnet.py ADDED
@@ -0,0 +1,868 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ from torch import nn
19
+ from torch.nn import functional as F
20
+
21
+ from ..configuration_utils import ConfigMixin, register_to_config
22
+ from ..loaders import FromOriginalControlNetMixin
23
+ from ..utils import BaseOutput, logging
24
+ from .attention_processor import (
25
+ ADDED_KV_ATTENTION_PROCESSORS,
26
+ CROSS_ATTENTION_PROCESSORS,
27
+ AttentionProcessor,
28
+ AttnAddedKVProcessor,
29
+ AttnProcessor,
30
+ )
31
+ from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
32
+ from .modeling_utils import ModelMixin
33
+ from .unets.unet_2d_blocks import (
34
+ CrossAttnDownBlock2D,
35
+ DownBlock2D,
36
+ UNetMidBlock2D,
37
+ UNetMidBlock2DCrossAttn,
38
+ get_down_block,
39
+ )
40
+ from .unets.unet_2d_condition import UNet2DConditionModel
41
+
42
+
43
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
44
+
45
+
46
@dataclass
class ControlNetOutput(BaseOutput):
    """
    The output of [`ControlNetModel`].

    Args:
        down_block_res_samples (`tuple[torch.Tensor]`):
            A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
            be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`. Output can be
            used to condition the original UNet's downsampling activations.
        mid_block_res_sample (`torch.Tensor`):
            The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
            `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
            Output can be used to condition the original UNet's middle block activation.
    """

    down_block_res_samples: Tuple[torch.Tensor]
    mid_block_res_sample: torch.Tensor
64
+
65
+
66
class ControlNetConditioningEmbedding(nn.Module):
    """
    Small convolutional encoder that maps an image-space condition into the
    UNet's latent feature space.

    Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
    [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
    training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
    convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
    (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
    model) to encode image-space conditions ... into feature maps ..."
    """

    def __init__(
        self,
        conditioning_embedding_channels: int,
        conditioning_channels: int = 3,
        block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
    ):
        super().__init__()

        self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)

        self.blocks = nn.ModuleList([])
        # each channel transition contributes one resolution-preserving conv and
        # one stride-2 downsampling conv
        for ch_in, ch_out in zip(block_out_channels[:-1], block_out_channels[1:]):
            self.blocks.append(nn.Conv2d(ch_in, ch_in, kernel_size=3, padding=1))
            self.blocks.append(nn.Conv2d(ch_in, ch_out, kernel_size=3, padding=1, stride=2))

        # zero-initialized so the ControlNet contribution starts out as a no-op
        self.conv_out = zero_module(
            nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
        )

    def forward(self, conditioning):
        """Encode the image-space `conditioning` into an embedding feature map."""
        embedding = F.silu(self.conv_in(conditioning))

        for block in self.blocks:
            embedding = F.silu(block(embedding))

        return self.conv_out(embedding)
109
+
110
+
111
+ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
112
+ """
113
+ A ControlNet model.
114
+
115
+ Args:
116
+ in_channels (`int`, defaults to 4):
117
+ The number of channels in the input sample.
118
+ flip_sin_to_cos (`bool`, defaults to `True`):
119
+ Whether to flip the sin to cos in the time embedding.
120
+ freq_shift (`int`, defaults to 0):
121
+ The frequency shift to apply to the time embedding.
122
+ down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
123
+ The tuple of downsample blocks to use.
124
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
125
+ block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
126
+ The tuple of output channels for each block.
127
+ layers_per_block (`int`, defaults to 2):
128
+ The number of layers per block.
129
+ downsample_padding (`int`, defaults to 1):
130
+ The padding to use for the downsampling convolution.
131
+ mid_block_scale_factor (`float`, defaults to 1):
132
+ The scale factor to use for the mid block.
133
+ act_fn (`str`, defaults to "silu"):
134
+ The activation function to use.
135
+ norm_num_groups (`int`, *optional*, defaults to 32):
136
+ The number of groups to use for the normalization. If None, normalization and activation layers is skipped
137
+ in post-processing.
138
+ norm_eps (`float`, defaults to 1e-5):
139
+ The epsilon to use for the normalization.
140
+ cross_attention_dim (`int`, defaults to 1280):
141
+ The dimension of the cross attention features.
142
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
143
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
144
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
145
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
146
+ encoder_hid_dim (`int`, *optional*, defaults to None):
147
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
148
+ dimension to `cross_attention_dim`.
149
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
150
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
151
+ embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
152
+ attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
153
+ The dimension of the attention heads.
154
+ use_linear_projection (`bool`, defaults to `False`):
155
+ class_embed_type (`str`, *optional*, defaults to `None`):
156
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
157
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
158
+ addition_embed_type (`str`, *optional*, defaults to `None`):
159
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
160
+ "text". "text" will use the `TextTimeEmbedding` layer.
161
+ num_class_embeds (`int`, *optional*, defaults to 0):
162
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
163
+ class conditioning with `class_embed_type` equal to `None`.
164
+ upcast_attention (`bool`, defaults to `False`):
165
+ resnet_time_scale_shift (`str`, defaults to `"default"`):
166
+ Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
167
+ projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
168
+ The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
169
+ `class_embed_type="projection"`.
170
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
171
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
172
+ conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
173
+ The tuple of output channel for each block in the `conditioning_embedding` layer.
174
+ global_pool_conditions (`bool`, defaults to `False`):
175
+ TODO(Patrick) - unused parameter.
176
+ addition_embed_type_num_heads (`int`, defaults to 64):
177
+ The number of heads to use for the `TextTimeEmbedding` layer.
178
+ """
179
+
180
+ _supports_gradient_checkpointing = True
181
+
182
+ @register_to_config
183
+ def __init__(
184
+ self,
185
+ in_channels: int = 4,
186
+ conditioning_channels: int = 3,
187
+ flip_sin_to_cos: bool = True,
188
+ freq_shift: int = 0,
189
+ down_block_types: Tuple[str, ...] = (
190
+ "CrossAttnDownBlock2D",
191
+ "CrossAttnDownBlock2D",
192
+ "CrossAttnDownBlock2D",
193
+ "DownBlock2D",
194
+ ),
195
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
196
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
197
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
198
+ layers_per_block: int = 2,
199
+ downsample_padding: int = 1,
200
+ mid_block_scale_factor: float = 1,
201
+ act_fn: str = "silu",
202
+ norm_num_groups: Optional[int] = 32,
203
+ norm_eps: float = 1e-5,
204
+ cross_attention_dim: int = 1280,
205
+ transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
206
+ encoder_hid_dim: Optional[int] = None,
207
+ encoder_hid_dim_type: Optional[str] = None,
208
+ attention_head_dim: Union[int, Tuple[int, ...]] = 8,
209
+ num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
210
+ use_linear_projection: bool = False,
211
+ class_embed_type: Optional[str] = None,
212
+ addition_embed_type: Optional[str] = None,
213
+ addition_time_embed_dim: Optional[int] = None,
214
+ num_class_embeds: Optional[int] = None,
215
+ upcast_attention: bool = False,
216
+ resnet_time_scale_shift: str = "default",
217
+ projection_class_embeddings_input_dim: Optional[int] = None,
218
+ controlnet_conditioning_channel_order: str = "rgb",
219
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
220
+ global_pool_conditions: bool = False,
221
+ addition_embed_type_num_heads: int = 64,
222
+ ):
223
+ super().__init__()
224
+
225
+ # If `num_attention_heads` is not defined (which is the case for most models)
226
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
227
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
228
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
229
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
230
+ # which is why we correct for the naming here.
231
+ num_attention_heads = num_attention_heads or attention_head_dim
232
+
233
+ # Check inputs
234
+ if len(block_out_channels) != len(down_block_types):
235
+ raise ValueError(
236
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
237
+ )
238
+
239
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
240
+ raise ValueError(
241
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
242
+ )
243
+
244
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
245
+ raise ValueError(
246
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
247
+ )
248
+
249
+ if isinstance(transformer_layers_per_block, int):
250
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
251
+
252
+ # input
253
+ conv_in_kernel = 3
254
+ conv_in_padding = (conv_in_kernel - 1) // 2
255
+ self.conv_in = nn.Conv2d(
256
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
257
+ )
258
+
259
+ # time
260
+ time_embed_dim = block_out_channels[0] * 4
261
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
262
+ timestep_input_dim = block_out_channels[0]
263
+ self.time_embedding = TimestepEmbedding(
264
+ timestep_input_dim,
265
+ time_embed_dim,
266
+ act_fn=act_fn,
267
+ )
268
+
269
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
270
+ encoder_hid_dim_type = "text_proj"
271
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
272
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
273
+
274
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
275
+ raise ValueError(
276
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
277
+ )
278
+
279
+ if encoder_hid_dim_type == "text_proj":
280
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
281
+ elif encoder_hid_dim_type == "text_image_proj":
282
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
283
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
284
+ # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)`
285
+ self.encoder_hid_proj = TextImageProjection(
286
+ text_embed_dim=encoder_hid_dim,
287
+ image_embed_dim=cross_attention_dim,
288
+ cross_attention_dim=cross_attention_dim,
289
+ )
290
+
291
+ elif encoder_hid_dim_type is not None:
292
+ raise ValueError(
293
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
294
+ )
295
+ else:
296
+ self.encoder_hid_proj = None
297
+
298
+ # class embedding
299
+ if class_embed_type is None and num_class_embeds is not None:
300
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
301
+ elif class_embed_type == "timestep":
302
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
303
+ elif class_embed_type == "identity":
304
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
305
+ elif class_embed_type == "projection":
306
+ if projection_class_embeddings_input_dim is None:
307
+ raise ValueError(
308
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
309
+ )
310
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
311
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
312
+ # 2. it projects from an arbitrary input dimension.
313
+ #
314
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
315
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
316
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
317
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
318
+ else:
319
+ self.class_embedding = None
320
+
321
+ if addition_embed_type == "text":
322
+ if encoder_hid_dim is not None:
323
+ text_time_embedding_from_dim = encoder_hid_dim
324
+ else:
325
+ text_time_embedding_from_dim = cross_attention_dim
326
+
327
+ self.add_embedding = TextTimeEmbedding(
328
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
329
+ )
330
+ elif addition_embed_type == "text_image":
331
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
332
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
333
+ # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)`
334
+ self.add_embedding = TextImageTimeEmbedding(
335
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
336
+ )
337
+ elif addition_embed_type == "text_time":
338
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
339
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
340
+
341
+ elif addition_embed_type is not None:
342
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
343
+
344
+ # control net conditioning embedding
345
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
346
+ conditioning_embedding_channels=block_out_channels[0],
347
+ block_out_channels=conditioning_embedding_out_channels,
348
+ conditioning_channels=conditioning_channels,
349
+ )
350
+
351
+ self.down_blocks = nn.ModuleList([])
352
+ self.controlnet_down_blocks = nn.ModuleList([])
353
+
354
+ if isinstance(only_cross_attention, bool):
355
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
356
+
357
+ if isinstance(attention_head_dim, int):
358
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
359
+
360
+ if isinstance(num_attention_heads, int):
361
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
362
+
363
+ # down
364
+ output_channel = block_out_channels[0]
365
+
366
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
367
+ controlnet_block = zero_module(controlnet_block)
368
+ self.controlnet_down_blocks.append(controlnet_block)
369
+
370
+ for i, down_block_type in enumerate(down_block_types):
371
+ input_channel = output_channel
372
+ output_channel = block_out_channels[i]
373
+ is_final_block = i == len(block_out_channels) - 1
374
+
375
+ down_block = get_down_block(
376
+ down_block_type,
377
+ num_layers=layers_per_block,
378
+ transformer_layers_per_block=transformer_layers_per_block[i],
379
+ in_channels=input_channel,
380
+ out_channels=output_channel,
381
+ temb_channels=time_embed_dim,
382
+ add_downsample=not is_final_block,
383
+ resnet_eps=norm_eps,
384
+ resnet_act_fn=act_fn,
385
+ resnet_groups=norm_num_groups,
386
+ cross_attention_dim=cross_attention_dim,
387
+ num_attention_heads=num_attention_heads[i],
388
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
389
+ downsample_padding=downsample_padding,
390
+ use_linear_projection=use_linear_projection,
391
+ only_cross_attention=only_cross_attention[i],
392
+ upcast_attention=upcast_attention,
393
+ resnet_time_scale_shift=resnet_time_scale_shift,
394
+ )
395
+ self.down_blocks.append(down_block)
396
+
397
+ for _ in range(layers_per_block):
398
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
399
+ controlnet_block = zero_module(controlnet_block)
400
+ self.controlnet_down_blocks.append(controlnet_block)
401
+
402
+ if not is_final_block:
403
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
404
+ controlnet_block = zero_module(controlnet_block)
405
+ self.controlnet_down_blocks.append(controlnet_block)
406
+
407
+ # mid
408
+ mid_block_channel = block_out_channels[-1]
409
+
410
+ controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
411
+ controlnet_block = zero_module(controlnet_block)
412
+ self.controlnet_mid_block = controlnet_block
413
+
414
+ if mid_block_type == "UNetMidBlock2DCrossAttn":
415
+ self.mid_block = UNetMidBlock2DCrossAttn(
416
+ transformer_layers_per_block=transformer_layers_per_block[-1],
417
+ in_channels=mid_block_channel,
418
+ temb_channels=time_embed_dim,
419
+ resnet_eps=norm_eps,
420
+ resnet_act_fn=act_fn,
421
+ output_scale_factor=mid_block_scale_factor,
422
+ resnet_time_scale_shift=resnet_time_scale_shift,
423
+ cross_attention_dim=cross_attention_dim,
424
+ num_attention_heads=num_attention_heads[-1],
425
+ resnet_groups=norm_num_groups,
426
+ use_linear_projection=use_linear_projection,
427
+ upcast_attention=upcast_attention,
428
+ )
429
+ elif mid_block_type == "UNetMidBlock2D":
430
+ self.mid_block = UNetMidBlock2D(
431
+ in_channels=block_out_channels[-1],
432
+ temb_channels=time_embed_dim,
433
+ num_layers=0,
434
+ resnet_eps=norm_eps,
435
+ resnet_act_fn=act_fn,
436
+ output_scale_factor=mid_block_scale_factor,
437
+ resnet_groups=norm_num_groups,
438
+ resnet_time_scale_shift=resnet_time_scale_shift,
439
+ add_attention=False,
440
+ )
441
+ else:
442
+ raise ValueError(f"unknown mid_block_type : {mid_block_type}")
443
+
444
+ @classmethod
445
+ def from_unet(
446
+ cls,
447
+ unet: UNet2DConditionModel,
448
+ controlnet_conditioning_channel_order: str = "rgb",
449
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
450
+ load_weights_from_unet: bool = True,
451
+ conditioning_channels: int = 3,
452
+ ):
453
+ r"""
454
+ Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`].
455
+
456
+ Parameters:
457
+ unet (`UNet2DConditionModel`):
458
+ The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
459
+ where applicable.
460
+ """
461
+ transformer_layers_per_block = (
462
+ unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
463
+ )
464
+ encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
465
+ encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
466
+ addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
467
+ addition_time_embed_dim = (
468
+ unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
469
+ )
470
+
471
+ controlnet = cls(
472
+ encoder_hid_dim=encoder_hid_dim,
473
+ encoder_hid_dim_type=encoder_hid_dim_type,
474
+ addition_embed_type=addition_embed_type,
475
+ addition_time_embed_dim=addition_time_embed_dim,
476
+ transformer_layers_per_block=transformer_layers_per_block,
477
+ in_channels=unet.config.in_channels,
478
+ flip_sin_to_cos=unet.config.flip_sin_to_cos,
479
+ freq_shift=unet.config.freq_shift,
480
+ down_block_types=unet.config.down_block_types,
481
+ only_cross_attention=unet.config.only_cross_attention,
482
+ block_out_channels=unet.config.block_out_channels,
483
+ layers_per_block=unet.config.layers_per_block,
484
+ downsample_padding=unet.config.downsample_padding,
485
+ mid_block_scale_factor=unet.config.mid_block_scale_factor,
486
+ act_fn=unet.config.act_fn,
487
+ norm_num_groups=unet.config.norm_num_groups,
488
+ norm_eps=unet.config.norm_eps,
489
+ cross_attention_dim=unet.config.cross_attention_dim,
490
+ attention_head_dim=unet.config.attention_head_dim,
491
+ num_attention_heads=unet.config.num_attention_heads,
492
+ use_linear_projection=unet.config.use_linear_projection,
493
+ class_embed_type=unet.config.class_embed_type,
494
+ num_class_embeds=unet.config.num_class_embeds,
495
+ upcast_attention=unet.config.upcast_attention,
496
+ resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
497
+ projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
498
+ mid_block_type=unet.config.mid_block_type,
499
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
500
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
501
+ conditioning_channels=conditioning_channels,
502
+ )
503
+
504
+ if load_weights_from_unet:
505
+ controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
506
+ controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
507
+ controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
508
+
509
+ if controlnet.class_embedding:
510
+ controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
511
+
512
+ controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
513
+ controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
514
+
515
+ return controlnet
516
+
517
+ @property
518
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
519
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
520
+ r"""
521
+ Returns:
522
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
523
+ indexed by its weight name.
524
+ """
525
+ # set recursively
526
+ processors = {}
527
+
528
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
529
+ if hasattr(module, "get_processor"):
530
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
531
+
532
+ for sub_name, child in module.named_children():
533
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
534
+
535
+ return processors
536
+
537
+ for name, module in self.named_children():
538
+ fn_recursive_add_processors(name, module, processors)
539
+
540
+ return processors
541
+
542
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
543
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
544
+ r"""
545
+ Sets the attention processor to use to compute attention.
546
+
547
+ Parameters:
548
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
549
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
550
+ for **all** `Attention` layers.
551
+
552
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
553
+ processor. This is strongly recommended when setting trainable attention processors.
554
+
555
+ """
556
+ count = len(self.attn_processors.keys())
557
+
558
+ if isinstance(processor, dict) and len(processor) != count:
559
+ raise ValueError(
560
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
561
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
562
+ )
563
+
564
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
565
+ if hasattr(module, "set_processor"):
566
+ if not isinstance(processor, dict):
567
+ module.set_processor(processor)
568
+ else:
569
+ module.set_processor(processor.pop(f"{name}.processor"))
570
+
571
+ for sub_name, child in module.named_children():
572
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
573
+
574
+ for name, module in self.named_children():
575
+ fn_recursive_attn_processor(name, module, processor)
576
+
577
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
578
+ def set_default_attn_processor(self):
579
+ """
580
+ Disables custom attention processors and sets the default attention implementation.
581
+ """
582
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
583
+ processor = AttnAddedKVProcessor()
584
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
585
+ processor = AttnProcessor()
586
+ else:
587
+ raise ValueError(
588
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
589
+ )
590
+
591
+ self.set_attn_processor(processor)
592
+
593
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice
594
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
595
+ r"""
596
+ Enable sliced attention computation.
597
+
598
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
599
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
600
+
601
+ Args:
602
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
603
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
604
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
605
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
606
+ must be a multiple of `slice_size`.
607
+ """
608
+ sliceable_head_dims = []
609
+
610
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
611
+ if hasattr(module, "set_attention_slice"):
612
+ sliceable_head_dims.append(module.sliceable_head_dim)
613
+
614
+ for child in module.children():
615
+ fn_recursive_retrieve_sliceable_dims(child)
616
+
617
+ # retrieve number of attention layers
618
+ for module in self.children():
619
+ fn_recursive_retrieve_sliceable_dims(module)
620
+
621
+ num_sliceable_layers = len(sliceable_head_dims)
622
+
623
+ if slice_size == "auto":
624
+ # half the attention head size is usually a good trade-off between
625
+ # speed and memory
626
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
627
+ elif slice_size == "max":
628
+ # make smallest slice possible
629
+ slice_size = num_sliceable_layers * [1]
630
+
631
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
632
+
633
+ if len(slice_size) != len(sliceable_head_dims):
634
+ raise ValueError(
635
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
636
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
637
+ )
638
+
639
+ for i in range(len(slice_size)):
640
+ size = slice_size[i]
641
+ dim = sliceable_head_dims[i]
642
+ if size is not None and size > dim:
643
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
644
+
645
+ # Recursively walk through all the children.
646
+ # Any children which exposes the set_attention_slice method
647
+ # gets the message
648
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
649
+ if hasattr(module, "set_attention_slice"):
650
+ module.set_attention_slice(slice_size.pop())
651
+
652
+ for child in module.children():
653
+ fn_recursive_set_attention_slice(child, slice_size)
654
+
655
+ reversed_slice_size = list(reversed(slice_size))
656
+ for module in self.children():
657
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
658
+
659
+ def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
660
+ if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
661
+ module.gradient_checkpointing = value
662
+
663
+ def forward(
664
+ self,
665
+ sample: torch.FloatTensor,
666
+ timestep: Union[torch.Tensor, float, int],
667
+ encoder_hidden_states: torch.Tensor,
668
+ controlnet_cond: torch.FloatTensor,
669
+ conditioning_scale: float = 1.0,
670
+ class_labels: Optional[torch.Tensor] = None,
671
+ timestep_cond: Optional[torch.Tensor] = None,
672
+ attention_mask: Optional[torch.Tensor] = None,
673
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
674
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
675
+ guess_mode: bool = False,
676
+ return_dict: bool = True,
677
+ ) -> Union[ControlNetOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]:
678
+ """
679
+ The [`ControlNetModel`] forward method.
680
+
681
+ Args:
682
+ sample (`torch.FloatTensor`):
683
+ The noisy input tensor.
684
+ timestep (`Union[torch.Tensor, float, int]`):
685
+ The number of timesteps to denoise an input.
686
+ encoder_hidden_states (`torch.Tensor`):
687
+ The encoder hidden states.
688
+ controlnet_cond (`torch.FloatTensor`):
689
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
690
+ conditioning_scale (`float`, defaults to `1.0`):
691
+ The scale factor for ControlNet outputs.
692
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
693
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
694
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
695
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
696
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
697
+ embeddings.
698
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
699
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
700
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
701
+ negative values to the attention scores corresponding to "discard" tokens.
702
+ added_cond_kwargs (`dict`):
703
+ Additional conditions for the Stable Diffusion XL UNet.
704
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
705
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
706
+ guess_mode (`bool`, defaults to `False`):
707
+ In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if
708
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
709
+ return_dict (`bool`, defaults to `True`):
710
+ Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
711
+
712
+ Returns:
713
+ [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
714
+ If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
715
+ returned where the first element is the sample tensor.
716
+ """
717
+ # check channel order
718
+ channel_order = self.config.controlnet_conditioning_channel_order
719
+
720
+ if channel_order == "rgb":
721
+ # in rgb order by default
722
+ ...
723
+ elif channel_order == "bgr":
724
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
725
+ else:
726
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
727
+
728
+ # prepare attention_mask
729
+ if attention_mask is not None:
730
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
731
+ attention_mask = attention_mask.unsqueeze(1)
732
+
733
+ # 1. time
734
+ timesteps = timestep
735
+ if not torch.is_tensor(timesteps):
736
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
737
+ # This would be a good case for the `match` statement (Python 3.10+)
738
+ is_mps = sample.device.type == "mps"
739
+ if isinstance(timestep, float):
740
+ dtype = torch.float32 if is_mps else torch.float64
741
+ else:
742
+ dtype = torch.int32 if is_mps else torch.int64
743
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
744
+ elif len(timesteps.shape) == 0:
745
+ timesteps = timesteps[None].to(sample.device)
746
+
747
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
748
+ timesteps = timesteps.expand(sample.shape[0])
749
+
750
+ t_emb = self.time_proj(timesteps)
751
+
752
+ # timesteps does not contain any weights and will always return f32 tensors
753
+ # but time_embedding might actually be running in fp16. so we need to cast here.
754
+ # there might be better ways to encapsulate this.
755
+ t_emb = t_emb.to(dtype=sample.dtype)
756
+
757
+ emb = self.time_embedding(t_emb, timestep_cond)
758
+ aug_emb = None
759
+
760
+ if self.class_embedding is not None:
761
+ if class_labels is None:
762
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
763
+
764
+ if self.config.class_embed_type == "timestep":
765
+ class_labels = self.time_proj(class_labels)
766
+
767
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
768
+ emb = emb + class_emb
769
+
770
+ if self.config.addition_embed_type is not None:
771
+ if self.config.addition_embed_type == "text":
772
+ aug_emb = self.add_embedding(encoder_hidden_states)
773
+
774
+ elif self.config.addition_embed_type == "text_time":
775
+ if "text_embeds" not in added_cond_kwargs:
776
+ raise ValueError(
777
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
778
+ )
779
+ text_embeds = added_cond_kwargs.get("text_embeds")
780
+ if "time_ids" not in added_cond_kwargs:
781
+ raise ValueError(
782
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
783
+ )
784
+ time_ids = added_cond_kwargs.get("time_ids")
785
+ time_embeds = self.add_time_proj(time_ids.flatten())
786
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
787
+
788
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
789
+ add_embeds = add_embeds.to(emb.dtype)
790
+ aug_emb = self.add_embedding(add_embeds)
791
+
792
+ emb = emb + aug_emb if aug_emb is not None else emb
793
+
794
+ # 2. pre-process
795
+ sample = self.conv_in(sample)
796
+
797
+ controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
798
+ sample = sample + controlnet_cond
799
+
800
+ # 3. down
801
+ down_block_res_samples = (sample,)
802
+ for downsample_block in self.down_blocks:
803
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
804
+ sample, res_samples = downsample_block(
805
+ hidden_states=sample,
806
+ temb=emb,
807
+ encoder_hidden_states=encoder_hidden_states,
808
+ attention_mask=attention_mask,
809
+ cross_attention_kwargs=cross_attention_kwargs,
810
+ )
811
+ else:
812
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
813
+
814
+ down_block_res_samples += res_samples
815
+
816
+ # 4. mid
817
+ if self.mid_block is not None:
818
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
819
+ sample = self.mid_block(
820
+ sample,
821
+ emb,
822
+ encoder_hidden_states=encoder_hidden_states,
823
+ attention_mask=attention_mask,
824
+ cross_attention_kwargs=cross_attention_kwargs,
825
+ )
826
+ else:
827
+ sample = self.mid_block(sample, emb)
828
+
829
+ # 5. Control net blocks
830
+
831
+ controlnet_down_block_res_samples = ()
832
+
833
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
834
+ down_block_res_sample = controlnet_block(down_block_res_sample)
835
+ controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
836
+
837
+ down_block_res_samples = controlnet_down_block_res_samples
838
+
839
+ mid_block_res_sample = self.controlnet_mid_block(sample)
840
+
841
+ # 6. scaling
842
+ if guess_mode and not self.config.global_pool_conditions:
843
+ scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
844
+ scales = scales * conditioning_scale
845
+ down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
846
+ mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
847
+ else:
848
+ down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
849
+ mid_block_res_sample = mid_block_res_sample * conditioning_scale
850
+
851
+ if self.config.global_pool_conditions:
852
+ down_block_res_samples = [
853
+ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
854
+ ]
855
+ mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
856
+
857
+ if not return_dict:
858
+ return (down_block_res_samples, mid_block_res_sample)
859
+
860
+ return ControlNetOutput(
861
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
862
+ )
863
+
864
+
865
+ def zero_module(module):
866
+ for p in module.parameters():
867
+ nn.init.zeros_(p)
868
+ return module
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/controlnet_flax.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple, Union
15
+
16
+ import flax
17
+ import flax.linen as nn
18
+ import jax
19
+ import jax.numpy as jnp
20
+ from flax.core.frozen_dict import FrozenDict
21
+
22
+ from ..configuration_utils import ConfigMixin, flax_register_to_config
23
+ from ..utils import BaseOutput
24
+ from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
25
+ from .modeling_flax_utils import FlaxModelMixin
26
+ from .unets.unet_2d_blocks_flax import (
27
+ FlaxCrossAttnDownBlock2D,
28
+ FlaxDownBlock2D,
29
+ FlaxUNetMidBlock2DCrossAttn,
30
+ )
31
+
32
+
33
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """
    The output of [`FlaxControlNetModel`].

    Args:
        down_block_res_samples (`jnp.ndarray`):
            The outputs of the ControlNet down-block projections, already scaled by `conditioning_scale`.
        mid_block_res_sample (`jnp.ndarray`):
            The output of the ControlNet mid-block projection, already scaled by `conditioning_scale`.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
45
+
46
+
47
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embed a conditioning image into the first UNet feature map.

    A stack of 3x3 convolutions with SiLU activations; every second
    convolution uses stride 2, so the spatial size is halved once per
    channel transition in `block_out_channels`. The output convolution is
    zero-initialized, so the module initially produces all zeros.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        downsampling_blocks = []
        for channel_in, channel_out in zip(self.block_out_channels[:-1], self.block_out_channels[1:]):
            # Channel-preserving conv followed by a strided conv that widens
            # the channels and halves the spatial resolution.
            downsampling_blocks.append(
                nn.Conv(
                    channel_in,
                    kernel_size=(3, 3),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
            downsampling_blocks.append(
                nn.Conv(
                    channel_out,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
        self.blocks = downsampling_blocks

        # Zero-initialized output projection (both kernel and bias).
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
        hidden = nn.silu(self.conv_in(conditioning))

        for block in self.blocks:
            hidden = nn.silu(block(hidden))

        return self.conv_out(hidden)
101
+
102
+
103
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    A ControlNet model.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
    implemented for all models (such as downloading or saving).

    This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
    general usage and behavior.

    Inherent JAX features such as the following are supported:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        sample_size (`int`, *optional*):
            The size of the input sample.
        in_channels (`int`, *optional*, defaults to 4):
            The number of channels in the input sample.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`):
            The tuple of downsample blocks to use.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
            The dimension of the attention heads.
        num_attention_heads (`int` or `Tuple[int]`, *optional*):
            The number of attention heads.
        cross_attention_dim (`int`, *optional*, defaults to 768):
            The dimension of the cross attention features.
        dropout (`float`, *optional*, defaults to 0):
            Dropout probability for down, up and bottleneck blocks.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
        controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`):
            The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
        conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`):
            The tuple of output channel for each block in the `conditioning_embedding` layer.
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        """Initialize the model parameters from dummy (all-zero/one) inputs and return them."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # The conditioning image is 8x the latent sample size — presumably the
        # VAE downscale factor; confirm against the pipelines that call this.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        # Embeds the conditioning image into the same feature space as `conv_in`'s output.
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        # Broadcast scalar config values to one entry per down block.
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        # One zero-initialized 1x1 projection for the `conv_in` output, then one
        # per residual produced by the down blocks (layers + downsamplers below).
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                # NOTE(review): any other type string falls through to FlaxDownBlock2D
                # without validation — confirm this matches the torch ControlNetModel.
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            # One projection per resnet layer in this down block.
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            # Non-final blocks also emit a downsampler residual — one more projection.
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample: jnp.ndarray,
        timesteps: Union[jnp.ndarray, float, int],
        encoder_hidden_states: jnp.ndarray,
        controlnet_cond: jnp.ndarray,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]:
        r"""
        Args:
            sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor
            timesteps (`jnp.ndarray` or `float` or `int`): timesteps
            encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states
            controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor
            conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`FlaxControlNetOutput`] instead of a plain tuple.
            train (`bool`, *optional*, defaults to `False`):
                Use deterministic functions and disable dropout when not training.

        Returns:
            [`FlaxControlNetOutput`] or `tuple`:
                [`FlaxControlNetOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the
                first element is the tuple of down-block residual samples and the second is the mid-block residual
                sample.
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Reverse the channel axis so the embedding always sees RGB.
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            # Promote a 0-d timestep to a length-1 batch.
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for Flax convolutions)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. ControlNet blocks: zero-initialized 1x1 projections, paired one-to-one
        # with the collected residuals.
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/downsampling.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Optional, Tuple
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+ from ..utils import deprecate
22
+ from .normalization import RMSNorm
23
+ from .upsampling import upfirdn2d_native
24
+
25
+
26
class Downsample1D(nn.Module):
    """A 1D downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        padding (`int`, default `1`):
            padding for the convolution.
        name (`str`, default `conv`):
            name of the downsampling 1D layer.
    """

    def __init__(
        self,
        channels: int,
        use_conv: bool = False,
        out_channels: Optional[int] = None,
        padding: int = 1,
        name: str = "conv",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.padding = padding
        self.name = name

        if not use_conv:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.conv = nn.AvgPool1d(kernel_size=2, stride=2)
        else:
            self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=2, padding=padding)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Downsample `inputs` (N, C, L) by a factor of 2 along the last dim."""
        assert inputs.shape[1] == self.channels
        return self.conv(inputs)
67
+
68
+
69
class Downsample2D(nn.Module):
    """A 2D downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        padding (`int`, default `1`):
            padding for the convolution.
        name (`str`, default `conv`):
            name of the downsampling 2D layer.
    """

    def __init__(
        self,
        channels: int,
        use_conv: bool = False,
        out_channels: Optional[int] = None,
        padding: int = 1,
        name: str = "conv",
        kernel_size=3,
        norm_type=None,
        eps=None,
        elementwise_affine=None,
        bias=True,
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.padding = padding
        stride = 2
        self.name = name
        conv_cls = nn.Conv2d

        if norm_type == "ln_norm":
            self.norm = nn.LayerNorm(channels, eps, elementwise_affine)
        elif norm_type == "rms_norm":
            self.norm = RMSNorm(channels, eps, elementwise_affine)
        elif norm_type is None:
            self.norm = None
        else:
            raise ValueError(f"unknown norm_type: {norm_type}")

        if use_conv:
            conv = conv_cls(
                self.channels, self.out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            conv = nn.AvgPool2d(kernel_size=stride, stride=stride)

        # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
        # `Conv2d_0` is a legacy alias kept only for the `name="conv"` checkpoint
        # layout; every name ultimately stores the layer under `conv`.
        if name == "conv":
            self.Conv2d_0 = conv
        self.conv = conv

    def forward(self, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """Downsample `hidden_states` (N, C, H, W) spatially by a factor of 2."""
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)
        assert hidden_states.shape[1] == self.channels

        if self.norm is not None:
            # The norms act over the last dim, so move channels last and back.
            hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        if self.use_conv and self.padding == 0:
            # No conv padding: pad one pixel on the right/bottom so the
            # stride-2 conv still halves the spatial size exactly.
            pad = (0, 1, 0, 1)
            hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)

        hidden_states = self.conv(hidden_states)

        return hidden_states
151
+
152
+
153
class FirDownsample2D(nn.Module):
    """A 2D FIR downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        fir_kernel (`tuple`, default `(1, 3, 3, 1)`):
            kernel for the FIR filter.
    """

    def __init__(
        self,
        channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        use_conv: bool = False,
        fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1),
    ):
        super().__init__()
        out_channels = out_channels if out_channels else channels
        if use_conv:
            self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.fir_kernel = fir_kernel
        self.use_conv = use_conv
        self.out_channels = out_channels

    def _downsample_2d(
        self,
        hidden_states: torch.FloatTensor,
        weight: Optional[torch.FloatTensor] = None,
        kernel: Optional[torch.FloatTensor] = None,
        factor: int = 2,
        gain: float = 1,
    ) -> torch.FloatTensor:
        """Fused `Conv2d()` followed by `downsample_2d()`.
        Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
        efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of
        arbitrary order.

        Args:
            hidden_states (`torch.FloatTensor`):
                Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
            weight (`torch.FloatTensor`, *optional*):
                Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be
                performed by `inChannels = x.shape[0] // numGroups`.
            kernel (`torch.FloatTensor`, *optional*):
                FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
                corresponds to average pooling.
            factor (`int`, *optional*, default to `2`):
                Integer downsampling factor.
            gain (`float`, *optional*, default to `1.0`):
                Scaling factor for signal magnitude.

        Returns:
            output (`torch.FloatTensor`):
                Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same
                datatype as `x`.
        """

        assert isinstance(factor, int) and factor >= 1
        if kernel is None:
            # Default separable kernel `[1] * factor` is average pooling after
            # the normalization below.
            kernel = [1] * factor

        # setup kernel: a 1-D (separable) kernel becomes its 2-D outer product,
        # then it is normalized to sum to 1 and scaled by `gain`.
        kernel = torch.tensor(kernel, dtype=torch.float32)
        if kernel.ndim == 1:
            kernel = torch.outer(kernel, kernel)
        kernel /= torch.sum(kernel)

        kernel = kernel * gain

        if self.use_conv:
            # FIR-filter first (with combined padding for both ops), then apply
            # the strided convolution so the pad happens only once.
            _, _, convH, convW = weight.shape
            pad_value = (kernel.shape[0] - factor) + (convW - 1)
            stride_value = [factor, factor]
            # NOTE(review): `torch.tensor(kernel, ...)` re-wraps an existing
            # tensor (copy + UserWarning); `kernel.to(hidden_states.device)`
            # would be equivalent — confirm before changing.
            upfirdn_input = upfirdn2d_native(
                hidden_states,
                torch.tensor(kernel, device=hidden_states.device),
                pad=((pad_value + 1) // 2, pad_value // 2),
            )
            output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0)
        else:
            # Pure FIR downsample: filter and decimate in one upfirdn pass.
            pad_value = kernel.shape[0] - factor
            output = upfirdn2d_native(
                hidden_states,
                torch.tensor(kernel, device=hidden_states.device),
                down=factor,
                pad=((pad_value + 1) // 2, pad_value // 2),
            )

        return output

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        # With a conv, the bias is added after the fused filter+conv (the fused
        # path uses only the conv weight); without one, it's a plain factor-2
        # FIR downsample.
        if self.use_conv:
            downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel)
            hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
        else:
            hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)

        return hidden_states
256
+
257
+
258
+ # downsample/upsample layer used in k-upscaler, might be able to use FirDownsample2D/DirUpsample2D instead
259
class KDownsample2D(nn.Module):
    r"""A 2D K-downsampling layer.

    Applies a fixed, non-trainable [1, 3, 3, 1]/8 separable smoothing kernel
    per channel, then decimates by 2 via a stride-2 convolution.

    Parameters:
        pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use.
    """

    def __init__(self, pad_mode: str = "reflect"):
        super().__init__()
        self.pad_mode = pad_mode
        kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]])
        self.pad = kernel_1d.shape[1] // 2 - 1
        # 2-D kernel is the outer product of the 1-D taps; not persisted in
        # state dicts since it is a constant.
        self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        padded = F.pad(inputs, (self.pad,) * 4, self.pad_mode)
        num_channels = padded.shape[1]
        # Build a block-diagonal conv weight so each channel is filtered
        # independently with the same fixed kernel.
        weight = padded.new_zeros(
            [
                num_channels,
                num_channels,
                self.kernel.shape[0],
                self.kernel.shape[1],
            ]
        )
        diag = torch.arange(num_channels, device=padded.device)
        weight[diag, diag] = self.kernel.to(weight)
        return F.conv2d(padded, weight, stride=2)
287
+
288
+
289
def downsample_2d(
    hidden_states: torch.FloatTensor,
    kernel: Optional[torch.FloatTensor] = None,
    factor: int = 2,
    gain: float = 1,
) -> torch.FloatTensor:
    r"""Downsample2D a batch of 2D images with the given filter.
    Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the
    given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
    specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its
    shape is a multiple of the downsampling factor.

    Args:
        hidden_states (`torch.FloatTensor`)
            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
        kernel (`torch.FloatTensor`, *optional*):
            FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
            corresponds to average pooling.
        factor (`int`, *optional*, default to `2`):
            Integer downsampling factor.
        gain (`float`, *optional*, default to `1.0`):
            Scaling factor for signal magnitude.

    Returns:
        output (`torch.FloatTensor`):
            Tensor of the shape `[N, C, H // factor, W // factor]`
    """
    assert isinstance(factor, int) and factor >= 1

    # Default to an averaging filter when none is supplied.
    if kernel is None:
        kernel = [1] * factor

    filt = torch.tensor(kernel, dtype=torch.float32)
    if filt.ndim == 1:
        # Separable 1-D filter: expand to 2-D via outer product.
        filt = torch.outer(filt, filt)
    filt /= torch.sum(filt)
    filt = filt * gain

    pad_total = filt.shape[0] - factor
    return upfirdn2d_native(
        hidden_states,
        filt.to(device=hidden_states.device),
        down=factor,
        pad=((pad_total + 1) // 2, pad_total // 2),
    )
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/dual_transformer_2d.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from ..utils import deprecate
15
+ from .transformers.dual_transformer_2d import DualTransformer2DModel
16
+
17
+
18
class DualTransformer2DModel(DualTransformer2DModel):
    # Deprecation shim: subclasses the relocated implementation (imported above
    # from `.transformers.dual_transformer_2d`) so the old import path keeps
    # working. The `deprecate` call executes once, at import time, when this
    # class body is evaluated.
    deprecation_message = "Importing `DualTransformer2DModel` from `diffusers.models.dual_transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.dual_transformer_2d import DualTransformer2DModel`, instead."
    deprecate("DualTransformer2DModel", "0.29", deprecation_message)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/embeddings.py ADDED
@@ -0,0 +1,914 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import numpy as np
18
+ import torch
19
+ from torch import nn
20
+
21
+ from ..utils import deprecate
22
+ from .activations import get_activation
23
+ from .attention_processor import Attention
24
+
25
+
26
def get_timestep_embedding(
    timesteps: torch.Tensor,
    embedding_dim: int,
    flip_sin_to_cos: bool = False,
    downscale_freq_shift: float = 1,
    scale: float = 1,
    max_period: int = 10000,
):
    """Create sinusoidal timestep embeddings (matches the DDPM implementation).

    Args:
        timesteps: 1-D tensor of N (possibly fractional) timestep indices.
        embedding_dim: dimension of each output embedding.
        flip_sin_to_cos: if True, emit `[cos | sin]` instead of `[sin | cos]`.
        downscale_freq_shift: shift subtracted from the frequency denominator.
        scale: multiplier applied to the raw angles before sin/cos.
        max_period: controls the minimum frequency of the embeddings.

    Returns:
        An `[N x embedding_dim]` tensor of positional embeddings.
    """
    assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"

    half_dim = embedding_dim // 2
    freq_idx = torch.arange(start=0, end=half_dim, dtype=torch.float32, device=timesteps.device)
    # Geometric frequency ladder from 1 down to ~1/max_period.
    frequencies = torch.exp(-math.log(max_period) * freq_idx / (half_dim - downscale_freq_shift))

    angles = timesteps[:, None].float() * frequencies[None, :]
    angles = scale * angles

    # Sine half followed by cosine half.
    embedding = torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)
    if flip_sin_to_cos:
        embedding = torch.cat([embedding[:, half_dim:], embedding[:, :half_dim]], dim=-1)

    # Odd target dimension: pad one zero column on the right.
    if embedding_dim % 2 == 1:
        embedding = torch.nn.functional.pad(embedding, (0, 1, 0, 0))
    return embedding
67
+
68
+
69
def get_2d_sincos_pos_embed(
    embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16
):
    """Build a 2D sin-cos positional embedding table for a patch grid.

    Args:
        embed_dim: embedding dimension (must be even).
        grid_size: int or (height, width) of the patch grid.
        cls_token, extra_tokens: when `cls_token` is set and `extra_tokens > 0`,
            that many all-zero rows are prepended.
        interpolation_scale, base_size: rescale grid coordinates so embeddings
            trained at `base_size` transfer to other resolutions.

    Returns:
        Array of shape (grid_h*grid_w, embed_dim), optionally with the extra
        zero rows prepended.
    """
    if isinstance(grid_size, int):
        grid_size = (grid_size, grid_size)

    coords_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale
    coords_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale
    # here w goes first
    grid = np.stack(np.meshgrid(coords_w, coords_h), axis=0)
    grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])

    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token and extra_tokens > 0:
        pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
    return pos_embed
89
+
90
+
91
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Encode a (2, ...) coordinate grid: the first half of the channels encodes
    the H coordinates, the second half the W coordinates."""
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be divisible by 2")

    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
101
+
102
+
103
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Sin-cos encode a set of scalar positions.

    Args:
        embed_dim: output dimension per position (must be even).
        pos: array of positions, any shape; flattened to (M,).

    Returns:
        (M, embed_dim) array: sines in the first half, cosines in the second.
    """
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be divisible by 2")

    half = embed_dim // 2
    freqs = 1.0 / 10000 ** (np.arange(half, dtype=np.float64) / (embed_dim / 2.0))  # (D/2,)

    angles = np.outer(pos.reshape(-1), freqs)  # (M, D/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
122
+
123
+
124
class PatchEmbed(nn.Module):
    """2D Image to Patch Embedding.

    Splits an image-like latent into non-overlapping `patch_size` x `patch_size`
    patches with a strided convolution, optionally layer-normalizes the patch
    tokens, and adds 2D sin-cos positional embeddings (recomputed on the fly if
    the runtime resolution differs from the one given at construction).
    """

    def __init__(
        self,
        height=224,
        width=224,
        patch_size=16,
        in_channels=3,
        embed_dim=768,
        layer_norm=False,
        flatten=True,
        bias=True,
        interpolation_scale=1,
    ):
        super().__init__()

        num_patches = (height // patch_size) * (width // patch_size)
        self.flatten = flatten
        self.layer_norm = layer_norm

        # The strided conv both extracts patches and projects them to embed_dim.
        self.proj = nn.Conv2d(
            in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
        )
        if layer_norm:
            self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
        else:
            self.norm = None

        self.patch_size = patch_size
        # See:
        # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L161
        self.height, self.width = height // patch_size, width // patch_size
        self.base_size = height // patch_size
        self.interpolation_scale = interpolation_scale
        # NOTE: int(num_patches**0.5) assumes a square grid at init time;
        # non-square runtime inputs are handled by the recompute path in forward.
        pos_embed = get_2d_sincos_pos_embed(
            embed_dim, int(num_patches**0.5), base_size=self.base_size, interpolation_scale=self.interpolation_scale
        )
        # Non-persistent buffer: recomputable, so it is not stored in checkpoints.
        self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)

    def forward(self, latent):
        # Patch-grid size actually seen at runtime (may differ from init size).
        height, width = latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size

        latent = self.proj(latent)
        if self.flatten:
            latent = latent.flatten(2).transpose(1, 2)  # BCHW -> BNC
        if self.layer_norm:
            latent = self.norm(latent)

        # Interpolate positional embeddings if needed.
        # (For PixArt-Alpha: https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L162C151-L162C160)
        if self.height != height or self.width != width:
            pos_embed = get_2d_sincos_pos_embed(
                embed_dim=self.pos_embed.shape[-1],
                grid_size=(height, width),
                base_size=self.base_size,
                interpolation_scale=self.interpolation_scale,
            )
            pos_embed = torch.from_numpy(pos_embed)
            pos_embed = pos_embed.float().unsqueeze(0).to(latent.device)
        else:
            pos_embed = self.pos_embed

        return (latent + pos_embed).to(latent.dtype)
188
+
189
+
190
class TimestepEmbedding(nn.Module):
    """Two-layer MLP that lifts a (sinusoidal) timestep projection to the model's
    time-embedding width, with optional extra conditioning and post-activation.

    Args:
        in_channels: width of the incoming projection.
        time_embed_dim: hidden (and default output) width.
        act_fn: activation between the two linear layers.
        out_dim: output width; defaults to `time_embed_dim`.
        post_act_fn: optional activation applied to the final output.
        cond_proj_dim: if given, a bias-free projection of `condition` is added
            to the input sample before the MLP.
        sample_proj_bias: whether the two linear layers carry a bias.
    """

    def __init__(
        self,
        in_channels: int,
        time_embed_dim: int,
        act_fn: str = "silu",
        out_dim: int = None,
        post_act_fn: Optional[str] = None,
        cond_proj_dim=None,
        sample_proj_bias=True,
    ):
        super().__init__()

        self.linear_1 = nn.Linear(in_channels, time_embed_dim, sample_proj_bias)

        # Optional projection for an extra conditioning signal.
        self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False) if cond_proj_dim is not None else None

        self.act = get_activation(act_fn)

        out_features = out_dim if out_dim is not None else time_embed_dim
        self.linear_2 = nn.Linear(time_embed_dim, out_features, sample_proj_bias)

        self.post_act = get_activation(post_act_fn) if post_act_fn is not None else None

    def forward(self, sample, condition=None):
        if condition is not None:
            sample = sample + self.cond_proj(condition)

        sample = self.linear_1(sample)
        if self.act is not None:
            sample = self.act(sample)
        sample = self.linear_2(sample)
        if self.post_act is not None:
            sample = self.post_act(sample)
        return sample
237
+
238
+
239
class Timesteps(nn.Module):
    """Module wrapper that applies `get_timestep_embedding` with fixed settings."""

    def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
        super().__init__()
        self.num_channels = num_channels
        self.flip_sin_to_cos = flip_sin_to_cos
        self.downscale_freq_shift = downscale_freq_shift

    def forward(self, timesteps):
        return get_timestep_embedding(
            timesteps,
            self.num_channels,
            flip_sin_to_cos=self.flip_sin_to_cos,
            downscale_freq_shift=self.downscale_freq_shift,
        )
254
+
255
+
256
class GaussianFourierProjection(nn.Module):
    """Gaussian Fourier embeddings for noise levels: projects scalars through
    frozen random frequencies and returns concatenated sin/cos features."""

    def __init__(
        self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
        self.log = log
        self.flip_sin_to_cos = flip_sin_to_cos

        if set_W_to_weight:
            # Legacy alias (upstream marks it "to delete later"): register the
            # same frozen parameter under both `W` and `weight` for checkpoint
            # compatibility.
            self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
            self.weight = self.W

    def forward(self, x):
        if self.log:
            x = torch.log(x)

        angles = x[:, None] * self.weight[None, :] * 2 * np.pi

        if self.flip_sin_to_cos:
            halves = (torch.cos(angles), torch.sin(angles))
        else:
            halves = (torch.sin(angles), torch.cos(angles))
        return torch.cat(halves, dim=-1)
284
+
285
+
286
class SinusoidalPositionalEmbedding(nn.Module):
    """Adds fixed sinusoidal position information to a sequence of embeddings.

    Takes a (batch_size, seq_length, embed_dim) tensor and adds interleaved
    sin/cos positional embeddings to it.

    Args:
        embed_dim (int): Dimension of the positional embedding.
        max_seq_length: Maximum sequence length to apply positional embeddings.
    """

    def __init__(self, embed_dim: int, max_seq_length: int = 32):
        super().__init__()
        positions = torch.arange(max_seq_length).unsqueeze(1)
        inv_freq = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))
        table = torch.zeros(1, max_seq_length, embed_dim)
        # Even channels carry sine, odd channels carry cosine.
        table[0, :, 0::2] = torch.sin(positions * inv_freq)
        table[0, :, 1::2] = torch.cos(positions * inv_freq)
        self.register_buffer("pe", table)

    def forward(self, x):
        seq_length = x.shape[1]
        return x + self.pe[:, :seq_length]
311
+
312
+
313
class ImagePositionalEmbeddings(nn.Module):
    """
    Converts latent image classes into vector embeddings and sums them with
    positional embeddings for the height and width of the latent grid.

    For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092

    For VQ-diffusion the output vector embeddings are used as input for the transformer. Note that these embeddings
    are different from the vector embeddings produced by the VQVAE itself.

    Args:
        num_embed (`int`):
            Number of embeddings for the latent pixels embeddings.
        height (`int`):
            Height of the latent image i.e. the number of height embeddings.
        width (`int`):
            Width of the latent image i.e. the number of width embeddings.
        embed_dim (`int`):
            Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.
    """

    def __init__(
        self,
        num_embed: int,
        height: int,
        width: int,
        embed_dim: int,
    ):
        super().__init__()

        self.height = height
        self.width = width
        self.num_embed = num_embed
        self.embed_dim = embed_dim

        self.emb = nn.Embedding(self.num_embed, embed_dim)
        self.height_emb = nn.Embedding(self.height, embed_dim)
        self.width_emb = nn.Embedding(self.width, embed_dim)

    def forward(self, index):
        content = self.emb(index)

        rows = torch.arange(self.height, device=index.device).view(1, self.height)
        cols = torch.arange(self.width, device=index.device).view(1, self.width)

        # Broadcast row embeddings over columns and vice versa:
        # (1, H, 1, D) + (1, 1, W, D) -> (1, H, W, D)
        pos = self.height_emb(rows).unsqueeze(2) + self.width_emb(cols).unsqueeze(1)

        # Flatten the grid: (1, H, W, D) -> (1, H*W, D)
        pos = pos.view(1, self.height * self.width, -1)

        # The token sequence may be shorter than H*W; add only what is needed.
        return content + pos[:, : content.shape[1], :]
376
+
377
+
378
class LabelEmbedding(nn.Module):
    """
    Embeds class labels into vector representations. Also handles label dropout
    for classifier-free guidance.

    Args:
        num_classes (`int`): The number of classes.
        hidden_size (`int`): The size of the vector embeddings.
        dropout_prob (`float`): The probability of dropping a label.
    """

    def __init__(self, num_classes, hidden_size, dropout_prob):
        super().__init__()
        use_cfg_embedding = dropout_prob > 0
        # When dropout is enabled, reserve one extra row (index == num_classes)
        # as the "null" embedding used for dropped labels.
        self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
        self.num_classes = num_classes
        self.dropout_prob = dropout_prob

    def token_drop(self, labels, force_drop_ids=None):
        """
        Drops labels to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
        else:
            drop_ids = torch.tensor(force_drop_ids == 1)
        return torch.where(drop_ids, self.num_classes, labels)

    def forward(self, labels: torch.LongTensor, force_drop_ids=None):
        dropout_active = self.training and self.dropout_prob > 0
        if dropout_active or force_drop_ids is not None:
            labels = self.token_drop(labels, force_drop_ids)
        return self.embedding_table(labels)
412
+
413
+
414
class TextImageProjection(nn.Module):
    """Expands one image embedding into `num_image_text_embeds` context tokens
    and concatenates them in front of the projected text embeddings."""

    def __init__(
        self,
        text_embed_dim: int = 1024,
        image_embed_dim: int = 768,
        cross_attention_dim: int = 768,
        num_image_text_embeds: int = 10,
    ):
        super().__init__()

        self.num_image_text_embeds = num_image_text_embeds
        self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
        self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim)

    def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
        batch_size = text_embeds.shape[0]

        # A single image vector becomes `num_image_text_embeds` tokens.
        image_tokens = self.image_embeds(image_embeds)
        image_tokens = image_tokens.reshape(batch_size, self.num_image_text_embeds, -1)

        text_tokens = self.text_proj(text_embeds)

        # Image tokens first, then the text sequence.
        return torch.cat([image_tokens, text_tokens], dim=1)
439
+
440
+
441
class ImageProjection(nn.Module):
    """Expands a single image embedding into `num_image_text_embeds`
    layer-normalized context tokens of width `cross_attention_dim`."""

    def __init__(
        self,
        image_embed_dim: int = 768,
        cross_attention_dim: int = 768,
        num_image_text_embeds: int = 32,
    ):
        super().__init__()

        self.num_image_text_embeds = num_image_text_embeds
        self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
        self.norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, image_embeds: torch.FloatTensor):
        batch_size = image_embeds.shape[0]

        tokens = self.image_embeds(image_embeds)
        tokens = tokens.reshape(batch_size, self.num_image_text_embeds, -1)
        return self.norm(tokens)
462
+
463
+
464
class IPAdapterFullImageProjection(nn.Module):
    """Single feed-forward projection of image embeddings, used by the "full"
    IP-Adapter variant."""

    def __init__(self, image_embed_dim=1024, cross_attention_dim=1024):
        super().__init__()
        from .attention import FeedForward  # lazy import avoids a circular dependency

        self.ff = FeedForward(image_embed_dim, cross_attention_dim, mult=1, activation_fn="gelu")
        self.norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, image_embeds: torch.FloatTensor):
        projected = self.ff(image_embeds)
        return self.norm(projected)
474
+
475
+
476
class CombinedTimestepLabelEmbeddings(nn.Module):
    """DiT-style conditioning vector: the sum of a timestep embedding and a
    class-label embedding (with optional label dropout for CFG)."""

    def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1):
        super().__init__()

        self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1)
        self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
        self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob)

    def forward(self, timestep, class_labels, hidden_dtype=None):
        # Sinusoidal projection -> MLP, cast to the model's working dtype.
        timesteps_emb = self.timestep_embedder(self.time_proj(timestep).to(dtype=hidden_dtype))  # (N, D)
        label_emb = self.class_embedder(class_labels)  # (N, D)
        return timesteps_emb + label_emb  # (N, D)
493
+
494
+
495
class TextTimeEmbedding(nn.Module):
    """Pools a text-encoder sequence with attention pooling and projects the
    pooled vector to the time-embedding width."""

    def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64):
        super().__init__()
        self.norm1 = nn.LayerNorm(encoder_dim)
        self.pool = AttentionPooling(num_heads, encoder_dim)
        self.proj = nn.Linear(encoder_dim, time_embed_dim)
        self.norm2 = nn.LayerNorm(time_embed_dim)

    def forward(self, hidden_states):
        pooled = self.pool(self.norm1(hidden_states))
        return self.norm2(self.proj(pooled))
509
+
510
+
511
class TextImageTimeEmbedding(nn.Module):
    """Sums projected text and image embeddings into one time-embedding-sized
    conditioning vector."""

    def __init__(self, text_embed_dim: int = 768, image_embed_dim: int = 768, time_embed_dim: int = 1536):
        super().__init__()
        self.text_proj = nn.Linear(text_embed_dim, time_embed_dim)
        self.text_norm = nn.LayerNorm(time_embed_dim)
        self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)

    def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
        # Only the text branch is layer-normalized; the image branch is a bare
        # projection (matches the original checkpoint layout).
        text_part = self.text_norm(self.text_proj(text_embeds))
        image_part = self.image_proj(image_embeds)
        return image_part + text_part
527
+
528
+
529
class ImageTimeEmbedding(nn.Module):
    """Projects and layer-normalizes an image embedding to the time-embedding width."""

    def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
        super().__init__()
        self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
        self.image_norm = nn.LayerNorm(time_embed_dim)

    def forward(self, image_embeds: torch.FloatTensor):
        return self.image_norm(self.image_proj(image_embeds))
540
+
541
+
542
class ImageHintTimeEmbedding(nn.Module):
    """Projects an image embedding to time-embedding width and, in parallel,
    encodes a ControlNet-style hint image into a 4-channel latent (three
    stride-2 stages => 8x spatial downsampling)."""

    def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
        super().__init__()
        self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
        self.image_norm = nn.LayerNorm(time_embed_dim)

        # (in_channels, out_channels, stride) for each conv stage; every conv is
        # followed by SiLU, and module order matches the original checkpoints.
        channel_plan = [(3, 16, 1), (16, 16, 1), (16, 32, 2), (32, 32, 1), (32, 96, 2), (96, 96, 1), (96, 256, 2)]
        stages = []
        for in_ch, out_ch, stride in channel_plan:
            stages.append(nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride))
            stages.append(nn.SiLU())
        stages.append(nn.Conv2d(256, 4, 3, padding=1))
        self.input_hint_block = nn.Sequential(*stages)

    def forward(self, image_embeds: torch.FloatTensor, hint: torch.FloatTensor):
        time_image_embeds = self.image_norm(self.image_proj(image_embeds))
        hint = self.input_hint_block(hint)
        return time_image_embeds, hint
571
+
572
+
573
class AttentionPooling(nn.Module):
    # Copied from https://github.com/deep-floyd/IF/blob/2f91391f27dd3c468bf174be5805b4cc92980c0b/deepfloyd_if/model/nn.py#L54
    # Pools a (bs, length, width) sequence into a single (bs, width) vector:
    # a "class token" query (sequence mean plus a learned positional offset)
    # attends over the sequence (class token included) with multi-head attention.

    def __init__(self, num_heads, embed_dim, dtype=None):
        super().__init__()
        self.dtype = dtype
        self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
        self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
        self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
        self.num_heads = num_heads
        # NOTE: assumes embed_dim is divisible by num_heads — TODO confirm callers.
        self.dim_per_head = embed_dim // self.num_heads

    def forward(self, x):
        bs, length, width = x.size()

        def shape(x):
            # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
            x = x.view(bs, -1, self.num_heads, self.dim_per_head)
            # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
            x = x.transpose(1, 2)
            # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
            x = x.reshape(bs * self.num_heads, -1, self.dim_per_head)
            # (bs*n_heads, length, dim_per_head) --> (bs*n_heads, dim_per_head, length)
            x = x.transpose(1, 2)
            return x

        # Query token: per-example mean of the sequence plus a learned offset.
        class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype)
        x = torch.cat([class_token, x], dim=1)  # (bs, length+1, width)

        # (bs*n_heads, class_token_length, dim_per_head)
        q = shape(self.q_proj(class_token))
        # (bs*n_heads, length+class_token_length, dim_per_head)
        k = shape(self.k_proj(x))
        v = shape(self.v_proj(x))

        # (bs*n_heads, class_token_length, length+class_token_length):
        scale = 1 / math.sqrt(math.sqrt(self.dim_per_head))
        weight = torch.einsum("bct,bcs->bts", q * scale, k * scale)  # More stable with f16 than dividing afterwards
        # Softmax in fp32 for numerical stability, then cast back.
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)

        # (bs*n_heads, dim_per_head, class_token_length)
        a = torch.einsum("bts,bcs->bct", weight, v)

        # (bs, length+1, width)
        # NOTE: reshape(bs, -1, 1) is only valid because the query is a single
        # class token, so the attended output has exactly one position.
        a = a.reshape(bs, -1, 1).transpose(1, 2)

        return a[:, 0, :]  # cls_token
621
+
622
+
623
def get_fourier_embeds_from_boundingbox(embed_dim, box):
    """
    Args:
        embed_dim: number of Fourier frequencies per coordinate.
        box: a 3-D tensor [B x N x 4] of bounding boxes for the GLIGEN pipeline.
    Returns:
        [B x N x embed_dim*2*4] tensor of positional embeddings
        (2: sin/cos, 4: xyxy coordinates).
    """
    batch_size, num_boxes = box.shape[:2]

    # Geometric frequency ladder: 100 ** (i / embed_dim) for i in [0, embed_dim).
    freqs = 100 ** (torch.arange(embed_dim) / embed_dim)
    freqs = freqs[None, None, None].to(device=box.device, dtype=box.dtype)

    angles = freqs * box.unsqueeze(-1)  # (B, N, 4, embed_dim)

    emb = torch.stack((angles.sin(), angles.cos()), dim=-1)  # (B, N, 4, embed_dim, 2)
    # Reorder so frequency leads before flattening: (B, N, embed_dim, 2, 4).
    emb = emb.permute(0, 1, 3, 4, 2).reshape(batch_size, num_boxes, embed_dim * 2 * 4)

    return emb
642
+
643
+
644
class GLIGENTextBoundingboxProjection(nn.Module):
    """Projects GLIGEN grounding inputs (bounding boxes plus text and/or image
    features) into `out_dim`-sized grounding tokens for the attention layers.

    Padding entries (mask == 0) are replaced with learned "null" embeddings so
    that every slot still contributes a valid token.
    """

    def __init__(self, positive_len, out_dim, feature_type="text-only", fourier_freqs=8):
        super().__init__()
        self.positive_len = positive_len
        self.out_dim = out_dim

        self.fourier_embedder_dim = fourier_freqs
        self.position_dim = fourier_freqs * 2 * 4  # 2: sin/cos, 4: xyxy

        if isinstance(out_dim, tuple):
            out_dim = out_dim[0]

        if feature_type == "text-only":
            self.linears = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))

        elif feature_type == "text-image":
            # Separate MLPs for text-conditioned and image-conditioned tokens.
            self.linears_text = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.linears_image = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
            self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))

        self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))

    def forward(
        self,
        boxes,
        masks,
        positive_embeddings=None,
        phrases_masks=None,
        image_masks=None,
        phrases_embeddings=None,
        image_embeddings=None,
    ):
        masks = masks.unsqueeze(-1)

        # embedding position (it may include padding as placeholder)
        xyxy_embedding = get_fourier_embeds_from_boundingbox(self.fourier_embedder_dim, boxes)  # B*N*4 -> B*N*C

        # learnable null embedding
        xyxy_null = self.null_position_feature.view(1, 1, -1)

        # replace padding with learnable null embedding
        xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null

        # positionet with text only information
        if positive_embeddings is not None:
            # learnable null embedding
            positive_null = self.null_positive_feature.view(1, 1, -1)

            # replace padding with learnable null embedding
            positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null

            objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1))

        # positionet with text and image information
        else:
            phrases_masks = phrases_masks.unsqueeze(-1)
            image_masks = image_masks.unsqueeze(-1)

            # learnable null embedding
            text_null = self.null_text_feature.view(1, 1, -1)
            image_null = self.null_image_feature.view(1, 1, -1)

            # replace padding with learnable null embedding
            phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null
            image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null

            objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1))
            objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1))
            # text tokens followed by image tokens along the sequence dimension
            objs = torch.cat([objs_text, objs_image], dim=1)

        return objs
735
+
736
+
737
class PixArtAlphaCombinedTimestepSizeEmbeddings(nn.Module):
    """
    For PixArt-Alpha: combines the timestep embedding with optional resolution
    and aspect-ratio micro-conditioning embeddings.

    Reference:
    https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L164C9-L168C29
    """

    def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool = False):
        super().__init__()

        self.outdim = size_emb_dim
        self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)

        self.use_additional_conditions = use_additional_conditions
        if use_additional_conditions:
            self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
            self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)
            self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)

    def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype):
        timesteps_emb = self.timestep_embedder(self.time_proj(timestep).to(dtype=hidden_dtype))  # (N, D)

        if not self.use_additional_conditions:
            return timesteps_emb

        # Resolution and aspect ratio use the same sinusoidal projection as the
        # timestep, then are folded back to (batch, -1) and concatenated.
        res_emb = self.additional_condition_proj(resolution.flatten()).to(hidden_dtype)
        res_emb = self.resolution_embedder(res_emb).reshape(batch_size, -1)
        ar_emb = self.additional_condition_proj(aspect_ratio.flatten()).to(hidden_dtype)
        ar_emb = self.aspect_ratio_embedder(ar_emb).reshape(batch_size, -1)
        return timesteps_emb + torch.cat([res_emb, ar_emb], dim=1)
772
+
773
+
774
class PixArtAlphaTextProjection(nn.Module):
    """
    Projects caption embeddings through a two-layer GELU(tanh) MLP.

    Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py

    NOTE(review): `num_tokens` is accepted for configuration compatibility but
    is not used by this module.
    """

    def __init__(self, in_features, hidden_size, num_tokens=120):
        super().__init__()
        self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True)
        self.act_1 = nn.GELU(approximate="tanh")
        self.linear_2 = nn.Linear(in_features=hidden_size, out_features=hidden_size, bias=True)

    def forward(self, caption):
        return self.linear_2(self.act_1(self.linear_1(caption)))
792
+
793
+
794
class IPAdapterPlusImageProjection(nn.Module):
    """Resampler of IP-Adapter Plus: a small perceiver-style stack in which a
    fixed set of learned latent queries attends over the projected image
    features, producing `num_queries` output tokens.

    Args:
    ----
    embed_dims (int): The feature dimension. Defaults to 768.
    output_dims (int): The number of output channels, that is the same
        number of the channels in the
        `unet.config.cross_attention_dim`. Defaults to 1024.
    hidden_dims (int): The number of hidden channels. Defaults to 1280.
    depth (int): The number of blocks. Defaults to 4.
    dim_head (int): The number of head channels. Defaults to 64.
    heads (int): Parallel attention heads. Defaults to 16.
    num_queries (int): The number of queries. Defaults to 8.
    ffn_ratio (float): The expansion ratio of feedforward network hidden
        layer channels. Defaults to 4.
    """

    def __init__(
        self,
        embed_dims: int = 768,
        output_dims: int = 1024,
        hidden_dims: int = 1280,
        depth: int = 4,
        dim_head: int = 64,
        heads: int = 16,
        num_queries: int = 8,
        ffn_ratio: float = 4,
    ) -> None:
        super().__init__()
        from .attention import FeedForward  # Lazy import to avoid circular import

        # Learned latent queries, shared across the batch.
        self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dims) / hidden_dims**0.5)

        self.proj_in = nn.Linear(embed_dims, hidden_dims)

        self.proj_out = nn.Linear(hidden_dims, output_dims)
        self.norm_out = nn.LayerNorm(output_dims)

        # Each block: [norm for image features, norm for latents, attention, norm+FFN].
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        nn.LayerNorm(hidden_dims),
                        nn.LayerNorm(hidden_dims),
                        Attention(
                            query_dim=hidden_dims,
                            dim_head=dim_head,
                            heads=heads,
                            out_bias=False,
                        ),
                        nn.Sequential(
                            nn.LayerNorm(hidden_dims),
                            FeedForward(hidden_dims, hidden_dims, activation_fn="gelu", mult=ffn_ratio, bias=False),
                        ),
                    ]
                )
            )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass.

        Args:
        ----
        x (torch.Tensor): Input Tensor of image features.

        Returns:
        -------
        torch.Tensor: Output Tensor of shape (batch, num_queries, output_dims).
        """
        # Expand the shared latent queries to the batch size.
        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)

        for ln0, ln1, attn, ff in self.layers:
            residual = latents

            encoder_hidden_states = ln0(x)
            latents = ln1(latents)
            # Latents attend over [image features ; latents] (keys include the queries).
            encoder_hidden_states = torch.cat([encoder_hidden_states, latents], dim=-2)
            latents = attn(latents, encoder_hidden_states) + residual
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)
880
+
881
+
882
class MultiIPAdapterImageProjection(nn.Module):
    """Holds one projection layer per IP-Adapter and applies each layer to its
    corresponding entry in a list of image embeddings."""

    def __init__(self, IPAdapterImageProjectionLayers: Union[List[nn.Module], Tuple[nn.Module]]):
        super().__init__()
        self.image_projection_layers = nn.ModuleList(IPAdapterImageProjectionLayers)

    def forward(self, image_embeds: List[torch.FloatTensor]):
        # Accepted inputs:
        # 1. a tensor (deprecated) with shape [batch_size, embed_dim] or [batch_size, sequence_length, embed_dim]
        # 2. list of `n` tensors where `n` is the number of ip-adapters, each with shape
        #    [batch_size, num_images, embed_dim] or [batch_size, num_images, sequence_length, embed_dim]
        if not isinstance(image_embeds, list):
            deprecation_message = (
                "You have passed a tensor as `image_embeds`.This is deprecated and will be removed in a future release."
                " Please make sure to update your script to pass `image_embeds` as a list of tensors to supress this warning."
            )
            deprecate("image_embeds not a list", "1.0.0", deprecation_message, standard_warn=False)
            image_embeds = [image_embeds.unsqueeze(1)]

        if len(image_embeds) != len(self.image_projection_layers):
            raise ValueError(
                f"image_embeds must have the same length as image_projection_layers, got {len(image_embeds)} and {len(self.image_projection_layers)}"
            )

        projected_image_embeds = []
        for embed, projection_layer in zip(image_embeds, self.image_projection_layers):
            batch_size, num_images = embed.shape[0], embed.shape[1]
            # Fold the per-adapter image axis into the batch, project, unfold.
            flat = embed.reshape((batch_size * num_images,) + embed.shape[2:])
            flat = projection_layer(flat)
            projected_image_embeds.append(flat.reshape((batch_size, num_images) + flat.shape[1:]))

        return projected_image_embeds
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/embeddings_flax.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+
16
+ import flax.linen as nn
17
+ import jax.numpy as jnp
18
+
19
+
20
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return Tensor2Tensor-style sinusoidal positional encodings.

    Args:
        timesteps: a 1-D array of N indices, one per batch element. These may
            be fractional.
        embedding_dim: the number of output channels (must be even).
        min_timescale: the smallest time unit (should probably be 0.0).
        max_timescale: the largest time unit.

    Returns:
        An array of timing signals with shape [N, embedding_dim].
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"

    half_dim = float(embedding_dim // 2)
    log_increment = math.log(max_timescale / min_timescale) / (half_dim - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(half_dim, dtype=jnp.float32) * -log_increment)

    # Outer product of timesteps and inverse timescales, then apply the scale.
    scaled_time = scale * (timesteps[:, None] * inv_timescales[None, :])

    halves = (jnp.sin(scaled_time), jnp.cos(scaled_time))
    if flip_sin_to_cos:
        halves = (halves[1], halves[0])
    signal = jnp.concatenate(halves, axis=1)
    return jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
56
+
57
+
58
class FlaxTimestepEmbedding(nn.Module):
    r"""
    Time step Embedding Module. Learns embeddings for input time steps.

    Implemented as a two-layer MLP (Dense -> SiLU -> Dense); the layer names
    "linear_1"/"linear_2" are fixed so checkpoints map onto a stable parameter
    tree.

    Args:
        time_embed_dim (`int`, *optional*, defaults to `32`):
            Time step embedding dimension
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        # Both Dense layers project to `time_embed_dim`; no change of width.
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
78
+
79
+
80
class FlaxTimesteps(nn.Module):
    r"""
    Wrapper Module for sinusoidal Time step Embeddings as described in
    https://arxiv.org/abs/2006.11239

    Args:
        dim (`int`, *optional*, defaults to `32`):
            Time step embedding dimension
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        # Stateless: delegates entirely to the sinusoidal-embedding helper,
        # so this module has no learned parameters.
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/lora.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ # IMPORTANT: #
17
+ ###################################################################
18
+ # ----------------------------------------------------------------#
19
+ # This file is deprecated and will be removed soon #
20
+ # (as soon as PEFT will become a required dependency for LoRA) #
21
+ # ----------------------------------------------------------------#
22
+ ###################################################################
23
+
24
+ from typing import Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ from torch import nn
29
+
30
+ from ..utils import deprecate, logging
31
+ from ..utils.import_utils import is_transformers_available
32
+
33
+
34
+ if is_transformers_available():
35
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
def text_encoder_attn_modules(text_encoder):
    """Return (qualified_name, module) pairs for each self-attention block of a
    CLIP text encoder.

    Raises:
        ValueError: if `text_encoder` is not a CLIP text model.
    """
    if not isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")

    return [
        (f"text_model.encoder.layers.{idx}.self_attn", layer.self_attn)
        for idx, layer in enumerate(text_encoder.text_model.encoder.layers)
    ]
53
+
54
+
55
def text_encoder_mlp_modules(text_encoder):
    """Return (qualified_name, module) pairs for each MLP block of a CLIP text
    encoder.

    Raises:
        ValueError: if `text_encoder` is not a CLIP text model.
    """
    if not isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        raise ValueError(f"do not know how to get mlp modules for: {text_encoder.__class__.__name__}")

    return [
        (f"text_model.encoder.layers.{idx}.mlp", layer.mlp)
        for idx, layer in enumerate(text_encoder.text_model.encoder.layers)
    ]
67
+
68
+
69
def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0):
    """Set `lora_scale` on every patched attention and MLP projection of a CLIP
    text encoder; modules that were never patched are left untouched."""
    for _, attn_module in text_encoder_attn_modules(text_encoder):
        # q_proj is used as the sentinel: if it was patched, all four were.
        if isinstance(attn_module.q_proj, PatchedLoraProjection):
            for proj_name in ("q_proj", "k_proj", "v_proj", "out_proj"):
                getattr(attn_module, proj_name).lora_scale = lora_scale

    for _, mlp_module in text_encoder_mlp_modules(text_encoder):
        # fc1 is the sentinel for the MLP pair.
        if isinstance(mlp_module.fc1, PatchedLoraProjection):
            mlp_module.fc1.lora_scale = lora_scale
            mlp_module.fc2.lora_scale = lora_scale
81
+
82
+
83
class PatchedLoraProjection(torch.nn.Module):
    # Deprecated shim that wraps a regular nn.Linear and adds a scaled LoRA
    # residual on top of its output. `lora_linear_layer` is set to None once
    # the adapter has been fused into the wrapped layer's weight.

    def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
        deprecation_message = "Use of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("PatchedLoraProjection", "1.0.0", deprecation_message)

        super().__init__()
        from ..models.lora import LoRALinearLayer

        self.regular_linear_layer = regular_linear_layer

        device = self.regular_linear_layer.weight.device

        # Default the adapter dtype to the wrapped layer's weight dtype.
        if dtype is None:
            dtype = self.regular_linear_layer.weight.dtype

        # The adapter mirrors the wrapped layer's in/out features and lives on
        # the same device.
        self.lora_linear_layer = LoRALinearLayer(
            self.regular_linear_layer.in_features,
            self.regular_linear_layer.out_features,
            network_alpha=network_alpha,
            device=device,
            dtype=dtype,
            rank=rank,
        )

        self.lora_scale = lora_scale

    # overwrite PyTorch's `state_dict` to be sure that only the 'regular_linear_layer' weights are saved
    # when saving the whole text encoder model and when LoRA is unloaded or fused
    def state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        if self.lora_linear_layer is None:
            return self.regular_linear_layer.state_dict(
                *args, destination=destination, prefix=prefix, keep_vars=keep_vars
            )

        return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars)

    def _fuse_lora(self, lora_scale=1.0, safe_fusing=False):
        """Fold the scaled LoRA delta (up @ down) into the wrapped layer's
        weight, then drop the adapter. Math is done in float32 and cast back."""
        if self.lora_linear_layer is None:
            return

        dtype, device = self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device

        w_orig = self.regular_linear_layer.weight.data.float()
        w_up = self.lora_linear_layer.up.weight.data.float()
        w_down = self.lora_linear_layer.down.weight.data.float()

        # Apply the alpha/rank rescaling once, on the up matrix.
        if self.lora_linear_layer.network_alpha is not None:
            w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank

        # bmm over a singleton batch: (1, out, rank) @ (1, rank, in) -> (out, in)
        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_linear_layer = None

        # offload the up and down matrices to CPU to not blow the memory;
        # they are kept so `_unfuse_lora` can reverse the fusion later.
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self.lora_scale = lora_scale

    def _unfuse_lora(self):
        """Subtract the previously fused LoRA delta back out of the wrapped
        layer's weight; no-op unless `_fuse_lora` ran before."""
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.regular_linear_layer.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, input):
        # Treat a missing scale as the neutral 1.0.
        if self.lora_scale is None:
            self.lora_scale = 1.0
        # After fusion the LoRA contribution already lives in the base weight.
        if self.lora_linear_layer is None:
            return self.regular_linear_layer(input)
        return self.regular_linear_layer(input) + (self.lora_scale * self.lora_linear_layer(input))
173
+
174
+
175
class LoRALinearLayer(nn.Module):
    r"""
    LoRA adapter for a linear layer: a rank-`rank` down projection followed by
    an up projection, optionally rescaled by `network_alpha / rank`.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        network_alpha (`float`, `optional`, defaults to `None`):
            Stabilization factor with the same meaning as the `--network_alpha`
            option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        device (`torch.device`, `optional`, defaults to `None`):
            The device to use for the layer's weights.
        dtype (`torch.dtype`, `optional`, defaults to `None`):
            The dtype to use for the layer's weights.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        network_alpha: Optional[float] = None,
        device: Optional[Union[torch.device, str]] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()

        deprecation_message = "Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRALinearLayer", "1.0.0", deprecation_message)

        self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
        self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
        # Same meaning as `--network_alpha` in the kohya-ss trainer script. See
        # https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        self.network_alpha = network_alpha
        self.rank = rank
        self.out_features = out_features
        self.in_features = in_features

        # Gaussian init for down, zeros for up: the adapter starts as a no-op.
        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        compute_dtype = self.down.weight.dtype

        # Compute in the adapter's dtype, then cast back to the caller's.
        lora_out = self.up(self.down(hidden_states.to(compute_dtype)))
        if self.network_alpha is not None:
            lora_out *= self.network_alpha / self.rank

        return lora_out.to(input_dtype)
233
+
234
+
235
class LoRAConv2dLayer(nn.Module):
    r"""
    LoRA adapter for a 2D convolution: a rank-`rank` down convolution followed
    by a fixed 1x1 up convolution, optionally rescaled by `network_alpha / rank`.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The kernel size of the down convolution.
        stride (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The stride of the down convolution.
        padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0):
            The padding of the down convolution.
        network_alpha (`float`, `optional`, defaults to `None`):
            Stabilization factor with the same meaning as the `--network_alpha`
            option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        kernel_size: Union[int, Tuple[int, int]] = (1, 1),
        stride: Union[int, Tuple[int, int]] = (1, 1),
        padding: Union[int, Tuple[int, int], str] = 0,
        network_alpha: Optional[float] = None,
    ):
        super().__init__()

        deprecation_message = "Use of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRAConv2dLayer", "1.0.0", deprecation_message)

        self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        # The up projection always uses a fixed 1x1 kernel, matching the
        # official kohya_ss trainer:
        # https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129
        self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False)

        # Same meaning as `--network_alpha` in the kohya-ss trainer script. See
        # https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        self.network_alpha = network_alpha
        self.rank = rank

        # Gaussian init for down, zeros for up: the adapter starts as a no-op.
        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        compute_dtype = self.down.weight.dtype

        # Compute in the adapter's dtype, then cast back to the caller's.
        lora_out = self.up(self.down(hidden_states.to(compute_dtype)))
        if self.network_alpha is not None:
            lora_out *= self.network_alpha / self.rank

        return lora_out.to(input_dtype)
297
+
298
+
299
class LoRACompatibleConv(nn.Conv2d):
    """
    A convolutional layer that can be used with LoRA.
    """

    def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):
        deprecation_message = "Use of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRACompatibleConv", "1.0.0", deprecation_message)

        super().__init__(*args, **kwargs)
        self.lora_layer = lora_layer

    def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):
        # Attach (or detach, with None) a LoRA adapter after construction.
        deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("set_lora_layer", "1.0.0", deprecation_message)

        self.lora_layer = lora_layer

    def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
        """Fold the scaled LoRA delta into this conv's own weight, then drop
        the adapter. Math is done in float32 and cast back."""
        if self.lora_layer is None:
            return

        dtype, device = self.weight.data.dtype, self.weight.data.device

        w_orig = self.weight.data.float()
        w_up = self.lora_layer.up.weight.data.float()
        w_down = self.lora_layer.down.weight.data.float()

        # Apply the alpha/rank rescaling once, on the up matrix.
        if self.lora_layer.network_alpha is not None:
            w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank

        # Flatten both 4-D kernels to 2-D, multiply, then restore the conv
        # weight's original shape.
        fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1))
        fusion = fusion.reshape((w_orig.shape))
        fused_weight = w_orig + (lora_scale * fusion)

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_layer = None

        # offload the up and down matrices to CPU to not blow the memory;
        # kept so `_unfuse_lora` can reverse the fusion later.
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self._lora_scale = lora_scale

    def _unfuse_lora(self):
        """Subtract the previously fused LoRA delta back out of this conv's
        weight; no-op unless `_fuse_lora` ran before."""
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.weight.data
        dtype, device = fused_weight.data.dtype, fused_weight.data.device

        self.w_up = self.w_up.to(device=device).float()
        self.w_down = self.w_down.to(device).float()

        fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1))
        fusion = fusion.reshape((fused_weight.shape))
        unfused_weight = fused_weight.float() - (self._lora_scale * fusion)
        self.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        # Mirror nn.Conv2d's handling of non-zero padding modes: pad manually,
        # then convolve with no extra padding.
        if self.padding_mode != "zeros":
            hidden_states = F.pad(hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode)
            padding = (0, 0)
        else:
            padding = self.padding

        original_outputs = F.conv2d(
            hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups
        )

        if self.lora_layer is None:
            return original_outputs
        else:
            # Note: the LoRA branch sees the (possibly manually padded) input.
            return original_outputs + (scale * self.lora_layer(hidden_states))
384
+
385
+
386
class LoRACompatibleLinear(nn.Linear):
    """
    A Linear layer that can be used with LoRA.
    """

    def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):
        deprecation_message = "Use of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRACompatibleLinear", "1.0.0", deprecation_message)

        super().__init__(*args, **kwargs)
        self.lora_layer = lora_layer

    def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):
        # Attach (or detach, with None) a LoRA adapter after construction.
        deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("set_lora_layer", "1.0.0", deprecation_message)
        self.lora_layer = lora_layer

    def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
        """Fold the scaled LoRA delta (up @ down) into this layer's own weight,
        then drop the adapter. Math is done in float32 and cast back."""
        if self.lora_layer is None:
            return

        dtype, device = self.weight.data.dtype, self.weight.data.device

        w_orig = self.weight.data.float()
        w_up = self.lora_layer.up.weight.data.float()
        w_down = self.lora_layer.down.weight.data.float()

        # Apply the alpha/rank rescaling once, on the up matrix.
        if self.lora_layer.network_alpha is not None:
            w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank

        # bmm over a singleton batch: (1, out, rank) @ (1, rank, in) -> (out, in)
        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_layer = None

        # offload the up and down matrices to CPU to not blow the memory;
        # kept so `_unfuse_lora` can reverse the fusion later.
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self._lora_scale = lora_scale

    def _unfuse_lora(self):
        """Subtract the previously fused LoRA delta back out of this layer's
        weight; no-op unless `_fuse_lora` ran before."""
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        # Plain Linear when no adapter is attached; otherwise add the scaled
        # LoRA residual on top of the base projection.
        if self.lora_layer is None:
            out = super().forward(hidden_states)
            return out
        else:
            out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))
            return out
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_flax_pytorch_utils.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch - Flax general utilities."""
16
+ import re
17
+
18
+ import jax.numpy as jnp
19
+ from flax.traverse_util import flatten_dict, unflatten_dict
20
+ from jax.random import PRNGKey
21
+
22
+ from ..utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
def rename_key(key):
    """Rewrite every `name.<digits>` segment as `name_<digits>` (PyTorch list
    indexing -> Flax naming); the rest of the key is left untouched."""
    for match in re.findall(r"\w+[.]\d+", key):
        key = key.replace(match, match.replace(".", "_"))
    return key
34
+
35
+
36
+ #####################
37
+ # PyTorch => Flax #
38
+ #####################
39
+
40
+
41
+ # Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69
42
+ # and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py
43
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape
    tensor if necessary.

    Args:
        pt_tuple_key: PyTorch parameter name split into a tuple of path parts.
        pt_tensor: the PyTorch weight as a numpy array.
        random_flax_state_dict: flattened randomly-initialized Flax params,
            used to decide which rename applies.

    Returns:
        A `(flax_tuple_key, tensor)` pair with the tensor transposed/permuted
        to Flax layout where needed.
    """
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)

    # rename attention layers
    if len(pt_tuple_key) > 1:
        for rename_from, rename_to in (
            ("to_out_0", "proj_attn"),
            ("to_k", "key"),
            ("to_v", "value"),
            ("to_q", "query"),
        ):
            if pt_tuple_key[-2] == rename_from:
                weight_name = pt_tuple_key[-1]
                weight_name = "kernel" if weight_name == "weight" else weight_name
                renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name)
                if renamed_pt_tuple_key in random_flax_state_dict:
                    assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape
                    return renamed_pt_tuple_key, pt_tensor.T

    # norm bias stored as "scale" on the Flax side
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        # Bug fix: return the "embedding" key itself. Previously this branch
        # returned the stale `renamed_pt_tuple_key` (still holding the earlier
        # "scale" rename), producing a wrong key for embedding weights.
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch (O, I, H, W) -> Flax (H, W, I, O)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose (out, in) -> (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # fallback: keep the key untouched
    return pt_tuple_key, pt_tensor
104
+
105
+
106
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into an unflattened Flax params tree for
    `flax_model`, renaming and reshaping weights as needed."""
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {name: tensor.numpy() for name, tensor in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params and flatten
    # them so renamed keys can be looked up directly.
    random_flax_state_dict = flatten_dict(flax_model.init_weights(PRNGKey(init_key)))

    flax_state_dict = {}
    for pt_key, pt_tensor in pt_state_dict.items():
        # Rename to Flax conventions, then rename/reshape the tensor itself.
        pt_tuple_key = tuple(rename_key(pt_key).split("."))
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            expected_shape = random_flax_state_dict[flax_key].shape
            if flax_tensor.shape != expected_shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{expected_shape}, but is {flax_tensor.shape}."
                )

        # Unexpected weights are added anyway so a warning can be raised downstream.
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_flax_utils.py ADDED
@@ -0,0 +1,566 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from pickle import UnpicklingError
18
+ from typing import Any, Dict, Union
19
+
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import msgpack.exceptions
23
+ from flax.core.frozen_dict import FrozenDict, unfreeze
24
+ from flax.serialization import from_bytes, to_bytes
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from huggingface_hub import create_repo, hf_hub_download
27
+ from huggingface_hub.utils import (
28
+ EntryNotFoundError,
29
+ RepositoryNotFoundError,
30
+ RevisionNotFoundError,
31
+ validate_hf_hub_args,
32
+ )
33
+ from requests import HTTPError
34
+
35
+ from .. import __version__, is_torch_available
36
+ from ..utils import (
37
+ CONFIG_NAME,
38
+ FLAX_WEIGHTS_NAME,
39
+ HUGGINGFACE_CO_RESOLVE_ENDPOINT,
40
+ WEIGHTS_NAME,
41
+ PushToHubMixin,
42
+ logging,
43
+ )
44
+ from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
class FlaxModelMixin(PushToHubMixin):
    r"""
    Base class for all Flax models.

    [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and
    saving models.

        - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`].
    """

    config_name = CONFIG_NAME
    # Config entries that are written automatically on save and should never be
    # treated as user-provided model arguments.
    _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"]
    # Arguments consumed by flax.linen.Module itself; they must be excluded when
    # the model config is (de)serialized.
    _flax_internal_args = ["name", "parent", "dtype"]
63
+
64
+ @classmethod
65
+ def _from_config(cls, config, **kwargs):
66
+ """
67
+ All context managers that the model should be initialized under go here.
68
+ """
69
+ return cls(config, **kwargs)
70
+
71
+ def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
72
+ """
73
+ Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
74
+ """
75
+
76
+ # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
77
+ def conditional_cast(param):
78
+ if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
79
+ param = param.astype(dtype)
80
+ return param
81
+
82
+ if mask is None:
83
+ return jax.tree_map(conditional_cast, params)
84
+
85
+ flat_params = flatten_dict(params)
86
+ flat_mask, _ = jax.tree_flatten(mask)
87
+
88
+ for masked, key in zip(flat_mask, flat_params.keys()):
89
+ if masked:
90
+ param = flat_params[key]
91
+ flat_params[key] = conditional_cast(param)
92
+
93
+ return unflatten_dict(flat_params)
94
+
95
+ def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
96
+ r"""
97
+ Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
98
+ the `params` in place.
99
+
100
+ This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full
101
+ half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.
102
+
103
+ Arguments:
104
+ params (`Union[Dict, FrozenDict]`):
105
+ A `PyTree` of model parameters.
106
+ mask (`Union[Dict, FrozenDict]`):
107
+ A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`
108
+ for params you want to cast, and `False` for those you want to skip.
109
+
110
+ Examples:
111
+
112
+ ```python
113
+ >>> from diffusers import FlaxUNet2DConditionModel
114
+
115
+ >>> # load model
116
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
117
+ >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
118
+ >>> params = model.to_bf16(params)
119
+ >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
120
+ >>> # then pass the mask as follows
121
+ >>> from flax import traverse_util
122
+
123
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
124
+ >>> flat_params = traverse_util.flatten_dict(params)
125
+ >>> mask = {
126
+ ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
127
+ ... for path in flat_params
128
+ ... }
129
+ >>> mask = traverse_util.unflatten_dict(mask)
130
+ >>> params = model.to_bf16(params, mask)
131
+ ```"""
132
+ return self._cast_floating_to(params, jnp.bfloat16, mask)
133
+
134
+ def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
135
+ r"""
136
+ Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
137
+ model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.
138
+
139
+ Arguments:
140
+ params (`Union[Dict, FrozenDict]`):
141
+ A `PyTree` of model parameters.
142
+ mask (`Union[Dict, FrozenDict]`):
143
+ A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`
144
+ for params you want to cast, and `False` for those you want to skip.
145
+
146
+ Examples:
147
+
148
+ ```python
149
+ >>> from diffusers import FlaxUNet2DConditionModel
150
+
151
+ >>> # Download model and configuration from huggingface.co
152
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
153
+ >>> # By default, the model params will be in fp32, to illustrate the use of this method,
154
+ >>> # we'll first cast to fp16 and back to fp32
155
+ >>> params = model.to_f16(params)
156
+ >>> # now cast back to fp32
157
+ >>> params = model.to_fp32(params)
158
+ ```"""
159
+ return self._cast_floating_to(params, jnp.float32, mask)
160
+
161
+ def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
162
+ r"""
163
+ Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the
164
+ `params` in place.
165
+
166
+ This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full
167
+ half-precision training or to save weights in float16 for inference in order to save memory and improve speed.
168
+
169
+ Arguments:
170
+ params (`Union[Dict, FrozenDict]`):
171
+ A `PyTree` of model parameters.
172
+ mask (`Union[Dict, FrozenDict]`):
173
+ A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`
174
+ for params you want to cast, and `False` for those you want to skip.
175
+
176
+ Examples:
177
+
178
+ ```python
179
+ >>> from diffusers import FlaxUNet2DConditionModel
180
+
181
+ >>> # load model
182
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
183
+ >>> # By default, the model params will be in fp32, to cast these to float16
184
+ >>> params = model.to_fp16(params)
185
+ >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)
186
+ >>> # then pass the mask as follows
187
+ >>> from flax import traverse_util
188
+
189
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
190
+ >>> flat_params = traverse_util.flatten_dict(params)
191
+ >>> mask = {
192
+ ... path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
193
+ ... for path in flat_params
194
+ ... }
195
+ >>> mask = traverse_util.unflatten_dict(mask)
196
+ >>> params = model.to_fp16(params, mask)
197
+ ```"""
198
+ return self._cast_floating_to(params, jnp.float16, mask)
199
+
200
    def init_weights(self, rng: jax.Array) -> Dict:
        # Abstract hook: concrete Flax models must build and return their initial
        # parameter PyTree from the given PRNG key.
        raise NotImplementedError(f"init_weights method has to be implemented for {self}")
202
+
203
+ @classmethod
204
+ @validate_hf_hub_args
205
+ def from_pretrained(
206
+ cls,
207
+ pretrained_model_name_or_path: Union[str, os.PathLike],
208
+ dtype: jnp.dtype = jnp.float32,
209
+ *model_args,
210
+ **kwargs,
211
+ ):
212
+ r"""
213
+ Instantiate a pretrained Flax model from a pretrained model configuration.
214
+
215
+ Parameters:
216
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
217
+ Can be either:
218
+
219
+ - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model
220
+ hosted on the Hub.
221
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
222
+ using [`~FlaxModelMixin.save_pretrained`].
223
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
224
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
225
+ `jax.numpy.bfloat16` (on TPUs).
226
+
227
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
228
+ specified, all the computation will be performed with the given `dtype`.
229
+
230
+ <Tip>
231
+
232
+ This only specifies the dtype of the *computation* and does not influence the dtype of model
233
+ parameters.
234
+
235
+ If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and
236
+ [`~FlaxModelMixin.to_bf16`].
237
+
238
+ </Tip>
239
+
240
+ model_args (sequence of positional arguments, *optional*):
241
+ All remaining positional arguments are passed to the underlying model's `__init__` method.
242
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
243
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
244
+ is not used.
245
+ force_download (`bool`, *optional*, defaults to `False`):
246
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
247
+ cached versions if they exist.
248
+ resume_download (`bool`, *optional*, defaults to `False`):
249
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
250
+ incompletely downloaded files are deleted.
251
+ proxies (`Dict[str, str]`, *optional*):
252
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
253
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
254
+ local_files_only(`bool`, *optional*, defaults to `False`):
255
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
256
+ won't be downloaded from the Hub.
257
+ revision (`str`, *optional*, defaults to `"main"`):
258
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
259
+ allowed by Git.
260
+ from_pt (`bool`, *optional*, defaults to `False`):
261
+ Load the model weights from a PyTorch checkpoint save file.
262
+ kwargs (remaining dictionary of keyword arguments, *optional*):
263
+ Can be used to update the configuration object (after it is loaded) and initiate the model (for
264
+ example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
265
+ automatically loaded:
266
+
267
+ - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying
268
+ model's `__init__` method (we assume all relevant updates to the configuration have already been
269
+ done).
270
+ - If a configuration is not provided, `kwargs` are first passed to the configuration class
271
+ initialization function [`~ConfigMixin.from_config`]. Each key of the `kwargs` that corresponds
272
+ to a configuration attribute is used to override said attribute with the supplied `kwargs` value.
273
+ Remaining keys that do not correspond to any configuration attribute are passed to the underlying
274
+ model's `__init__` function.
275
+
276
+ Examples:
277
+
278
+ ```python
279
+ >>> from diffusers import FlaxUNet2DConditionModel
280
+
281
+ >>> # Download model and configuration from huggingface.co and cache.
282
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
283
+ >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
284
+ >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/")
285
+ ```
286
+
287
+ If you get the error message below, you need to finetune the weights for your downstream task:
288
+
289
+ ```bash
290
+ Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
291
+ - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
292
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
293
+ ```
294
+ """
295
+ config = kwargs.pop("config", None)
296
+ cache_dir = kwargs.pop("cache_dir", None)
297
+ force_download = kwargs.pop("force_download", False)
298
+ from_pt = kwargs.pop("from_pt", False)
299
+ resume_download = kwargs.pop("resume_download", False)
300
+ proxies = kwargs.pop("proxies", None)
301
+ local_files_only = kwargs.pop("local_files_only", False)
302
+ token = kwargs.pop("token", None)
303
+ revision = kwargs.pop("revision", None)
304
+ subfolder = kwargs.pop("subfolder", None)
305
+
306
+ user_agent = {
307
+ "diffusers": __version__,
308
+ "file_type": "model",
309
+ "framework": "flax",
310
+ }
311
+
312
+ # Load config if we don't provide one
313
+ if config is None:
314
+ config, unused_kwargs = cls.load_config(
315
+ pretrained_model_name_or_path,
316
+ cache_dir=cache_dir,
317
+ return_unused_kwargs=True,
318
+ force_download=force_download,
319
+ resume_download=resume_download,
320
+ proxies=proxies,
321
+ local_files_only=local_files_only,
322
+ token=token,
323
+ revision=revision,
324
+ subfolder=subfolder,
325
+ **kwargs,
326
+ )
327
+
328
+ model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs)
329
+
330
+ # Load model
331
+ pretrained_path_with_subfolder = (
332
+ pretrained_model_name_or_path
333
+ if subfolder is None
334
+ else os.path.join(pretrained_model_name_or_path, subfolder)
335
+ )
336
+ if os.path.isdir(pretrained_path_with_subfolder):
337
+ if from_pt:
338
+ if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
339
+ raise EnvironmentError(
340
+ f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} "
341
+ )
342
+ model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)
343
+ elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):
344
+ # Load from a Flax checkpoint
345
+ model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)
346
+ # Check if pytorch weights exist instead
347
+ elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
348
+ raise EnvironmentError(
349
+ f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model"
350
+ " using `from_pt=True`."
351
+ )
352
+ else:
353
+ raise EnvironmentError(
354
+ f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
355
+ f"{pretrained_path_with_subfolder}."
356
+ )
357
+ else:
358
+ try:
359
+ model_file = hf_hub_download(
360
+ pretrained_model_name_or_path,
361
+ filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,
362
+ cache_dir=cache_dir,
363
+ force_download=force_download,
364
+ proxies=proxies,
365
+ resume_download=resume_download,
366
+ local_files_only=local_files_only,
367
+ token=token,
368
+ user_agent=user_agent,
369
+ subfolder=subfolder,
370
+ revision=revision,
371
+ )
372
+
373
+ except RepositoryNotFoundError:
374
+ raise EnvironmentError(
375
+ f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
376
+ "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
377
+ "token having permission to this repo with `token` or log in with `huggingface-cli "
378
+ "login`."
379
+ )
380
+ except RevisionNotFoundError:
381
+ raise EnvironmentError(
382
+ f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
383
+ "this model name. Check the model page at "
384
+ f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
385
+ )
386
+ except EntryNotFoundError:
387
+ raise EnvironmentError(
388
+ f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}."
389
+ )
390
+ except HTTPError as err:
391
+ raise EnvironmentError(
392
+ f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n"
393
+ f"{err}"
394
+ )
395
+ except ValueError:
396
+ raise EnvironmentError(
397
+ f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
398
+ f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
399
+ f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your"
400
+ " internet connection or see how to run the library in offline mode at"
401
+ " 'https://huggingface.co/docs/transformers/installation#offline-mode'."
402
+ )
403
+ except EnvironmentError:
404
+ raise EnvironmentError(
405
+ f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
406
+ "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
407
+ f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
408
+ f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
409
+ )
410
+
411
+ if from_pt:
412
+ if is_torch_available():
413
+ from .modeling_utils import load_state_dict
414
+ else:
415
+ raise EnvironmentError(
416
+ "Can't load the model in PyTorch format because PyTorch is not installed. "
417
+ "Please, install PyTorch or use native Flax weights."
418
+ )
419
+
420
+ # Step 1: Get the pytorch file
421
+ pytorch_model_file = load_state_dict(model_file)
422
+
423
+ # Step 2: Convert the weights
424
+ state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)
425
+ else:
426
+ try:
427
+ with open(model_file, "rb") as state_f:
428
+ state = from_bytes(cls, state_f.read())
429
+ except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
430
+ try:
431
+ with open(model_file) as f:
432
+ if f.read().startswith("version"):
433
+ raise OSError(
434
+ "You seem to have cloned a repository without having git-lfs installed. Please"
435
+ " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
436
+ " folder you cloned."
437
+ )
438
+ else:
439
+ raise ValueError from e
440
+ except (UnicodeDecodeError, ValueError):
441
+ raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
442
+ # make sure all arrays are stored as jnp.ndarray
443
+ # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:
444
+ # https://github.com/google/flax/issues/1261
445
+ state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state)
446
+
447
+ # flatten dicts
448
+ state = flatten_dict(state)
449
+
450
+ params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))
451
+ required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())
452
+
453
+ shape_state = flatten_dict(unfreeze(params_shape_tree))
454
+
455
+ missing_keys = required_params - set(state.keys())
456
+ unexpected_keys = set(state.keys()) - required_params
457
+
458
+ if missing_keys:
459
+ logger.warning(
460
+ f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
461
+ "Make sure to call model.init_weights to initialize the missing weights."
462
+ )
463
+ cls._missing_keys = missing_keys
464
+
465
+ for key in state.keys():
466
+ if key in shape_state and state[key].shape != shape_state[key].shape:
467
+ raise ValueError(
468
+ f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
469
+ f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. "
470
+ )
471
+
472
+ # remove unexpected keys to not be saved again
473
+ for unexpected_key in unexpected_keys:
474
+ del state[unexpected_key]
475
+
476
+ if len(unexpected_keys) > 0:
477
+ logger.warning(
478
+ f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
479
+ f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
480
+ f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
481
+ " with another architecture."
482
+ )
483
+ else:
484
+ logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
485
+
486
+ if len(missing_keys) > 0:
487
+ logger.warning(
488
+ f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
489
+ f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
490
+ " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
491
+ )
492
+ else:
493
+ logger.info(
494
+ f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
495
+ f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
496
+ f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
497
+ " training."
498
+ )
499
+
500
+ return model, unflatten_dict(state)
501
+
502
+ def save_pretrained(
503
+ self,
504
+ save_directory: Union[str, os.PathLike],
505
+ params: Union[Dict, FrozenDict],
506
+ is_main_process: bool = True,
507
+ push_to_hub: bool = False,
508
+ **kwargs,
509
+ ):
510
+ """
511
+ Save a model and its configuration file to a directory so that it can be reloaded using the
512
+ [`~FlaxModelMixin.from_pretrained`] class method.
513
+
514
+ Arguments:
515
+ save_directory (`str` or `os.PathLike`):
516
+ Directory to save a model and its configuration file to. Will be created if it doesn't exist.
517
+ params (`Union[Dict, FrozenDict]`):
518
+ A `PyTree` of model parameters.
519
+ is_main_process (`bool`, *optional*, defaults to `True`):
520
+ Whether the process calling this is the main process or not. Useful during distributed training and you
521
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
522
+ process to avoid race conditions.
523
+ push_to_hub (`bool`, *optional*, defaults to `False`):
524
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
525
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
526
+ namespace).
527
+ kwargs (`Dict[str, Any]`, *optional*):
528
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
529
+ """
530
+ if os.path.isfile(save_directory):
531
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
532
+ return
533
+
534
+ os.makedirs(save_directory, exist_ok=True)
535
+
536
+ if push_to_hub:
537
+ commit_message = kwargs.pop("commit_message", None)
538
+ private = kwargs.pop("private", False)
539
+ create_pr = kwargs.pop("create_pr", False)
540
+ token = kwargs.pop("token", None)
541
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
542
+ repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
543
+
544
+ model_to_save = self
545
+
546
+ # Attach architecture to the config
547
+ # Save the config
548
+ if is_main_process:
549
+ model_to_save.save_config(save_directory)
550
+
551
+ # save model
552
+ output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)
553
+ with open(output_model_file, "wb") as f:
554
+ model_bytes = to_bytes(params)
555
+ f.write(model_bytes)
556
+
557
+ logger.info(f"Model weights saved in {output_model_file}")
558
+
559
+ if push_to_hub:
560
+ self._upload_folder(
561
+ save_directory,
562
+ repo_id,
563
+ token=token,
564
+ commit_message=commit_message,
565
+ create_pr=create_pr,
566
+ )
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_outputs.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
+ from ..utils import BaseOutput
4
+
5
+
6
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
            `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
    """

    # Forward reference: the distribution class lives in the autoencoder modules.
    latent_dist: "DiagonalGaussianDistribution"  # noqa: F821
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_pytorch_flax_utils.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch - Flax general utilities."""
16
+
17
+ from pickle import UnpicklingError
18
+
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.serialization import from_bytes
23
+ from flax.traverse_util import flatten_dict
24
+
25
+ from ..utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ #####################
32
+ # Flax => PyTorch #
33
+ #####################
34
+
35
+
36
+ # from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_flax_pytorch_utils.py#L224-L352
37
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Deserialize a Flax checkpoint file and load its weights into `pt_model`."""
    try:
        with open(model_file, "rb") as checkpoint_fh:
            flax_state = from_bytes(None, checkpoint_fh.read())
    except UnpicklingError as e:
        # Deserialization failed: distinguish a git-lfs pointer file from a
        # genuinely corrupt/unknown payload.
        try:
            with open(model_file) as text_fh:
                if text_fh.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
56
+
57
+
58
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16

        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        # 4-D `kernel` leaves are conv weights: transpose to PyTorch's
        # (out, in, H, W) layout (Flax stores them as (H, W, in, out)).
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        # 2-D `kernel` leaves are dense weights: PyTorch keeps the transpose.
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        # Norm `scale` parameters are called `weight` in PyTorch.
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        # Map suffixed Flax submodule names (e.g. `resnets_0`) to PyTorch's
        # indexed form (`resnets.0`). `time_embedding` modules keep their
        # underscores, so they are explicitly excluded from the rewrite.
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/modeling_utils.py ADDED
@@ -0,0 +1,1021 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import inspect
18
+ import itertools
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from functools import partial
23
+ from typing import Any, Callable, List, Optional, Tuple, Union
24
+
25
+ import safetensors
26
+ import torch
27
+ from huggingface_hub import create_repo
28
+ from huggingface_hub.utils import validate_hf_hub_args
29
+ from torch import Tensor, nn
30
+
31
+ from .. import __version__
32
+ from ..utils import (
33
+ CONFIG_NAME,
34
+ FLAX_WEIGHTS_NAME,
35
+ SAFETENSORS_FILE_EXTENSION,
36
+ SAFETENSORS_WEIGHTS_NAME,
37
+ WEIGHTS_NAME,
38
+ _add_variant,
39
+ _get_model_file,
40
+ deprecate,
41
+ is_accelerate_available,
42
+ is_torch_version,
43
+ logging,
44
+ )
45
+ from ..utils.hub_utils import PushToHubMixin, load_or_create_model_card, populate_model_card
46
+
47
+
48
logger = logging.get_logger(__name__)


# Meta-device ("empty weights") initialization requires torch >= 1.9.0, so
# `low_cpu_mem_usage` only defaults to True when that requirement is met.
_LOW_CPU_MEM_USAGE_DEFAULT = True if is_torch_version(">=", "1.9.0") else False


if is_accelerate_available():
    import accelerate
    from accelerate.utils import set_module_tensor_to_device
    from accelerate.utils.versions import is_torch_version
61
+
62
+
63
def get_parameter_device(parameter: torch.nn.Module) -> torch.device:
    """Return the device of the first parameter or buffer found in *parameter*."""
    try:
        return next(itertools.chain(parameter.parameters(), parameter.buffers())).device
    except StopIteration:
        # Fallback for torch.nn.DataParallel compatibility in PyTorch 1.5, where
        # tensors may only be reachable as plain attributes on the module.

        def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
            return [(name, value) for name, value in module.__dict__.items() if torch.is_tensor(value)]

        members = parameter._named_members(get_members_fn=find_tensor_attributes)
        _, first_tensor = next(members)
        return first_tensor.device
77
+
78
+
79
def get_parameter_dtype(parameter: torch.nn.Module) -> torch.dtype:
    """Return the dtype of the first parameter (or, failing that, the first buffer)."""
    try:
        params = tuple(parameter.parameters())
        if params:
            return params[0].dtype

        buffers = tuple(parameter.buffers())
        if buffers:
            return buffers[0].dtype
        # NOTE: when the module has neither parameters nor buffers, this falls
        # through and implicitly returns None (behavior preserved from original).

    except StopIteration:
        # Fallback for torch.nn.DataParallel compatibility in PyTorch 1.5, where
        # tensors may only be reachable as plain attributes on the module.

        def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
            return [(name, value) for name, value in module.__dict__.items() if torch.is_tensor(value)]

        members = parameter._named_members(get_members_fn=find_tensor_attributes)
        _, first_tensor = next(members)
        return first_tensor.dtype
99
+
100
+
101
def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None):
    """
    Reads a checkpoint file, returning properly formatted errors if they arise.
    """
    try:
        # Dispatch on the file extension: safetensors vs. a pickled torch checkpoint.
        extension = os.path.basename(checkpoint_file).split(".")[-1]
        if extension == SAFETENSORS_FILE_EXTENSION:
            return safetensors.torch.load_file(checkpoint_file, device="cpu")
        return torch.load(checkpoint_file, map_location="cpu")
    except Exception as e:
        try:
            with open(checkpoint_file) as f:
                # A text file starting with "version" is a git-lfs pointer, not weights.
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please install "
                        "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                        "you cloned."
                    )
                else:
                    raise ValueError(
                        f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained "
                        "model. Make sure you have saved the model properly."
                    ) from e
        except (UnicodeDecodeError, ValueError):
            raise OSError(
                f"Unable to load weights from checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'. "
            )
129
+
130
+
131
def load_model_dict_into_meta(
    model,
    state_dict: OrderedDict,
    device: Optional[Union[str, torch.device]] = None,
    dtype: Optional[Union[str, torch.dtype]] = None,
    model_name_or_path: Optional[str] = None,
) -> List[str]:
    """
    Load `state_dict` into `model` (whose weights were initialized on the meta device),
    materializing each tensor on `device` with `dtype`.

    Args:
        model: The model whose parameters/buffers will be populated.
        state_dict (`OrderedDict`): Checkpoint tensors keyed by parameter name.
        device: Target device for the loaded tensors (defaults to CPU).
        dtype: Target dtype for the loaded tensors (defaults to `torch.float32`).
        model_name_or_path (`str`, *optional*): Used only to enrich error messages.

    Returns:
        The list of checkpoint keys that do not exist in the model (unexpected keys).

    Raises:
        ValueError: If a checkpoint tensor's shape does not match the model's.
    """
    device = device or torch.device("cpu")
    dtype = dtype or torch.float32

    # Older accelerate versions of `set_module_tensor_to_device` lack the `dtype` kwarg.
    accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys())

    unexpected_keys = []
    empty_state_dict = model.state_dict()
    for param_name, param in state_dict.items():
        if param_name not in empty_state_dict:
            unexpected_keys.append(param_name)
            continue

        if empty_state_dict[param_name].shape != param.shape:
            model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else ""
            # Bug fix: report the expected *shape*; the original interpolated the whole
            # (meta) tensor object, producing an unreadable error message.
            raise ValueError(
                f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name].shape}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example."
            )

        if accepts_dtype:
            set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype)
        else:
            set_module_tensor_to_device(model, param_name, device, value=param)
    return unexpected_keys
161
+
162
+
163
+ def _load_state_dict_into_model(model_to_load, state_dict: OrderedDict) -> List[str]:
164
+ # Convert old format to new format if needed from a PyTorch state_dict
165
+ # copy state_dict so _load_from_state_dict can modify it
166
+ state_dict = state_dict.copy()
167
+ error_msgs = []
168
+
169
+ # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
170
+ # so we need to apply the function recursively.
171
+ def load(module: torch.nn.Module, prefix: str = ""):
172
+ args = (state_dict, prefix, {}, True, [], [], error_msgs)
173
+ module._load_from_state_dict(*args)
174
+
175
+ for name, child in module._modules.items():
176
+ if child is not None:
177
+ load(child, prefix + name + ".")
178
+
179
+ load(model_to_load)
180
+
181
+ return error_msgs
182
+
183
+
184
+ class ModelMixin(torch.nn.Module, PushToHubMixin):
185
+ r"""
186
+ Base class for all models.
187
+
188
+ [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and
189
+ saving models.
190
+
191
+ - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`].
192
+ """
193
+
194
+ config_name = CONFIG_NAME
195
+ _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"]
196
+ _supports_gradient_checkpointing = False
197
+ _keys_to_ignore_on_load_unexpected = None
198
+
199
+ def __init__(self):
200
+ super().__init__()
201
+
202
+ def __getattr__(self, name: str) -> Any:
203
+ """The only reason we overwrite `getattr` here is to gracefully deprecate accessing
204
+ config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite
205
+ __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__':
206
+ https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
207
+ """
208
+
209
+ is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name)
210
+ is_attribute = name in self.__dict__
211
+
212
+ if is_in_config and not is_attribute:
213
+ deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'."
214
+ deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False, stacklevel=3)
215
+ return self._internal_dict[name]
216
+
217
+ # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
218
+ return super().__getattr__(name)
219
+
220
+ @property
221
+ def is_gradient_checkpointing(self) -> bool:
222
+ """
223
+ Whether gradient checkpointing is activated for this model or not.
224
+ """
225
+ return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
226
+
227
+ def enable_gradient_checkpointing(self) -> None:
228
+ """
229
+ Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or
230
+ *checkpoint activations* in other frameworks).
231
+ """
232
+ if not self._supports_gradient_checkpointing:
233
+ raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
234
+ self.apply(partial(self._set_gradient_checkpointing, value=True))
235
+
236
+ def disable_gradient_checkpointing(self) -> None:
237
+ """
238
+ Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or
239
+ *checkpoint activations* in other frameworks).
240
+ """
241
+ if self._supports_gradient_checkpointing:
242
+ self.apply(partial(self._set_gradient_checkpointing, value=False))
243
+
244
+ def set_use_memory_efficient_attention_xformers(
245
+ self, valid: bool, attention_op: Optional[Callable] = None
246
+ ) -> None:
247
+ # Recursively walk through all the children.
248
+ # Any children which exposes the set_use_memory_efficient_attention_xformers method
249
+ # gets the message
250
+ def fn_recursive_set_mem_eff(module: torch.nn.Module):
251
+ if hasattr(module, "set_use_memory_efficient_attention_xformers"):
252
+ module.set_use_memory_efficient_attention_xformers(valid, attention_op)
253
+
254
+ for child in module.children():
255
+ fn_recursive_set_mem_eff(child)
256
+
257
+ for module in self.children():
258
+ if isinstance(module, torch.nn.Module):
259
+ fn_recursive_set_mem_eff(module)
260
+
261
+ def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None) -> None:
262
+ r"""
263
+ Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
264
+
265
+ When this option is enabled, you should observe lower GPU memory usage and a potential speed up during
266
+ inference. Speed up during training is not guaranteed.
267
+
268
+ <Tip warning={true}>
269
+
270
+ ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes
271
+ precedent.
272
+
273
+ </Tip>
274
+
275
+ Parameters:
276
+ attention_op (`Callable`, *optional*):
277
+ Override the default `None` operator for use as `op` argument to the
278
+ [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)
279
+ function of xFormers.
280
+
281
+ Examples:
282
+
283
+ ```py
284
+ >>> import torch
285
+ >>> from diffusers import UNet2DConditionModel
286
+ >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
287
+
288
+ >>> model = UNet2DConditionModel.from_pretrained(
289
+ ... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16
290
+ ... )
291
+ >>> model = model.to("cuda")
292
+ >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
293
+ ```
294
+ """
295
+ self.set_use_memory_efficient_attention_xformers(True, attention_op)
296
+
297
+ def disable_xformers_memory_efficient_attention(self) -> None:
298
+ r"""
299
+ Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
300
+ """
301
+ self.set_use_memory_efficient_attention_xformers(False)
302
+
303
+ def save_pretrained(
304
+ self,
305
+ save_directory: Union[str, os.PathLike],
306
+ is_main_process: bool = True,
307
+ save_function: Optional[Callable] = None,
308
+ safe_serialization: bool = True,
309
+ variant: Optional[str] = None,
310
+ push_to_hub: bool = False,
311
+ **kwargs,
312
+ ):
313
+ """
314
+ Save a model and its configuration file to a directory so that it can be reloaded using the
315
+ [`~models.ModelMixin.from_pretrained`] class method.
316
+
317
+ Arguments:
318
+ save_directory (`str` or `os.PathLike`):
319
+ Directory to save a model and its configuration file to. Will be created if it doesn't exist.
320
+ is_main_process (`bool`, *optional*, defaults to `True`):
321
+ Whether the process calling this is the main process or not. Useful during distributed training and you
322
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
323
+ process to avoid race conditions.
324
+ save_function (`Callable`):
325
+ The function to use to save the state dictionary. Useful during distributed training when you need to
326
+ replace `torch.save` with another method. Can be configured with the environment variable
327
+ `DIFFUSERS_SAVE_MODE`.
328
+ safe_serialization (`bool`, *optional*, defaults to `True`):
329
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
330
+ variant (`str`, *optional*):
331
+ If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
332
+ push_to_hub (`bool`, *optional*, defaults to `False`):
333
+ Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
334
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
335
+ namespace).
336
+ kwargs (`Dict[str, Any]`, *optional*):
337
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
338
+ """
339
+ if os.path.isfile(save_directory):
340
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
341
+ return
342
+
343
+ os.makedirs(save_directory, exist_ok=True)
344
+
345
+ if push_to_hub:
346
+ commit_message = kwargs.pop("commit_message", None)
347
+ private = kwargs.pop("private", False)
348
+ create_pr = kwargs.pop("create_pr", False)
349
+ token = kwargs.pop("token", None)
350
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
351
+ repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
352
+
353
+ # Only save the model itself if we are using distributed training
354
+ model_to_save = self
355
+
356
+ # Attach architecture to the config
357
+ # Save the config
358
+ if is_main_process:
359
+ model_to_save.save_config(save_directory)
360
+
361
+ # Save the model
362
+ state_dict = model_to_save.state_dict()
363
+
364
+ weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
365
+ weights_name = _add_variant(weights_name, variant)
366
+
367
+ # Save the model
368
+ if safe_serialization:
369
+ safetensors.torch.save_file(
370
+ state_dict, os.path.join(save_directory, weights_name), metadata={"format": "pt"}
371
+ )
372
+ else:
373
+ torch.save(state_dict, os.path.join(save_directory, weights_name))
374
+
375
+ logger.info(f"Model weights saved in {os.path.join(save_directory, weights_name)}")
376
+
377
+ if push_to_hub:
378
+ # Create a new empty model card and eventually tag it
379
+ model_card = load_or_create_model_card(repo_id, token=token)
380
+ model_card = populate_model_card(model_card)
381
+ model_card.save(os.path.join(save_directory, "README.md"))
382
+
383
+ self._upload_folder(
384
+ save_directory,
385
+ repo_id,
386
+ token=token,
387
+ commit_message=commit_message,
388
+ create_pr=create_pr,
389
+ )
390
+
391
+ @classmethod
392
+ @validate_hf_hub_args
393
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
394
+ r"""
395
+ Instantiate a pretrained PyTorch model from a pretrained model configuration.
396
+
397
+ The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To
398
+ train the model, set it back in training mode with `model.train()`.
399
+
400
+ Parameters:
401
+ pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
402
+ Can be either:
403
+
404
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
405
+ the Hub.
406
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
407
+ with [`~ModelMixin.save_pretrained`].
408
+
409
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
410
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
411
+ is not used.
412
+ torch_dtype (`str` or `torch.dtype`, *optional*):
413
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
414
+ dtype is automatically derived from the model's weights.
415
+ force_download (`bool`, *optional*, defaults to `False`):
416
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
417
+ cached versions if they exist.
418
+ resume_download (`bool`, *optional*, defaults to `False`):
419
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
420
+ incompletely downloaded files are deleted.
421
+ proxies (`Dict[str, str]`, *optional*):
422
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
423
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
424
+ output_loading_info (`bool`, *optional*, defaults to `False`):
425
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
426
+ local_files_only(`bool`, *optional*, defaults to `False`):
427
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
428
+ won't be downloaded from the Hub.
429
+ token (`str` or *bool*, *optional*):
430
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
431
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
432
+ revision (`str`, *optional*, defaults to `"main"`):
433
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
434
+ allowed by Git.
435
+ from_flax (`bool`, *optional*, defaults to `False`):
436
+ Load the model weights from a Flax checkpoint save file.
437
+ subfolder (`str`, *optional*, defaults to `""`):
438
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
439
+ mirror (`str`, *optional*):
440
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
441
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
442
+ information.
443
+ device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
444
+ A map that specifies where each submodule should go. It doesn't need to be defined for each
445
+ parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
446
+ same device.
447
+
448
+ Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
449
+ more information about each option see [designing a device
450
+ map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
451
+ max_memory (`Dict`, *optional*):
452
+ A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
453
+ each GPU and the available CPU RAM if unset.
454
+ offload_folder (`str` or `os.PathLike`, *optional*):
455
+ The path to offload weights if `device_map` contains the value `"disk"`.
456
+ offload_state_dict (`bool`, *optional*):
457
+ If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
458
+ the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
459
+ when there is some disk offload.
460
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
461
+ Speed up model loading only loading the pretrained weights and not initializing the weights. This also
462
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
463
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
464
+ argument to `True` will raise an error.
465
+ variant (`str`, *optional*):
466
+ Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when
467
+ loading `from_flax`.
468
+ use_safetensors (`bool`, *optional*, defaults to `None`):
469
+ If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the
470
+ `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors`
471
+ weights. If set to `False`, `safetensors` weights are not loaded.
472
+
473
+ <Tip>
474
+
475
+ To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
476
+ `huggingface-cli login`. You can also activate the special
477
+ ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
478
+ firewalled environment.
479
+
480
+ </Tip>
481
+
482
+ Example:
483
+
484
+ ```py
485
+ from diffusers import UNet2DConditionModel
486
+
487
+ unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
488
+ ```
489
+
490
+ If you get the error message below, you need to finetune the weights for your downstream task:
491
+
492
+ ```bash
493
+ Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
494
+ - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
495
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
496
+ ```
497
+ """
498
+ cache_dir = kwargs.pop("cache_dir", None)
499
+ ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
500
+ force_download = kwargs.pop("force_download", False)
501
+ from_flax = kwargs.pop("from_flax", False)
502
+ resume_download = kwargs.pop("resume_download", False)
503
+ proxies = kwargs.pop("proxies", None)
504
+ output_loading_info = kwargs.pop("output_loading_info", False)
505
+ local_files_only = kwargs.pop("local_files_only", None)
506
+ token = kwargs.pop("token", None)
507
+ revision = kwargs.pop("revision", None)
508
+ torch_dtype = kwargs.pop("torch_dtype", None)
509
+ subfolder = kwargs.pop("subfolder", None)
510
+ device_map = kwargs.pop("device_map", None)
511
+ max_memory = kwargs.pop("max_memory", None)
512
+ offload_folder = kwargs.pop("offload_folder", None)
513
+ offload_state_dict = kwargs.pop("offload_state_dict", False)
514
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
515
+ variant = kwargs.pop("variant", None)
516
+ use_safetensors = kwargs.pop("use_safetensors", None)
517
+
518
+ allow_pickle = False
519
+ if use_safetensors is None:
520
+ use_safetensors = True
521
+ allow_pickle = True
522
+
523
+ if low_cpu_mem_usage and not is_accelerate_available():
524
+ low_cpu_mem_usage = False
525
+ logger.warning(
526
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
527
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
528
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
529
+ " install accelerate\n```\n."
530
+ )
531
+
532
+ if device_map is not None and not is_accelerate_available():
533
+ raise NotImplementedError(
534
+ "Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set"
535
+ " `device_map=None`. You can install accelerate with `pip install accelerate`."
536
+ )
537
+
538
+ # Check if we can handle device_map and dispatching the weights
539
+ if device_map is not None and not is_torch_version(">=", "1.9.0"):
540
+ raise NotImplementedError(
541
+ "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
542
+ " `device_map=None`."
543
+ )
544
+
545
+ if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
546
+ raise NotImplementedError(
547
+ "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
548
+ " `low_cpu_mem_usage=False`."
549
+ )
550
+
551
+ if low_cpu_mem_usage is False and device_map is not None:
552
+ raise ValueError(
553
+ f"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and"
554
+ " dispatching. Please make sure to set `low_cpu_mem_usage=True`."
555
+ )
556
+
557
+ # Load config if we don't provide a configuration
558
+ config_path = pretrained_model_name_or_path
559
+
560
+ user_agent = {
561
+ "diffusers": __version__,
562
+ "file_type": "model",
563
+ "framework": "pytorch",
564
+ }
565
+
566
+ # load config
567
+ config, unused_kwargs, commit_hash = cls.load_config(
568
+ config_path,
569
+ cache_dir=cache_dir,
570
+ return_unused_kwargs=True,
571
+ return_commit_hash=True,
572
+ force_download=force_download,
573
+ resume_download=resume_download,
574
+ proxies=proxies,
575
+ local_files_only=local_files_only,
576
+ token=token,
577
+ revision=revision,
578
+ subfolder=subfolder,
579
+ device_map=device_map,
580
+ max_memory=max_memory,
581
+ offload_folder=offload_folder,
582
+ offload_state_dict=offload_state_dict,
583
+ user_agent=user_agent,
584
+ **kwargs,
585
+ )
586
+
587
+ # load model
588
+ model_file = None
589
+ if from_flax:
590
+ model_file = _get_model_file(
591
+ pretrained_model_name_or_path,
592
+ weights_name=FLAX_WEIGHTS_NAME,
593
+ cache_dir=cache_dir,
594
+ force_download=force_download,
595
+ resume_download=resume_download,
596
+ proxies=proxies,
597
+ local_files_only=local_files_only,
598
+ token=token,
599
+ revision=revision,
600
+ subfolder=subfolder,
601
+ user_agent=user_agent,
602
+ commit_hash=commit_hash,
603
+ )
604
+ model = cls.from_config(config, **unused_kwargs)
605
+
606
+ # Convert the weights
607
+ from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model
608
+
609
+ model = load_flax_checkpoint_in_pytorch_model(model, model_file)
610
+ else:
611
+ if use_safetensors:
612
+ try:
613
+ model_file = _get_model_file(
614
+ pretrained_model_name_or_path,
615
+ weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),
616
+ cache_dir=cache_dir,
617
+ force_download=force_download,
618
+ resume_download=resume_download,
619
+ proxies=proxies,
620
+ local_files_only=local_files_only,
621
+ token=token,
622
+ revision=revision,
623
+ subfolder=subfolder,
624
+ user_agent=user_agent,
625
+ commit_hash=commit_hash,
626
+ )
627
+ except IOError as e:
628
+ if not allow_pickle:
629
+ raise e
630
+ pass
631
+ if model_file is None:
632
+ model_file = _get_model_file(
633
+ pretrained_model_name_or_path,
634
+ weights_name=_add_variant(WEIGHTS_NAME, variant),
635
+ cache_dir=cache_dir,
636
+ force_download=force_download,
637
+ resume_download=resume_download,
638
+ proxies=proxies,
639
+ local_files_only=local_files_only,
640
+ token=token,
641
+ revision=revision,
642
+ subfolder=subfolder,
643
+ user_agent=user_agent,
644
+ commit_hash=commit_hash,
645
+ )
646
+
647
+ if low_cpu_mem_usage:
648
+ # Instantiate model with empty weights
649
+ with accelerate.init_empty_weights():
650
+ model = cls.from_config(config, **unused_kwargs)
651
+
652
+ # if device_map is None, load the state dict and move the params from meta device to the cpu
653
+ if device_map is None:
654
+ param_device = "cpu"
655
+ state_dict = load_state_dict(model_file, variant=variant)
656
+ model._convert_deprecated_attention_blocks(state_dict)
657
+ # move the params from meta device to cpu
658
+ missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
659
+ if len(missing_keys) > 0:
660
+ raise ValueError(
661
+ f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are"
662
+ f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass"
663
+ " `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize"
664
+ " those weights or else make sure your checkpoint file is correct."
665
+ )
666
+
667
+ unexpected_keys = load_model_dict_into_meta(
668
+ model,
669
+ state_dict,
670
+ device=param_device,
671
+ dtype=torch_dtype,
672
+ model_name_or_path=pretrained_model_name_or_path,
673
+ )
674
+
675
+ if cls._keys_to_ignore_on_load_unexpected is not None:
676
+ for pat in cls._keys_to_ignore_on_load_unexpected:
677
+ unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
678
+
679
+ if len(unexpected_keys) > 0:
680
+ logger.warning(
681
+ f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
682
+ )
683
+
684
+ else: # else let accelerate handle loading and dispatching.
685
+ # Load weights and dispatch according to the device_map
686
+ # by default the device_map is None and the weights are loaded on the CPU
687
+ try:
688
+ accelerate.load_checkpoint_and_dispatch(
689
+ model,
690
+ model_file,
691
+ device_map,
692
+ max_memory=max_memory,
693
+ offload_folder=offload_folder,
694
+ offload_state_dict=offload_state_dict,
695
+ dtype=torch_dtype,
696
+ )
697
+ except AttributeError as e:
698
+ # When using accelerate loading, we do not have the ability to load the state
699
+ # dict and rename the weight names manually. Additionally, accelerate skips
700
+ # torch loading conventions and directly writes into `module.{_buffers, _parameters}`
701
+ # (which look like they should be private variables?), so we can't use the standard hooks
702
+ # to rename parameters on load. We need to mimic the original weight names so the correct
703
+ # attributes are available. After we have loaded the weights, we convert the deprecated
704
+ # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert
705
+ # the weights so we don't have to do this again.
706
+
707
+ if "'Attention' object has no attribute" in str(e):
708
+ logger.warning(
709
+ f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}"
710
+ " was saved with deprecated attention block weight names. We will load it with the deprecated attention block"
711
+ " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion,"
712
+ " so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint,"
713
+ " please also re-upload it or open a PR on the original repository."
714
+ )
715
+ model._temp_convert_self_to_deprecated_attention_blocks()
716
+ accelerate.load_checkpoint_and_dispatch(
717
+ model,
718
+ model_file,
719
+ device_map,
720
+ max_memory=max_memory,
721
+ offload_folder=offload_folder,
722
+ offload_state_dict=offload_state_dict,
723
+ dtype=torch_dtype,
724
+ )
725
+ model._undo_temp_convert_self_to_deprecated_attention_blocks()
726
+ else:
727
+ raise e
728
+
729
+ loading_info = {
730
+ "missing_keys": [],
731
+ "unexpected_keys": [],
732
+ "mismatched_keys": [],
733
+ "error_msgs": [],
734
+ }
735
+ else:
736
+ model = cls.from_config(config, **unused_kwargs)
737
+
738
+ state_dict = load_state_dict(model_file, variant=variant)
739
+ model._convert_deprecated_attention_blocks(state_dict)
740
+
741
+ model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
742
+ model,
743
+ state_dict,
744
+ model_file,
745
+ pretrained_model_name_or_path,
746
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
747
+ )
748
+
749
+ loading_info = {
750
+ "missing_keys": missing_keys,
751
+ "unexpected_keys": unexpected_keys,
752
+ "mismatched_keys": mismatched_keys,
753
+ "error_msgs": error_msgs,
754
+ }
755
+
756
+ if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):
757
+ raise ValueError(
758
+ f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}."
759
+ )
760
+ elif torch_dtype is not None:
761
+ model = model.to(torch_dtype)
762
+
763
+ model.register_to_config(_name_or_path=pretrained_model_name_or_path)
764
+
765
+ # Set model in evaluation mode to deactivate DropOut modules by default
766
+ model.eval()
767
+ if output_loading_info:
768
+ return model, loading_info
769
+
770
+ return model
771
+
772
    @classmethod
    def _load_pretrained_model(
        cls,
        model,
        state_dict: OrderedDict,
        resolved_archive_file,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        ignore_mismatched_sizes: bool = False,
    ):
        """
        Load `state_dict` into `model` in place and report key-level diagnostics.

        Returns a 5-tuple `(model, missing_keys, unexpected_keys, mismatched_keys, error_msgs)`.
        `mismatched_keys` holds `(key, checkpoint_shape, model_shape)` triples for weights that were
        skipped because their shapes differ (only collected when `ignore_mismatched_sizes=True`).
        Raises `RuntimeError` when the underlying copy reports hard errors.
        """
        # Retrieve missing & unexpected_keys
        model_state_dict = model.state_dict()
        loaded_keys = list(state_dict.keys())

        expected_keys = list(model_state_dict.keys())

        original_loaded_keys = loaded_keys

        # Keys present in the model but absent from the checkpoint, and vice versa.
        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        # Make sure we are able to load base models as well as derived models (with heads)
        model_to_load = model

        def _find_mismatched_keys(
            state_dict,
            model_state_dict,
            loaded_keys,
            ignore_mismatched_sizes,
        ):
            # Collect — and drop from `state_dict` — entries whose shapes disagree
            # with the instantiated model, so the subsequent load cannot fail on them.
            mismatched_keys = []
            if ignore_mismatched_sizes:
                for checkpoint_key in loaded_keys:
                    model_key = checkpoint_key

                    if (
                        model_key in model_state_dict
                        and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
                    ):
                        mismatched_keys.append(
                            (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)
                        )
                        del state_dict[checkpoint_key]
            return mismatched_keys

        if state_dict is not None:
            # Whole checkpoint
            mismatched_keys = _find_mismatched_keys(
                state_dict,
                model_state_dict,
                original_loaded_keys,
                ignore_mismatched_sizes,
            )
            error_msgs = _load_state_dict_into_model(model_to_load, state_dict)

            if len(error_msgs) > 0:
                error_msg = "\n\t".join(error_msgs)
                if "size mismatch" in error_msg:
                    error_msg += (
                        "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method."
                    )
                raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")

        # Everything below is diagnostics only: warn/inform about key mismatches.
        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
                f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
                f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task"
                " or with another architecture (e.g. initializing a BertForSequenceClassification model from a"
                " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
                f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly"
                " identical (initializing a BertForSequenceClassification model from a"
                " BertForSequenceClassification model)."
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
                " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the"
                f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions"
                " without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
                f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be"
                " able to use it for predictions and inference."
            )

        return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs
875
+
876
    @property
    def device(self) -> torch.device:
        """
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        # Delegates to the module-level helper, which inspects the parameters.
        return get_parameter_device(self)
883
+
884
    @property
    def dtype(self) -> torch.dtype:
        """
        `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        # Delegates to the module-level helper, which inspects the parameters.
        return get_parameter_dtype(self)
890
+
891
+ def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
892
+ """
893
+ Get number of (trainable or non-embedding) parameters in the module.
894
+
895
+ Args:
896
+ only_trainable (`bool`, *optional*, defaults to `False`):
897
+ Whether or not to return only the number of trainable parameters.
898
+ exclude_embeddings (`bool`, *optional*, defaults to `False`):
899
+ Whether or not to return only the number of non-embedding parameters.
900
+
901
+ Returns:
902
+ `int`: The number of parameters.
903
+
904
+ Example:
905
+
906
+ ```py
907
+ from diffusers import UNet2DConditionModel
908
+
909
+ model_id = "runwayml/stable-diffusion-v1-5"
910
+ unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")
911
+ unet.num_parameters(only_trainable=True)
912
+ 859520964
913
+ ```
914
+ """
915
+
916
+ if exclude_embeddings:
917
+ embedding_param_names = [
918
+ f"{name}.weight"
919
+ for name, module_type in self.named_modules()
920
+ if isinstance(module_type, torch.nn.Embedding)
921
+ ]
922
+ non_embedding_parameters = [
923
+ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
924
+ ]
925
+ return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
926
+ else:
927
+ return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)
928
+
929
+ def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None:
930
+ deprecated_attention_block_paths = []
931
+
932
+ def recursive_find_attn_block(name, module):
933
+ if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
934
+ deprecated_attention_block_paths.append(name)
935
+
936
+ for sub_name, sub_module in module.named_children():
937
+ sub_name = sub_name if name == "" else f"{name}.{sub_name}"
938
+ recursive_find_attn_block(sub_name, sub_module)
939
+
940
+ recursive_find_attn_block("", self)
941
+
942
+ # NOTE: we have to check if the deprecated parameters are in the state dict
943
+ # because it is possible we are loading from a state dict that was already
944
+ # converted
945
+
946
+ for path in deprecated_attention_block_paths:
947
+ # group_norm path stays the same
948
+
949
+ # query -> to_q
950
+ if f"{path}.query.weight" in state_dict:
951
+ state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight")
952
+ if f"{path}.query.bias" in state_dict:
953
+ state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias")
954
+
955
+ # key -> to_k
956
+ if f"{path}.key.weight" in state_dict:
957
+ state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight")
958
+ if f"{path}.key.bias" in state_dict:
959
+ state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias")
960
+
961
+ # value -> to_v
962
+ if f"{path}.value.weight" in state_dict:
963
+ state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight")
964
+ if f"{path}.value.bias" in state_dict:
965
+ state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias")
966
+
967
+ # proj_attn -> to_out.0
968
+ if f"{path}.proj_attn.weight" in state_dict:
969
+ state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight")
970
+ if f"{path}.proj_attn.bias" in state_dict:
971
+ state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias")
972
+
973
+ def _temp_convert_self_to_deprecated_attention_blocks(self) -> None:
974
+ deprecated_attention_block_modules = []
975
+
976
+ def recursive_find_attn_block(module):
977
+ if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
978
+ deprecated_attention_block_modules.append(module)
979
+
980
+ for sub_module in module.children():
981
+ recursive_find_attn_block(sub_module)
982
+
983
+ recursive_find_attn_block(self)
984
+
985
+ for module in deprecated_attention_block_modules:
986
+ module.query = module.to_q
987
+ module.key = module.to_k
988
+ module.value = module.to_v
989
+ module.proj_attn = module.to_out[0]
990
+
991
+ # We don't _have_ to delete the old attributes, but it's helpful to ensure
992
+ # that _all_ the weights are loaded into the new attributes and we're not
993
+ # making an incorrect assumption that this model should be converted when
994
+ # it really shouldn't be.
995
+ del module.to_q
996
+ del module.to_k
997
+ del module.to_v
998
+ del module.to_out
999
+
1000
+ def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None:
1001
+ deprecated_attention_block_modules = []
1002
+
1003
+ def recursive_find_attn_block(module) -> None:
1004
+ if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
1005
+ deprecated_attention_block_modules.append(module)
1006
+
1007
+ for sub_module in module.children():
1008
+ recursive_find_attn_block(sub_module)
1009
+
1010
+ recursive_find_attn_block(self)
1011
+
1012
+ for module in deprecated_attention_block_modules:
1013
+ module.to_q = module.query
1014
+ module.to_k = module.key
1015
+ module.to_v = module.value
1016
+ module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)])
1017
+
1018
+ del module.query
1019
+ del module.key
1020
+ del module.value
1021
+ del module.proj_attn
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/normalization.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import numbers
17
+ from typing import Dict, Optional, Tuple
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from ..utils import is_torch_version
24
+ from .activations import get_activation
25
+ from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings
26
+
27
+
28
class AdaLayerNorm(nn.Module):
    r"""
    Layer norm whose scale and shift are predicted from a learned timestep embedding.

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        num_embeddings (`int`): The size of the embeddings dictionary.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        # Projects the timestep embedding to a concatenated (scale, shift) pair.
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        # Affine-free norm: the modulation below supplies scale and shift.
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor:
        temb = self.emb(timestep)
        temb = self.linear(self.silu(temb))
        scale, shift = torch.chunk(temb, 2)
        return self.norm(x) * (1 + scale) + shift
49
+
50
+
51
class AdaLayerNormZero(nn.Module):
    r"""
    Norm layer adaptive layer norm zero (adaLN-Zero).

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        num_embeddings (`int`): The size of the embeddings dictionary.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()

        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)

        self.silu = nn.SiLU()
        # One projection produces all six modulation tensors at once.
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(
        self,
        x: torch.Tensor,
        timestep: torch.Tensor,
        class_labels: torch.LongTensor,
        hidden_dtype: Optional[torch.dtype] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        cond = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)
        params = self.linear(self.silu(cond))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = params.chunk(6, dim=1)
        # Only the attention branch is modulated here; the MLP parameters and the
        # gates are returned for the caller to apply.
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
80
+
81
+
82
class AdaLayerNormSingle(nn.Module):
    r"""
    Norm layer adaptive layer norm single (adaLN-single).

    As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3).

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        use_additional_conditions (`bool`): To use additional conditions for normalization or not.
    """

    def __init__(self, embedding_dim: int, use_additional_conditions: bool = False):
        super().__init__()

        self.emb = PixArtAlphaCombinedTimestepSizeEmbeddings(
            embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions
        )

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)

    def forward(
        self,
        timestep: torch.Tensor,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
        batch_size: Optional[int] = None,
        hidden_dtype: Optional[torch.dtype] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        # No modulation happening here.
        # NOTE(review): `added_cond_kwargs` is unpacked with `**`, so despite the
        # Optional annotation passing None would raise — confirm with callers.
        embedded_timestep = self.emb(timestep, **added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_dtype)
        return self.linear(self.silu(embedded_timestep)), embedded_timestep
113
+
114
+
115
class AdaGroupNorm(nn.Module):
    r"""
    GroupNorm layer modified to incorporate timestep embeddings.

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        out_dim (`int`): The number of channels to normalize.
        num_groups (`int`): The number of groups to separate the channels into.
        act_fn (`str`, *optional*, defaults to `None`): The activation function to use.
        eps (`float`, *optional*, defaults to `1e-5`): The epsilon value to use for numerical stability.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        # Optional nonlinearity applied to the conditioning embedding before the
        # projection.
        self.act = None if act_fn is None else get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
        if self.act is not None:
            emb = self.act(emb)
        # Project to per-channel scale/shift and broadcast over H and W.
        scale_shift = self.linear(emb)[:, :, None, None]
        scale, shift = scale_shift.chunk(2, dim=1)

        normed = F.group_norm(x, self.num_groups, eps=self.eps)
        return normed * (1 + scale) + shift
151
+
152
+
153
class AdaLayerNormContinuous(nn.Module):
    r"""
    Layer norm whose per-channel scale and shift are projected from a continuous
    conditioning embedding (one vector per batch element).
    """

    def __init__(
        self,
        embedding_dim: int,
        conditioning_embedding_dim: int,
        # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters
        # because the output is immediately scaled and shifted by the projected conditioning embeddings.
        # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters.
        # However, this is how it was implemented in the original code, and it's rather likely you should
        # set `elementwise_affine` to False.
        elementwise_affine=True,
        eps=1e-5,
        bias=True,
        norm_type="layer_norm",
    ):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
        # `LayerNorm`/`RMSNorm` here are the module-local classes defined below
        # (the local LayerNorm supports `bias=False` on older torch versions).
        if norm_type == "layer_norm":
            self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
        elif norm_type == "rms_norm":
            self.norm = RMSNorm(embedding_dim, eps, elementwise_affine)
        else:
            raise ValueError(f"unknown norm_type {norm_type}")

    def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor:
        emb = self.linear(self.silu(conditioning_embedding))
        # Split into scale/shift and broadcast over the sequence dimension.
        scale, shift = torch.chunk(emb, 2, dim=1)
        x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
        return x
183
+
184
+
185
# On torch >= 2.1 the builtin LayerNorm already supports everything we need, so
# alias it directly; otherwise fall back to a local implementation.
if is_torch_version(">=", "2.1.0"):
    LayerNorm = nn.LayerNorm
else:
    # Has optional bias parameter compared to torch layer norm
    # TODO: replace with torch layernorm once min required torch version >= 2.1
    class LayerNorm(nn.Module):
        def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True):
            super().__init__()

            self.eps = eps

            # Accept either a single int or a shape tuple for the normalized dims.
            if isinstance(dim, numbers.Integral):
                dim = (dim,)

            self.dim = torch.Size(dim)

            if elementwise_affine:
                self.weight = nn.Parameter(torch.ones(dim))
                self.bias = nn.Parameter(torch.zeros(dim)) if bias else None
            else:
                self.weight = None
                self.bias = None

        def forward(self, input):
            return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps)
210
+
211
+
212
class RMSNorm(nn.Module):
    r"""
    Root-mean-square layer normalization (no mean subtraction, no bias).

    The mean square is accumulated in float32 for numerical stability; the result
    is cast back to the input dtype (or to the weight dtype for half-precision
    weights) before the optional learned scale is applied.
    """

    def __init__(self, dim, eps: float, elementwise_affine: bool = True):
        super().__init__()

        self.eps = eps

        # Accept either a single int or a shape tuple for the normalized dims.
        if isinstance(dim, numbers.Integral):
            dim = (dim,)
        self.dim = torch.Size(dim)

        self.weight = nn.Parameter(torch.ones(dim)) if elementwise_affine else None

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        # Compute the mean square in float32 to avoid half-precision overflow.
        mean_square = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(mean_square + self.eps)

        if self.weight is None:
            hidden_states = hidden_states.to(input_dtype)
        else:
            # convert into half-precision if necessary
            if self.weight.dtype in (torch.float16, torch.bfloat16):
                hidden_states = hidden_states.to(self.weight.dtype)
            hidden_states = hidden_states * self.weight

        return hidden_states
242
+
243
+
244
class GlobalResponseNorm(nn.Module):
    r"""Global response normalization (GRN) for channels-last (N, H, W, C) inputs."""

    # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105
    def __init__(self, dim):
        super().__init__()
        # gamma/beta start at zero, so the layer is the identity at initialization.
        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, x):
        # Per-channel L2 norm over the spatial dimensions, normalized by the
        # mean response across channels.
        spatial_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
        channel_scale = spatial_norm / (spatial_norm.mean(dim=-1, keepdim=True) + 1e-6)
        return self.gamma * (x * channel_scale) + self.beta + x
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/prior_transformer.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Backwards-compatibility shims: these symbols moved to
# `diffusers.models.transformers.prior_transformer`. Subclassing the relocated
# classes under the same names keeps old import paths working; the `deprecate`
# call in each class body runs once, at class-creation (module import) time.
from ..utils import deprecate
from .transformers.prior_transformer import PriorTransformer, PriorTransformerOutput


class PriorTransformerOutput(PriorTransformerOutput):
    deprecation_message = "Importing `PriorTransformerOutput` from `diffusers.models.prior_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.prior_transformer import PriorTransformerOutput`, instead."
    deprecate("PriorTransformerOutput", "0.29", deprecation_message)


class PriorTransformer(PriorTransformer):
    deprecation_message = "Importing `PriorTransformer` from `diffusers.models.prior_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.prior_transformer import PriorTransformer`, instead."
    deprecate("PriorTransformer", "0.29", deprecation_message)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/resnet.py ADDED
@@ -0,0 +1,802 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ # `TemporalConvLayer` Copyright 2024 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from functools import partial
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from ..utils import deprecate
24
+ from .activations import get_activation
25
+ from .attention_processor import SpatialNorm
26
+ from .downsampling import ( # noqa
27
+ Downsample1D,
28
+ Downsample2D,
29
+ FirDownsample2D,
30
+ KDownsample2D,
31
+ downsample_2d,
32
+ )
33
+ from .normalization import AdaGroupNorm
34
+ from .upsampling import ( # noqa
35
+ FirUpsample2D,
36
+ KUpsample2D,
37
+ Upsample1D,
38
+ Upsample2D,
39
+ upfirdn2d_native,
40
+ upsample_2d,
41
+ )
42
+
43
+
44
class ResnetBlockCondNorm2D(nn.Module):
    r"""
    A Resnet block that use normalization layer that incorporate conditioning information.

    Parameters:
        in_channels (`int`): The number of channels in the input.
        out_channels (`int`, *optional*, default to be `None`):
            The number of output channels for the first conv2d layer. If None, same as `in_channels`.
        dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
        temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.
        groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.
        groups_out (`int`, *optional*, default to None):
            The number of groups to use for the second normalization layer. if set to None, same as `groups`.
        eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
        non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use.
        time_embedding_norm (`str`, *optional*, default to `"ada_group"` ):
            The normalization layer for time embedding `temb`. Currently only support "ada_group" or "spatial".
        kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see
            [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`].
        output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output.
        use_in_shortcut (`bool`, *optional*, default to `True`):
            If `True`, add a 1x1 nn.conv2d layer for skip-connection.
        up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer.
        down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer.
        conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the
            `conv_shortcut` output.
        conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output.
            If None, same as `out_channels`.
    """

    def __init__(
        self,
        *,
        in_channels: int,
        out_channels: Optional[int] = None,
        conv_shortcut: bool = False,
        dropout: float = 0.0,
        temb_channels: int = 512,
        groups: int = 32,
        groups_out: Optional[int] = None,
        eps: float = 1e-6,
        non_linearity: str = "swish",
        time_embedding_norm: str = "ada_group",  # ada_group, spatial
        output_scale_factor: float = 1.0,
        use_in_shortcut: Optional[bool] = None,
        up: bool = False,
        down: bool = False,
        conv_shortcut_bias: bool = True,
        conv_2d_out_channels: Optional[int] = None,
    ):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut
        self.up = up
        self.down = down
        self.output_scale_factor = output_scale_factor
        self.time_embedding_norm = time_embedding_norm

        conv_cls = nn.Conv2d

        if groups_out is None:
            groups_out = groups

        # Both norm layers consume the conditioning embedding `temb` directly
        # (in contrast to the plain ResnetBlock2D, which adds `temb` to features).
        if self.time_embedding_norm == "ada_group":  # ada_group
            self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps)
        elif self.time_embedding_norm == "spatial":
            self.norm1 = SpatialNorm(in_channels, temb_channels)
        else:
            raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}")

        self.conv1 = conv_cls(in_channels, out_channels, kernel_size=3, stride=1, padding=1)

        if self.time_embedding_norm == "ada_group":  # ada_group
            self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps)
        elif self.time_embedding_norm == "spatial":  # spatial
            self.norm2 = SpatialNorm(out_channels, temb_channels)
        else:
            raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}")

        self.dropout = torch.nn.Dropout(dropout)

        conv_2d_out_channels = conv_2d_out_channels or out_channels
        self.conv2 = conv_cls(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1)

        self.nonlinearity = get_activation(non_linearity)

        self.upsample = self.downsample = None
        if self.up:
            self.upsample = Upsample2D(in_channels, use_conv=False)
        elif self.down:
            self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op")

        # A 1x1 projection is needed on the skip path whenever the channel count
        # changes (unless explicitly overridden via `use_in_shortcut`).
        self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut

        self.conv_shortcut = None
        if self.use_in_shortcut:
            self.conv_shortcut = conv_cls(
                in_channels,
                conv_2d_out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=conv_shortcut_bias,
            )

    def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # `scale` used to be accepted here; it is now deprecated and ignored.
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)

        hidden_states = input_tensor

        hidden_states = self.norm1(hidden_states, temb)

        hidden_states = self.nonlinearity(hidden_states)

        # Resample both the residual path and the main path so the final
        # addition is shape-compatible.
        if self.upsample is not None:
            # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
            if hidden_states.shape[0] >= 64:
                input_tensor = input_tensor.contiguous()
                hidden_states = hidden_states.contiguous()
            input_tensor = self.upsample(input_tensor)
            hidden_states = self.upsample(hidden_states)

        elif self.downsample is not None:
            input_tensor = self.downsample(input_tensor)
            hidden_states = self.downsample(hidden_states)

        hidden_states = self.conv1(hidden_states)

        hidden_states = self.norm2(hidden_states, temb)

        hidden_states = self.nonlinearity(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            input_tensor = self.conv_shortcut(input_tensor)

        # Residual connection, optionally rescaled.
        output_tensor = (input_tensor + hidden_states) / self.output_scale_factor

        return output_tensor
189
+
190
+
191
class ResnetBlock2D(nn.Module):
    r"""
    A Resnet block.

    Parameters:
        in_channels (`int`): The number of channels in the input.
        out_channels (`int`, *optional*, default to be `None`):
            The number of output channels for the first conv2d layer. If None, same as `in_channels`.
        dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
        temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.
        groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.
        groups_out (`int`, *optional*, default to None):
            The number of groups to use for the second normalization layer. if set to None, same as `groups`.
        eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
        non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use.
        time_embedding_norm (`str`, *optional*, default to `"default"` ): Time scale shift config.
            By default, apply timestep embedding conditioning with a simple shift mechanism. Choose "scale_shift"
            for a stronger conditioning with scale and shift.
        kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see
            [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`].
        output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output.
        use_in_shortcut (`bool`, *optional*, default to `True`):
            If `True`, add a 1x1 nn.conv2d layer for skip-connection.
        up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer.
        down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer.
        conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the
            `conv_shortcut` output.
        conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output.
            If None, same as `out_channels`.
    """

    def __init__(
        self,
        *,
        in_channels: int,
        out_channels: Optional[int] = None,
        conv_shortcut: bool = False,
        dropout: float = 0.0,
        temb_channels: int = 512,
        groups: int = 32,
        groups_out: Optional[int] = None,
        pre_norm: bool = True,
        eps: float = 1e-6,
        non_linearity: str = "swish",
        skip_time_act: bool = False,
        time_embedding_norm: str = "default",  # default, scale_shift,
        kernel: Optional[torch.FloatTensor] = None,
        output_scale_factor: float = 1.0,
        use_in_shortcut: Optional[bool] = None,
        up: bool = False,
        down: bool = False,
        conv_shortcut_bias: bool = True,
        conv_2d_out_channels: Optional[int] = None,
    ):
        super().__init__()
        # Conditioning-dependent norms ("ada_group"/"spatial") are handled by
        # `ResnetBlockCondNorm2D`, not by this class.
        if time_embedding_norm == "ada_group":
            raise ValueError(
                "This class cannot be used with `time_embedding_norm==ada_group`, please use `ResnetBlockCondNorm2D` instead",
            )
        if time_embedding_norm == "spatial":
            raise ValueError(
                "This class cannot be used with `time_embedding_norm==spatial`, please use `ResnetBlockCondNorm2D` instead",
            )

        # NOTE: `pre_norm` is accepted for config compatibility but always forced True.
        self.pre_norm = True
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut
        self.up = up
        self.down = down
        self.output_scale_factor = output_scale_factor
        self.time_embedding_norm = time_embedding_norm
        self.skip_time_act = skip_time_act

        linear_cls = nn.Linear
        conv_cls = nn.Conv2d

        if groups_out is None:
            groups_out = groups

        self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)

        self.conv1 = conv_cls(in_channels, out_channels, kernel_size=3, stride=1, padding=1)

        if temb_channels is not None:
            # "scale_shift" needs twice the channels: one half for scale, one for shift.
            if self.time_embedding_norm == "default":
                self.time_emb_proj = linear_cls(temb_channels, out_channels)
            elif self.time_embedding_norm == "scale_shift":
                self.time_emb_proj = linear_cls(temb_channels, 2 * out_channels)
            else:
                raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
        else:
            self.time_emb_proj = None

        self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)

        self.dropout = torch.nn.Dropout(dropout)
        conv_2d_out_channels = conv_2d_out_channels or out_channels
        self.conv2 = conv_cls(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1)

        self.nonlinearity = get_activation(non_linearity)

        # At most one of up/down is active; `kernel` selects the resize style:
        # "fir" (FIR filter), "sde_vp" (plain interpolate/avg-pool), else learned-free modules.
        self.upsample = self.downsample = None
        if self.up:
            if kernel == "fir":
                fir_kernel = (1, 3, 3, 1)
                self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)
            elif kernel == "sde_vp":
                self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
            else:
                self.upsample = Upsample2D(in_channels, use_conv=False)
        elif self.down:
            if kernel == "fir":
                fir_kernel = (1, 3, 3, 1)
                self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)
            elif kernel == "sde_vp":
                self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)
            else:
                self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op")

        self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut

        self.conv_shortcut = None
        if self.use_in_shortcut:
            # 1x1 projection so the residual addition has matching channel counts.
            self.conv_shortcut = conv_cls(
                in_channels,
                conv_2d_out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=conv_shortcut_bias,
            )

    def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # `scale` used to be accepted here; it is now deprecated and ignored.
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)

        hidden_states = input_tensor

        hidden_states = self.norm1(hidden_states)
        hidden_states = self.nonlinearity(hidden_states)

        if self.upsample is not None:
            # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
            if hidden_states.shape[0] >= 64:
                input_tensor = input_tensor.contiguous()
                hidden_states = hidden_states.contiguous()
            input_tensor = self.upsample(input_tensor)
            hidden_states = self.upsample(hidden_states)
        elif self.downsample is not None:
            input_tensor = self.downsample(input_tensor)
            hidden_states = self.downsample(hidden_states)

        hidden_states = self.conv1(hidden_states)

        if self.time_emb_proj is not None:
            if not self.skip_time_act:
                temb = self.nonlinearity(temb)
            # Broadcast the projected embedding over the two spatial dimensions.
            temb = self.time_emb_proj(temb)[:, :, None, None]

        if self.time_embedding_norm == "default":
            # Simple additive shift before the second norm.
            if temb is not None:
                hidden_states = hidden_states + temb
            hidden_states = self.norm2(hidden_states)
        elif self.time_embedding_norm == "scale_shift":
            if temb is None:
                raise ValueError(
                    f" `temb` should not be None when `time_embedding_norm` is {self.time_embedding_norm}"
                )
            # FiLM-style conditioning applied after normalization.
            time_scale, time_shift = torch.chunk(temb, 2, dim=1)
            hidden_states = self.norm2(hidden_states)
            hidden_states = hidden_states * (1 + time_scale) + time_shift
        else:
            hidden_states = self.norm2(hidden_states)

        hidden_states = self.nonlinearity(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            input_tensor = self.conv_shortcut(input_tensor)

        # Residual connection; `output_scale_factor` rescales the sum.
        output_tensor = (input_tensor + hidden_states) / self.output_scale_factor

        return output_tensor
379
+
380
+
381
# unet_rl.py
def rearrange_dims(tensor: torch.Tensor) -> torch.Tensor:
    """Insert or drop a singleton axis so 1D conv/norm layers see the layout they expect.

    - 2-D ``(batch, channels)``              -> ``(batch, channels, 1)``
    - 3-D ``(batch, channels, horizon)``     -> ``(batch, channels, 1, horizon)``
    - 4-D ``(batch, channels, 1, horizon)``  -> ``(batch, channels, horizon)`` (drops dim 2)

    Raises:
        ValueError: If `tensor` is not 2-, 3- or 4-dimensional.
    """
    ndim = len(tensor.shape)
    if ndim == 2:
        return tensor[:, :, None]
    elif ndim == 3:
        return tensor[:, :, None, :]
    elif ndim == 4:
        return tensor[:, :, 0, :]
    # Bug fix: the original message interpolated `len(tensor)` (the size of dim 0,
    # i.e. the batch size) instead of the rank the dispatch actually tests.
    raise ValueError(f"`tensor` rank: {ndim} has to be 2, 3 or 4.")
391
+
392
+
393
class Conv1dBlock(nn.Module):
    """
    A 1D convolution followed by group normalization and a configurable
    activation (Conv1d --> GroupNorm --> activation, `mish` by default).

    Parameters:
        inp_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        kernel_size (`int` or `tuple`): Size of the convolving kernel.
        n_groups (`int`, default `8`): Number of groups to separate the channels into.
        activation (`str`, defaults to `mish`): Name of the activation function.
    """

    def __init__(
        self,
        inp_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        n_groups: int = 8,
        activation: str = "mish",
    ):
        super().__init__()

        # `padding=kernel_size // 2` keeps the horizon length unchanged for odd kernels.
        self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2)
        self.group_norm = nn.GroupNorm(n_groups, out_channels)
        self.mish = get_activation(activation)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # GroupNorm runs on a temporarily 4-D view of the activations; the
        # rearrange_dims round-trip inserts and then removes the extra axis.
        normed = rearrange_dims(self.group_norm(rearrange_dims(self.conv1d(inputs))))
        return self.mish(normed)
426
+
427
+
428
# unet_rl.py
class ResidualTemporalBlock1D(nn.Module):
    """
    Residual 1D block with temporal convolutions.

    Parameters:
        inp_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        embed_dim (`int`): Embedding dimension.
        kernel_size (`int` or `tuple`): Size of the convolving kernel.
        activation (`str`, defaults `mish`): It is possible to choose the right activation function.
    """

    def __init__(
        self,
        inp_channels: int,
        out_channels: int,
        embed_dim: int,
        kernel_size: Union[int, Tuple[int, int]] = 5,
        activation: str = "mish",
    ):
        super().__init__()
        self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size)
        self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size)

        self.time_emb_act = get_activation(activation)
        self.time_emb = nn.Linear(embed_dim, out_channels)

        # 1x1 projection on the skip path when the channel count changes; a no-op otherwise.
        if inp_channels != out_channels:
            self.residual_conv = nn.Conv1d(inp_channels, out_channels, 1)
        else:
            self.residual_conv = nn.Identity()

    def forward(self, inputs: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
        """
        Args:
            inputs : [ batch_size x inp_channels x horizon ]
            t : [ batch_size x embed_dim ]

        returns:
            out : [ batch_size x out_channels x horizon ]
        """
        # Project the timestep embedding to channel space, then broadcast it
        # over the horizon axis before adding it to the convolved input.
        emb = self.time_emb(self.time_emb_act(t))
        hidden = self.conv_in(inputs) + rearrange_dims(emb)
        hidden = self.conv_out(hidden)
        return hidden + self.residual_conv(inputs)
474
+
475
+
476
class TemporalConvLayer(nn.Module):
    """
    Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from:
    https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016

    Parameters:
        in_dim (`int`): Number of input channels.
        out_dim (`int`): Number of output channels.
        dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: Optional[int] = None,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
    ):
        super().__init__()
        out_dim = out_dim or in_dim
        self.in_dim = in_dim
        self.out_dim = out_dim

        def make_block(c_in: int, c_out: int, use_dropout: bool) -> nn.Sequential:
            # GroupNorm -> SiLU [-> Dropout] -> temporal (3,1,1) Conv3d.
            layers = [nn.GroupNorm(norm_num_groups, c_in), nn.SiLU()]
            if use_dropout:
                layers.append(nn.Dropout(dropout))
            layers.append(nn.Conv3d(c_in, c_out, (3, 1, 1), padding=(1, 0, 0)))
            return nn.Sequential(*layers)

        # NOTE(review): conv2-conv4 project back to `in_dim` while their norms
        # expect `out_dim` channels, so chaining them only works when
        # in_dim == out_dim -- confirm with callers before generalizing.
        self.conv1 = make_block(in_dim, out_dim, use_dropout=False)
        self.conv2 = make_block(out_dim, in_dim, use_dropout=True)
        self.conv3 = make_block(out_dim, in_dim, use_dropout=True)
        self.conv4 = make_block(out_dim, in_dim, use_dropout=True)

        # Zero out the last conv's params so the whole block starts as the identity.
        nn.init.zeros_(self.conv4[-1].weight)
        nn.init.zeros_(self.conv4[-1].bias)

    def forward(self, hidden_states: torch.Tensor, num_frames: int = 1) -> torch.Tensor:
        # (batch * frames, C, H, W) -> (batch, C, frames, H, W)
        hidden_states = (
            hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4)
        )

        residual = hidden_states
        for block in (self.conv1, self.conv2, self.conv3, self.conv4):
            hidden_states = block(hidden_states)
        hidden_states = residual + hidden_states

        # (batch, C, frames, H, W) -> (batch * frames, C, H, W)
        batch, _, frames, height, width = hidden_states.shape
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch * frames, -1, height, width)
        return hidden_states
545
+
546
+
547
class TemporalResnetBlock(nn.Module):
    r"""
    A Resnet block.

    Parameters:
        in_channels (`int`): The number of channels in the input.
        out_channels (`int`, *optional*, default to be `None`):
            The number of output channels for the first conv2d layer. If None, same as `in_channels`.
        temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.
        eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: Optional[int] = None,
        temb_channels: int = 512,
        eps: float = 1e-6,
    ):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels

        # (3, 1, 1) kernels convolve along the frame axis only; the padding keeps
        # every dimension's length unchanged.
        kernel_size = (3, 1, 1)
        padding = [k // 2 for k in kernel_size]

        self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=eps, affine=True)
        self.conv1 = nn.Conv3d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=padding,
        )

        if temb_channels is not None:
            self.time_emb_proj = nn.Linear(temb_channels, out_channels)
        else:
            self.time_emb_proj = None

        self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=eps, affine=True)

        # Dropout probability is fixed at 0.0; the module is kept for structural parity.
        self.dropout = torch.nn.Dropout(0.0)
        self.conv2 = nn.Conv3d(
            out_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=padding,
        )

        self.nonlinearity = get_activation("silu")

        self.use_in_shortcut = self.in_channels != out_channels

        self.conv_shortcut = None
        if self.use_in_shortcut:
            # 1x1x1 projection so the residual sum has matching channel counts.
            self.conv_shortcut = nn.Conv3d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
            )

    def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor) -> torch.FloatTensor:
        # input_tensor layout: (batch, channels, frames, height, width) -- implied by
        # the Conv3d layers and the (0, 2, 1, 3, 4) permute applied to temb below.
        hidden_states = input_tensor

        hidden_states = self.norm1(hidden_states)
        hidden_states = self.nonlinearity(hidden_states)
        hidden_states = self.conv1(hidden_states)

        if self.time_emb_proj is not None:
            temb = self.nonlinearity(temb)
            # Indexing adds two trailing spatial axes; the permute then swaps the
            # channel axis in front of the frame axis to match hidden_states.
            temb = self.time_emb_proj(temb)[:, :, :, None, None]
            temb = temb.permute(0, 2, 1, 3, 4)
            hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = self.nonlinearity(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            input_tensor = self.conv_shortcut(input_tensor)

        output_tensor = input_tensor + hidden_states

        return output_tensor
637
+
638
+
639
# VideoResBlock
class SpatioTemporalResBlock(nn.Module):
    r"""
    A SpatioTemporal Resnet block.

    Parameters:
        in_channels (`int`): The number of channels in the input.
        out_channels (`int`, *optional*, default to be `None`):
            The number of output channels for the first conv2d layer. If None, same as `in_channels`.
        temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.
        eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the spatial resenet.
        temporal_eps (`float`, *optional*, defaults to `eps`): The epsilon to use for the temporal resnet.
        merge_factor (`float`, *optional*, defaults to `0.5`): The merge factor to use for the temporal mixing.
        merge_strategy (`str`, *optional*, defaults to `learned_with_images`):
            The merge strategy to use for the temporal mixing.
        switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`):
            If `True`, switch the spatial and temporal mixing.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: Optional[int] = None,
        temb_channels: int = 512,
        eps: float = 1e-6,
        temporal_eps: Optional[float] = None,
        merge_factor: float = 0.5,
        merge_strategy="learned_with_images",
        switch_spatial_to_temporal_mix: bool = False,
    ):
        super().__init__()

        self.spatial_res_block = ResnetBlock2D(
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            eps=eps,
        )

        # The temporal block runs at the spatial block's output width on both ends.
        self.temporal_res_block = TemporalResnetBlock(
            in_channels=out_channels if out_channels is not None else in_channels,
            out_channels=out_channels if out_channels is not None else in_channels,
            temb_channels=temb_channels,
            eps=temporal_eps if temporal_eps is not None else eps,
        )

        self.time_mixer = AlphaBlender(
            alpha=merge_factor,
            merge_strategy=merge_strategy,
            switch_spatial_to_temporal_mix=switch_spatial_to_temporal_mix,
        )

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
    ):
        # image_only_indicator's last axis defines num_frames, so the caller must
        # always provide it (it is only nominally Optional).
        num_frames = image_only_indicator.shape[-1]
        hidden_states = self.spatial_res_block(hidden_states, temb)

        batch_frames, channels, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        # (batch*frames, C, H, W) -> (batch, C, frames, H, W); one copy keeps the
        # spatial result for blending, the other feeds the temporal block.
        hidden_states_mix = (
            hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
        )
        hidden_states = (
            hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
        )

        if temb is not None:
            # Split the flat (batch*frames) embedding back into per-frame rows.
            temb = temb.reshape(batch_size, num_frames, -1)

        hidden_states = self.temporal_res_block(hidden_states, temb)
        # Blend the spatial-only features with the temporally refined ones.
        hidden_states = self.time_mixer(
            x_spatial=hidden_states_mix,
            x_temporal=hidden_states,
            image_only_indicator=image_only_indicator,
        )

        # Back to (batch*frames, C, H, W).
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width)
        return hidden_states
722
+
723
+
724
class AlphaBlender(nn.Module):
    r"""
    A module to blend spatial and temporal features.

    Parameters:
        alpha (`float`): The initial value of the blending factor.
        merge_strategy (`str`, *optional*, defaults to `learned_with_images`):
            The merge strategy to use for the temporal mixing.
        switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`):
            If `True`, switch the spatial and temporal mixing.
    """

    strategies = ["learned", "fixed", "learned_with_images"]

    def __init__(
        self,
        alpha: float,
        merge_strategy: str = "learned_with_images",
        switch_spatial_to_temporal_mix: bool = False,
    ):
        super().__init__()
        self.merge_strategy = merge_strategy
        self.switch_spatial_to_temporal_mix = switch_spatial_to_temporal_mix  # For TemporalVAE

        if merge_strategy not in self.strategies:
            raise ValueError(f"merge_strategy needs to be in {self.strategies}")

        # "fixed" keeps alpha constant (buffer); the learned variants optimize it (parameter).
        if self.merge_strategy == "fixed":
            self.register_buffer("mix_factor", torch.Tensor([alpha]))
        elif self.merge_strategy in ("learned", "learned_with_images"):
            self.register_parameter("mix_factor", torch.nn.Parameter(torch.Tensor([alpha])))
        else:
            raise ValueError(f"Unknown merge strategy {self.merge_strategy}")

    def get_alpha(self, image_only_indicator: torch.Tensor, ndims: int) -> torch.Tensor:
        """Return the blend factor for the configured strategy, shaped for broadcasting."""
        if self.merge_strategy == "fixed":
            return self.mix_factor

        if self.merge_strategy == "learned":
            return torch.sigmoid(self.mix_factor)

        if self.merge_strategy == "learned_with_images":
            if image_only_indicator is None:
                raise ValueError("Please provide image_only_indicator to use learned_with_images merge strategy")

            # Image-only entries blend with alpha=1 (pure spatial); video entries
            # use the learned, sigmoid-squashed factor.
            blend = torch.where(
                image_only_indicator.bool(),
                torch.ones(1, 1, device=image_only_indicator.device),
                torch.sigmoid(self.mix_factor)[..., None],
            )

            # (batch, channel, frames, height, width)
            if ndims == 5:
                return blend[:, None, :, None, None]
            # (batch*frames, height*width, channels)
            if ndims == 3:
                return blend.reshape(-1)[:, None, None]
            raise ValueError(f"Unexpected ndims {ndims}. Dimensions should be 3 or 5")

        raise NotImplementedError

    def forward(
        self,
        x_spatial: torch.Tensor,
        x_temporal: torch.Tensor,
        image_only_indicator: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        mix = self.get_alpha(image_only_indicator, x_spatial.ndim).to(x_spatial.dtype)

        # Optionally swap which stream the factor weights.
        if self.switch_spatial_to_temporal_mix:
            mix = 1.0 - mix

        return mix * x_spatial + (1.0 - mix) * x_temporal
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/resnet_flax.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import flax.linen as nn
15
+ import jax
16
+ import jax.numpy as jnp
17
+
18
+
19
class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbor upsampling followed by a 3x3 same-padding convolution (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Double both spatial dims with nearest-neighbor resize, then refine with the conv.
        upsampled = jax.image.resize(
            hidden_states,
            shape=(batch, 2 * height, 2 * width, channels),
            method="nearest",
        )
        return self.conv(upsampled)
41
+
42
+
43
class FlaxDownsample2D(nn.Module):
    """Strided 3x3 convolution that halves the spatial resolution (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # NOTE(review): uses symmetric (1, 1) padding with stride 2; the
        # commented-out code below suggests an asymmetric (0, 1) pad with
        # padding="VALID" was considered -- confirm parity with the torch port.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),  # padding="VALID",
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        return self.conv(hidden_states)
61
+
62
+
63
class FlaxResnetBlock2D(nn.Module):
    """
    Flax resnet block: GroupNorm -> swish -> 3x3 conv, twice, with the timestep
    embedding added between the two convolutions and a residual connection.

    Attributes:
        in_channels: Number of input channels.
        out_channels: Number of output channels; defaults to `in_channels` when `None`.
        dropout_prob: Dropout rate applied before the second convolution.
        use_nin_shortcut: Force (or suppress) the 1x1 shortcut conv; when `None`,
            it is added iff the channel count changes.
        dtype: Parameter/computation dtype.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 projection so the residual addition has matching channel counts.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        # NHWC layout throughout (Flax convention).
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Broadcast the projected timestep embedding over height and width.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/t5_film_transformer.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from ..utils import deprecate
15
+ from .transformers.t5_film_transformer import (
16
+ DecoderLayer,
17
+ NewGELUActivation,
18
+ T5DenseGatedActDense,
19
+ T5FilmDecoder,
20
+ T5FiLMLayer,
21
+ T5LayerCrossAttention,
22
+ T5LayerFFCond,
23
+ T5LayerNorm,
24
+ T5LayerSelfAttentionCond,
25
+ )
26
+
27
+
28
class T5FilmDecoder(T5FilmDecoder):
    # Deprecated re-export shim: subclasses the relocated implementation and
    # emits a deprecation warning once, at class-definition (import) time.
    deprecation_message = "Importing `T5FilmDecoder` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5FilmDecoder`, instead."
    deprecate("T5FilmDecoder", "0.29", deprecation_message)
31
+
32
+
33
class DecoderLayer(DecoderLayer):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `DecoderLayer` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import DecoderLayer`, instead."
    deprecate("DecoderLayer", "0.29", deprecation_message)
36
+
37
+
38
class T5LayerSelfAttentionCond(T5LayerSelfAttentionCond):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `T5LayerSelfAttentionCond` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerSelfAttentionCond`, instead."
    deprecate("T5LayerSelfAttentionCond", "0.29", deprecation_message)
41
+
42
+
43
class T5LayerCrossAttention(T5LayerCrossAttention):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `T5LayerCrossAttention` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerCrossAttention`, instead."
    deprecate("T5LayerCrossAttention", "0.29", deprecation_message)
46
+
47
+
48
class T5LayerFFCond(T5LayerFFCond):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `T5LayerFFCond` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerFFCond`, instead."
    deprecate("T5LayerFFCond", "0.29", deprecation_message)
51
+
52
+
53
class T5DenseGatedActDense(T5DenseGatedActDense):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `T5DenseGatedActDense` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5DenseGatedActDense`, instead."
    deprecate("T5DenseGatedActDense", "0.29", deprecation_message)
56
+
57
+
58
class T5LayerNorm(T5LayerNorm):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `T5LayerNorm` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5LayerNorm`, instead."
    deprecate("T5LayerNorm", "0.29", deprecation_message)
61
+
62
+
63
class NewGELUActivation(NewGELUActivation):
    # Deprecated re-export shim; warns once at import time.
    # Bug fix: the message previously named `T5LayerNorm` (copy-paste error)
    # instead of `NewGELUActivation`.
    deprecation_message = "Importing `NewGELUActivation` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import NewGELUActivation`, instead."
    deprecate("NewGELUActivation", "0.29", deprecation_message)
66
+
67
+
68
class T5FiLMLayer(T5FiLMLayer):
    # Deprecated re-export shim; warns once at import time.
    deprecation_message = "Importing `T5FiLMLayer` from `diffusers.models.t5_film_transformer` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.t5_film_transformer import T5FiLMLayer`, instead."
    deprecate("T5FiLMLayer", "0.29", deprecation_message)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformer_2d.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from ..utils import deprecate
15
+ from .transformers.transformer_2d import Transformer2DModel, Transformer2DModelOutput
16
+
17
+
18
# Deprecated import shim: keeps `diffusers.models.transformer_2d.Transformer2DModelOutput`
# importable after the move to `models.transformers`; warns at module import time.
class Transformer2DModelOutput(Transformer2DModelOutput):
    deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_2d import Transformer2DModelOutput`, instead."
    deprecate("Transformer2DModelOutput", "0.29", deprecation_message)
21
+
22
+
23
# Deprecated import shim for the relocated `Transformer2DModel`; warns at module
# import time.
class Transformer2DModel(Transformer2DModel):
    deprecation_message = "Importing `Transformer2DModel` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_2d import Transformer2DModel`, instead."
    deprecate("Transformer2DModel", "0.29", deprecation_message)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformer_temporal.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from ..utils import deprecate
15
+ from .transformers.transformer_temporal import (
16
+ TransformerSpatioTemporalModel,
17
+ TransformerTemporalModel,
18
+ TransformerTemporalModelOutput,
19
+ )
20
+
21
+
22
# Deprecated import shim for the relocated `TransformerTemporalModelOutput`;
# warns at module import time.
class TransformerTemporalModelOutput(TransformerTemporalModelOutput):
    # Fix: the suggested import path was misspelled `tranformer_temporal`.
    deprecation_message = "Importing `TransformerTemporalModelOutput` from `diffusers.models.transformer_temporal` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_temporal import TransformerTemporalModelOutput`, instead."
    deprecate("TransformerTemporalModelOutput", "0.29", deprecation_message)
25
+
26
+
27
# Deprecated import shim for the relocated `TransformerTemporalModel`; warns at
# module import time.
class TransformerTemporalModel(TransformerTemporalModel):
    # Fix: the suggested import path was misspelled `tranformer_temporal`.
    deprecation_message = "Importing `TransformerTemporalModel` from `diffusers.models.transformer_temporal` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_temporal import TransformerTemporalModel`, instead."
    deprecate("TransformerTemporalModel", "0.29", deprecation_message)
30
+
31
+
32
# Deprecated import shim for the relocated `TransformerSpatioTemporalModel`;
# warns at module import time.
class TransformerSpatioTemporalModel(TransformerSpatioTemporalModel):
    # Fixes: (1) `deprecate` was called with the wrong symbol name
    # ("TransformerTemporalModelOutput"); (2) the suggested import path was
    # misspelled `tranformer_temporal`.
    deprecation_message = "Importing `TransformerSpatioTemporalModel` from `diffusers.models.transformer_temporal` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.transformer_temporal import TransformerSpatioTemporalModel`, instead."
    deprecate("TransformerSpatioTemporalModel", "0.29", deprecation_message)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformers/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from ...utils import is_torch_available
2
+
3
+
4
# These transformer models are implemented with `torch`, so the public
# re-exports are guarded to keep the package importable when torch is absent.
if is_torch_available():
    from .dual_transformer_2d import DualTransformer2DModel
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .transformer_temporal import TransformerTemporalModel
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformers/__pycache__/dual_transformer_2d.cpython-310.pyc ADDED
Binary file (6.11 kB). View file
 
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/transformers/dual_transformer_2d.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ from torch import nn
17
+
18
+ from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
19
+
20
+
21
class DualTransformer2DModel(nn.Module):
    """
    Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
            `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
            up to but not more than steps than `num_embeds_ada_norm`.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        # Two identically-configured sub-transformers; which one handles which
        # condition slice is decided by `transformer_index_for_condition` below.
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        # (defaults match a 77-token text condition and a 257-token image condition)
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """
        Args:
            hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
                When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
                hidden_states.
            encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep ( `torch.long`, *optional*):
                Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
            attention_mask (`torch.FloatTensor`, *optional*):
                Optional attention mask to be applied in Attention.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.

        Returns:
            [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
                [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
                returning a tuple, the first element is the sample tensor.
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # keep only each transformer's residual so the shared input is added back exactly once
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        # linear blend of the two residuals, weighted by the pipeline-settable mix_ratio
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/unet_1d.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from ..utils import deprecate
16
+ from .unets.unet_1d import UNet1DModel, UNet1DOutput
17
+
18
+
19
# Deprecated import shim for the relocated `UNet1DOutput`; warns at module
# import time via `deprecate` in the class body.
class UNet1DOutput(UNet1DOutput):
    deprecation_message = "Importing `UNet1DOutput` from `diffusers.models.unet_1d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d import UNet1DOutput`, instead."
    deprecate("UNet1DOutput", "0.29", deprecation_message)
22
+
23
+
24
# Deprecated import shim for the relocated `UNet1DModel`; warns at module
# import time via `deprecate` in the class body.
class UNet1DModel(UNet1DModel):
    deprecation_message = "Importing `UNet1DModel` from `diffusers.models.unet_1d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d import UNet1DModel`, instead."
    deprecate("UNet1DModel", "0.29", deprecation_message)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/unet_1d_blocks.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from ..utils import deprecate
16
+ from .unets.unet_1d_blocks import (
17
+ AttnDownBlock1D,
18
+ AttnUpBlock1D,
19
+ DownBlock1D,
20
+ DownBlock1DNoSkip,
21
+ DownResnetBlock1D,
22
+ Downsample1d,
23
+ MidResTemporalBlock1D,
24
+ OutConv1DBlock,
25
+ OutValueFunctionBlock,
26
+ ResConvBlock,
27
+ SelfAttention1d,
28
+ UNetMidBlock1D,
29
+ UpBlock1D,
30
+ UpBlock1DNoSkip,
31
+ UpResnetBlock1D,
32
+ Upsample1d,
33
+ ValueFunctionMidBlock1D,
34
+ )
35
+
36
+
37
# Deprecated import shim for the relocated `DownResnetBlock1D`; warns at import time.
class DownResnetBlock1D(DownResnetBlock1D):
    deprecation_message = "Importing `DownResnetBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import DownResnetBlock1D`, instead."
    deprecate("DownResnetBlock1D", "0.29", deprecation_message)
40
+
41
+
42
# Deprecated import shim for the relocated `UpResnetBlock1D`; warns at import time.
class UpResnetBlock1D(UpResnetBlock1D):
    deprecation_message = "Importing `UpResnetBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UpResnetBlock1D`, instead."
    deprecate("UpResnetBlock1D", "0.29", deprecation_message)
45
+
46
+
47
# Deprecated import shim for the relocated `ValueFunctionMidBlock1D`; warns at import time.
class ValueFunctionMidBlock1D(ValueFunctionMidBlock1D):
    deprecation_message = "Importing `ValueFunctionMidBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import ValueFunctionMidBlock1D`, instead."
    deprecate("ValueFunctionMidBlock1D", "0.29", deprecation_message)
50
+
51
+
52
# Deprecated import shim for the relocated `OutConv1DBlock`; warns at import time.
class OutConv1DBlock(OutConv1DBlock):
    deprecation_message = "Importing `OutConv1DBlock` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import OutConv1DBlock`, instead."
    deprecate("OutConv1DBlock", "0.29", deprecation_message)
55
+
56
+
57
# Deprecated import shim for the relocated `OutValueFunctionBlock`; warns at import time.
class OutValueFunctionBlock(OutValueFunctionBlock):
    deprecation_message = "Importing `OutValueFunctionBlock` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import OutValueFunctionBlock`, instead."
    deprecate("OutValueFunctionBlock", "0.29", deprecation_message)
60
+
61
+
62
# Deprecated import shim for the relocated `Downsample1d`; warns at import time.
class Downsample1d(Downsample1d):
    deprecation_message = "Importing `Downsample1d` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import Downsample1d`, instead."
    deprecate("Downsample1d", "0.29", deprecation_message)
65
+
66
+
67
# Deprecated import shim for the relocated `Upsample1d`; warns at import time.
class Upsample1d(Upsample1d):
    deprecation_message = "Importing `Upsample1d` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import Upsample1d`, instead."
    deprecate("Upsample1d", "0.29", deprecation_message)
70
+
71
+
72
# Deprecated import shim for the relocated `SelfAttention1d`; warns at import time.
class SelfAttention1d(SelfAttention1d):
    deprecation_message = "Importing `SelfAttention1d` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import SelfAttention1d`, instead."
    deprecate("SelfAttention1d", "0.29", deprecation_message)
75
+
76
+
77
# Deprecated import shim for the relocated `ResConvBlock`; warns at import time.
class ResConvBlock(ResConvBlock):
    deprecation_message = "Importing `ResConvBlock` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import ResConvBlock`, instead."
    deprecate("ResConvBlock", "0.29", deprecation_message)
80
+
81
+
82
# Deprecated import shim for the relocated `UNetMidBlock1D`; warns at import time.
class UNetMidBlock1D(UNetMidBlock1D):
    deprecation_message = "Importing `UNetMidBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UNetMidBlock1D`, instead."
    deprecate("UNetMidBlock1D", "0.29", deprecation_message)
85
+
86
+
87
# Deprecated import shim for the relocated `AttnDownBlock1D`; warns at import time.
class AttnDownBlock1D(AttnDownBlock1D):
    deprecation_message = "Importing `AttnDownBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import AttnDownBlock1D`, instead."
    deprecate("AttnDownBlock1D", "0.29", deprecation_message)
90
+
91
+
92
# Deprecated import shim for the relocated `DownBlock1D`; warns at import time.
class DownBlock1D(DownBlock1D):
    deprecation_message = "Importing `DownBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import DownBlock1D`, instead."
    deprecate("DownBlock1D", "0.29", deprecation_message)
95
+
96
+
97
# Deprecated import shim for the relocated `DownBlock1DNoSkip`; warns at import time.
class DownBlock1DNoSkip(DownBlock1DNoSkip):
    deprecation_message = "Importing `DownBlock1DNoSkip` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import DownBlock1DNoSkip`, instead."
    deprecate("DownBlock1DNoSkip", "0.29", deprecation_message)
100
+
101
+
102
# Deprecated import shim for the relocated `AttnUpBlock1D`; warns at import time.
class AttnUpBlock1D(AttnUpBlock1D):
    deprecation_message = "Importing `AttnUpBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import AttnUpBlock1D`, instead."
    deprecate("AttnUpBlock1D", "0.29", deprecation_message)
105
+
106
+
107
# Deprecated import shim for the relocated `UpBlock1D`; warns at import time.
class UpBlock1D(UpBlock1D):
    deprecation_message = "Importing `UpBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UpBlock1D`, instead."
    deprecate("UpBlock1D", "0.29", deprecation_message)
110
+
111
+
112
# Deprecated import shim for the relocated `UpBlock1DNoSkip`; warns at import time.
class UpBlock1DNoSkip(UpBlock1DNoSkip):
    deprecation_message = "Importing `UpBlock1DNoSkip` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import UpBlock1DNoSkip`, instead."
    deprecate("UpBlock1DNoSkip", "0.29", deprecation_message)
115
+
116
+
117
# Deprecated import shim for the relocated `MidResTemporalBlock1D`; warns at import time.
class MidResTemporalBlock1D(MidResTemporalBlock1D):
    deprecation_message = "Importing `MidResTemporalBlock1D` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import MidResTemporalBlock1D`, instead."
    deprecate("MidResTemporalBlock1D", "0.29", deprecation_message)
120
+
121
+
122
def get_down_block(
    down_block_type: str,
    num_layers: int,
    in_channels: int,
    out_channels: int,
    temb_channels: int,
    add_downsample: bool,
):
    """Deprecated pass-through to `diffusers.models.unets.unet_1d_blocks.get_down_block`."""
    deprecation_message = "Importing `get_down_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_down_block`, instead."
    deprecate("get_down_block", "0.29", deprecation_message)

    # Import lazily and under an alias so the shim name is not shadowed mid-body.
    from .unets.unet_1d_blocks import get_down_block as _get_down_block

    forwarded = {
        "down_block_type": down_block_type,
        "num_layers": num_layers,
        "in_channels": in_channels,
        "out_channels": out_channels,
        "temb_channels": temb_channels,
        "add_downsample": add_downsample,
    }
    return _get_down_block(**forwarded)
143
+
144
+
145
def get_up_block(
    up_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_upsample: bool
):
    """Deprecated pass-through to `diffusers.models.unets.unet_1d_blocks.get_up_block`."""
    deprecation_message = "Importing `get_up_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_up_block`, instead."
    deprecate("get_up_block", "0.29", deprecation_message)

    # Import lazily and under an alias so the shim name is not shadowed mid-body.
    from .unets.unet_1d_blocks import get_up_block as _get_up_block

    forwarded = {
        "up_block_type": up_block_type,
        "num_layers": num_layers,
        "in_channels": in_channels,
        "out_channels": out_channels,
        "temb_channels": temb_channels,
        "add_upsample": add_upsample,
    }
    return _get_up_block(**forwarded)
161
+
162
+
163
def get_mid_block(
    mid_block_type: str,
    num_layers: int,
    in_channels: int,
    mid_channels: int,
    out_channels: int,
    embed_dim: int,
    add_downsample: bool,
):
    """Deprecated pass-through to `diffusers.models.unets.unet_1d_blocks.get_mid_block`."""
    deprecation_message = "Importing `get_mid_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_mid_block`, instead."
    deprecate("get_mid_block", "0.29", deprecation_message)

    # Import lazily and under an alias so the shim name is not shadowed mid-body.
    from .unets.unet_1d_blocks import get_mid_block as _get_mid_block

    forwarded = {
        "mid_block_type": mid_block_type,
        "num_layers": num_layers,
        "in_channels": in_channels,
        "mid_channels": mid_channels,
        "out_channels": out_channels,
        "embed_dim": embed_dim,
        "add_downsample": add_downsample,
    }
    return _get_mid_block(**forwarded)
186
+
187
+
188
def get_out_block(
    *, out_block_type: str, num_groups_out: int, embed_dim: int, out_channels: int, act_fn: str, fc_dim: int
):
    """Deprecated pass-through to `diffusers.models.unets.unet_1d_blocks.get_out_block`."""
    deprecation_message = "Importing `get_out_block` from `diffusers.models.unet_1d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_1d_blocks import get_out_block`, instead."
    deprecate("get_out_block", "0.29", deprecation_message)

    # Import lazily and under an alias so the shim name is not shadowed mid-body.
    from .unets.unet_1d_blocks import get_out_block as _get_out_block

    forwarded = {
        "out_block_type": out_block_type,
        "num_groups_out": num_groups_out,
        "embed_dim": embed_dim,
        "out_channels": out_channels,
        "act_fn": act_fn,
        "fc_dim": fc_dim,
    }
    return _get_out_block(**forwarded)
evalkit_tf449/lib/python3.10/site-packages/diffusers/models/unet_2d.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from ..utils import deprecate
17
+ from .unets.unet_2d import UNet2DModel, UNet2DOutput
18
+
19
+
20
# Deprecated import shim for the relocated `UNet2DOutput`; warns at module
# import time via `deprecate` in the class body.
class UNet2DOutput(UNet2DOutput):
    deprecation_message = "Importing `UNet2DOutput` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DOutput`, instead."
    deprecate("UNet2DOutput", "0.29", deprecation_message)
23
+
24
+
25
# Deprecated import shim for the relocated `UNet2DModel`; warns at module
# import time via `deprecate` in the class body.
class UNet2DModel(UNet2DModel):
    deprecation_message = "Importing `UNet2DModel` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DModel`, instead."
    deprecate("UNet2DModel", "0.29", deprecation_message)