AlexGraikos committed
Commit 69c7767 · verified · 1 Parent(s): 0691f24

Update pixcell_transformer_2d.py

Files changed (1):
  1. pixcell_transformer_2d.py +210 -1
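For context: the diff inlines the UNI conditioning helpers (`pixcell_get_2d_sincos_pos_embed`, `PixcellUNIProjection`, `UNIPosEmbed`) directly into `pixcell_transformer_2d.py`, replacing the relative import from `embeddings_pixcell` so the model file no longer depends on that companion module.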
pixcell_transformer_2d.py CHANGED
@@ -24,12 +24,221 @@ from diffusers.models.embeddings import PatchEmbed
from diffusers.models.modeling_outputs import Transformer2DModelOutput
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.normalization import AdaLayerNormSingle
+ from diffusers.models.activations import deprecate, FP32SiLU

- from .embeddings_pixcell import PixcellUNIProjection, UNIPosEmbed

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


+ # PixCell UNI conditioning
+ def pixcell_get_2d_sincos_pos_embed(
+     embed_dim,
+     grid_size,
+     cls_token=False,
+     extra_tokens=0,
+     interpolation_scale=1.0,
+     base_size=16,
+     device: Optional[torch.device] = None,
+     phase=0,
+     output_type: str = "pt",
+ ):
+     """
+     Creates 2D sinusoidal positional embeddings.
+
+     Args:
+         embed_dim (`int`):
+             The embedding dimension.
+         grid_size (`int`):
+             The size of the grid height and width.
+         cls_token (`bool`, defaults to `False`):
+             Whether or not to add a classification token.
+         extra_tokens (`int`, defaults to `0`):
+             The number of extra tokens to add.
+         interpolation_scale (`float`, defaults to `1.0`):
+             The scale of the interpolation.
+         phase (`int`, defaults to `0`):
+             Constant offset added to every grid position before encoding.
+
+     Returns:
+         pos_embed (`torch.Tensor`):
+             Shape is either `[grid_size * grid_size, embed_dim]` if not using cls_token, or
+             `[1 + grid_size * grid_size, embed_dim]` if using cls_token.
+     """
+     if output_type == "np":
+         deprecation_message = (
+             "`get_2d_sincos_pos_embed` uses `torch` and supports `device`."
+             " `from_numpy` is no longer required."
+             " Pass `output_type='pt'` to use the new version now."
+         )
+         deprecate("output_type=='np'", "0.33.0", deprecation_message, standard_warn=False)
+         raise ValueError("Not supported")
+     if isinstance(grid_size, int):
+         grid_size = (grid_size, grid_size)
+
+     grid_h = (
+         torch.arange(grid_size[0], device=device, dtype=torch.float32)
+         / (grid_size[0] / base_size)
+         / interpolation_scale
+     )
+     grid_w = (
+         torch.arange(grid_size[1], device=device, dtype=torch.float32)
+         / (grid_size[1] / base_size)
+         / interpolation_scale
+     )
+     grid = torch.meshgrid(grid_w, grid_h, indexing="xy")  # here w goes first
+     grid = torch.stack(grid, dim=0)
+
+     grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])
+     pos_embed = pixcell_get_2d_sincos_pos_embed_from_grid(embed_dim, grid, phase=phase, output_type=output_type)
+     if cls_token and extra_tokens > 0:
+         pos_embed = torch.concat([torch.zeros([extra_tokens, embed_dim]), pos_embed], dim=0)
+     return pos_embed
+
+
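A quick shape check for the helper above (a sketch, not part of the commit; it assumes the definitions in this file are importable and needs only `torch`):

    import torch

    # A 4x4 grid of 768-dim embeddings; the grid size and width are arbitrary here.
    pos = pixcell_get_2d_sincos_pos_embed(
        embed_dim=768,
        grid_size=4,
        base_size=16,
        interpolation_scale=1.0,
        output_type="pt",
    )
    print(pos.shape)  # torch.Size([16, 768])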
+ def pixcell_get_2d_sincos_pos_embed_from_grid(embed_dim, grid, phase=0, output_type="pt"):
+     r"""
+     This function generates 2D sinusoidal positional embeddings from a grid.
+
+     Args:
+         embed_dim (`int`): The embedding dimension.
+         grid (`torch.Tensor`): Grid of positions with shape `(H * W,)`.
+
+     Returns:
+         `torch.Tensor`: The 2D sinusoidal positional embeddings with shape `(H * W, embed_dim)`.
+     """
+     if output_type == "np":
+         deprecation_message = (
+             "`get_2d_sincos_pos_embed_from_grid` uses `torch` and supports `device`."
+             " `from_numpy` is no longer required."
+             " Pass `output_type='pt'` to use the new version now."
+         )
+         deprecate("output_type=='np'", "0.33.0", deprecation_message, standard_warn=False)
+         raise ValueError("Not supported")
+     if embed_dim % 2 != 0:
+         raise ValueError("embed_dim must be divisible by 2")
+
+     # use half of dimensions to encode grid_h
+     emb_h = pixcell_get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0], phase=phase, output_type=output_type)  # (H*W, D/2)
+     emb_w = pixcell_get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1], phase=phase, output_type=output_type)  # (H*W, D/2)
+
+     emb = torch.concat([emb_h, emb_w], dim=1)  # (H*W, D)
+     return emb
+
+
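The split into height/width halves can be checked directly (sketch, not part of the commit; the 2x2 grid and `embed_dim=8` are arbitrary):

    import torch

    # Build a 2x2 coordinate grid; the first D/2 channels encode grid[0] (h),
    # the last D/2 channels encode grid[1] (w).
    grid = torch.stack(torch.meshgrid(torch.arange(2.0), torch.arange(2.0), indexing="xy"), dim=0)
    emb = pixcell_get_2d_sincos_pos_embed_from_grid(8, grid, output_type="pt")
    print(emb.shape)  # torch.Size([4, 8])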
+ def pixcell_get_1d_sincos_pos_embed_from_grid(embed_dim, pos, phase=0, output_type="pt"):
+     """
+     This function generates 1D positional embeddings from a grid.
+
+     Args:
+         embed_dim (`int`): The embedding dimension `D`.
+         pos (`torch.Tensor`): 1D tensor of positions with shape `(M,)`.
+         phase (`int`, defaults to `0`): Offset added to `pos` before computing the sinusoids.
+
+     Returns:
+         `torch.Tensor`: Sinusoidal positional embeddings of shape `(M, D)`.
+     """
+     if output_type == "np":
+         deprecation_message = (
+             "`get_1d_sincos_pos_embed_from_grid` uses `torch` and supports `device`."
+             " `from_numpy` is no longer required."
+             " Pass `output_type='pt'` to use the new version now."
+         )
+         deprecate("output_type=='np'", "0.34.0", deprecation_message, standard_warn=False)
+         raise ValueError("Not supported")
+     if embed_dim % 2 != 0:
+         raise ValueError("embed_dim must be divisible by 2")
+
+     omega = torch.arange(embed_dim // 2, device=pos.device, dtype=torch.float64)
+     omega /= embed_dim / 2.0
+     omega = 1.0 / 10000**omega  # (D/2,)
+
+     pos = pos.reshape(-1) + phase  # (M,)
+     out = torch.outer(pos, omega)  # (M, D/2), outer product
+
+     emb_sin = torch.sin(out)  # (M, D/2)
+     emb_cos = torch.cos(out)  # (M, D/2)
+
+     emb = torch.concat([emb_sin, emb_cos], dim=1)  # (M, D)
+     return emb
+
+
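The `phase` argument is a plain positional offset, which the following property makes concrete (sketch, not part of the commit):

    import torch

    # Encoding positions `pos` with phase p is identical to encoding `pos + p` with phase 0.
    pos = torch.arange(4, dtype=torch.float32)
    a = pixcell_get_1d_sincos_pos_embed_from_grid(16, pos, phase=2, output_type="pt")
    b = pixcell_get_1d_sincos_pos_embed_from_grid(16, pos + 2, output_type="pt")
    print(torch.allclose(a, b))  # True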
+ class PixcellUNIProjection(nn.Module):
+     """
+     Projects UNI embeddings. Also handles dropout for classifier-free guidance.
+
+     Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
+     """
+
+     def __init__(self, in_features, hidden_size, out_features=None, act_fn="gelu_tanh", num_tokens=1):
+         super().__init__()
+         if out_features is None:
+             out_features = hidden_size
+         self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True)
+         if act_fn == "gelu_tanh":
+             self.act_1 = nn.GELU(approximate="tanh")
+         elif act_fn == "silu":
+             self.act_1 = nn.SiLU()
+         elif act_fn == "silu_fp32":
+             self.act_1 = FP32SiLU()
+         else:
+             raise ValueError(f"Unknown activation function: {act_fn}")
+         self.linear_2 = nn.Linear(in_features=hidden_size, out_features=out_features, bias=True)
+
+         # Learned null-condition embedding, swapped in for dropped conditions during CFG training.
+         self.register_buffer("uncond_embedding", nn.Parameter(torch.randn(num_tokens, in_features) / in_features**0.5))
+
+     def forward(self, caption):
+         hidden_states = self.linear_1(caption)
+         hidden_states = self.act_1(hidden_states)
+         hidden_states = self.linear_2(hidden_states)
+         return hidden_states
+
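A usage sketch for the projection (not part of the commit; the widths are assumptions, e.g. 1024 for a UNI ViT-L embedding and 1152 for the transformer's hidden size):

    import torch

    proj = PixcellUNIProjection(in_features=1024, hidden_size=1152, num_tokens=1)

    uni = torch.randn(4, 1, 1024)                    # (batch, num_tokens, in_features)
    drop = torch.rand(4) < 0.1                       # drop ~10% of conditions for CFG
    uni[drop] = proj.uncond_embedding.to(uni.dtype)  # null embedding, broadcast over dropped rows
    cond = proj(uni)                                 # (4, 1, 1152)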
+ class UNIPosEmbed(nn.Module):
+     """
+     Adds positional embeddings to the UNI conditions.
+
+     Args:
+         height (`int`, defaults to `1`): Height of the grid of UNI tokens.
+         width (`int`, defaults to `1`): Width of the grid of UNI tokens.
+         base_size (`int`, defaults to `16`): Base grid size used to scale the positions.
+         embed_dim (`int`, defaults to `768`): The dimension of the positional embedding.
+         interpolation_scale (`float`, defaults to `1`): The scale of the interpolation.
+         pos_embed_type (`str`, defaults to `"sincos"`): The type of positional embedding.
+     """
+
+     def __init__(
+         self,
+         height=1,
+         width=1,
+         base_size=16,
+         embed_dim=768,
+         interpolation_scale=1,
+         pos_embed_type="sincos",
+     ):
+         super().__init__()
+
+         num_embeds = height * width
+         grid_size = int(num_embeds**0.5)
+
+         if pos_embed_type == "sincos":
+             y_pos_embed = pixcell_get_2d_sincos_pos_embed(
+                 embed_dim,
+                 grid_size,
+                 base_size=base_size,
+                 interpolation_scale=interpolation_scale,
+                 output_type="pt",
+                 phase=base_size // num_embeds,
+             )
+             self.register_buffer("y_pos_embed", y_pos_embed.float().unsqueeze(0))
+         else:
+             raise ValueError("`pos_embed_type` not supported")
+
+     def forward(self, uni_embeds):
+         return (uni_embeds + self.y_pos_embed).to(uni_embeds.dtype)
+
+
+
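And a sketch of the positional embedding applied to a grid of UNI tokens (not part of the commit; the batch size and 2x2 grid are arbitrary):

    import torch

    pos_embed = UNIPosEmbed(height=2, width=2, embed_dim=768)
    uni_tokens = torch.randn(4, 4, 768)  # (batch, height*width, embed_dim)
    out = pos_embed(uni_tokens)          # same shape, with positions added
    print(out.shape)                     # torch.Size([4, 4, 768])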
class PixCellTransformer2DModel(ModelMixin, ConfigMixin):
    r"""
    A 2D Transformer model as introduced in PixArt family of models (https://arxiv.org/abs/2310.00426,