BiliSakura committed (verified)
Commit 272a844 · Parent(s): 33325f6

Update all files for BitDance-14B-16x-diffusers

bitdance_diffusers/pipeline_bitdance.py CHANGED
@@ -17,6 +17,13 @@ from .constants import SUPPORTED_IMAGE_SIZES
 PromptType = Union[str, List[str]]
 
 
+def _get_pkv_seq_len(past_key_values) -> int:
+    """Get cached sequence length from past_key_values (tuple or DynamicCache)."""
+    if hasattr(past_key_values, "get_seq_length"):
+        return past_key_values.get_seq_length()
+    return past_key_values[0][0].shape[2]
+
+
 class BitDanceDiffusionPipeline(DiffusionPipeline):
     model_cpu_offload_seq = "text_encoder->projector->diffusion_head->autoencoder"
 
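A minimal sketch (not part of the commit) of what the new helper handles: newer transformers releases return a DynamicCache from use_cache=True calls, which does not support the legacy past_key_values[0][0] tuple indexing, while older releases still return nested tuples. The tensor shapes and version note below are illustrative assumptions.

import torch
from transformers import DynamicCache  # assumes a transformers version that ships DynamicCache (>= ~4.36)

def _get_pkv_seq_len(past_key_values) -> int:  # copied from the commit
    if hasattr(past_key_values, "get_seq_length"):
        return past_key_values.get_seq_length()
    return past_key_values[0][0].shape[2]

k = v = torch.zeros(1, 8, 42, 64)  # (batch, heads, seq_len, head_dim), illustrative
legacy = ((k, v),)                 # one-layer legacy tuple cache
assert _get_pkv_seq_len(legacy) == 42

cache = DynamicCache()
cache.update(k, v, layer_idx=0)
assert _get_pkv_seq_len(cache) == 42  # same answer through the Cache API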
@@ -130,6 +137,8 @@ class BitDanceDiffusionPipeline(DiffusionPipeline):
     def _decode_tokens_to_image(self, image_latents: torch.Tensor, image_size: Tuple[int, int], ps: int = 1) -> torch.Tensor:
         h, w = image_size
         image_latents = rearrange(image_latents, "b (h w p1 p2) c -> b c (h p1) (w p2)", h=h // ps, w=w // ps, p1=ps, p2=ps)
+        ae_dtype = next(self.autoencoder.parameters()).dtype
+        image_latents = image_latents.to(dtype=ae_dtype)
         return self.autoencoder.decode(image_latents)
 
     @torch.no_grad()
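The two added lines guard against a dtype mismatch between the incoming latents (often half precision under autocast or CPU offload) and the autoencoder weights. A minimal sketch of the failure mode, using a stand-in Conv2d rather than BitDance's actual autoencoder (the layer and shapes are assumptions):

import torch

decoder = torch.nn.Conv2d(4, 3, kernel_size=3, padding=1)  # stand-in decoder, fp32 weights
latents = torch.randn(1, 4, 32, 32, dtype=torch.bfloat16)  # e.g. produced under bf16

try:
    decoder(latents)  # raises RuntimeError: input and weight dtypes differ
except RuntimeError as err:
    print(err)

# The commit's fix: cast the latents to whatever dtype the decoder holds.
latents = latents.to(dtype=next(decoder.parameters()).dtype)
out = decoder(latents)  # decodes cleanly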
@@ -183,7 +192,7 @@ class BitDanceDiffusionPipeline(DiffusionPipeline):
         pkv_c = outputs_c.past_key_values
 
         bi_attn_mask = torch.ones(
-            (input_embeds_cond.shape[0], 1, step_width, step_width + pkv_c[0][0].shape[2]),
+            (input_embeds_cond.shape[0], 1, step_width, step_width + _get_pkv_seq_len(pkv_c)),
             dtype=torch.bool,
             device=device,
         )
@@ -201,11 +210,16 @@ class BitDanceDiffusionPipeline(DiffusionPipeline):
         if guidance_scale > 1.0 and input_embeds_uncond is not None:
             outputs_u = model(inputs_embeds=input_embeds_uncond[:, :-step_width, :], use_cache=True)
             pkv_u = outputs_u.past_key_values
+            bi_attn_mask_u = torch.ones(
+                (input_embeds_uncond.shape[0], 1, step_width, step_width + _get_pkv_seq_len(pkv_u)),
+                dtype=torch.bool,
+                device=device,
+            )
             outputs_u = model(
                 inputs_embeds=input_embeds_uncond[:, -step_width:, :],
                 past_key_values=pkv_u,
                 use_cache=True,
-                attention_mask=bi_attn_mask,
+                attention_mask=bi_attn_mask_u,
             )
             pkv_u = outputs_u.past_key_values
             hidden_u = outputs_u.last_hidden_state[:, -step_width:]
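The conditional and unconditional prompts generally yield caches of different lengths, so reusing the conditional mask (sized against pkv_c, as the old code did) hands the unconditional pass a mask with the wrong key dimension. A shape-only sketch with made-up lengths:

import torch

step_width = 16
cond_cache_len, uncond_cache_len = 120, 4  # made-up: long prompt vs. near-empty prompt

mask_c = torch.ones(1, 1, step_width, step_width + cond_cache_len, dtype=torch.bool)
mask_u = torch.ones(1, 1, step_width, step_width + uncond_cache_len, dtype=torch.bool)

# Reusing mask_c here would claim 136 key positions while the unconditional
# cache plus the new step tokens only cover 20.
assert mask_c.shape[-1] != mask_u.shape[-1]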
@@ -235,7 +249,7 @@ class BitDanceDiffusionPipeline(DiffusionPipeline):
 
         model_input = curr_embeds + pos_slice
         bi_attn_mask = torch.ones(
-            (model_input.shape[0], 1, model_input.shape[1], model_input.shape[1] + pkv_c[0][0].shape[2]),
+            (model_input.shape[0], 1, model_input.shape[1], model_input.shape[1] + _get_pkv_seq_len(pkv_c)),
             dtype=torch.bool,
             device=device,
         )
@@ -249,11 +263,16 @@ class BitDanceDiffusionPipeline(DiffusionPipeline):
         hidden_c = outputs_c.last_hidden_state[:, -step_width:]
 
         if guidance_scale > 1.0 and hidden_u is not None and pkv_u is not None:
+            bi_attn_mask_u = torch.ones(
+                (model_input.shape[0], 1, model_input.shape[1], model_input.shape[1] + _get_pkv_seq_len(pkv_u)),
+                dtype=torch.bool,
+                device=device,
+            )
             outputs_u = model(
                 inputs_embeds=model_input[num_images_per_prompt:],
                 past_key_values=pkv_u,
                 use_cache=True,
-                attention_mask=bi_attn_mask[num_images_per_prompt:],
+                attention_mask=bi_attn_mask_u[num_images_per_prompt:],
             )
             pkv_u = outputs_u.past_key_values
             hidden_u = outputs_u.last_hidden_state[:, -step_width:]
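In this hunk the mask is allocated for the full batch and then sliced with [num_images_per_prompt:], mirroring the slice applied to model_input. A shape-only sketch, assuming (as the slicing suggests) that conditional rows precede unconditional rows in the batch:

import torch

num_images_per_prompt, step_width, hidden = 2, 16, 8  # illustrative sizes
model_input = torch.randn(2 * num_images_per_prompt, step_width, hidden)  # cond rows, then uncond rows
bi_attn_mask_u = torch.ones(model_input.shape[0], 1, step_width, step_width + 4, dtype=torch.bool)

# Slicing both tensors identically keeps the batch rows aligned.
uncond_inputs = model_input[num_images_per_prompt:]
uncond_mask = bi_attn_mask_u[num_images_per_prompt:]
assert uncond_inputs.shape[0] == uncond_mask.shape[0] == num_images_per_prompt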