Fabrice-TIERCELIN committed on
Commit
a3394ff
·
verified ·
1 Parent(s): c303d3a

More optimization

Browse files
Files changed (1) hide show
  1. app.py +11 -3
app.py CHANGED
@@ -995,20 +995,21 @@ def worker_video(input_video, end_frame, end_stillness, prompts, n_prompt, seed,
995
  load_model_as_complete(vae, target_device=gpu)
996
 
997
  stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'End frame encoding ...'))))
998
- end_latent, end_clip_embedding = image_encode(
999
  end_frame, target_width=width, target_height=height, vae=vae,
1000
  image_encoder=image_encoder, feature_extractor=feature_extractor, device=gpu
1001
- )[:2]
1002
  end_frame = None
1003
  end_latent = end_latent.to(dtype=torch.float32, device=cpu)
1004
  else:
1005
- end_latent = end_clip_embedding = None
1006
 
1007
  # Clean GPU
1008
  if not high_vram:
1009
  unload_complete_models(image_encoder, vae)
1010
 
1011
  image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
 
1012
 
1013
  # Dtype
1014
  image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
@@ -1180,6 +1181,13 @@ def worker_video(input_video, end_frame, end_stillness, prompts, n_prompt, seed,
1180
  clean_latent_4x_indices=clean_latent_4x_indices,
1181
  callback=callback,
1182
  )
 
 
 
 
 
 
 
1183
 
1184
  total_generated_latent_frames += int(generated_latents.shape[2])
1185
  history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
 
995
  load_model_as_complete(vae, target_device=gpu)
996
 
997
  stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'End frame encoding ...'))))
998
+ end_latent = image_encode(
999
  end_frame, target_width=width, target_height=height, vae=vae,
1000
  image_encoder=image_encoder, feature_extractor=feature_extractor, device=gpu
1001
+ )[:1]
1002
  end_frame = None
1003
  end_latent = end_latent.to(dtype=torch.float32, device=cpu)
1004
  else:
1005
+ end_latent = None
1006
 
1007
  # Clean GPU
1008
  if not high_vram:
1009
  unload_complete_models(image_encoder, vae)
1010
 
1011
  image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
1012
+ image_encoder_output = None
1013
 
1014
  # Dtype
1015
  image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
 
1181
  clean_latent_4x_indices=clean_latent_4x_indices,
1182
  callback=callback,
1183
  )
1184
+ clean_latents = None
1185
+ clean_latents_2x = None
1186
+ clean_latents_4x = None
1187
+ latent_indices = None
1188
+ clean_latent_indices = None
1189
+ clean_latent_2x_indices = None
1190
+ clean_latent_4x_indices = None
1191
 
1192
  total_generated_latent_frames += int(generated_latents.shape[2])
1193
  history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)