Model changed
app.py CHANGED
@@ -124,6 +124,19 @@ default_local_storage = {
     "generation-mode": "image",
 }
 
+def load_transfomer():
+    print("Loading transformer ...")
+    transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained(
+        "lllyasviel/FramePackI2V_HY", torch_dtype=torch.bfloat16
+    ).cpu()
+    transformer.eval()
+    transformer.high_quality_fp32_output_for_inference = True
+    print("transformer.high_quality_fp32_output_for_inference = True")
+
+    transformer.to(dtype=torch.bfloat16)
+    transformer.requires_grad_(False)
+    return transformer
+
 @torch.no_grad()
 def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
     """
@@ -319,6 +332,13 @@ def worker(input_image, image_position, prompts, n_prompt, seed, resolution, tot
            lora_multiplier,
            fp8_optimization):
     global transformer, previous_lora_file, previous_lora_multiplier, previous_fp8_optimization
+
+    model_changed = transformer is None or (
+        lora_file != previous_lora_file
+        or lora_multiplier != previous_lora_multiplier
+        or fp8_optimization != previous_fp8_optimization
+    )
+
     is_last_frame = (image_position == 100)
     def encode_prompt(prompt, n_prompt):
         llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
@@ -559,6 +579,13 @@ def worker_video(input_video, prompts, n_prompt, seed, batch, resolution, total_
            lora_multiplier,
            fp8_optimization):
     global transformer, previous_lora_file, previous_lora_multiplier, previous_fp8_optimization
+
+    model_changed = transformer is None or (
+        lora_file != previous_lora_file
+        or lora_multiplier != previous_lora_multiplier
+        or fp8_optimization != previous_fp8_optimization
+    )
+
     def encode_prompt(prompt, n_prompt):
         llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
 
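Note: the commit adds the model_changed flag in worker() and worker_video() but the shown hunks do not include the code that consumes it. The snippet below is a minimal sketch, assuming the flag is meant to trigger a rebuild of the transformer via load_transfomer() only when the LoRA file, LoRA multiplier, or fp8 setting differs from the previous run; the helper maybe_reload_transformer() and the module-level defaults are hypothetical and are not part of this commit.

# Hypothetical sketch, not from this commit: one way the model_changed check
# added to worker()/worker_video() could drive a lazy reload of the
# transformer built by load_transfomer().
transformer = None
previous_lora_file = None
previous_lora_multiplier = None
previous_fp8_optimization = None

def maybe_reload_transformer(lora_file, lora_multiplier, fp8_optimization):
    global transformer, previous_lora_file, previous_lora_multiplier, previous_fp8_optimization

    # Same condition the commit adds at the top of worker() and worker_video().
    model_changed = transformer is None or (
        lora_file != previous_lora_file
        or lora_multiplier != previous_lora_multiplier
        or fp8_optimization != previous_fp8_optimization
    )

    if model_changed:
        # Assumed behaviour: reload the base checkpoint so stale LoRA weights
        # or an earlier fp8 conversion are not reused with the new settings.
        transformer = load_transfomer()
        previous_lora_file = lora_file
        previous_lora_multiplier = lora_multiplier
        previous_fp8_optimization = fp8_optimization

    return transformer

As a design note, load_transfomer() keeps the freshly loaded weights on the CPU in bfloat16 with gradients disabled, presumably so the surrounding memory-management code can move the model to the GPU only while sampling; reloading is therefore a disk- and RAM-bound operation rather than a VRAM-bound one.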