Commit c059aa2 (parent: 42e8438) — commit message: "preload" — Browse files
app.py
CHANGED
|
@@ -102,6 +102,48 @@ def load_video_frames(video_path: str, source_frames: int):
|
|
| 102 |
|
| 103 |
return input_video, original_height, original_width
|
| 104 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
class VideoCoF_Controller(Wan_Controller):
|
| 106 |
@spaces.GPU(duration=300)
|
| 107 |
@timer
|
|
@@ -317,24 +359,33 @@ def ui(GPU_memory_mode, scheduler_dict, config_path, compile_dit, weight_dtype):
|
|
| 317 |
try:
|
| 318 |
from huggingface_hub import snapshot_download, hf_hub_download
|
| 319 |
print("Downloading Wan2.1-T2V-14B weights...")
|
| 320 |
-
|
|
|
|
| 321 |
|
| 322 |
os.makedirs("models/Personalized_Model", exist_ok=True)
|
| 323 |
|
| 324 |
print("Downloading VideoCoF weights...")
|
| 325 |
-
|
|
|
|
| 326 |
|
| 327 |
print("Downloading FusionX Acceleration LoRA...")
|
| 328 |
-
|
|
|
|
| 329 |
|
| 330 |
except Exception as e:
|
| 331 |
print(f"Warning: Failed to pre-download weights: {e}")
|
| 332 |
|
| 333 |
-
base_model_dropdown, lora_model_dropdown, lora_alpha_slider, _ = create_finetune_models_checkpoints(
|
|
|
|
|
|
|
| 334 |
|
| 335 |
# Set default LoRA alpha to 1.0 (matching inference.py)
|
| 336 |
lora_alpha_slider.value = 1.0
|
| 337 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
with gr.Column(variant="panel"):
|
| 339 |
prompt_textbox, negative_prompt_textbox = create_prompts(prompt="Remove the young man with short black hair wearing black shirt on the left.")
|
| 340 |
|
|
|
|
| 102 |
|
| 103 |
return input_video, original_height, original_width
|
| 104 |
|
| 105 |
+
|
| 106 |
+
def preload_models(controller, default_model_path, default_lora_name, acc_lora_path):
    """
    Warm up the controller before the UI starts serving requests.

    Loads the base diffusion transformer, merges the VideoCoF LoRA and,
    when present on disk, the acceleration LoRA, so the first user request
    does not pay the model-loading cost.  Failures are reported and
    swallowed (best-effort preload); the CUDA cache is cleared on exit
    either way.
    """
    controller.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Make sure the bookkeeping attributes exist before anything reads them.
    for attr, default in (("_active_lora_path", None), ("_acc_lora_active", False)):
        if not hasattr(controller, attr):
            setattr(controller, attr, default)

    try:
        print(f"[preload] Loading base model: {default_model_path}")
        controller.update_diffusion_transformer(default_model_path)
        controller.update_base_model(default_model_path)

        print(f"[preload] Loading VideoCoF LoRA: {default_lora_name}")
        controller.update_lora_model(default_lora_name)
        lora_path = controller.lora_model_path
        if lora_path and lora_path != "none":
            controller.pipeline = merge_lora(
                controller.pipeline, lora_path, multiplier=1.0, device=controller.device
            )
            controller._active_lora_path = lora_path

        if acc_lora_path and os.path.exists(acc_lora_path):
            print(f"[preload] Loading Acceleration LoRA: {acc_lora_path}")
            controller.pipeline = merge_lora(
                controller.pipeline, acc_lora_path, multiplier=1.0, device=controller.device
            )
            controller._acc_lora_active = True
        else:
            print(f"[preload] Acceleration LoRA not found at {acc_lora_path}")
    except Exception as err:
        print(f"[preload] Warning: preload failed: {err}")
    finally:
        torch.cuda.empty_cache()
|
| 146 |
+
|
| 147 |
class VideoCoF_Controller(Wan_Controller):
|
| 148 |
@spaces.GPU(duration=300)
|
| 149 |
@timer
|
|
|
|
| 359 |
try:
|
| 360 |
from huggingface_hub import snapshot_download, hf_hub_download
|
| 361 |
print("Downloading Wan2.1-T2V-14B weights...")
|
| 362 |
+
default_model_path = "Wan-AI/Wan2.1-T2V-14B"
|
| 363 |
+
snapshot_download(repo_id=default_model_path, local_dir=default_model_path)
|
| 364 |
|
| 365 |
os.makedirs("models/Personalized_Model", exist_ok=True)
|
| 366 |
|
| 367 |
print("Downloading VideoCoF weights...")
|
| 368 |
+
default_lora_name = "videocof.safetensors"
|
| 369 |
+
hf_hub_download(repo_id="XiangpengYang/VideoCoF", filename=default_lora_name, local_dir="models/Personalized_Model")
|
| 370 |
|
| 371 |
print("Downloading FusionX Acceleration LoRA...")
|
| 372 |
+
acc_lora_filename = "Wan2.1_Text_to_Video_14B_FusionX_LoRA.safetensors"
|
| 373 |
+
hf_hub_download(repo_id="MonsterMMORPG/Wan_GGUF", filename=acc_lora_filename, local_dir="models/Personalized_Model")
|
| 374 |
|
| 375 |
except Exception as e:
|
| 376 |
print(f"Warning: Failed to pre-download weights: {e}")
|
| 377 |
|
| 378 |
+
base_model_dropdown, lora_model_dropdown, lora_alpha_slider, _ = create_finetune_models_checkpoints(
|
| 379 |
+
controller, visible=False, default_lora="videocof.safetensors"
|
| 380 |
+
)
|
| 381 |
|
| 382 |
# Set default LoRA alpha to 1.0 (matching inference.py)
|
| 383 |
lora_alpha_slider.value = 1.0
|
| 384 |
|
| 385 |
+
# Preload heavy weights and LoRAs before launching the UI to avoid first-run latency.
|
| 386 |
+
acc_lora_path = os.path.join("models", "Personalized_Model", "Wan2.1_Text_to_Video_14B_FusionX_LoRA.safetensors")
|
| 387 |
+
preload_models(controller, default_model_path, "videocof.safetensors", acc_lora_path)
|
| 388 |
+
|
| 389 |
with gr.Column(variant="panel"):
|
| 390 |
prompt_textbox, negative_prompt_textbox = create_prompts(prompt="Remove the young man with short black hair wearing black shirt on the left.")
|
| 391 |
|