| """ |
| SteadyDancer-14B - ZeroGPU ์ต์ ํ ๋ฒ์ |
| ===================================== |
| |
| ์ฃผ์ ๋ณ๊ฒฝ์ฌํญ: |
| 1. subprocess ์ ๊ฑฐ โ ์ง์ Python import ์ฌ์ฉ (ZeroGPU ํธํ์ฑ) |
| 2. ๋ชจ๋ธ ๋ก๋ฉ ์ต์ ํ (์ ์ญ ์บ์ฑ + GPU ํจ์ ๋ด ์ด๋) |
| 3. duration ์กฐ์ (300์ด = ZeroGPU ์ต๋๊ฐ) |
| 4. ํ๋ ์ ์ ์ ํ์ผ๋ก ํ์์์ ๋ฐฉ์ง |
| 5. ํฌ์ฆ ์ถ์ถ์ GPU ํจ์ ๋ฐ์ผ๋ก ๋ถ๋ฆฌ |
| 6. ๋ฉ๋ชจ๋ฆฌ ์ต์ ํ (torch.cuda.empty_cache, gc) |
| """ |
|
|
| import gradio as gr |
| import spaces |
| import torch |
| import os |
| import gc |
| import tempfile |
| import sys |
| import shutil |
| from pathlib import Path |
| from PIL import Image |
| import cv2 |
| import numpy as np |
| from huggingface_hub import snapshot_download |
|
|
| |
# --- Paths and generation limits ---------------------------------------------
REPO_DIR = Path("SteadyDancer")  # local shallow clone of the GitHub repo
MODEL_DIR = Path("SteadyDancer-14B")  # local snapshot of the 14B model weights
MAX_FRAMES = 49  # hard cap on pose frames fed to the model
MAX_DURATION_SECONDS = 300  # ZeroGPU maximum allowed GPU-slot duration (s)



# --- Lazily-initialized process-wide singletons ------------------------------
_pipe = None  # reserved for a cached pipeline (never populated in this file)
_pose_detector = None  # pose detector, created once by get_pose_detector()
_repo_ready = False  # set by ensure_repo() so clone/install runs only once
|
|
|
|
def ensure_repo():
    """Clone the SteadyDancer repository and install its requirements (once).

    Side effects:
        - Shallow-clones the GitHub repo into ``REPO_DIR`` if missing.
        - Best-effort installs the repo's requirements.txt (failures ignored).
        - Prepends ``REPO_DIR`` to ``sys.path`` so ``wan.*`` becomes importable.
        - Sets the module-level ``_repo_ready`` flag to skip repeated work.
    """
    global _repo_ready
    if _repo_ready:
        return

    if not REPO_DIR.exists():
        print("๐ฅ Cloning SteadyDancer repository...")
        # GitPython; depth=1 keeps the clone small (history not needed).
        import git
        git.Repo.clone_from(
            "https://github.com/MCG-NJU/SteadyDancer.git",
            str(REPO_DIR),
            depth=1
        )

    # Install the repo's own dependencies, if it ships a requirements file.
    repo_requirements = REPO_DIR / "requirements.txt"
    if repo_requirements.exists():
        print("๐ฆ Installing SteadyDancer requirements...")
        import subprocess
        # check=False: a partial install is tolerated; imports may still work.
        subprocess.run([
            sys.executable, "-m", "pip", "install", "-q",
            "-r", str(repo_requirements)
        ], check=False)

    # Make the cloned package importable without pip-installing it.
    if str(REPO_DIR) not in sys.path:
        sys.path.insert(0, str(REPO_DIR))

    _repo_ready = True
    print("✅ Repository ready")
|
|
|
|
def ensure_model():
    """Download the SteadyDancer-14B weights into ``MODEL_DIR`` (once).

    Uses ``huggingface_hub.snapshot_download``, which skips files that are
    already present locally, so repeated calls are cheap after the first run.
    """
    if not MODEL_DIR.exists():
        print("๐ฅ Downloading SteadyDancer-14B model weights...")
        # `resume_download=True` was removed: the argument is deprecated in
        # huggingface_hub >= 0.25 and interrupted downloads resume by default.
        snapshot_download(
            repo_id="MCG-NJU/SteadyDancer-14B",
            local_dir=str(MODEL_DIR),
        )
        print("✅ Model weights downloaded")
|
|
|
|
def get_pose_detector():
    """Load the pose detector once and cache it (kept on CPU at this point).

    Tries DWPose first and falls back to OpenPose if DWPose cannot be loaded.
    The detector may be moved to GPU later, inside extract_poses_from_video.

    Returns:
        The cached controlnet_aux detector instance.
    """
    global _pose_detector

    if _pose_detector is None:
        print("๐ฅ Loading DWPose detector...")
        try:
            from controlnet_aux import DWposeDetector
            _pose_detector = DWposeDetector.from_pretrained("lllyasviel/Annotators")
        except Exception as e:
            # DWPose has extra dependencies (onnxruntime etc.); OpenPose is
            # the lighter fallback when that load fails.
            print(f"โ ๏ธ DWPose ๋ก๋ ์คํจ, OpenPose๋ก ๋์ฒด: {e}")
            from controlnet_aux import OpenposeDetector
            _pose_detector = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
        print("✅ Pose detector loaded")

    return _pose_detector
|
|
|
|
def extract_poses_from_video(video_path, output_dir, max_frames=MAX_FRAMES, progress_callback=None):
    """Extract per-frame pose images from a driving video.

    Runs outside the @spaces.GPU window to conserve GPU-slot time; the
    detector is moved to CUDA only if CUDA happens to be available in this
    process. Frames are sampled uniformly so at most ``max_frames`` are
    processed, which keeps ZeroGPU jobs from timing out.

    Args:
        video_path: Path to a video readable by OpenCV.
        output_dir: Directory receiving ``positive``/``negative`` pose-image
            subfolders (created if needed). Both folders get identical images.
        max_frames: Upper bound on the number of frames to extract.
        progress_callback: Optional ``fn(fraction)`` called after each frame.

    Returns:
        Tuple of (positive_dir, negative_dir, fps, extracted_count);
        ``extracted_count`` is 0 when the video could not be opened/read.
    """
    pose_detector = get_pose_detector()

    # Opportunistically move the detector to GPU; some controlnet_aux
    # detectors do not implement .to(), hence the guarded call.
    # (Fixed: was a bare `except:` which also swallowed KeyboardInterrupt.)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    try:
        pose_detector = pose_detector.to(device)
    except Exception:
        pass

    pos_dir = Path(output_dir) / "positive"
    neg_dir = Path(output_dir) / "negative"
    pos_dir.mkdir(parents=True, exist_ok=True)
    neg_dir.mkdir(parents=True, exist_ok=True)

    cap = cv2.VideoCapture(str(video_path))
    if not cap.isOpened():
        # Unreadable video: report zero frames and let the caller error out.
        cap.release()
        return str(pos_dir), str(neg_dir), 24, 0

    fps = cap.get(cv2.CAP_PROP_FPS) or 24  # some containers report 0 fps
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Uniformly subsample when the clip has more frames than we can use.
    if total_frames > max_frames:
        frame_indices = np.linspace(0, total_frames - 1, max_frames, dtype=int)
    else:
        frame_indices = list(range(total_frames))

    extracted_count = 0
    for idx, frame_idx in enumerate(frame_indices):
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        ret, frame = cap.read()
        if not ret:
            continue

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(frame_rgb)

        try:
            with torch.inference_mode():
                pose_image = pose_detector(pil_image)
        except Exception as e:
            # A single bad frame must not abort the whole extraction;
            # substitute a black frame so the frame indices stay contiguous.
            print(f"โ ๏ธ Frame {idx} pose extraction failed: {e}")
            pose_image = Image.new('RGB', pil_image.size, (0, 0, 0))

        pose_image.save(pos_dir / f"{idx:04d}.jpg")
        pose_image.save(neg_dir / f"{idx:04d}.jpg")
        extracted_count += 1

        if progress_callback:
            # (idx + 1) so the reported fraction reaches 1.0 on the last frame.
            progress_callback((idx + 1) / len(frame_indices))

    cap.release()

    # Free any GPU memory the detector may have allocated.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()

    return str(pos_dir), str(neg_dir), fps, extracted_count
|
|
|
|
def load_steadydancer_components():
    """Manually load the individual SteadyDancer components (T5, CLIP, VAE, DiT).

    Mirrors the loading path of the repo's generate.py. On ZeroGPU the GPU
    process does not persist, so components must be re-loaded per GPU call.

    NOTE(review): this helper is not called anywhere in this file —
    generate_video_gpu constructs a DancerPipeline directly. Kept as an
    alternative/manual loading path; confirm whether it is still needed.

    Returns:
        Tuple of (cfg, t5_encoder, clip_encoder, vae, model), all on CUDA.
    """
    ensure_repo()
    ensure_model()

    print("๐ฅ Loading SteadyDancer components...")

    # Deferred imports: the repo must already be on sys.path (ensure_repo).
    from wan.configs import WAN_CONFIGS
    from wan.modules.vae import WanVAE
    from wan.modules.t5 import T5EncoderModel
    from wan.modules.clip import CLIPModel
    from wan.modules.model import WanModel

    cfg = WAN_CONFIGS["i2v-14B"]

    ckpt_dir = str(MODEL_DIR)

    # Text encoder (UMT5-XXL, bf16 checkpoint).
    t5_encoder = T5EncoderModel(
        text_len=cfg.text_len,
        dtype=cfg.t5_dtype,
        device="cuda",
        checkpoint_path=f"{ckpt_dir}/models_t5_umt5-xxl-enc-bf16.pth",
        tokenizer_path=f"{ckpt_dir}/google_umt5-xxl",
        spiece_path=f"{ckpt_dir}/google_umt5-xxl/spiece.model",
    )

    # Image encoder (open-CLIP XLM-RoBERTa-large ViT-H/14).
    clip_encoder = CLIPModel(
        dtype=cfg.clip_dtype,
        device="cuda",
        checkpoint_path=f"{ckpt_dir}/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth",
    )

    # Video VAE.
    vae = WanVAE(
        vae_pth=f"{ckpt_dir}/Wan2.1_VAE.pth",
        device="cuda",
        dtype=cfg.vae_dtype,
    )

    # Diffusion transformer, loaded in bf16 to save memory.
    model = WanModel.from_pretrained(ckpt_dir, torch_dtype=torch.bfloat16)
    model = model.to("cuda")
    model.eval()

    print("✅ All components loaded")
    return cfg, t5_encoder, clip_encoder, vae, model
|
|
|
|
@spaces.GPU(duration=MAX_DURATION_SECONDS)
def generate_video_gpu(
    ref_image_path: str,
    pos_folder: str,
    neg_folder: str,
    prompt: str,
    cfg_scale: float,
    condition_guide_scale: float,
    seed: int,
    width: int,
    height: int,
    output_path: str,
    num_frames: int = 49,
):
    """Generate the animation on GPU via SteadyDancer's internal API.

    Runs inside the ZeroGPU window (at most MAX_DURATION_SECONDS). Tries the
    direct DancerPipeline API first; on any failure falls back to invoking
    the repo's CLI script in a subprocess.

    Args:
        ref_image_path: Path to the reference (identity) image.
        pos_folder: Folder of positive pose frames from extract_poses_from_video.
        neg_folder: Folder of negative pose frames (same contents here).
        prompt: Text prompt for generation.
        cfg_scale: Classifier-free guidance scale.
        condition_guide_scale: Pose-condition guidance scale.
        seed: RNG seed, applied to random/numpy/torch.
        width: Output width in pixels.
        height: Output height in pixels.
        output_path: Where the resulting .mp4 should be written.
        num_frames: Number of frames to generate (default 49).

    Returns:
        ``output_path`` (the caller verifies the file actually exists).

    Raises:
        gr.Error: When the CLI fallback also fails.
    """
    import random
    import subprocess
    from PIL import Image

    print(f"๐ฌ Starting generation: {width}x{height}, seed={seed}, frames={num_frames}")

    # Seed every RNG source for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    try:
        # The ZeroGPU worker is a fresh process; re-add the repo to sys.path.
        if str(REPO_DIR) not in sys.path:
            sys.path.insert(0, str(REPO_DIR))

        from wan.pipelines.pipeline_dancer import DancerPipeline
        from wan.configs import WAN_CONFIGS

        cfg = WAN_CONFIGS["i2v-14B"]

        print("๐ฆ Creating DancerPipeline...")
        pipe = DancerPipeline(
            config=cfg,
            checkpoint_dir=str(MODEL_DIR),
            device_id=0,
            dtype=torch.bfloat16,
        )

        ref_image = Image.open(ref_image_path).convert("RGB")

        print("๐จ Running inference...")
        # Return value is unused; the pipeline writes the video to save_path.
        output = pipe.generate(
            image=ref_image,
            prompt=prompt,
            cond_pos_folder=pos_folder,
            cond_neg_folder=neg_folder,
            size=f"{width}*{height}",
            num_frames=num_frames,
            sample_guide_scale=cfg_scale,
            condition_guide_scale=condition_guide_scale,
            seed=seed,
            save_path=output_path,
        )

        print(f"✅ Generation complete!")

    except Exception as e:
        # Any direct-API failure (import, construction, inference) lands here.
        print(f"โ ๏ธ Direct API failed: {e}")
        print("โ ๏ธ Trying CLI fallback...")
        import traceback
        traceback.print_exc()

        # CLI fallback: run the repo's generation script directly.
        cmd = [
            sys.executable, str(REPO_DIR / "generate_dancer.py"),
            "--task", "i2v-14B",
            "--size", f"{width}*{height}",
            "--image", ref_image_path,
            "--cond_pos_folder", pos_folder,
            "--cond_neg_folder", neg_folder,
            "--prompt", prompt,
            "--save_file", output_path,
            "--sample_guide_scale", str(cfg_scale),
            "--condition_guide_scale", str(condition_guide_scale),
            "--base_seed", str(seed),
            "--ckpt_dir", str(MODEL_DIR),
        ]

        # NOTE(review): subprocess.TimeoutExpired is not caught; if the CLI
        # exceeds 250 s the exception propagates to the caller — confirm that
        # is intended (the caller wraps it in a generic gr.Error).
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=250)

        if result.returncode != 0:
            error_msg = result.stderr or result.stdout or str(e)
            raise gr.Error(f"์์ฑ ์คํจ: {error_msg[:300]}")

    finally:
        # Always release GPU memory before the ZeroGPU window closes.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()

    return output_path
|
|
|
|
def generate_video(
    reference_image,
    driving_video,
    prompt,
    cfg_scale,
    condition_guide_scale,
    seed,
    resolution,
    max_frames,
    progress=gr.Progress()
):
    """Main Gradio handler: pose extraction on CPU, then GPU generation.

    To avoid ZeroGPU proxy-token expiry, all CPU-side work (image saving,
    pose extraction) happens before generate_video_gpu is invoked.

    Args:
        reference_image: Filepath, numpy array, or PIL image from the UI.
        driving_video: Filepath of the uploaded driving video.
        prompt: Optional text prompt; a default is used when blank.
        cfg_scale: Classifier-free guidance scale.
        condition_guide_scale: Pose-condition guidance scale.
        seed: RNG seed (slider value).
        resolution: "WxH" string from the dropdown.
        max_frames: Slider value, clamped to 49.
        progress: Gradio progress reporter.

    Returns:
        Path (str) to the generated .mp4, copied outside the temp dir.

    Raises:
        gr.Error: On missing inputs, failed extraction, generation failure,
            or a missing output file.
    """
    if reference_image is None:
        raise gr.Error("โ ์ฐธ์กฐ ์ด๋ฏธ์ง๋ฅผ 업๋ก๋ํด์ฃผ์ธ์.")
    if driving_video is None:
        raise gr.Error("โ ๋๋ผ์ด๋น ๋น๋์ค๋ฅผ 업๋ก๋ํด์ฃผ์ธ์.")

    # First run only: weight download is by far the slowest step.
    if not MODEL_DIR.exists():
        progress(0.1, desc="โณ ๋ชจ๋ธ ๋ค์ด๋ก๋ ์ค (์ฒซ ์คํ ์ ์ค๋ ๊ฑธ๋ฆผ)...")
        ensure_model()

    progress(0.05, desc="๐ง ํ๊ฒฝ ์ค์ ์ค...")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Normalize the reference image (filepath / ndarray / PIL) to a PNG.
        progress(0.08, desc="๐ธ ์ด๋ฏธ์ง ์ฒ๋ฆฌ ์ค...")
        ref_image_path = tmpdir / "reference.png"
        if isinstance(reference_image, str):
            shutil.copy(reference_image, ref_image_path)
        elif isinstance(reference_image, np.ndarray):
            Image.fromarray(reference_image).save(ref_image_path)
        else:
            reference_image.save(ref_image_path)

        # Pose extraction runs on CPU, before the GPU window opens.
        progress(0.1, desc="๐บ ํฌ์ฆ ์ถ์ถ ์ค...")
        pose_dir = tmpdir / "poses"
        pose_dir.mkdir(exist_ok=True)

        # Hard clamp: the model supports at most 49 frames.
        actual_max_frames = min(int(max_frames), 49)

        def pose_progress(p):
            # Map extraction progress into the 10%-35% band of the bar.
            progress(0.1 + 0.25 * p, desc=f"๐บ ํฌ์ฆ ์ถ์ถ ์ค... {int(p*100)}%")

        pos_folder, neg_folder, fps, frame_count = extract_poses_from_video(
            driving_video,
            pose_dir,
            max_frames=actual_max_frames,
            progress_callback=pose_progress
        )

        if frame_count == 0:
            raise gr.Error("โ ๋น๋์ค์์ ํ๋ ์์ ์ถ์ถํ ์ ์์ต๋๋ค.")

        progress(0.35, desc=f"✅ {frame_count}๊ฐ ํ๋ ์ ์ถ์ถ ์๋ฃ")

        # Parse the "WxH" resolution string from the dropdown.
        width, height = map(int, resolution.split("x"))

        output_path = str(tmpdir / "output.mp4")

        # GPU phase: everything inside generate_video_gpu runs on ZeroGPU.
        progress(0.4, desc="๐ฌ ๋น๋์ค ์์ฑ ์ค (GPU)...")

        final_prompt = prompt.strip() if prompt and prompt.strip() else "A person dancing gracefully"

        try:
            generate_video_gpu(
                ref_image_path=str(ref_image_path),
                pos_folder=pos_folder,
                neg_folder=neg_folder,
                prompt=final_prompt,
                cfg_scale=cfg_scale,
                condition_guide_scale=condition_guide_scale,
                seed=int(seed),
                width=width,
                height=height,
                output_path=output_path,
                num_frames=frame_count,
            )
        except Exception as e:
            error_msg = str(e)
            # ZeroGPU tokens expire when pre-GPU setup takes too long.
            if "Expired ZeroGPU proxy token" in error_msg:
                raise gr.Error(
                    "โ ZeroGPU ํ ํฐ ๋ง๋ฃ๋จ. ํ์ด์ง๋ฅผ ์๋ก๊ณ ์นจํ๊ณ ๋ค์ ์๋ํด์ฃผ์ธ์. "
                    "ํ: ํ๋ ์ ์๋ฅผ 30 ์ดํ๋ก ์ค์ฌ๋ณด์ธ์."
                )
            raise gr.Error(f"โ ์์ฑ ์คํจ: {error_msg[:300]}")

        progress(0.95, desc="๐ผ ๋น๋์ค ์ ์ฅ ์ค...")

        # Copy the result out of the TemporaryDirectory before it is deleted.
        final_output = Path(tempfile.gettempdir()) / f"steadydancer_output_{seed}.mp4"

        if Path(output_path).exists():
            shutil.copy(output_path, final_output)
        else:
            raise gr.Error("โ ์ถ๋ ฅ ํ์ผ์ ์ฐพ์ ์ ์์ต๋๋ค.")

        progress(1.0, desc="✅ ์๋ฃ!")
        return str(final_output)
|
|
|
|
| |
# --- Gradio UI ---------------------------------------------------------------
with gr.Blocks(
    title="SteadyDancer-14B - ZeroGPU Optimized",
    theme=gr.themes.Soft(),
    css="""
    .main-title { text-align: center; margin-bottom: 1rem; }
    .warning-box {
        background: linear-gradient(135deg, #fff3cd 0%, #ffeeba 100%);
        border: 1px solid #ffc107;
        border-radius: 8px;
        padding: 1rem;
        margin: 1rem 0;
    }
    .tip-box {
        background: linear-gradient(135deg, #d4edda 0%, #c3e6cb 100%);
        border: 1px solid #28a745;
        border-radius: 8px;
        padding: 1rem;
        margin-top: 1rem;
    }
    """
) as demo:
    # Header: title plus paper/code/model links.
    # NOTE(review): confirm the arXiv id below matches the SteadyDancer paper.
    gr.Markdown("""
    # ๐บ SteadyDancer-14B (ZeroGPU ์ต์ ํ)
    ## Pose-Guided Human Image Animation

    **๋๋ผ์ด๋น ๋น๋์ค์ ๋์์ ์ฐธ์กฐ ์ด๋ฏธ์ง์ ์ ์กํฉ๋๋ค!**

    ๐ [Paper](https://arxiv.org/abs/2412.12534) |
    ๐ [GitHub](https://github.com/MCG-NJU/SteadyDancer) |
    ๐ค [Model](https://huggingface.co/MCG-NJU/SteadyDancer-14B)
    """, elem_classes=["main-title"])

    # ZeroGPU usage caveats shown to the user.
    gr.Markdown("""
    ### โ ๏ธ ZeroGPU ์ ํ์ฌํญ
    - **์ต๋ ์คํ ์๊ฐ**: 5๋ถ (300์ด)
    - **๊ถ์ฅ ํ๋ ์ ์**: 20-30 ํ๋ ์ (ํ์์์/ํ ํฐ ๋ง๋ฃ ๋ฐฉ์ง)
    - **๊ถ์ฅ ํด์๋**: 480x832 ๋๋ ๋ ๋ฎ์ ํด์๋
    - **์ฒซ ์คํ**: ๋ชจ๋ธ ๋ค์ด๋ก๋๋ก ์๊ฐ์ด ๊ฑธ๋ฆด ์ ์์ โ ํ์ด์ง ์๋ก๊ณ ์นจ ํ ์ฌ์๋
    """, elem_classes=["warning-box"])

    with gr.Row():
        # Left column: inputs and generation settings.
        with gr.Column(scale=1):
            gr.Markdown("### ๐ธ 입력")

            reference_image = gr.Image(
                label="์ฐธ์กฐ ์ด๋ฏธ์ง (애니메이션할 ์ธ๋ฌผ)",
                type="numpy",
                sources=["upload", "clipboard"],
                height=280
            )

            driving_video = gr.Video(
                label="๋๋ผ์ด๋น ๋น๋์ค (๋์ ์์ค)",
                sources=["upload"],
                height=280
            )

            prompt = gr.Textbox(
                label="ํ๋กฌํํธ (์ ํ์ฌํญ)",
                placeholder="์: A person dancing gracefully in a studio",
                value=""
            )

            with gr.Accordion("โ๏ธ ๊ณ ๊ธ ์ค์ ", open=True):
                resolution = gr.Dropdown(
                    label="์ถ๋ ฅ ํด์๋",
                    choices=[
                        "480x832",
                        "832x480",
                        "576x1024",
                        "1024x576",
                        "720x1280",
                        "1280x720",
                    ],
                    value="480x832",
                    info="โก ๋ฎ์ ํด์๋ = ๋น ๋ฅธ ์์ฑ + ํ์์์ ๋ฐฉ์ง"
                )

                # Capped at 49 to match the model's frame limit (MAX_FRAMES).
                max_frames = gr.Slider(
                    label="์ต๋ ํ๋ ์ ์",
                    minimum=10,
                    maximum=49,
                    value=30,
                    step=1,
                    info="โก ์ ์ ํ๋ ์ = ๋น ๋ฅธ ์์ฑ + ํ ํฐ ๋ง๋ฃ ๋ฐฉ์ง (30 ๊ถ์ฅ)"
                )

                cfg_scale = gr.Slider(
                    label="CFG Scale",
                    minimum=1.0,
                    maximum=10.0,
                    value=5.0,
                    step=0.5
                )

                condition_guide_scale = gr.Slider(
                    label="Condition Guide Scale",
                    minimum=0.0,
                    maximum=2.0,
                    value=1.0,
                    step=0.1
                )

                seed = gr.Slider(
                    label="์๋",
                    minimum=0,
                    maximum=999999,
                    value=42,
                    step=1
                )

            generate_btn = gr.Button(
                "๐ฌ ๋น๋์ค ์์ฑ",
                variant="primary",
                size="lg"
            )

        # Right column: output video and usage tips.
        with gr.Column(scale=1):
            gr.Markdown("### ๐ฅ ์ถ๋ ฅ")

            output_video = gr.Video(
                label="์์ฑ๋ ๋น๋์ค",
                height=450,
                autoplay=True
            )

            gr.Markdown("""
            ### ๐ก ํ
            - **์ฐธ์กฐ ์ด๋ฏธ์ง**: ์ ์ ์ด ๋ณด์ด๊ณ ๋ฐฐ๊ฒฝ์ด ๋จ์ํ ์ด๋ฏธ์ง๊ฐ ์ข์ต๋๋ค
            - **๋๋ผ์ด๋น ๋น๋์ค**: 3-5์ด ์ ๋์ ์งง์ ๋น๋์ค๊ฐ ์ข์ต๋๋ค
            - **ํ์์์ ๋ฐ์ ์**: ํ๋ ์ ์์ ํด์๋๋ฅผ ๋ฎ์ถฐ๋ณด์ธ์
            - **์ฒซ ์คํ**: ๋ชจ๋ธ ๋ค์ด๋ก๋๋ก ์๊ฐ์ด ๊ฑธ๋ฆด ์ ์์ต๋๋ค
            """, elem_classes=["tip-box"])

    # Wire the button to the main handler; output goes to the video player.
    generate_btn.click(
        fn=generate_video,
        inputs=[
            reference_image,
            driving_video,
            prompt,
            cfg_scale,
            condition_guide_scale,
            seed,
            resolution,
            max_frames,
        ],
        outputs=output_video
    )
|
|
|
|
def warmup():
    """Perform all startup preparation before the first user request.

    - Installs small Python dependencies the SteadyDancer repo needs.
    - Clones the repo and downloads the model weights (slowest step).
    - Pre-loads the pose detector and smoke-tests the ``wan`` imports.

    Doing this at launch keeps per-request work short, which avoids ZeroGPU
    proxy-token expiry during generation.
    """
    import importlib.util
    import subprocess

    print("🚀 Warming up SteadyDancer-14B...")

    # Detect missing lightweight deps with find_spec instead of four
    # copy-pasted try/import blocks, then install them in one pip call.
    print("📦 Checking dependencies...")
    required = ("easydict", "einops", "ftfy", "decord")
    deps_to_install = [
        name for name in required if importlib.util.find_spec(name) is None
    ]

    if deps_to_install:
        print(f"📦 Installing missing dependencies: {deps_to_install}")
        # Best-effort: a failed install surfaces later as an import error.
        subprocess.run(
            [sys.executable, "-m", "pip", "install", "-q"] + deps_to_install,
            check=False
        )

    # Clone the repo + install its requirements (no-op after first run).
    ensure_repo()
    print("✅ Repository ready")

    # Download model weights (no-op once MODEL_DIR exists).
    ensure_model()
    print("✅ Model weights ready")

    # Pose detector is optional at startup; it loads lazily on first use.
    try:
        get_pose_detector()
        print("✅ Pose detector ready")
    except Exception as e:
        print(f"⚠️ Pose detector will be loaded on first use: {e}")

    # Smoke-test that the cloned package is importable.
    try:
        # Guarded insert (was unconditional, duplicating sys.path entries).
        if str(REPO_DIR) not in sys.path:
            sys.path.insert(0, str(REPO_DIR))
        from wan.configs import WAN_CONFIGS
        print("✅ SteadyDancer modules importable")
    except Exception as e:
        print(f"⚠️ SteadyDancer import test failed: {e}")
        print(" (Will try again during generation)")

    print("🎉 Warmup complete! Ready for requests.")
|
|
|
|
if __name__ == "__main__":
    # Do all heavy setup (clone, pip installs, weight download) before the UI
    # comes up, so user requests stay within the ZeroGPU time budget.
    warmup()
    demo.launch()