"""Gradio demo: WAN image-to-video generation with lazy model load/unload.

The pipeline is loaded on first request and torn down after every
generation ("cost safety" — keeps VRAM free between requests at the
price of a reload per call). Optional HTTP basic auth is read from the
GRADIO_USERNAME / GRADIO_PASSWORD environment variables.
"""

import os

import torch
import gradio as gr
from PIL import Image
from diffusers import WanImageToVideoPipeline
from diffusers.utils import export_to_video

MODEL_ID = "TestOrganizationPleaseIgnore/WAMU-Merge-VisualEffects_WAN2.2_I2V_LIGHTNING"

# Lazily-initialized pipeline; None whenever the model is not resident.
pipe = None


def generate(image, prompt):
    """Run one image-to-video generation and return the output video path.

    Args:
        image: PIL image (Gradio supplies it via ``gr.Image(type="pil")``).
        prompt: Optional text prompt; empty string is used when falsy.

    Returns:
        Filesystem path of the exported video (export_to_video writes a
        temp .mp4 when no explicit path is given).
    """
    global pipe
    if pipe is None:
        pipe = WanImageToVideoPipeline.from_pretrained(
            MODEL_ID, torch_dtype=torch.float16
        ).to("cuda" if torch.cuda.is_available() else "cpu")

    try:
        result = pipe(
            image=image,
            prompt=prompt or "",
            num_frames=24,
        )
        # Pipelines return batched frames: result.frames is a list of
        # clips, one per prompt — export the first (and only) clip.
        video_path = export_to_video(result.frames[0])
    finally:
        # Unload model to free VRAM (cost safety) even if generation
        # failed — otherwise an error would leave the model resident.
        del pipe
        pipe = None
        torch.cuda.empty_cache()

    return video_path


with gr.Blocks() as demo:
    gr.Markdown("# WAN Image → Video (Private Safe Mode)")
    img = gr.Image(type="pil")
    prompt = gr.Textbox(label="Prompt")
    out = gr.Video()
    btn = gr.Button("Generate")
    btn.click(generate, [img, prompt], out)

# Enable basic auth only when both credentials are configured.
USERNAME = os.getenv("GRADIO_USERNAME")
PASSWORD = os.getenv("GRADIO_PASSWORD")
auth = (USERNAME, PASSWORD) if USERNAME and PASSWORD else None

if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        ssr_mode=False,
        auth=auth,
    )