import os
import random
import re
import tempfile
import traceback

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import (
    AutoencoderKLWan,
    UniPCMultistepScheduler,
    WanImageToVideoPipeline,
    WanTransformer3DModel,
)
from diffusers.utils import export_to_video
from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel

# --- I2V (Image-to-Video) Configuration ---
I2V_BASE_MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"  # Supplies the VAE, encoders, and scheduler config
I2V_FUSIONX_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
I2V_FUSIONX_FILENAME = "Wan14Bi2vFusioniX.safetensors"

# --- Load Pipelines ---
print("🚀 Loading I2V pipeline from single file...")
i2v_pipe = None
try:
    # Load all components the pipeline needs from the base model repo.
    i2v_image_encoder = CLIPVisionModel.from_pretrained(
        I2V_BASE_MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32
    )
    i2v_vae = AutoencoderKLWan.from_pretrained(
        I2V_BASE_MODEL_ID, subfolder="vae", torch_dtype=torch.float32
    )
    i2v_text_encoder = UMT5EncoderModel.from_pretrained(
        I2V_BASE_MODEL_ID, subfolder="text_encoder", torch_dtype=torch.bfloat16
    )
    i2v_tokenizer = AutoTokenizer.from_pretrained(I2V_BASE_MODEL_ID, subfolder="tokenizer")
    i2v_image_processor = CLIPImageProcessor.from_pretrained(I2V_BASE_MODEL_ID, subfolder="image_processor")

    # Create the scheduler with a custom flow_shift.
    scheduler_config = UniPCMultistepScheduler.load_config(I2V_BASE_MODEL_ID, subfolder="scheduler")
    scheduler_config["flow_shift"] = 8.0
    i2v_scheduler = UniPCMultistepScheduler.from_config(scheduler_config)

    # Load the FusionX transformer from its single-file checkpoint.
    i2v_transformer = WanTransformer3DModel.from_single_file(
        f"https://huggingface.co/{I2V_FUSIONX_REPO_ID}/blob/main/{I2V_FUSIONX_FILENAME}",
        torch_dtype=torch.bfloat16,
    )

    # Manually assemble the pipeline around the custom transformer.
    i2v_pipe = WanImageToVideoPipeline(
        vae=i2v_vae,
        text_encoder=i2v_text_encoder,
        tokenizer=i2v_tokenizer,
        image_encoder=i2v_image_encoder,
        image_processor=i2v_image_processor,
        scheduler=i2v_scheduler,
        transformer=i2v_transformer,
    )
    i2v_pipe.to("cuda")
    print("✅ I2V pipeline loaded successfully from single file.")
except Exception:
    print("❌ Critical Error: Failed to load I2V pipeline from single file.")
    traceback.print_exc()
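# Optional low-VRAM variant (an assumption about the deployment target, not part
# of the original setup): if the 14B model does not fit on the GPU, diffusers'
# built-in CPU offloading can replace the eager `i2v_pipe.to("cuda")` above. It
# keeps only the active submodule on the GPU, trading speed for memory:
#
#     i2v_pipe.enable_model_cpu_offload()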
# --- Constants and Configuration ---
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE = 640
DEFAULT_W_SLIDER_VALUE = 1024
NEW_FORMULA_MAX_AREA = 640.0 * 1024.0

SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 16
T2V_FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81

# --- Default Prompts ---
default_prompt_i2v = (
    "Cinematic motion, smooth animation, detailed textures, dynamic lighting, "
    "professional cinematography"
)
default_negative_prompt = (
    "Static image, no motion, blurred details, overexposed, underexposed, low quality, "
    "worst quality, JPEG artifacts, ugly, incomplete, extra fingers, poorly drawn hands, "
    "poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, "
    "still picture, messy background, watermark, text, signature, three legs, "
    "many people in the background, walking backwards"
)


# --- Helper Functions ---
def sanitize_prompt_for_filename(prompt: str, max_len: int = 60) -> str:
    """Sanitize a prompt string so it can be used as a valid filename."""
    if not prompt:
        prompt = "video"
    sanitized = re.sub(r"[^\w\s_-]", "", prompt).strip()
    sanitized = re.sub(r"[\s_-]+", "_", sanitized)
    return sanitized[:max_len]


def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
                                  min_slider_h, max_slider_h,
                                  min_slider_w, max_slider_w,
                                  default_h, default_w):
    """Choose output dimensions that preserve the image's aspect ratio, keep the
    pixel area at or under `calculation_max_area`, and are multiples of `mod_val`."""
    orig_w, orig_h = pil_image.size
    if orig_w <= 0 or orig_h <= 0:
        return default_h, default_w

    aspect_ratio = orig_h / orig_w
    calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
    calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))

    # Snap down to the nearest multiple of mod_val, then clip to the slider range.
    calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
    calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
    new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
    new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
    return new_h, new_w


def handle_image_upload_for_dims_wan(uploaded_pil_image):
    """Update the H/W sliders to match a newly uploaded image."""
    if uploaded_pil_image is None:
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
    try:
        new_h, new_w = _calculate_new_dimensions_wan(
            uploaded_pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
            SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
            DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
        )
        return gr.update(value=new_h), gr.update(value=new_w)
    except Exception:
        gr.Warning("Error calculating new dimensions. Resetting to default.")
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
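# Worked example for the resize helpers above (illustrative numbers): a 1920x1080
# upload gives aspect_ratio = 1080/1920 = 0.5625, so calc_h = round(sqrt(655360 *
# 0.5625)) = 607 and calc_w = round(sqrt(655360 / 0.5625)) = 1079. Snapping down
# to multiples of 32 yields 576x1056, and clipping the width to the slider
# maximum produces a final 576x1024 target, which stays under
# NEW_FORMULA_MAX_AREA while roughly preserving the aspect ratio.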
# --- GPU Duration Estimators for @spaces.GPU ---
# `spaces.GPU` accepts either a number of seconds or a callable for `duration`;
# a callable is invoked with the same arguments as the decorated function, so the
# estimator below mirrors generate_i2v_video's signature.
def get_i2v_duration(input_image, prompt, height, width, negative_prompt,
                     duration_seconds, guidance_scale, steps, seed, randomize_seed,
                     progress=None):
    """Estimate GPU time for Image-to-Video generation."""
    if steps > 8 and duration_seconds > 3:
        return 600
    elif steps > 8 or duration_seconds > 3:
        return 300
    else:
        return 150


def get_t2v_duration(steps, duration_seconds):
    """Estimate GPU time for Text-to-Video generation.

    Note: not currently wired to a UI tab in this build.
    """
    if steps > 15 and duration_seconds > 4:
        return 700
    elif steps > 15 or duration_seconds > 4:
        return 400
    else:
        return 200


# --- Core Generation Functions ---
@spaces.GPU(duration=get_i2v_duration)
def generate_i2v_video(input_image, prompt, height, width, negative_prompt,
                       duration_seconds, guidance_scale, steps, seed, randomize_seed,
                       progress=gr.Progress(track_tqdm=True)):
    """Generate a video from an initial image and a prompt."""
    if input_image is None:
        raise gr.Error("Please upload an input image for Image-to-Video generation.")
    if i2v_pipe is None:
        raise gr.Error("The I2V pipeline failed to load at startup; check the server logs.")

    # Snap the requested dimensions down to multiples of MOD_VALUE.
    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)

    # The Wan VAE's temporal compression requires num_frames ≡ 1 (mod 4), so
    # round to the nearest 4k+1 and clip to the model's supported range.
    target_frames = int(round(duration_seconds * FIXED_FPS))
    adjusted_frames = 4 * round((target_frames - 1) / 4) + 1
    num_frames = int(np.clip(adjusted_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = input_image.resize((target_w, target_h))
    enhanced_prompt = f"{prompt}, cinematic quality, smooth motion, detailed animation, dynamic lighting"

    with torch.inference_mode():
        output_frames_list = i2v_pipe(
            image=resized_image,
            prompt=enhanced_prompt,
            negative_prompt=negative_prompt,
            height=target_h,
            width=target_w,
            num_frames=num_frames,
            guidance_scale=float(guidance_scale),
            num_inference_steps=int(steps),
            generator=torch.Generator(device="cuda").manual_seed(current_seed),
        ).frames[0]

    sanitized_prompt = sanitize_prompt_for_filename(prompt)
    filename = f"i2v_{sanitized_prompt}_{current_seed}.mp4"
    temp_dir = tempfile.mkdtemp()
    video_path = os.path.join(temp_dir, filename)
    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    return video_path, current_seed, gr.File(value=video_path, visible=True, label=f"📥 Download: {filename}")


# --- Gradio UI Layout ---
with gr.Blocks() as demo:
    with gr.Column(elem_classes=["main-container"]):
        gr.Markdown("# ⚡ FusionX Enhanced Wan 2.1 Video Suite")
        with gr.Tabs(elem_classes=["gr-tabs"]):
            # --- Image-to-Video Tab ---
            with gr.TabItem("🖼️ Image-to-Video", id="i2v_tab"):
                with gr.Row():
                    with gr.Column(elem_classes=["input-container"]):
                        i2v_input_image = gr.Image(
                            type="pil",
                            label="🖼️ Input Image (auto-resizes H/W sliders)",
                            elem_classes=["image-upload"]
                        )
                        i2v_prompt = gr.Textbox(label="✏️ Prompt", value=default_prompt_i2v, lines=3)
                        i2v_duration = gr.Slider(
                            minimum=round(MIN_FRAMES_MODEL / FIXED_FPS, 1),
                            maximum=round(MAX_FRAMES_MODEL / FIXED_FPS, 1),
                            step=0.1,
                            value=2,
                            label="⏱️ Duration (seconds)",
                            info=f"Generates {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
                        )
                        with gr.Accordion("⚙️ Advanced Settings", open=False):
                            i2v_neg_prompt = gr.Textbox(label="❌ Negative Prompt", value=default_negative_prompt, lines=4)
                            i2v_seed = gr.Slider(label="🎲 Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                            i2v_rand_seed = gr.Checkbox(label="🔀 Randomize seed", value=True, interactive=True)
                            with gr.Row():
                                i2v_height = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE,
                                                       value=DEFAULT_H_SLIDER_VALUE, label=f"📏 Height ({MOD_VALUE}px steps)")
                                i2v_width = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE,
                                                      value=DEFAULT_W_SLIDER_VALUE, label=f"📐 Width ({MOD_VALUE}px steps)")
                            i2v_steps = gr.Slider(minimum=1, maximum=20, step=1, value=8, label="🚀 Inference Steps",
                                                  info="8-10 recommended for great results.")
                            i2v_guidance = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0,
                                                     label="🎯 Guidance Scale", visible=False)
                        i2v_generate_btn = gr.Button("🎬 Generate I2V", variant="primary", elem_classes=["generate-btn"])
                    with gr.Column(elem_classes=["output-container"]):
                        i2v_output_video = gr.Video(label="🎥 Generated Video", autoplay=True, interactive=False)
                        i2v_download = gr.File(label="📥 Download Video", visible=False)

    # --- Event Handlers ---
    # I2V handlers
    i2v_input_image.upload(
        fn=handle_image_upload_for_dims_wan,
        inputs=[i2v_input_image],
        outputs=[i2v_height, i2v_width]
    )
    i2v_input_image.clear(
        fn=lambda: (DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE),
        inputs=[],
        outputs=[i2v_height, i2v_width]
    )
    i2v_generate_btn.click(
        fn=generate_i2v_video,
        inputs=[i2v_input_image, i2v_prompt, i2v_height, i2v_width, i2v_neg_prompt,
                i2v_duration, i2v_guidance, i2v_steps, i2v_seed, i2v_rand_seed],
        outputs=[i2v_output_video, i2v_seed, i2v_download]
    )

if __name__ == "__main__":
    demo.queue().launch()