import os
import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc
from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig, Int8WeightOnlyConfig
import aoti
# =========================================================
# MODEL CONFIGURATION
# =========================================================
MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
HF_TOKEN = os.environ.get("HF_TOKEN")
MAX_DIM = 832
MIN_DIM = 480
SQUARE_DIM = 640
MULTIPLE_OF = 16
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 7720
MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
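# Derived values: MIN_DURATION = round(8 / 16, 1) = 0.5 s and
# MAX_DURATION = round(7720 / 16, 1) = 482.5 s. Note that the ZeroGPU window
# requested below (@spaces.GPU(duration=180)) is far shorter than
# MAX_DURATION, so very long requests are unlikely to finish.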
# =========================================================
# LOAD PIPELINE
# =========================================================
print("Loading pipeline components...")
# Load models in bfloat16
transformer = WanTransformer3DModel.from_pretrained(
    MODEL_ID,
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN
)
transformer_2 = WanTransformer3DModel.from_pretrained(
    MODEL_ID,
    subfolder="transformer_2",
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN
)
print("Assembling pipeline...")
pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=transformer,
    transformer_2=transformer_2,
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN
)
print("Moving to CUDA...")
pipe = pipe.to("cuda")
# =========================================================
# LOAD LORA ADAPTERS
# =========================================================
print("Loading LoRA adapters...")
try:
    pipe.load_lora_weights(
        "Kijai/WanVideo_comfy",
        weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
        adapter_name="lightx2v"
    )
    pipe.load_lora_weights(
        "Kijai/WanVideo_comfy",
        weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
        adapter_name="lightx2v_2",
        load_into_transformer_2=True
    )
    pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
    pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
    pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
    pipe.unload_lora_weights()
    print("LoRA loaded and fused successfully.")
except Exception as e:
    print(f"Warning: Failed to load LoRA. Continuing without it. Error: {e}")
# =========================================================
# QUANTIZATION & AOT OPTIMIZATION
# =========================================================
print("Applying quantization...")
torch.cuda.empty_cache()
gc.collect()
try:
    quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
    quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
    quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
    print("Loading AOTI blocks...")
    aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
    aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')
except Exception as e:
    print(f"Warning: Quantization/AOTI failed; falling back to unquantized bf16 weights, which may OOM. Error: {e}")
# =========================================================
# DEFAULT PROMPTS
# =========================================================
default_prompt_i2v = "Make this image come alive with dynamic, cinematic human motion. Create smooth, natural, lifelike animation with fluid transitions, expressive body movement, realistic physics, and elegant camera flow. Deliver a polished, high-quality motion style that feels immersive, artistic, and visually captivating."
default_negative_prompt = (
    "low quality, worst quality, motion artifacts, unstable motion, jitter, frame jitter, wobbling limbs, motion distortion, inconsistent movement, robotic movement, animation-like motion, awkward transitions, incorrect body mechanics, unnatural posing, off-balance poses, broken motion paths, frozen frames, duplicated frames, frame skipping, warped motion, stretching artifacts, bad anatomy, incorrect proportions, deformed body, twisted torso, broken joints, dislocated limbs, distorted neck, unnatural spine curvature, malformed hands, extra fingers, missing fingers, fused fingers, distorted legs, extra limbs, collapsed feet, floating feet, foot sliding, foot jitter, backward walking, unnatural gait, blurry details, long exposure blur, ghosting, shadow trails, smearing, washed-out colors, overexposure, underexposure, excessive contrast, blown highlights, poorly rendered clothing, fabric glitches, texture warping, clothing merging with body, incorrect cloth physics, ugly background, cluttered scene, crowded background, random objects, unwanted text, subtitles, logos, graffiti, grain, noise, static artifacts, compression noise, jpeg artifacts, image-like stillness, painting-like look, cartoon texture, low-resolution textures"
)
# =========================================================
# IMAGE RESIZING LOGIC
# =========================================================
def resize_image(image: Image.Image) -> Image.Image:
    """Resize (and center-crop if needed) so both sides end up within
    [MIN_DIM, MAX_DIM] and are multiples of MULTIPLE_OF."""
    width, height = image.size
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
    aspect_ratio = width / height
    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
    image_to_resize = image
    if aspect_ratio > MAX_ASPECT_RATIO:
        # Too wide: center-crop the width down to the widest allowed ratio.
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        # Too tall: center-crop the height down to the tallest allowed ratio.
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))
    # Scale the longer side to MAX_DIM, preserving the original aspect ratio;
    # the clamp below brings cropped extremes back into range.
    if width > height:
        target_w = MAX_DIM
        target_h = int(round(target_w / aspect_ratio))
    else:
        target_h = MAX_DIM
        target_w = int(round(target_h * aspect_ratio))
    # Snap to the nearest multiple of MULTIPLE_OF, then clamp into range.
    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF
    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))
    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
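# Worked example: a 1280x720 upload (aspect ~1.78) exceeds MAX_DIM / MIN_DIM
# (~1.73), so it is center-cropped to 1248x720; the aspect-preserving target
# 832x468 snaps to 832x464, and the final clamp yields 832x480, which matches
# the cropped aspect ratio exactly.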
# =========================================================
# UTILITY FUNCTIONS
# =========================================================
def get_num_frames(duration_seconds: float):
    # Round the requested duration to whole frames, clip to the model's
    # supported range, and add one frame.
    return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))
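# Example: a 3.5 s request gives round(3.5 * 16) = 56, already inside
# [MIN_FRAMES_MODEL, MAX_FRAMES_MODEL], so num_frames = 56 + 1 = 57.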
# =========================================================
# MAIN GENERATION FUNCTION
# =========================================================
@spaces.GPU(duration=180)
def generate_video(
    input_image_path,  # Receives file path now, not PIL object
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    # Cleanup memory
    gc.collect()
    torch.cuda.empty_cache()
    try:
        # 1. Validation checks
        if not input_image_path:
            raise gr.Error("Please upload an input image.")
        if not os.path.exists(input_image_path):
            raise gr.Error("Image file not found! Please re-upload the image.")
        # 2. Manual image opening
        # We open it inside the function to avoid connection timeouts
        input_image = Image.open(input_image_path).convert("RGB")
        num_frames = get_num_frames(duration_seconds)
        current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
        resized_image = resize_image(input_image)
        print(f"Generating video with seed: {current_seed}, frames: {num_frames}")
        output_frames_list = pipe(
            image=resized_image,
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=resized_image.height,
            width=resized_image.width,
            num_frames=num_frames,
            guidance_scale=float(guidance_scale),
            guidance_scale_2=float(guidance_scale_2),
            num_inference_steps=int(steps),
            generator=torch.Generator(device="cuda").manual_seed(current_seed),
        ).frames[0]
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
            video_path = tmpfile.name
        export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
        # Cleanup
        del output_frames_list
        del input_image
        del resized_image
        torch.cuda.empty_cache()
        return video_path, current_seed
    except Exception as e:
        print(f"Error during generation: {e}")
        raise gr.Error(f"Generation failed: {str(e)}")
# =========================================================
# GRADIO UI
# =========================================================
# Google Analytics Script
ga_script = """
<script async src="https://www.googletagmanager.com/gtag/js?id=G-1TD40BVM04"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-1TD40BVM04');
</script>
"""
with gr.Blocks(theme=gr.themes.Soft(), head=ga_script) as demo:
    # --- PROFESSIONAL YOUTUBE EMBED SECTION ---
    gr.HTML("""
    <div style="background: linear-gradient(135deg, #b90000 0%, #ff0000 100%); color: white; padding: 25px; border-radius: 16px; text-align: center; margin-bottom: 25px; box-shadow: 0 10px 30px rgba(185, 0, 0, 0.3);">
        <div style="display: flex; align-items: center; justify-content: center; gap: 25px; flex-wrap: wrap; margin-bottom: 20px;">
            <div style="display: flex; align-items: center; gap: 15px;">
                <div style="background: white; width: 50px; height: 50px; border-radius: 50%; display: flex; align-items: center; justify-content: center; box-shadow: 0 4px 8px rgba(0,0,0,0.2);">
                    <span style="font-size: 24px;">▶️</span>
                </div>
                <div style="text-align: left;">
                    <h3 style="margin: 0; font-weight: 800; font-size: 22px; letter-spacing: 0.5px;">Imagination Engineering</h3>
                    <p style="margin: 4px 0 0 0; opacity: 0.95; font-size: 14px; font-weight: 400;">Mastering AI & Creative Tech</p>
                </div>
            </div>
            <a href="https://www.youtube.com/@ImaginationEngineering" target="_blank" style="text-decoration: none;">
                <button style="background-color: white; color: #cc0000; border: none; padding: 10px 28px; border-radius: 30px; font-weight: 700; cursor: pointer; transition: transform 0.2s, box-shadow 0.2s; font-size: 15px; box-shadow: 0 4px 12px rgba(0,0,0,0.2);">
                    SUBSCRIBE & WATCH 📺
                </button>
            </a>
        </div>
        <div style="width: 100%; max-width: 650px; margin: 0 auto; border-radius: 12px; overflow: hidden; box-shadow: 0 15px 40px rgba(0,0,0,0.4); border: 4px solid rgba(255,255,255,0.15);">
            <div style="position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden;">
                <iframe
                    src="https://www.youtube.com/embed/w_7wL_i3f1k?rel=0&modestbranding=1"
                    style="position: absolute; top: 0; left: 0; width: 100%; height: 100%;"
                    frameborder="0"
                    allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
                    allowfullscreen
                    title="Imagination Engineering Feature">
                </iframe>
            </div>
        </div>
    </div>
    """)
    # ---------------------------------------------
    gr.Markdown("# 🚀 Dream Wan 2.2 Faster Pro (14B) — Ultra Fast I2V with Lightning LoRA")
    gr.Markdown("Optimized FP8 quantized pipeline with AoT blocks & 4-step fast inference ⚡")
    with gr.Row():
        with gr.Column():
            # CHANGE: type="filepath" fixes the file not found error
            input_image_component = gr.Image(type="filepath", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(
                minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5,
                label="Duration (seconds)",
                info=f"Model range: {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
            )
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale (high noise)")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 (low noise)")
            generate_button = gr.Button("🎬 Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True)
    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input,
        seed_input, randomize_seed_checkbox
    ]
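    # The second output writes the seed actually used back into the seed
    # slider, so a randomized run can be reproduced by unchecking
    # "Randomize seed".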
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
    # --- BOTTOM ADVERTISEMENT BANNER ---
    gr.HTML("""
    <div style="background: linear-gradient(90deg, #4f46e5, #9333ea); color: white; padding: 15px; border-radius: 10px; text-align: center; margin-top: 20px; box-shadow: 0 4px 15px rgba(0,0,0,0.1);">
        <div style="display: flex; align-items: center; justify-content: center; gap: 20px; flex-wrap: wrap;">
            <div style="text-align: left;">
                <h3 style="margin: 0; font-weight: bold; font-size: 18px;">✨ New: Dream Hub Pro (All-in-One)</h3>
                <p style="margin: 5px 0 0 0; opacity: 0.9; font-size: 14px;">Access all your pro tools (Wan2.1, Qwen, Audio, Video Enhance) in one place!</p>
            </div>
            <a href="https://huggingface.co/spaces/dream2589632147/Dream-Hub-Pro" target="_blank" style="text-decoration: none;">
                <button style="background-color: white; color: #4f46e5; border: none; padding: 10px 25px; border-radius: 25px; font-weight: bold; cursor: pointer; transition: all 0.2s; font-size: 15px; box-shadow: 0 2px 5px rgba(0,0,0,0.2);">
                    🚀 Open Hub Pro Now
                </button>
            </a>
        </div>
    </div>
    """)
if __name__ == "__main__":
    demo.queue().launch()