# VividFlow / ui_manager.py
from typing import Optional, Tuple

import gradio as gr
from PIL import Image

from FlowFacade import FlowFacade
from css_style import DELTAFLOW_CSS
from prompt_examples import PROMPT_EXAMPLES
class UIManager:
    """Builds and launches the VividFlow Gradio web interface.

    The UI collects an image plus a motion prompt, exposes generation
    settings, and delegates the actual image-to-video work to a
    FlowFacade instance.
    """

    def __init__(self, facade: FlowFacade):
        # Facade wrapping validation, prompt expansion and video generation.
        self.facade = facade

    def create_interface(self) -> gr.Blocks:
        """Assemble the Gradio Blocks layout and wire up all event handlers.

        Returns:
            The constructed (not yet launched) ``gr.Blocks`` interface.
        """
        with gr.Blocks(
            theme=gr.themes.Soft(),
            css=DELTAFLOW_CSS,
            title="VividFlow - Fast AI Image to Video"
        ) as interface:
            # Header
            gr.HTML("""
            <div class="header-container">
                <h1 class="header-title">🌊 VividFlow</h1>
                <p class="header-subtitle">
                    Bring Your Images to Life with AI Magic ✨<br>
                    Transform any still image into dynamic, cinematic videos
                </p>
            </div>
            """)

            with gr.Row():
                # Left Panel: Input
                with gr.Column(scale=1, elem_classes="input-card"):
                    gr.Markdown("### 📤 Input")
                    image_input = gr.Image(
                        label="Upload Image (any type: photo, art, cartoon, etc.)",
                        type="pil",
                        elem_classes="image-upload",
                        height=320
                    )
                    # Hidden until an image is uploaded; then shows input/output sizes.
                    resolution_info = gr.Markdown(
                        value="",
                        visible=False,
                        elem_classes="info-text"
                    )
                    prompt_input = gr.Textbox(
                        label="Motion Instruction",
                        placeholder="Describe camera movements (zoom, pan, orbit) and subject actions (head turn, hair flow, expression change). Be specific and cinematic! Example: 'Camera slowly zooms in, subject's eyes sparkle, hair flows gently in wind'",
                        lines=3,
                        max_lines=6
                    )

                    # Quick preset selector
                    category_dropdown = gr.Dropdown(
                        choices=list(PROMPT_EXAMPLES.keys()),
                        label="💡 Quick Prompt Category",
                        value="💃 Fashion / Beauty (Facial Only)",
                        interactive=True
                    )
                    example_dropdown = gr.Dropdown(
                        choices=PROMPT_EXAMPLES["💃 Fashion / Beauty (Facial Only)"],
                        label="Example Prompts (click to use)",
                        value=None,
                        interactive=True
                    )

                    # Quality tips banner (blue)
                    gr.HTML("""
                    <div class="quality-banner">
                        <strong>💡 Choose the Right Prompt Category:</strong><br>
                        • <strong>💃 Facial Only:</strong> Safe for headshots and portraits without visible hands<br>
                        • <strong>🙌 Hands Visible Required:</strong> Only use if hands are fully visible in your image (prevents artifacts)<br>
                        • <strong>🌄 Scenery/Objects:</strong> For landscapes, products, and abstract content
                    </div>
                    """)

                    # Generate button with patience banner
                    gr.HTML("""
                    <div class="patience-banner">
                        <strong>⏱️ Models are Initializing!</strong><br>
                        This first-time generation may take a moment while high-fidelity assets load into memory.<br>
                        Grab a coffee ☕, and watch the magic happen! Subsequent runs will be significantly faster.
                    </div>
                    """)
                    generate_btn = gr.Button(
                        "🎬 Generate Video",
                        variant="primary",
                        elem_classes="primary-button",
                        size="lg"
                    )

                    # Advanced settings
                    with gr.Accordion("⚙️ Advanced Settings", open=False):
                        duration_slider = gr.Slider(
                            minimum=0.5,
                            maximum=5.0,
                            step=0.5,
                            value=3.0,
                            label="Duration (seconds)",
                            info="3.0s = 49 frames, 5.0s = 81 frames (16fps)"
                        )
                        steps_slider = gr.Slider(
                            minimum=4,
                            maximum=12,
                            step=1,
                            value=4,
                            label="Inference Steps",
                            info="4-6 recommended • Higher steps = longer generation time"
                        )
                        with gr.Row():
                            guidance_scale = gr.Slider(
                                minimum=0.0,
                                maximum=5.0,
                                step=0.5,
                                value=1.0,
                                label="Guidance Scale (high noise)"
                            )
                            guidance_scale_2 = gr.Slider(
                                minimum=0.0,
                                maximum=5.0,
                                step=0.5,
                                value=1.0,
                                label="Guidance Scale (low noise)"
                            )
                        with gr.Row():
                            seed_input = gr.Number(
                                label="Seed",
                                value=42,
                                precision=0,
                                minimum=0,
                                maximum=2147483647,
                                info="Use same seed for reproducible results"
                            )
                            randomize_seed = gr.Checkbox(
                                label="Randomize Seed",
                                value=True,
                                info="Generate different results each time"
                            )
                        enable_ai_prompt = gr.Checkbox(
                            label="🤖 Enable AI Prompt Expansion (Qwen2.5)",
                            value=False,
                            info="Use AI to enhance your prompt (adds ~30s)"
                        )

                # Right Panel: Output
                with gr.Column(scale=1, elem_classes="output-card"):
                    gr.Markdown("### 🎥 Output")
                    video_output = gr.Video(
                        label="Generated Video",
                        height=400,
                        autoplay=True
                    )
                    with gr.Row():
                        prompt_output = gr.Textbox(
                            label="Final Prompt Used",
                            lines=3,
                            interactive=False,
                            scale=3
                        )
                        seed_output = gr.Number(
                            label="Seed Used",
                            precision=0,
                            interactive=False,
                            scale=1
                        )

            # Info section
            with gr.Row():
                gr.HTML("""
                <div class="info-box">
                    <strong>ℹ️ Tips for Best Results:</strong><br>
                    • <strong>Use example prompts:</strong> Select a category above and click an example to get started<br>
                    • <strong>Works with ANY image:</strong> Fashion portraits, anime, landscapes, products, abstract art, etc.<br>
                    • <strong>For dramatic effects:</strong> Choose prompts with words like "explosive", "dramatic", "swirls", "transforms"<br>
                    • <strong>Image quality matters:</strong> Higher resolution and clear subjects produce better results
                </div>
                """)

            # Footer
            gr.HTML("""
            <div class="footer">
                <p style="font-size: 0.9rem;">
                    <strong>Powered by:</strong><br>
                    <a href="https://huggingface.co/Wan-AI/Wan2.2-I2V-A14B-Diffusers" target="_blank" style="color: #6366f1; text-decoration: none;">Wan2.2-I2V-A14B</a> (Wan-AI, optimized by <a href="https://huggingface.co/cbensimon" target="_blank" style="color: #6366f1; text-decoration: none;">cbensimon</a>)
                    · Lightning LoRA (<a href="https://huggingface.co/Kijai/WanVideo_comfy" target="_blank" style="color: #6366f1; text-decoration: none;">Lightx2v</a>)
                    · <a href="https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct" target="_blank" style="color: #6366f1; text-decoration: none;">Qwen2.5-0.5B</a>
                </p>
            </div>
            """)

            def update_examples(category):
                # Swap the example list to match the selected category and
                # clear the current selection.
                return gr.update(choices=PROMPT_EXAMPLES[category], value=None)

            def fill_prompt(selected_example):
                # Copy the clicked example into the prompt box ("" when cleared).
                return selected_example if selected_example else ""

            def show_resolution_info(image):
                # Preview the input -> output resolution mapping the engine
                # will apply, so users know what to expect before generating.
                if image is None:
                    return gr.update(value="", visible=False)
                original_w, original_h = image.size
                resized_image = self.facade.video_engine.resize_image(image)
                output_w, output_h = resized_image.width, resized_image.height
                info = f"**📐 Resolution:** Input: {original_w}×{original_h} → Output: {output_w}×{output_h}"
                return gr.update(value=info, visible=True)

            # Event wiring (must happen inside the Blocks context).
            category_dropdown.change(fn=update_examples, inputs=[category_dropdown],
                                     outputs=[example_dropdown])
            example_dropdown.change(fn=fill_prompt, inputs=[example_dropdown],
                                    outputs=[prompt_input])
            image_input.change(fn=show_resolution_info, inputs=[image_input],
                               outputs=[resolution_info])
            generate_btn.click(
                fn=self._handle_generation,
                inputs=[
                    image_input,
                    prompt_input,
                    duration_slider,
                    steps_slider,
                    guidance_scale,
                    guidance_scale_2,
                    seed_input,
                    randomize_seed,
                    enable_ai_prompt
                ],
                outputs=[video_output, prompt_output, seed_output],
                show_progress=True
            )

        return interface

    def _handle_generation(self, image: Image.Image, prompt: str, duration: float,
                           steps: int, guidance_1: float, guidance_2: float, seed: int,
                           randomize: bool, enable_ai: bool,
                           progress=gr.Progress()) -> Tuple[str, str, int]:
        """Validate inputs, run generation via the facade, and map failures to gr.Error.

        Args:
            image: Source still image (None if the user did not upload one).
            prompt: User motion instruction.
            duration: Clip length in seconds.
            steps: Number of inference steps.
            guidance_1: Guidance scale for the high-noise stage.
            guidance_2: Guidance scale for the low-noise stage.
            seed: RNG seed (ignored when ``randomize`` is True).
            randomize: Whether to pick a random seed instead of ``seed``.
            enable_ai: Whether to run AI prompt expansion first.
            progress: Gradio progress tracker injected by the click handler.

        Returns:
            (video_path, final_prompt, seed_used).

        Raises:
            gr.Error: On invalid input or any generation failure, with a
                user-facing message.
        """
        try:
            if image is None:
                raise gr.Error("❌ Please upload an image")
            if not prompt or prompt.strip() == "":
                raise gr.Error("❌ Please provide a motion instruction")
            if not self.facade.validate_image(image):
                raise gr.Error("❌ Image dimensions invalid (256-4096px)")
            video_path, final_prompt, seed_used = self.facade.generate_video_from_image(
                image=image,
                user_instruction=prompt,
                duration_seconds=duration,
                num_inference_steps=steps,
                guidance_scale=guidance_1,
                guidance_scale_2=guidance_2,
                seed=int(seed),
                randomize_seed=randomize,
                enable_prompt_expansion=enable_ai,
                progress=progress
            )
            return video_path, final_prompt, seed_used
        except gr.Error:
            # Already user-facing; let Gradio display it as-is.
            raise
        except Exception as e:
            import traceback
            import os
            error_msg = str(e)
            # Full traceback only when DEBUG is set, to keep logs clean in prod.
            if os.environ.get('DEBUG'):
                print(f"\n✗ UI Error: {type(e).__name__}")
                print(traceback.format_exc())
            if "CUDA out of memory" in error_msg or "OutOfMemoryError" in error_msg:
                raise gr.Error("❌ GPU memory insufficient. Try reducing duration/steps or restart.")
            else:
                raise gr.Error(f"❌ Generation failed: {error_msg}")

    def launch(self, share: bool = False, server_name: str = "0.0.0.0",
               server_port: Optional[int] = None, **kwargs) -> None:
        """Build the interface and start the Gradio server.

        Args:
            share: Create a public Gradio share link.
            server_name: Bind address (0.0.0.0 listens on all interfaces).
            server_port: Port to serve on; None lets Gradio choose.
            **kwargs: Extra options forwarded to ``gr.Blocks.launch``.
        """
        interface = self.create_interface()
        interface.launch(share=share, server_name=server_name,
                         server_port=server_port, **kwargs)