# Wan2.2-TI2V-5B video generator — Hugging Face Space app
| import gradio as gr | |
| import torch | |
| import os | |
| import gc | |
| from typing import Optional, Tuple | |
| from model import WanVideoGenerator, get_available_loras, LORA_CACHE_DIR | |
# Module-level singleton that owns the video pipeline and current LoRA state;
# shared by every Gradio request handler below.
generator = WanVideoGenerator()
def generate_video(
    prompt: str,
    negative_prompt: str,
    image_input: Optional[dict],
    lora_name: str,
    lora_scale: float,
    height: int,
    width: int,
    num_frames: int,
    guidance_scale: float,
    num_inference_steps: int,
    fps: int,
    seed: int,
    progress: gr.Progress = gr.Progress()
) -> Tuple[str, str]:
    """Generate a video from a text (and optional image) prompt with optional LoRA.

    Args:
        prompt: Text description of the video to generate.
        negative_prompt: Concepts to steer the model away from.
        image_input: Optional input image enabling TI2V (Text-Image-to-Video)
            mode. NOTE(review): annotated ``Optional[dict]`` but the UI uses
            ``gr.Image(type="pil")``, which passes a PIL image — confirm.
        lora_name: Name of the LoRA to apply, or "None" to disable.
        lora_scale: Strength of the LoRA effect (0.0-1.0).
        height: Output video height in pixels.
        width: Output video width in pixels.
        num_frames: Number of frames to generate.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        fps: Playback frame rate of the output video.
        seed: RNG seed; any negative value means "random".
        progress: Gradio progress tracker (injected by Gradio).

    Returns:
        Tuple of (path to the generated video file, human-readable info text).

    Raises:
        gr.Error: If the prompt is empty/missing or generation fails.
    """
    # Guard against both None and whitespace-only prompts (the original
    # `prompt.strip()` would raise AttributeError on None).
    if not prompt or not prompt.strip():
        raise gr.Error("Please provide a prompt!")
    progress(0, desc="Initializing...")
    # A non-None image switches generation into TI2V mode.
    image = image_input
    try:
        if lora_name != "None":
            progress(0.1, desc=f"Loading LoRA: {lora_name}...")
            generator.load_lora(lora_name, lora_scale)
        else:
            # Make sure a LoRA loaded by a previous request does not leak
            # into this run.
            generator.unload_lora()
        progress(0.2, desc="Generating video...")
        video_path = generator.generate(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            height=height,
            width=width,
            num_frames=num_frames,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            fps=fps,
            seed=seed if seed >= 0 else None,  # negative seed => random
            # Map model progress (0..1) into the 0.2..0.9 slice of the UI bar.
            progress_callback=lambda p: progress(0.2 + 0.7 * p, desc="Generating frames...")
        )
        progress(1.0, desc="Complete!")
        # Human-readable summary shown next to the video.
        info = f"""Generation Info:
- Prompt: {prompt}
- LoRA: {lora_name} (scale: {lora_scale})
- Resolution: {width}x{height}
- Frames: {num_frames}
- Steps: {num_inference_steps}
- Guidance: {guidance_scale}
- FPS: {fps}
- Seed: {seed if seed >= 0 else 'Random'}"""
        return video_path, info
    except gr.Error:
        # User-facing errors pass through unchanged (avoid double-wrapping).
        raise
    except Exception as e:
        # Chain the cause so the original traceback survives in server logs.
        raise gr.Error(f"Generation failed: {str(e)}") from e
    finally:
        # Free CPU and GPU memory whether generation succeeded or not.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
def refresh_loras():
    """Re-scan the LoRA cache and return a rebuilt dropdown component."""
    choices = ["None"]
    choices.extend(get_available_loras())
    # Returning a new Dropdown replaces the component's choices in the UI
    # and resets the selection to "None".
    return gr.Dropdown(choices=choices, value="None")
# Custom CSS for professional look: gradient header title, a card-style
# background for the LoRA section, and a dark monospace panel for the
# generation-info textbox. Applied to the Gradio app as custom CSS.
custom_css = """
#header-text {
    text-align: center;
    margin-bottom: 20px;
}
#header-text h1 {
    font-size: 2.5em;
    font-weight: bold;
    background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}
#header-text p {
    color: #666;
    font-size: 1.1em;
}
.lora-section {
    background: linear-gradient(135deg, #667eea22 0%, #764ba222 100%);
    border-radius: 12px;
    padding: 15px;
    margin-bottom: 15px;
}
.generation-info {
    font-family: monospace;
    font-size: 0.9em;
    background: #1a1a2e;
    color: #eee;
    padding: 15px;
    border-radius: 8px;
}
"""
# Build the Gradio interface.
# FIX: theme= and css= are gr.Blocks() constructor arguments, not launch()
# arguments — passing them to launch() raises TypeError.
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="purple",
        neutral_hue="slate",
        font=gr.themes.GoogleFont("Inter"),
        text_size="md",
        spacing_size="md",
        radius_size="lg",
    ),
    css=custom_css,
) as demo:
    # Header
    with gr.Row():
        with gr.Column():
            gr.HTML("""
                <div id="header-text">
                    <h1>🎬 Wan2.2-TI2V-5B Video Generator</h1>
                    <p>Generate stunning videos from text or image prompts with LoRA acceleration</p>
                    <p style="font-size: 0.9em;">
                        <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #667eea; text-decoration: none;">
                            Built with anycoder
                        </a>
                    </p>
                </div>
            """)
    with gr.Row():
        # Left column - inputs
        with gr.Column(scale=1):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe the video you want to generate...",
                lines=3,
                info="Be descriptive for better results"
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="What to avoid in the video...",
                lines=2,
                value="blur, distortion, low quality, jittery, morphing"
            )
            # Optional image input enables TI2V mode.
            with gr.Accordion("Image Input (TI2V)", open=False):
                gr.Markdown("Upload an image for Text-Image-to-Video generation")
                image_input = gr.Image(
                    label="Input Image",
                    type="pil",
                    height=300
                )
            # LoRA selection + strength.
            with gr.Group(elem_classes="lora-section"):
                gr.Markdown("### 🚀 LoRA Acceleration")
                with gr.Row():
                    lora_dropdown = gr.Dropdown(
                        choices=["None"] + get_available_loras(),
                        value="None",
                        label="Select LoRA",
                        info="Choose a LoRA for faster generation"
                    )
                    refresh_btn = gr.Button("🔄", size="sm", variant="secondary")
                lora_scale = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.8,
                    step=0.05,
                    label="LoRA Scale",
                    info="Higher values = stronger LoRA effect"
                )
                gr.Markdown("""
                **Available LoRAs:**
                - `wan-fast-lora`: Optimized for speed (2-3x faster)
                - `wan-quality-lora`: Enhanced quality
                - `wan-motion-lora`: Better motion dynamics
                """)
            # Generation parameters.
            with gr.Accordion("⚙️ Advanced Settings", open=True):
                with gr.Row():
                    height = gr.Slider(256, 1024, value=480, step=64, label="Height")
                    width = gr.Slider(256, 1024, value=848, step=64, label="Width")
                with gr.Row():
                    num_frames = gr.Slider(8, 81, value=25, step=1, label="Number of Frames")
                    fps = gr.Slider(6, 30, value=16, step=1, label="FPS")
                with gr.Row():
                    guidance_scale = gr.Slider(1.0, 20.0, value=5.0, step=0.5, label="Guidance Scale")
                    num_inference_steps = gr.Slider(4, 50, value=20, step=1, label="Inference Steps")
                seed = gr.Slider(-1, 999999, value=-1, step=1, label="Seed (-1 for random)")
            generate_btn = gr.Button("🎬 Generate Video", variant="primary", size="lg")
        # Right column - outputs
        with gr.Column(scale=1):
            video_output = gr.Video(
                label="Generated Video",
                height=450,
                autoplay=True
            )
            info_output = gr.Textbox(
                label="Generation Info",
                lines=10,
                elem_classes="generation-info",
                show_copy_button=True
            )
    # Clickable example prompts (prompt, image, LoRA).
    with gr.Accordion("📚 Example Prompts", open=False):
        gr.Examples(
            examples=[
                ["A majestic dragon flying through clouds at sunset, cinematic lighting, 4K quality", None, "wan-fast-lora"],
                ["A serene ocean wave crashing on a beach, slow motion, golden hour", None, "wan-fast-lora"],
                ["A futuristic city with flying cars and neon lights, cyberpunk style", None, "wan-quality-lora"],
                ["A cat playing with a ball of yarn, cute, fluffy, soft lighting", None, "wan-fast-lora"],
                ["Time-lapse of flowers blooming in a garden, vibrant colors", None, "wan-motion-lora"],
            ],
            inputs=[prompt, image_input, lora_dropdown],
            label="Click to use example prompts"
        )
    # Footer.
    # FIX: the link URL previously pointed at Wan2.1-T2V-14B while the text
    # said Wan2.2-TI2V-5B — URL corrected to match the advertised model.
    gr.HTML("""
        <div style="text-align: center; margin-top: 20px; padding: 10px; border-top: 1px solid #ddd;">
            <p style="color: #666; font-size: 0.9em;">
                Model: <a href="https://huggingface.co/Wan-AI/Wan2.2-TI2V-5B" target="_blank">Wan2.2-TI2V-5B</a> |
                Powered by <a href="https://huggingface.co" target="_blank">🤗 Hugging Face</a>
            </p>
        </div>
    """)
    # Event handlers.
    refresh_btn.click(refresh_loras, outputs=lora_dropdown)
    # NOTE(review): removed `api_visibility="public"` — not a documented
    # event-listener parameter; use `api_name=`/`show_api=` if API exposure
    # needs to be controlled.
    generate_btn.click(
        fn=generate_video,
        inputs=[
            prompt,
            negative_prompt,
            image_input,
            lora_dropdown,
            lora_scale,
            height,
            width,
            num_frames,
            guidance_scale,
            num_inference_steps,
            fps,
            seed
        ],
        outputs=[video_output, info_output],
    )
# Launch the app. allowed_paths lets Gradio serve files out of the LoRA cache.
# NOTE(review): removed `footer_links=` — not a documented launch() parameter.
demo.launch(
    allowed_paths=[LORA_CACHE_DIR] if os.path.exists(LORA_CACHE_DIR) else []
)