import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline
import numpy as np
from PIL import Image
import os
import tempfile
from typing import Optional, Tuple
import time
from config import MODEL_ID, DEFAULT_HEIGHT, DEFAULT_WIDTH, DEFAULT_NUM_FRAMES, DEFAULT_NUM_INFERENCE_STEPS
from utils import create_video_from_frames, save_video_temp, cleanup_temp_files
from models import load_pipeline
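# Note: config, utils, and models are local modules of this Space (not shown in
# this file). Based on the imports above they are assumed to provide:
#   config.py - MODEL_ID and the DEFAULT_* generation settings
#   utils.py  - create_video_from_frames, save_video_temp, cleanup_temp_files
#   models.py - load_pipeline, which constructs the Open-Sora-v2 pipeline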
# Global pipeline variable
pipeline = None
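# On ZeroGPU Spaces, the @spaces.GPU decorator attaches a GPU to the decorated
# function for the duration of the call; `duration` is the expected run time in
# seconds, after which the allocation may be reclaimed.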
@spaces.GPU(duration=300)
def initialize_model():
"""Initialize the Open-Sora-v2 pipeline"""
global pipeline
if pipeline is None:
pipeline = load_pipeline()
return "Model loaded successfully!"
@spaces.GPU(duration=180)
def generate_video(
    prompt: str,
    height: int = DEFAULT_HEIGHT,
    width: int = DEFAULT_WIDTH,
    num_frames: int = DEFAULT_NUM_FRAMES,
    num_inference_steps: int = DEFAULT_NUM_INFERENCE_STEPS,
    seed: Optional[int] = None,
    progress=gr.Progress()
) -> Tuple[Optional[str], str]:
"""
Generate a video from text prompt using Open-Sora-v2
Args:
prompt (str): Text description of the video to generate
height (int): Height of the video frames
width (int): Width of the video frames
num_frames (int): Number of frames to generate
num_inference_steps (int): Number of denoising steps
seed (int, optional): Random seed for reproducible generation
Returns:
Tuple[str, str]: Path to generated video file and status message
"""
    try:
        # Initialize model if not already done
        if pipeline is None:
            progress(0.1, desc="Loading model...")
            initialize_model()

        # Set seed for reproducibility
        if seed is not None:
            torch.manual_seed(seed)

        progress(0.2, desc="Generating video frames...")

        # Generate video frames
        video_frames = pipeline(
            prompt=prompt,
            height=height,
            width=width,
            num_frames=num_frames,
            num_inference_steps=num_inference_steps,
            guidance_scale=7.5,
        ).frames

        progress(0.8, desc="Processing video...")

        # Convert frames to a temporary video file
        video_path = save_video_temp(video_frames, fps=24)

        progress(1.0, desc="Complete!")
        return video_path, f"βœ… Video generated successfully! ({len(video_frames)} frames)"
    except Exception as e:
        error_msg = f"❌ Error generating video: {str(e)}"
        return None, error_msg
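# Note (assumption): diffusers pipelines typically also accept a per-call
# `generator` argument, which keeps seeding local to a single request instead
# of mutating torch's global RNG state as torch.manual_seed does above. If the
# pipeline returned by load_pipeline() supports it, the call could look like:
#
#   generator = torch.Generator(device="cuda").manual_seed(seed)
#   video_frames = pipeline(prompt=prompt, ..., generator=generator).frames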
def update_interface():
"""Update interface based on model availability"""
return gr.update(interactive=True)
def create_demo():
"""Create the Gradio demo interface"""
with gr.Blocks(
title="Open-Sora-v2 Text to Video",
theme=gr.themes.Soft(),
css="""
.gradio-container {
max-width: 1200px !important;
}
.generate-btn {
background: linear-gradient(45deg, #667eea 0%, #764ba2 100%) !important;
}
"""
) as demo:
gr.HTML("""
<div style="text-align: center; margin-bottom: 20px;">
<h1>🎬 Open-Sora-v2 Text to Video Generator</h1>
<p>Generate amazing videos from text descriptions using Open-Sora-v2 model</p>
<p><a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank">Built with anycoder</a></p>
</div>
""")
        with gr.Row():
            with gr.Column(scale=2):
                # Input section
                gr.Markdown("## πŸ“ Input")

                prompt_input = gr.Textbox(
                    label="Video Description",
                    placeholder="Describe the video you want to generate...",
                    lines=3,
                    value="A beautiful sunset over the ocean with waves gently rolling"
                )

                with gr.Accordion("βš™οΈ Advanced Settings", open=False):
                    with gr.Row():
                        height_input = gr.Number(
                            label="Height",
                            value=DEFAULT_HEIGHT,
                            minimum=256,
                            maximum=1024,
                            step=64
                        )
                        width_input = gr.Number(
                            label="Width",
                            value=DEFAULT_WIDTH,
                            minimum=256,
                            maximum=1024,
                            step=64
                        )
                    with gr.Row():
                        num_frames_input = gr.Slider(
                            label="Number of Frames",
                            value=DEFAULT_NUM_FRAMES,
                            minimum=16,
                            maximum=120,
                            step=8
                        )
                        num_steps_input = gr.Slider(
                            label="Inference Steps",
                            value=DEFAULT_NUM_INFERENCE_STEPS,
                            minimum=10,
                            maximum=100,
                            step=5
                        )
                    seed_input = gr.Number(
                        label="Seed (optional)",
                        value=None,
                        precision=0
                    )

                generate_btn = gr.Button(
                    "πŸŽ₯ Generate Video",
                    variant="primary",
                    size="lg",
                    elem_classes=["generate-btn"]
                )
            with gr.Column(scale=1):
                # Output section
                gr.Markdown("## πŸŽ₯ Output")

                video_output = gr.Video(
                    label="Generated Video",
                    height=400
                )
                status_output = gr.Textbox(
                    label="Status",
                    interactive=False
                )
        # Example prompts
        gr.Markdown("## πŸ’‘ Example Prompts")
        examples = [
            "A majestic eagle soaring through mountain peaks at sunrise",
            "A busy city street with neon lights at night, cyberpunk style",
            "A peaceful garden with butterflies fluttering around colorful flowers",
            "A robot dancing in a futuristic disco with colorful lights",
            "A serene lake reflecting autumn trees with falling leaves"
        ]
        with gr.Row():
            for example in examples:
                example_btn = gr.Button(example, size="sm")
                # Bind the current example via a default argument so each
                # button fills the prompt with its own text
                example_btn.click(
                    lambda x=example: x,
                    outputs=prompt_input
                )
        # Event handlers
        generate_btn.click(
            fn=generate_video,
            inputs=[
                prompt_input,
                height_input,
                width_input,
                num_frames_input,
                num_steps_input,
                seed_input
            ],
            outputs=[video_output, status_output],
            show_progress=True
        )
        # Initialize model on startup
        demo.load(
            fn=initialize_model,
            outputs=[status_output]
        )

        # Cleanup on page close
        demo.unload(
            fn=cleanup_temp_files
        )

    return demo
if __name__ == "__main__":
    demo = create_demo()
    demo.launch(
        share=True,
        show_error=True,
        show_api=True
    )
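# ---------------------------------------------------------------------------
# For reference only: a minimal sketch of what the save_video_temp helper
# imported from utils.py might look like. This is an assumption for
# illustration (the Space's actual utils.py is not shown here); it presumes
# imageio with the ffmpeg plugin is installed and that frames arrive as PIL
# images or numpy arrays. Kept commented out so it never shadows the real
# import above.
#
#   import imageio
#   import numpy as np
#   import tempfile
#
#   def save_video_temp(frames, fps=24):
#       """Write frames to a temporary .mp4 file and return its path."""
#       path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
#       with imageio.get_writer(path, fps=fps) as writer:
#           for frame in frames:
#               writer.append_data(np.asarray(frame))
#       return path
# ---------------------------------------------------------------------------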