import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline
import numpy as np
from PIL import Image
import os
import tempfile
from typing import Optional, Tuple
import time
from config import MODEL_ID, DEFAULT_HEIGHT, DEFAULT_WIDTH, DEFAULT_NUM_FRAMES, DEFAULT_NUM_INFERENCE_STEPS
from utils import create_video_from_frames, save_video_temp, cleanup_temp_files
from models import load_pipeline
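
# Note: config, utils, and models are local helper modules in this Space repo
# (their source is not shown in this file). config only needs to expose the
# constants imported above; for reference it might look roughly like this
# (illustrative values, not the Space's actual settings):
#
#   MODEL_ID = "hpcai-tech/Open-Sora-v2"
#   DEFAULT_HEIGHT = 256
#   DEFAULT_WIDTH = 256
#   DEFAULT_NUM_FRAMES = 64
#   DEFAULT_NUM_INFERENCE_STEPS = 50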
# Global pipeline variable
pipeline = None
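
# spaces.GPU marks a function for Hugging Face ZeroGPU: the decorated call is
# scheduled onto a GPU worker, and `duration` is the maximum number of seconds
# the call may hold the GPU before it is released.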
@spaces.GPU(duration=300)
def initialize_model():
    """Initialize the Open-Sora-v2 pipeline"""
    global pipeline
    if pipeline is None:
        pipeline = load_pipeline()
    return "Model loaded successfully!"

@spaces.GPU(duration=180)
def generate_video(
    prompt: str,
    height: int = DEFAULT_HEIGHT,
    width: int = DEFAULT_WIDTH,
    num_frames: int = DEFAULT_NUM_FRAMES,
    num_inference_steps: int = DEFAULT_NUM_INFERENCE_STEPS,
    seed: Optional[int] = None,
    progress=gr.Progress()
) -> Tuple[Optional[str], str]:
    """
    Generate a video from a text prompt using Open-Sora-v2.

    Args:
        prompt (str): Text description of the video to generate
        height (int): Height of the video frames
        width (int): Width of the video frames
        num_frames (int): Number of frames to generate
        num_inference_steps (int): Number of denoising steps
        seed (int, optional): Random seed for reproducible generation

    Returns:
        Tuple[Optional[str], str]: Path to the generated video file
        (or None on failure) and a status message
    """
    try:
        # Initialize model if not already done
        if pipeline is None:
            progress(0.1, desc="Loading model...")
            initialize_model()

        # Set seed for reproducibility
        if seed is not None:
            torch.manual_seed(seed)

        progress(0.2, desc="Generating video frames...")

        # Generate video frames
        video_frames = pipeline(
            prompt=prompt,
            height=height,
            width=width,
            num_frames=num_frames,
            num_inference_steps=num_inference_steps,
            guidance_scale=7.5,
        ).frames

        progress(0.8, desc="Processing video...")

        # Convert frames to video
        video_path = save_video_temp(video_frames, fps=24)

        progress(1.0, desc="Complete!")
        return video_path, f"✅ Video generated successfully! ({len(video_frames)} frames)"
    except Exception as e:
        error_msg = f"❌ Error generating video: {str(e)}"
        return None, error_msg
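
# utils.save_video_temp is also not shown here. A minimal implementation could
# write the frames to a temporary .mp4 with diffusers' export_to_video helper
# (a sketch under that assumption, not the Space's actual code):
#
#   from diffusers.utils import export_to_video
#
#   def save_video_temp(frames, fps=24):
#       path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
#       export_to_video(frames, path, fps=fps)
#       return path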

def update_interface():
    """Update interface based on model availability"""
    return gr.update(interactive=True)

def create_demo():
    """Create the Gradio demo interface"""
    with gr.Blocks(
        title="Open-Sora-v2 Text to Video",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px !important;
        }
        .generate-btn {
            background: linear-gradient(45deg, #667eea 0%, #764ba2 100%) !important;
        }
        """
    ) as demo:
        gr.HTML("""
        <div style="text-align: center; margin-bottom: 20px;">
            <h1>🎬 Open-Sora-v2 Text to Video Generator</h1>
            <p>Generate amazing videos from text descriptions using the Open-Sora-v2 model</p>
            <p><a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank">Built with anycoder</a></p>
        </div>
        """)

        with gr.Row():
            with gr.Column(scale=2):
                # Input section
                gr.Markdown("## 📝 Input")
                prompt_input = gr.Textbox(
                    label="Video Description",
                    placeholder="Describe the video you want to generate...",
                    lines=3,
                    value="A beautiful sunset over the ocean with waves gently rolling"
                )

                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    with gr.Row():
                        height_input = gr.Number(
                            label="Height",
                            value=DEFAULT_HEIGHT,
                            minimum=256,
                            maximum=1024,
                            step=64
                        )
                        width_input = gr.Number(
                            label="Width",
                            value=DEFAULT_WIDTH,
                            minimum=256,
                            maximum=1024,
                            step=64
                        )
                    with gr.Row():
                        num_frames_input = gr.Slider(
                            label="Number of Frames",
                            value=DEFAULT_NUM_FRAMES,
                            minimum=16,
                            maximum=120,
                            step=8
                        )
                        num_steps_input = gr.Slider(
                            label="Inference Steps",
                            value=DEFAULT_NUM_INFERENCE_STEPS,
                            minimum=10,
                            maximum=100,
                            step=5
                        )
                    seed_input = gr.Number(
                        label="Seed (optional)",
                        value=None,
                        precision=0
                    )

                generate_btn = gr.Button(
                    "🎥 Generate Video",
                    variant="primary",
                    size="lg",
                    elem_classes=["generate-btn"]
                )

            with gr.Column(scale=1):
                # Output section
                gr.Markdown("## 🎥 Output")
                video_output = gr.Video(
                    label="Generated Video",
                    height=400
                )
                status_output = gr.Textbox(
                    label="Status",
                    interactive=False
                )

        # Example prompts
        gr.Markdown("## 💡 Example Prompts")
        examples = [
            "A majestic eagle soaring through mountain peaks at sunrise",
            "A busy city street with neon lights at night, cyberpunk style",
            "A peaceful garden with butterflies fluttering around colorful flowers",
            "A robot dancing in a futuristic disco with colorful lights",
            "A serene lake reflecting autumn trees with falling leaves"
        ]
        with gr.Row():
            for i, example in enumerate(examples):
                example_btn = gr.Button(example, size="sm")
                # The default argument (x=example) captures the current loop value,
                # avoiding Python's late-binding behaviour for closures in loops.
                example_btn.click(
                    lambda x=example: x,
                    outputs=prompt_input
                )
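
        # Note: generate_video's `progress` parameter is not listed in `inputs`;
        # Gradio sees the gr.Progress() default and injects a progress tracker
        # automatically. The listed inputs are passed to fn positionally, in order.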
        # Event handlers
        generate_btn.click(
            fn=generate_video,
            inputs=[
                prompt_input,
                height_input,
                width_input,
                num_frames_input,
                num_steps_input,
                seed_input
            ],
            outputs=[video_output, status_output],
            show_progress=True
        )

        # Initialize model on startup
        demo.load(
            fn=initialize_model,
            outputs=[status_output]
        )

        # Cleanup on page close
        demo.unload(
            fn=cleanup_temp_files
        )

    return demo
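
# On Hugging Face Spaces, app.py is executed directly, so this block also runs
# there; note that share=True generally has no effect on Spaces (share links
# are only created for local runs).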
if __name__ == "__main__":
    demo = create_demo()
    demo.launch(
        share=True,
        show_error=True,
        show_api=True
    )