# t2v / app.py — Gradio Space by dannyboy84 (commit 3373062, verified)
# NOTE: the lines above were scraped Hugging Face page chrome, not Python;
# converted to a comment header so the file parses.
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
import torch
import tempfile
import os
# Load the model once at import time (module-level side effect: this downloads
# the checkpoint on first run and allocates it on the available device).
# fp16 weights + variant halve memory use; NOTE(review): fp16 presumably
# targets a CUDA GPU — confirm the deployment hardware supports half precision.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16"
)
pipe.enable_model_cpu_offload() # Optimize for low memory
# Function to generate video
def generate_video(image, num_frames=14, fps=7):
    """Animate a single image into a short video with Stable Video Diffusion.

    Args:
        image: PIL image to animate; ``None`` (no upload) yields ``None`` so
            Gradio simply shows an empty output.
        num_frames: number of frames to synthesize. Gradio sliders deliver
            floats, so the value is coerced to ``int`` before use.
        fps: frames per second, used both as pipeline conditioning and as the
            encoding rate of the resulting mp4. Also coerced to ``int``.

    Returns:
        Filesystem path to the generated ``.mp4``, or ``None`` if no image
        was supplied.
    """
    if image is None:
        return None
    # Sliders hand back floats; the pipeline expects integer counts/rates.
    num_frames = int(num_frames)
    fps = int(fps)
    frames = pipe(image, num_frames=num_frames, fps=fps).frames[0]
    # Bug fix: the original exported into a TemporaryDirectory and returned a
    # path inside it — the directory (and the video) was deleted as soon as
    # the `with` block exited, so Gradio received a dangling path. Write to a
    # non-self-deleting temp file instead; Gradio serves it after we return.
    fd, temp_video_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)  # export_to_video opens the path itself; don't leak the fd
    export_to_video(frames, temp_video_path, fps=fps)
    return temp_video_path
# Gradio interface: wire the generator function to an image upload plus two
# sliders, and expose the resulting mp4 in a video player.
image_input = gr.Image(type="pil", label="Upload an Image")
frame_count = gr.Slider(5, 25, value=14, label="Number of Frames")
frame_rate = gr.Slider(5, 10, value=7, label="FPS")
video_output = gr.Video(label="Generated Video")

iface = gr.Interface(
    fn=generate_video,
    inputs=[image_input, frame_count, frame_rate],
    outputs=video_output,
    title="AI Video Generator",
    description="Turn an image into a short video using Stable Video Diffusion!",
)

# Launch only when executed as a script (Spaces runs this module directly).
if __name__ == "__main__":
    iface.launch()