import gradio as gr
import imageio
import numpy as np
import torch
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
# Load the Stable Video Diffusion image-to-video pipeline once at module
# import so the weights are not re-loaded on every request.
_device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid",
    # fp16 kernels are missing for many CPU ops — loading in half precision
    # on a CPU-only machine makes inference crash. Match dtype to device.
    torch_dtype=torch.float16 if _device == "cuda" else torch.float32,
)
pipe.to(_device)
def generate_video(image):
    """Generate a short MP4 clip from a single still image.

    Args:
        image: PIL image from the Gradio UI; ``None`` when the user
            submits without uploading anything.

    Returns:
        str: path to the encoded MP4 file.

    Raises:
        gr.Error: if no image was provided.
    """
    if image is None:
        raise gr.Error("Please upload an image first.")
    image = image.convert("RGB")
    # The pipeline returns a batch of videos; frames[0] is the list of
    # PIL frames for our single input image.
    frames = pipe(image, num_frames=12).frames[0]
    output_path = "video.mp4"
    # imageio expects array-like frames — convert each PIL frame explicitly
    # rather than relying on implicit coercion.
    imageio.mimsave(output_path, [np.asarray(frame) for frame in frames], fps=8)
    return output_path
# Web UI: one image input, one video output.
app = gr.Interface(
    fn=generate_video,
    inputs=gr.Image(type="pil"),
    outputs=gr.Video(),
    title="Image → Video AI",
)

# Start the server only when executed as a script; importing this module
# (e.g. from tests or a WSGI wrapper) must not launch a web server.
if __name__ == "__main__":
    app.launch()