# app.py — tittoosai Hugging Face Space (last update: commit 121494c, "Update app.py")
import importlib.util
import subprocess
import sys

# ---- Ensure moviepy + ffmpeg are available at runtime (the HF build sometimes
# skips installing them). Installing unconditionally adds a slow pip call to
# every cold start, so probe for the module first and only shell out to pip
# when the import would actually fail.
if importlib.util.find_spec("moviepy") is None:
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "moviepy==1.0.3", "imageio[ffmpeg]"],
        check=True,  # fail fast: the app cannot encode video without moviepy
    )
import gradio as gr
import moviepy.editor as mpy
import numpy as np
import torch
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
# Select the execution device and the matching weight precision.
use_gpu = torch.cuda.is_available()
device = "cuda" if use_gpu else "cpu"

# fp16 checkpoint variants only exist (and only make sense) on GPU;
# CPU inference falls back to the full-precision fp32 weights.
if use_gpu:
    _dtype, _variant = torch.float16, "fp16"
else:
    _dtype, _variant = torch.float32, None

# Load the image-to-video diffusion pipeline once at startup and move it
# onto the chosen device.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid",
    torch_dtype=_dtype,
    variant=_variant,
).to(device)
def generate_video(image, frames=25, motion_strength=0.8, fps=8):
    """Run Stable Video Diffusion on a still image and encode the result as mp4.

    Args:
        image: Input PIL.Image (any mode; converted to RGB before inference).
        frames: Number of frames to synthesize. Gradio sliders may deliver
            floats, so the value is coerced to int before use.
        motion_strength: 0.0-1.0 slider value, scaled onto SVD's
            motion_bucket_id range (0-127).
        fps: Frame rate of the encoded video.

    Returns:
        Filesystem path of the written H.264 mp4 (no audio track).
    """
    image = image.convert("RGB")
    result = pipe(
        image,
        num_frames=int(frames),
        motion_bucket_id=int(motion_strength * 127),
        noise_aug_strength=0.05
    )
    # result.frames[0] is a list of PIL images, but moviepy's
    # ImageSequenceClip requires numpy arrays (or filenames) — passing PIL
    # images raises at runtime, so convert each frame first.
    frames_list = [np.asarray(frame) for frame in result.frames[0]]
    clip = mpy.ImageSequenceClip(frames_list, fps=int(fps))
    output_path = "/tmp/generated_video.mp4"
    # verbose/logger silenced so moviepy's progress bar doesn't spam the logs.
    clip.write_videofile(output_path, codec="libx264", audio=False, verbose=False, logger=None)
    return output_path
# Build the Gradio UI: one image input plus three tuning sliders, wired to
# generate_video, producing a single video output.
image_input = gr.Image(label="Upload Image", type="pil")
frame_slider = gr.Slider(10, 50, value=25, step=1, label="Frames")
motion_slider = gr.Slider(0.0, 1.0, value=0.8, step=0.05, label="Motion Strength")
fps_slider = gr.Slider(4, 12, value=8, step=1, label="FPS")

iface = gr.Interface(
    fn=generate_video,
    inputs=[image_input, frame_slider, motion_slider, fps_slider],
    outputs=gr.Video(label="Generated Video"),
    title="🎥 AI Image → Video Generator",
    description="Upload any photo and generate a short cinematic motion video using Stable Video Diffusion.",
)
iface.launch()