|
|
import gradio as gr
import imageio
import numpy as np
import torch
from diffusers import StableDiffusionPipeline
from moviepy.editor import ImageSequenceClip
|
|
|
|
|
|
|
|
def load_model():
    """Load the Stable Diffusion v1-4 text-to-image pipeline.

    Returns:
        StableDiffusionPipeline: the pipeline, moved to GPU when one is
        available, otherwise left on CPU.
    """
    # Bug fix: the original repo id "stabilityai/stable-diffusion-v1-4" does
    # not exist on the Hugging Face Hub — the v1-4 checkpoint is published
    # under "CompVis/stable-diffusion-v1-4".
    # NOTE(review): confirm this is the intended checkpoint.
    model = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    # Bug fix: an unconditional .to("cuda") raises on CPU-only machines;
    # fall back to CPU when no GPU is present.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return model
|
|
|
|
|
# Load the pipeline once at import time so every request reuses the same
# weights (loading is slow and memory-heavy; do not reload per request).
model = load_model()
|
|
|
|
|
|
|
|
def generate_video(prompt, num_frames=30, fps=10):
    """Generate a short video by sampling one diffusion image per frame.

    Each frame is produced independently by appending a frame index to the
    prompt, so there is no temporal coherence between frames — this is a
    simple slideshow-style video, not true video diffusion.

    Args:
        prompt: Text description used for every frame.
        num_frames: Number of frames to generate.
        fps: Playback frame rate of the output clip.

    Returns:
        str: Path of the written MP4 file ("generated_video.mp4").
    """
    frames = []
    for i in range(num_frames):
        # Vary the prompt slightly per frame so successive frames differ.
        frame_prompt = f"{prompt}, frame {i}"
        image = model(frame_prompt).images[0]
        # Bug fix: ImageSequenceClip expects numpy arrays (or filenames),
        # not PIL.Image objects — convert each frame to an array.
        frames.append(np.array(image))

    video_path = "generated_video.mp4"
    # The original `[f for f in frames]` was a pointless shallow copy.
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(video_path, codec="libx264")

    return video_path
|
|
|
|
|
|
|
|
def process_prompt(prompt):
    """Gradio callback: turn a text prompt into a generated video file path."""
    return generate_video(prompt)
|
|
|
|
|
# Wire the prompt handler into a simple text-in / video-out web UI.
ui_config = dict(
    fn=process_prompt,
    inputs="text",
    outputs="video",
    title="Text-to-Video Generator",
    description="Enter a prompt to generate a video based on your description.",
)
interface = gr.Interface(**ui_config)

# Start the local Gradio server (blocks until shut down).
interface.launch()
|
|
|