"""Gradio app that generates a short video from a text prompt.

Each frame is produced by a Stable Diffusion text-to-image pipeline with a
slightly varied prompt; the frames are then stitched into an MP4 with moviepy.

NOTE(review): Stable Diffusion v1-4 is a text-to-IMAGE model. Consecutive
frames are generated independently, so the result will flicker rather than
animate coherently. A dedicated text-to-video model (e.g. ModelScope T2V or
Stable Video Diffusion) would give temporally consistent output.
"""

import gradio as gr
import imageio  # kept from original; not used directly (possible moviepy backend)
import numpy as np
import torch
from diffusers import StableDiffusionPipeline
from moviepy.editor import ImageSequenceClip


def load_model():
    """Load the Stable Diffusion pipeline on the best available device.

    Returns:
        A StableDiffusionPipeline ready for inference.
    """
    # Bug fix: the v1-4 weights are published under the CompVis org on the
    # Hugging Face Hub; "stabilityai/stable-diffusion-v1-4" does not exist.
    model = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    # Bug fix: the original unconditionally called .to("cuda"), which raises
    # on CPU-only machines. Fall back to CPU when no GPU is present.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return model


model = load_model()


def generate_video(prompt, num_frames=30, fps=10):
    """Generate `num_frames` images from `prompt` and write them to an MP4.

    Args:
        prompt: Text description used to condition every frame.
        num_frames: Number of frames to generate (default 30).
        fps: Frame rate of the resulting video (default 10).

    Returns:
        Path to the written MP4 file ("generated_video.mp4").
    """
    frames = []
    for i in range(num_frames):
        # Vary the prompt per frame so frames are not all identical.
        frame_prompt = f"{prompt}, frame {i}"
        image = model(frame_prompt).images[0]
        # Bug fix: ImageSequenceClip expects numpy arrays of shape (H, W, 3),
        # not PIL Images — convert each frame before collecting it.
        frames.append(np.asarray(image))

    video_path = "generated_video.mp4"
    # Fix: pass the frame list directly; the original wrapped it in a
    # redundant identity list comprehension.
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(video_path, codec="libx264")
    return video_path


def process_prompt(prompt):
    """Gradio callback: turn a text prompt into a video file path."""
    return generate_video(prompt)


interface = gr.Interface(
    fn=process_prompt,
    inputs="text",
    outputs="video",
    title="Text-to-Video Generator",
    description="Enter a prompt to generate a video based on your description.",
)

if __name__ == "__main__":
    # Guard the launch so importing this module does not start a web server.
    interface.launch()