| import gradio as gr |
| import torch |
| from diffusers import DiffusionPipeline |
| import imageio |
| import os |
| import uuid |
|
|
# Prefer the GPU when one is visible to torch; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"


print("Loading model...")


# ModelScope text-to-video pipeline, loaded once at import time so every
# request reuses the same weights. Half precision is used on GPU to cut
# memory use; CPU inference requires float32.
# NOTE(review): first run downloads the weights from the Hugging Face Hub.
pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32
)


# Move the whole pipeline onto the selected device.
pipe.to(device)


print("Model loaded successfully!")
|
|
|
|
def generate_video(prompt, num_frames, fps):
    """Generate a short video clip from a text prompt.

    Args:
        prompt: Text description of the desired video.
        num_frames: Number of frames to generate (comes from a Gradio
            slider, which may deliver a float; cast to int before use).
        fps: Playback frame rate for the encoded file (also cast to int).

    Returns:
        Path of the generated ``.mp4`` file, which the ``gr.Video``
        output component loads and displays.

    Raises:
        gr.Error: If generation or encoding fails, so the UI shows the
            error message to the user.
    """
    try:
        # Sliders can hand back floats; the pipeline expects an int count.
        video_frames = pipe(prompt, num_frames=int(num_frames)).frames
        # NOTE(review): recent diffusers versions return a batched result
        # (one entry per prompt, possibly float frames in [0, 1]) and may
        # need `.frames[0]` plus a uint8 conversion before encoding —
        # confirm against the pinned diffusers version.

        # Unique filename per request so concurrent users don't clobber
        # each other's output files.
        filename = f"output_{uuid.uuid4().hex}.mp4"
        imageio.mimsave(filename, video_frames, fps=int(fps))

        return filename

    except Exception as e:
        # The previous code returned the error text as the output value,
        # which made the gr.Video component try (and silently fail) to
        # load "Error: ..." as a file path. Raising gr.Error surfaces the
        # message in the UI instead.
        raise gr.Error(f"Video generation failed: {e}") from e
|
|
|
|
# --- UI definition -------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Page header.
    gr.Markdown("# π¬ AI Text to Video Generator")
    gr.Markdown("Generate high-quality AI videos from text prompts")

    # Prompt input on its own row.
    with gr.Row():
        prompt = gr.Textbox(
            label="Enter your prompt",
            placeholder="A cinematic drone shot of mountains at sunset",
        )

    # Generation controls side by side.
    with gr.Row():
        num_frames = gr.Slider(
            minimum=8, maximum=32, value=16, step=4, label="Number of Frames"
        )
        fps = gr.Slider(minimum=4, maximum=12, value=8, step=1, label="FPS")

    generate_btn = gr.Button("Generate Video π")
    output_video = gr.Video(label="Generated Video")

    # Wire the button to the generation function.
    generate_btn.click(
        fn=generate_video,
        inputs=[prompt, num_frames, fps],
        outputs=output_video,
    )


demo.launch()