NaqchoAli commited on
Commit
075c786
·
verified ·
1 Parent(s): ea6b138

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import imageio
import numpy as np
import torch
from diffusers import StableDiffusionPipeline
from moviepy.editor import ImageSequenceClip
6
+
7
# Load the text-to-image model once at import time so every request reuses it.
def load_model():
    """Load the Stable Diffusion pipeline.

    Returns:
        The pipeline moved to GPU when CUDA is available, otherwise CPU,
        so the app also starts on machines without a GPU.
    """
    model = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-v1-4")
    # Fall back to CPU instead of crashing on CUDA-less hosts; the original
    # unconditional .to("cuda") raises a RuntimeError without a GPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return model


model = load_model()
15
# Generate a short video by sampling one diffusion image per frame.
def generate_video(prompt, num_frames=30, fps=10):
    """Generate an MP4 video from a text prompt.

    Args:
        prompt: Text description of the desired video content.
        num_frames: Number of frames to synthesize (one model call each).
        fps: Playback frame rate of the written video.

    Returns:
        Path to the written MP4 file ("generated_video.mp4").
    """
    frames = []
    for i in range(num_frames):
        # Vary the prompt slightly per frame so consecutive frames differ.
        frame_prompt = f"{prompt}, frame {i}"
        image = model(frame_prompt).images[0]
        # ImageSequenceClip expects numpy arrays (or filenames), not PIL
        # Images — passing PIL objects fails at clip-assembly time.
        frames.append(np.array(image))

    # Assemble frames into an H.264-encoded MP4.
    video_path = "generated_video.mp4"
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(video_path, codec="libx264")

    return video_path
31
# Gradio callback: delegate straight to the video generator.
def process_prompt(prompt):
    """Return the path of a video generated from *prompt*."""
    return generate_video(prompt)
36
# Gradio interface: one text box in, one video player out.
interface = gr.Interface(
    fn=process_prompt,
    inputs="text",
    outputs="video",
    title="Text-to-Video Generator",
    description="Enter a prompt to generate a video based on your description.",
)

# Launch only when run as a script, so importing this module (e.g. in tests)
# does not start a web server.
if __name__ == "__main__":
    interface.launch()