adminuser742150 committed on
Commit
95120dc
·
verified ·
1 Parent(s): 980ace9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -20
app.py CHANGED
@@ -1,24 +1,26 @@
 
 
 
1
  import gradio as gr
2
 
3
def generate_video(prompt):
    """Placeholder for the real Zeroscope video-generation logic.

    Returns a status string echoing the prompt; swap this out for code
    that returns an actual video file path or URL.
    """
    return "🎬 Video generated from prompt: '{}'".format(prompt)
 
 
 
 
 
 
 
7
 
8
# Assemble the Gradio Blocks UI; naming the click endpoint exposes it
# through the auto-generated HTTP API.
with gr.Blocks() as demo:
    gr.Markdown("## 🎥 Zeroscope Video Generator")

    with gr.Row():
        prompt_box = gr.Textbox(label="Enter Prompt", placeholder="Type your video idea here...")

    with gr.Row():
        result_box = gr.Textbox(label="Result")

    run_btn = gr.Button("Generate Video")

    # api_name="generate" makes this callable as a named API endpoint.
    run_btn.click(fn=generate_video, inputs=prompt_box, outputs=result_box, api_name="generate")

# Enable queuing and launch with the API documentation page visible.
demo.queue().launch(show_api=True)
 
1
+
2
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Load Stable Diffusion 2.1 (base) for CPU inference in full precision
# (float32 — CPUs generally lack fast half-precision support).
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    torch_dtype=torch.float32,
)
pipe = pipe.to("cpu")
# Trade a little speed for a lower peak-memory footprint during attention.
pipe.enable_attention_slicing()
12
+
13
def generate(prompt):
    """Generate one image from *prompt* with the module-level pipeline.

    Uses 15 inference steps and a guidance scale of 7.5; returns a PIL
    image (the first of the pipeline's outputs).
    """
    result = pipe(prompt, num_inference_steps=15, guidance_scale=7.5)
    return result.images[0]
17
 
18
# Simple prompt-in, image-out UI for the Space.
prompt_input = gr.Textbox(label="Prompt")
image_output = gr.Image(type="pil")
demo = gr.Interface(
    fn=generate,
    inputs=prompt_input,
    outputs=image_output,
)

# Start the Gradio server.
demo.launch()