Stephen Ebert committed on
Commit
fa83b40
·
1 Parent(s): ff1edeb

Add Stable Diffusion v1.5 Text→Image Gradio demo

Browse files
app.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Pick the fastest device available
device = (
    "mps" if torch.backends.mps.is_available()
    else "cuda" if torch.cuda.is_available()
    else "cpu"
)

# Half precision is only reliably supported on GPU backends; on CPU many
# ops are not implemented for float16 and the pipeline would error out,
# so fall back to float32 there.
dtype = torch.float16 if device in ("cuda", "mps") else torch.float32

# Load the model (you can remove safety_checker=None for public deploys)
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=dtype,
    safety_checker=None,
).to(device)
19
+
20
def generate(prompt: str, steps: int, guidance: float, seed: float):
    """
    Generate one or more images from a text prompt.

    Args:
        prompt: Text description of the desired image.
        steps: Number of denoising steps (more = slower, higher fidelity).
        guidance: Classifier-free guidance scale.
        seed: Seed for reproducibility; 0 (or empty) means random.

    Returns:
        A list of PIL images, suitable for display in a gr.Gallery.
    """
    # If seed > 0, use it; else let Diffusers pick a random seed.
    generator = (
        torch.Generator(device=device).manual_seed(int(seed))
        if seed and seed > 0
        else None
    )
    output = pipe(
        prompt,
        # Gradio number/slider inputs may arrive as floats; the scheduler
        # requires an integer step count.
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        generator=generator,
    )
    # returns a list of PIL images
    return output.images
38
+
39
# Assemble the Gradio interface: controls on the left, results on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Stable Diffusion Text→Image Generation Demo")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="e.g. ‘A serene forest at dawn’")
            steps = gr.Slider(1, 100, value=50, step=1, label="Inference Steps")
            guidance = gr.Slider(1, 15, value=7.5, step=0.1, label="Guidance Scale")
            seed = gr.Number(value=0, label="Random Seed (0 = random)")
            btn = gr.Button("Generate")
        with gr.Column():
            gallery = gr.Gallery(label="Generated Images", columns=2, height="auto")

    # Run generation whenever the button is pressed.
    btn.click(
        fn=generate,
        inputs=[prompt, steps, guidance, seed],
        outputs=gallery,
    )

if __name__ == "__main__":
    demo.launch()
create_space.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from huggingface_hub import HfApi

api = HfApi()

# Replace with your actual namespace (e.g. "stephenebert") and desired Space name
repo_id = "stephenebert/sd-text2image"

# Create a public Gradio Space.
# exist_ok=True makes this setup script idempotent: re-running it after the
# Space already exists is a no-op instead of raising an HTTP 409 error.
api.create_repo(
    repo_id=repo_id,
    repo_type="space",
    space_sdk="gradio",
    private=False,
    exist_ok=True,
)
print(f"Created Space: https://huggingface.co/spaces/{repo_id}")
images/bear walking in SD.png ADDED

Git LFS Details

  • SHA256: 7e1c18d5b4d3aed84bd2e57667ad539d0c7f9e7df249850722696c835f5bb59d
  • Pointer size: 132 Bytes
  • Size of remote file: 1.4 MB
images/bear walking prompt with SD.png ADDED

Git LFS Details

  • SHA256: 197c395ff8ae03e15b2f305a6b16c6106e0f74a14bc158aec068088e93e303f2
  • Pointer size: 132 Bytes
  • Size of remote file: 2.11 MB
images/cyber punk SD.png ADDED

Git LFS Details

  • SHA256: 7db99dce5fdae9674967866dde20df9c014e999fe1ccb36ab53998e4953720ac
  • Pointer size: 132 Bytes
  • Size of remote file: 2.04 MB
images/terminal.png ADDED

Git LFS Details

  • SHA256: 6ddd217bd90e3b0e7b3f2d6d9f17f84241149312c4900157744bef99c26e84dc
  • Pointer size: 131 Bytes
  • Size of remote file: 307 kB
pyaudioop.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Empty public API: this module intentionally exports nothing.
__all__ = []
# Stub for the removed audioop stdlib module on Python 3.13+ (it was
# dropped from the standard library).
# This lets pydub/gradio import pyaudioop without error,
# even though we never actually use any audio functions here.
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ torch>=2.2.0
2
+ diffusers>=0.18.0
3
+ transformers>=4.31.0
4
+ accelerate>=0.20.3
5
+ safetensors
6
+ gradio>=3.50.2
space.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # space.yaml
2
+ sdk: gradio
3
+ duplicate: false
4
+ title: "Stable Diffusion v1.5 — Text → Image Demo"
5
+ tags:
6
+ - stable-diffusion
7
+ - gradio
8
+ - diffusers
text2image_demo.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Pick the fastest device available
device = (
    "mps" if torch.backends.mps.is_available()
    else "cuda" if torch.cuda.is_available()
    else "cpu"
)

# Half precision is only reliably supported on GPU backends; on CPU many
# ops are not implemented for float16 and the pipeline would error out,
# so fall back to float32 there.
dtype = torch.float16 if device in ("cuda", "mps") else torch.float32

# Load the model (you can remove safety_checker=None for public deploys)
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=dtype,
    safety_checker=None,
).to(device)
19
+
20
def generate(prompt: str, steps: int, guidance: float, seed: float):
    """
    Generate one or more images from a text prompt.

    Args:
        prompt: Text description of the desired image.
        steps: Number of denoising steps (more = slower, higher fidelity).
        guidance: Classifier-free guidance scale.
        seed: Seed for reproducibility; 0 (or empty) means random.

    Returns:
        A list of PIL images, suitable for display in a gr.Gallery.
    """
    # If seed > 0, use it; else let Diffusers pick a random seed.
    generator = (
        torch.Generator(device=device).manual_seed(int(seed))
        if seed and seed > 0
        else None
    )
    output = pipe(
        prompt,
        # Gradio number/slider inputs may arrive as floats; the scheduler
        # requires an integer step count.
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        generator=generator,
    )
    # returns a list of PIL images
    return output.images
38
+
39
# Assemble the Gradio interface: controls on the left, results on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Stable Diffusion Text→Image Generation Demo")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="e.g. ‘A serene forest at dawn’")
            steps = gr.Slider(1, 100, value=50, step=1, label="Inference Steps")
            guidance = gr.Slider(1, 15, value=7.5, step=0.1, label="Guidance Scale")
            seed = gr.Number(value=0, label="Random Seed (0 = random)")
            btn = gr.Button("Generate")
        with gr.Column():
            gallery = gr.Gallery(label="Generated Images", columns=2, height="auto")

    # Run generation whenever the button is pressed.
    btn.click(
        fn=generate,
        inputs=[prompt, steps, guidance, seed],
        outputs=gallery,
    )

if __name__ == "__main__":
    demo.launch()