tester343 committed on
Commit
c1ba63c
·
verified ·
1 Parent(s): 3ad9157

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -45
app.py CHANGED
@@ -1,47 +1,76 @@
 
 
1
  import torch
 
2
  from diffusers import StableDiffusionPipeline
3
- from PIL import Image
4
-
5
- # ----------------------------
6
- # Settings
7
- # ----------------------------
8
- MODEL_ID = "stabilityai/stable-diffusion-1.5-turbo" # Turbo model for CPU speed
9
- DEVICE = "cpu" # CPU only
10
- DTYPE = torch.float32 # CPU-friendly
11
-
12
- PROMPT = "A cute cat wearing sunglasses, digital art"
13
- WIDTH = 768
14
- HEIGHT = 768
15
- NUM_STEPS = 1 # Fastest possible
16
- GUIDANCE_SCALE = 0 # Disable guidance for speed
17
-
18
- # ----------------------------
19
- # Load pipeline
20
- # ----------------------------
21
- pipe = StableDiffusionPipeline.from_pretrained(
22
- MODEL_ID,
23
- torch_dtype=DTYPE,
24
- safety_checker=None, # Optional, disable for speed
25
- feature_extractor=None # Optional, disable for speed
26
- )
27
- pipe.to(DEVICE)
28
-
29
- # ----------------------------
30
- # Generate image
31
- # ----------------------------
32
- generator = torch.Generator(device=DEVICE).manual_seed(42) # Optional seed
33
-
34
- image: Image.Image = pipe(
35
- prompt=PROMPT,
36
- height=HEIGHT,
37
- width=WIDTH,
38
- num_inference_steps=NUM_STEPS,
39
- guidance_scale=GUIDANCE_SCALE,
40
- generator=generator
41
- ).images[0]
42
-
43
- # ----------------------------
44
- # Save or show
45
- # ----------------------------
46
- image.save("output.png")
47
- image.show()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import os
3
  import torch
4
+ import gradio as gr
5
  from diffusers import StableDiffusionPipeline
6
+
7
+ # -----------------------
8
+ # Config
9
+ # -----------------------
10
+ # Use a valid public turbo model
11
+ MODEL_ID = "stabilityai/sd-turbo" # <- change to "stabilityai/sdxl-turbo" if you prefer SDXL-Turbo
12
+
13
+ # Optional: supply HF token in Spaces / env: HF_TOKEN=hf_xxx
14
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
15
+
16
+ DEVICE = "cpu"
17
+ DTYPE = torch.float32
18
+
19
+ # Limit threads to avoid oversubscription on Spaces
20
+ os.environ["OMP_NUM_THREADS"] = "4"
21
+ os.environ["MKL_NUM_THREADS"] = "4"
22
+ torch.set_num_threads(4)
23
+
24
+ # -----------------------
25
+ # Load pipeline (robust)
26
+ # -----------------------
27
+ download_kwargs = {}
28
+ if HF_TOKEN:
29
+ download_kwargs["token"] = HF_TOKEN
30
+
31
+ try:
32
+ pipe = StableDiffusionPipeline.from_pretrained(
33
+ MODEL_ID,
34
+ torch_dtype=DTYPE,
35
+ safety_checker=None, # optional: speed + avoid HF safety issues on CPU
36
+ **download_kwargs
37
+ )
38
+ except Exception as e:
39
+ raise RuntimeError(
40
+ f"Failed to download/load {MODEL_ID}. "
41
+ "If this is a gated model, add a valid HF_TOKEN env var. "
42
+ f"Underlying error: {e}"
43
+ )
44
+
45
+ pipe = pipe.to(DEVICE)
46
+ pipe.enable_attention_slicing()
47
+
48
+ # -----------------------
49
+ # Inference (1-step turbo)
50
+ # -----------------------
51
def generate(prompt: str, seed: int = 42):
    """Generate one 768x768 image from *prompt* with a single turbo step.

    Args:
        prompt: Text prompt for the diffusion model.
        seed: RNG seed; 0 (or any falsy value) disables seeding for a
            non-deterministic result. The Gradio ``Number`` widget delivers
            this as a float, so it is cast to ``int`` before seeding —
            ``torch.Generator.manual_seed`` rejects floats.

    Returns:
        The first image produced by the pipeline (``.images[0]``).
    """
    # int(seed): gr.Number yields float; manual_seed() requires an int.
    generator = (
        torch.Generator(device=DEVICE).manual_seed(int(seed)) if seed else None
    )

    image = pipe(
        prompt=prompt,
        height=768,
        width=768,
        num_inference_steps=1,   # one step
        guidance_scale=0.0,      # disable classifier-free guidance for turbo models
        generator=generator,
    ).images[0]
    return image
63
+
64
+ # -----------------------
65
+ # Gradio UI
66
+ # -----------------------
67
# -----------------------
# Gradio UI
# -----------------------
# Minimal Blocks layout: one prompt box, a seed field, a button, an output image.
with gr.Blocks() as demo:
    gr.Markdown("## Turbo (1-step) Image Generation — CPU")
    prompt_box = gr.Textbox(placeholder="A cute cat wearing sunglasses", lines=2)
    seed_field = gr.Number(value=42, label="Seed (0 = random)")
    run_button = gr.Button("Generate")
    output_image = gr.Image()
    # Wire the button: (prompt, seed) -> generated image.
    run_button.click(generate, inputs=[prompt_box, seed_field], outputs=output_image)

if __name__ == "__main__":
    demo.launch()