NeelTA committed on
Commit
65fb989
·
1 Parent(s): ce27c20

library added

Browse files
Files changed (2) hide show
  1. app.py +17 -14
  2. requirements.txt +4 -1
app.py CHANGED
@@ -1,32 +1,35 @@
1
  import gradio as gr
2
  import torch
3
- from optimum.onnxruntime import ORTStableDiffusionPipeline
 
4
 
5
- # Load model once at startup
6
- pipe = ORTStableDiffusionPipeline.from_pretrained(
7
- "optimum/stable-diffusion-v1-5-onnx",
8
- provider="CPUExecutionProvider"
 
 
 
 
9
  )
 
10
  pipe.set_progress_bar_config(disable=True)
 
11
 
12
  def generate(prompt):
13
- # Fast generation
14
  result = pipe(
15
  prompt=prompt,
16
- num_inference_steps=15, # Keep low for Spaces
17
- guidance_scale=7.5,
18
- height=512,
19
- width=512
20
  )
21
  return result.images[0]
22
 
23
- # Gradio interface
24
  interface = gr.Interface(
25
  fn=generate,
26
- inputs=gr.Textbox(lines=3, placeholder="Enter your prompt here..."),
27
  outputs=gr.Image(type="pil"),
28
- title="Fast Stable Diffusion",
29
- description="Generate images in under 60 seconds!"
30
  )
31
 
32
  interface.launch()
 
1
import os

# NOTE: HF_HOME must be set BEFORE huggingface_hub is imported (diffusers
# pulls it in transitively), because huggingface_hub resolves its cache
# directory at import time — assigning it after the import has no effect.
os.environ["HF_HOME"] = "/tmp/hf_cache"

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

print("Loading model...")
# CPU-only Space: keep float32 (half precision is poorly supported on CPU)
# and use low_cpu_mem_usage to limit peak RAM while loading the weights.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    torch_dtype=torch.float32,
    low_cpu_mem_usage=True,
)
pipe = pipe.to("cpu")
# Progress bars would spam the Space logs; silence them.
pipe.set_progress_bar_config(disable=True)
print("Model loaded!")
19
def generate(prompt):
    """Run the diffusion pipeline on *prompt* and return the first PIL image."""
    output = pipe(
        prompt=prompt,
        num_inference_steps=15,  # few steps keeps CPU latency tolerable
        guidance_scale=7.5,
    )
    return output.images[0]
26
 
 
27
  interface = gr.Interface(
28
  fn=generate,
29
+ inputs=gr.Textbox(lines=3, label="Prompt"),
30
  outputs=gr.Image(type="pil"),
31
+ title=" Lightweight Stable Diffusion",
32
+ description="Faster CPU-based image generation"
33
  )
34
 
35
  interface.launch()
requirements.txt CHANGED
@@ -1,3 +1,6 @@
1
  optimum[onnxruntime]
2
  torch
3
- gradio
 
 
 
 
1
  optimum[onnxruntime]
2
  torch
3
+ diffusers
4
+ transformers
5
+ gradio
6
+ accelerate