Mohansai2004 committed on
Commit
85a30db
·
1 Parent(s): a39dc79

feat: add CPU-optimized image generator

Browse files
Files changed (3) hide show
  1. README.md +14 -16
  2. app.py +17 -26
  3. requirements.txt +5 -4
README.md CHANGED
@@ -1,27 +1,25 @@
1
  ---
2
- title: CPU Image Generator
3
  emoji: 🎨
4
- colorFrom: indigo
5
- colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
- short_description: CPU-based image generation using SDXL 1.0
11
  ---
12
 
13
- # CPU Image Generator
14
- Image generation using Stable Diffusion XL 1.0 (CPU Mode)
15
 
16
  ## Features
17
- - CPU-compatible (no GPU required)
18
- - Memory-optimized processing
19
- - Reduced resolution options for faster generation
20
- - Automatic memory management
21
 
22
- ## Performance Notes
23
- - Expect 5-10 minutes per image
24
- - Recommended settings:
25
- - 256x256 or 512x512 resolution
26
- - 10-15 inference steps
27
- - Use lower resolution for faster results
 
1
  ---
2
+ title: Fast Image Generator
3
  emoji: 🎨
4
+ colorFrom: blue
5
+ colorTo: pink
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
+ short_description: CPU-optimized image generation using SD-v1.4
11
  ---
12
 
13
+ # Fast Image Generator
14
+ Quick image generation optimized for CPU using Stable Diffusion v1.4
15
 
16
  ## Features
17
+ - Optimized for CPU usage
18
+ - Fast generation (2-5 minutes)
19
+ - Memory efficient
20
+ - Stable performance
21
 
22
+ ## Recommended Settings
23
+ - Resolution: 384x384 for best speed/quality ratio
24
+ - Steps: 30-50 for good quality
25
+ - Use clear, simple prompts
 
 
app.py CHANGED
@@ -1,21 +1,18 @@
1
  import streamlit as st
2
- from diffusers import StableDiffusionXLPipeline
3
  import torch
4
  import gc
5
 
6
  @st.cache_resource
7
  def load_model():
8
- # Configure model loading
9
- model_id = "stabilityai/stable-diffusion-xl-base-1.0"
10
- pipe = StableDiffusionXLPipeline.from_pretrained(
11
  model_id,
12
  torch_dtype=torch.float32,
13
- use_safetensors=True,
14
- variant="fp32",
15
- device_map="balanced", # Changed from "cpu" to "balanced"
16
- local_files_only=False,
17
- low_cpu_mem_usage=True
18
- )
19
 
20
  # Memory optimizations
21
  pipe.enable_attention_slicing(slice_size=1)
@@ -26,23 +23,22 @@ def load_model():
26
 
27
  return pipe
28
 
29
- st.title("🎨 High-Quality Image Generator (CPU Mode)")
30
- st.write("Using Stable Diffusion XL 1.0 in CPU mode")
31
 
32
  # Initialize model
33
  pipeline = load_model()
34
 
35
- # Reduce image size further for CPU
36
  with st.sidebar:
37
- st.header("Advanced Settings (CPU Mode)")
38
- num_steps = st.slider("Steps (keep low for CPU)", 1, 15, 5)
39
  guidance_scale = st.slider("CFG Scale", 5.0, 15.0, 7.5)
40
- width = st.select_slider("Width", options=[256, 384, 512], value=256)
41
- height = st.select_slider("Height", options=[256, 384, 512], value=256)
42
 
43
- st.warning("⚠️ CPU mode: Generation may take 10-15 minutes")
44
 
45
- # Main interface
46
  prompt = st.text_area(
47
  "Describe your image:",
48
  "A professional photograph of a serene landscape at sunset, golden hour lighting, 8k uhd, highly detailed"
@@ -53,18 +49,13 @@ negative_prompt = st.text_area(
53
  "ugly, blurry, low quality, distorted, disfigured, poor details, bad anatomy, watermark"
54
  )
55
 
56
- # Update the generation part to handle device properly
57
  if st.button("Generate Image"):
58
- with st.spinner("Creating image (this may take 10-15 minutes)..."):
59
  try:
60
  gc.collect()
61
 
62
- # Enhanced prompt
63
- enhanced_prompt = f"{prompt}, best quality, highly detailed, sharp focus, ultra realistic, professional"
64
-
65
- # Move inputs to CPU explicitly
66
  image = pipeline(
67
- prompt=enhanced_prompt,
68
  negative_prompt=negative_prompt,
69
  num_inference_steps=num_steps,
70
  guidance_scale=guidance_scale,
 
1
  import streamlit as st
2
+ from diffusers import StableDiffusionPipeline
3
  import torch
4
  import gc
5
 
6
  @st.cache_resource
7
  def load_model():
8
+ # Use smaller model better suited for CPU
9
+ model_id = "CompVis/stable-diffusion-v1-4"
10
+ pipe = StableDiffusionPipeline.from_pretrained(
11
  model_id,
12
  torch_dtype=torch.float32,
13
+ safety_checker=None, # Disable safety checker for memory efficiency
14
+ requires_safety_checker=False
15
+ ).to("cpu")
 
 
 
16
 
17
  # Memory optimizations
18
  pipe.enable_attention_slicing(slice_size=1)
 
23
 
24
  return pipe
25
 
26
+ st.title("🎨 Fast Image Generator (CPU-Optimized)")
27
+ st.write("Using Stable Diffusion v1.4 optimized for CPU")
28
 
29
  # Initialize model
30
  pipeline = load_model()
31
 
32
+ # Adjust settings for faster generation
33
  with st.sidebar:
34
+ st.header("Generation Settings")
35
+ num_steps = st.slider("Steps", 20, 50, 30)
36
  guidance_scale = st.slider("CFG Scale", 5.0, 15.0, 7.5)
37
+ width = st.select_slider("Width", options=[256, 384, 512], value=384)
38
+ height = st.select_slider("Height", options=[256, 384, 512], value=384)
39
 
40
+ st.info("💡 Generation time: 2-5 minutes on CPU")
41
 
 
42
  prompt = st.text_area(
43
  "Describe your image:",
44
  "A professional photograph of a serene landscape at sunset, golden hour lighting, 8k uhd, highly detailed"
 
49
  "ugly, blurry, low quality, distorted, disfigured, poor details, bad anatomy, watermark"
50
  )
51
 
 
52
  if st.button("Generate Image"):
53
+ with st.spinner("Creating your image (2-5 minutes)..."):
54
  try:
55
  gc.collect()
56
 
 
 
 
 
57
  image = pipeline(
58
+ prompt=prompt,
59
  negative_prompt=negative_prompt,
60
  num_inference_steps=num_steps,
61
  guidance_scale=guidance_scale,
requirements.txt CHANGED
@@ -2,9 +2,10 @@
2
  # streamlit is already pre-installed
3
  pandas
4
  numpy
5
- transformers>=4.34.0
6
- torch>=2.0.0
7
- accelerate>=0.20.0
8
  einops
9
  diffusers
10
- safetensors
 
 
2
  # streamlit is already pre-installed
3
  pandas
4
  numpy
5
+ transformers
6
+ torch
7
+ accelerate
8
  einops
9
  diffusers
10
+ safetensors
11
+ scipy