Ntdeseb committed on
Commit
dec3622
·
verified ·
1 Parent(s): 359039c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +255 -229
app.py CHANGED
@@ -3,130 +3,142 @@ import numpy as np
3
  import random
4
  import torch
5
  import gc
6
- from typing import Optional, Tuple
7
  import warnings
 
 
 
 
8
  warnings.filterwarnings("ignore")
 
9
 
10
- # CPU-optimized imports
11
- from diffusers import (
12
- StableDiffusionPipeline,
13
- DiffusionPipeline,
14
- DDIMScheduler,
15
- DPMSolverMultistepScheduler
16
- )
17
- from transformers import CLIPTokenizer
18
 
19
- # Force CPU usage and optimize for low-resource environment
 
 
 
 
 
 
 
 
 
20
  device = "cpu"
21
- torch.set_num_threads(2) # Match vCPU count
22
  MAX_SEED = np.iinfo(np.int32).max
23
 
24
- # CPU-optimized model configurations
25
  MODEL_CONFIGS = {
26
- "🚀 Tiny SD (Ultra Fast)": {
27
- "repo_id": "nota-ai/bk-sdm-small",
28
- "torch_dtype": torch.float32,
29
- "max_resolution": 512,
30
- "default_steps": 10,
31
- "default_guidance": 6.0,
32
- "memory_usage": "Very Low",
33
- "speed": "Ultra Fast"
34
- },
35
- "⚡ SD 1.4 (Fast)": {
36
  "repo_id": "CompVis/stable-diffusion-v1-4",
 
37
  "torch_dtype": torch.float32,
38
  "max_resolution": 512,
39
  "default_steps": 15,
40
  "default_guidance": 7.5,
41
  "memory_usage": "Low",
42
- "speed": "Fast"
43
  },
44
- "🎨 SD 1.5 (Balanced)": {
45
  "repo_id": "runwayml/stable-diffusion-v1-5",
 
46
  "torch_dtype": torch.float32,
47
  "max_resolution": 512,
48
  "default_steps": 20,
49
  "default_guidance": 7.5,
50
  "memory_usage": "Medium",
51
- "speed": "Medium"
52
  },
53
- "🖼️ OpenJourney (Artistic)": {
54
- "repo_id": "prompthero/openjourney",
 
55
  "torch_dtype": torch.float32,
56
  "max_resolution": 512,
57
  "default_steps": 18,
58
  "default_guidance": 8.0,
59
  "memory_usage": "Medium",
60
- "speed": "Medium"
61
  },
62
- "🌟 Dreamlike (Quality)": {
63
- "repo_id": "dreamlike-art/dreamlike-diffusion-1.0",
 
64
  "torch_dtype": torch.float32,
65
- "max_resolution": 448,
66
- "default_steps": 25,
67
- "default_guidance": 8.0,
68
- "memory_usage": "Medium-High",
69
- "speed": "Slower"
70
  }
71
  }
72
 
73
- # Global variables for memory management
74
  current_pipeline = None
75
  current_model_name = None
76
 
77
  def clear_memory():
78
- """Aggressive memory cleanup for CPU environment"""
79
  global current_pipeline
80
  if current_pipeline is not None:
81
  del current_pipeline
82
  current_pipeline = None
83
-
84
- # Force garbage collection
85
  gc.collect()
86
-
87
- # Clear any cached models
88
- torch.cuda.empty_cache() if torch.cuda.is_available() else None
89
 
90
- def load_model_cpu_optimized(model_name: str):
91
- """Load model with maximum CPU optimization"""
92
  global current_pipeline, current_model_name
93
 
94
- # Return cached pipeline if same model
95
  if current_model_name == model_name and current_pipeline is not None:
96
  return current_pipeline, "✅ Using cached model"
97
 
98
- # Clear previous model
99
  clear_memory()
100
 
 
 
 
101
  try:
102
  config = MODEL_CONFIGS[model_name]
103
 
104
- # Load with CPU optimizations
105
  pipe = StableDiffusionPipeline.from_pretrained(
106
  config["repo_id"],
 
107
  torch_dtype=config["torch_dtype"],
108
- safety_checker=None, # Disable for speed
109
  requires_safety_checker=False,
110
- use_safetensors=False, # Faster loading on CPU
111
- local_files_only=False
 
 
 
112
  )
113
 
114
- # Apply CPU-specific optimizations
115
  pipe = pipe.to(device)
116
 
117
  # Enable attention slicing for memory efficiency
118
- pipe.enable_attention_slicing(1)
 
119
 
120
- # Use memory efficient scheduler
121
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(
122
- pipe.scheduler.config,
123
- use_karras_sigmas=True,
124
- algorithm_type="dpmsolver++"
125
- )
126
 
127
- # Additional CPU optimizations
128
- pipe.unet.to(memory_format=torch.channels_last)
129
- pipe.vae.to(memory_format=torch.channels_last)
 
 
130
 
131
  current_pipeline = pipe
132
  current_model_name = model_name
@@ -134,9 +146,15 @@ def load_model_cpu_optimized(model_name: str):
134
  return pipe, f"✅ {model_name} loaded successfully!"
135
 
136
  except Exception as e:
137
- return None, f"❌ Failed to load {model_name}: {str(e)}"
 
 
 
 
 
 
138
 
139
- def generate_image_cpu(
140
  model_name: str,
141
  prompt: str,
142
  negative_prompt: str,
@@ -152,8 +170,8 @@ def generate_image_cpu(
152
  if not prompt.strip():
153
  return None, seed, "⚠️ Please enter a prompt"
154
 
155
- # Load model
156
- pipe, status = load_model_cpu_optimized(model_name)
157
  if pipe is None:
158
  return None, seed, status
159
 
@@ -164,7 +182,7 @@ def generate_image_cpu(
164
 
165
  generator = torch.Generator().manual_seed(seed)
166
 
167
- # CPU-specific constraints
168
  config = MODEL_CONFIGS[model_name]
169
  max_res = config["max_resolution"]
170
  width = min(width, max_res)
@@ -174,27 +192,42 @@ def generate_image_cpu(
174
  width = (width // 8) * 8
175
  height = (height // 8) * 8
176
 
177
- # Limit steps for CPU
178
  num_inference_steps = min(num_inference_steps, 30)
179
 
180
- progress(0, desc="Starting generation...")
181
 
182
- # Generate with CPU optimizations
183
  with torch.no_grad():
184
- result = pipe(
185
- prompt=prompt,
186
- negative_prompt=negative_prompt if negative_prompt.strip() else None,
187
- guidance_scale=guidance_scale,
188
- num_inference_steps=num_inference_steps,
189
- width=width,
190
- height=height,
191
- generator=generator,
192
- callback_on_step_end=lambda step, timestep, latents: progress(step/num_inference_steps)
193
- )
194
-
195
- image = result.images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
 
197
- # Memory cleanup after generation
198
  del result
199
  gc.collect()
200
 
@@ -203,220 +236,213 @@ def generate_image_cpu(
203
  except Exception as e:
204
  error_msg = f"❌ Generation failed: {str(e)}"
205
  if "memory" in str(e).lower() or "out of" in str(e).lower():
206
- error_msg += "\n💡 Try: Smaller resolution (256x256), fewer steps (10-15), or Tiny SD model"
 
 
207
  return None, seed, error_msg
208
 
209
- # Optimized example prompts for CPU generation
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
  examples = [
211
- "a cute cat sitting in a garden, digital art",
212
- "mountain landscape at sunset, beautiful painting",
213
- "portrait of a smiling person, photography",
214
- "colorful flowers in a vase, oil painting",
215
- "futuristic city skyline, concept art",
216
- "peaceful forest path, nature photography"
217
  ]
218
 
219
- # CSS optimized for CPU performance (minimal animations)
220
  css = """
221
  #col-container {
222
  margin: 0 auto;
223
  max-width: 800px;
224
  padding: 20px;
225
  }
226
- .model-card {
227
  padding: 15px;
228
  margin: 10px 0;
229
  border-radius: 8px;
230
- background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
231
- border: 1px solid #ddd;
232
- }
233
- .cpu-optimized {
234
- background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
235
- font-weight: bold;
236
  }
237
- .status-text {
238
  padding: 10px;
 
239
  border-radius: 5px;
240
- margin: 10px 0;
 
241
  }
242
- .status-success { background-color: #d4edda; color: #155724; }
243
- .status-error { background-color: #f8d7da; color: #721c24; }
244
- .status-warning { background-color: #fff3cd; color: #856404; }
245
  """
246
 
247
- # Main interface optimized for CPU
248
- with gr.Blocks(css=css, title="CPU-Optimized AI Image Generator") as demo:
249
  with gr.Column(elem_id="col-container"):
250
- gr.Markdown("""
251
- # 🖥️ CPU-Optimized AI Image Generator
252
- ### Specially optimized for CPU Basic (2 vCPUs, 16GB RAM)
253
- """)
 
 
254
 
255
- # System info
256
- gr.Markdown("""
257
- <div class="model-card cpu-optimized">
258
- 📊 <strong>System Configuration:</strong> CPU Basic | 2 vCPUs | 16GB RAM | PyTorch CPU
259
- </div>
260
- """)
 
 
 
 
 
 
 
 
261
 
 
262
  with gr.Row():
263
  with gr.Column(scale=3):
264
  prompt = gr.Textbox(
265
- label="✨ Your Creative Prompt",
266
- placeholder="Describe the image you want to create...",
267
- lines=3,
268
- max_lines=5
269
  )
270
  with gr.Column(scale=1):
271
  generate_btn = gr.Button(
272
- "🎨 Generate Image",
273
  variant="primary",
274
- size="lg",
275
- elem_classes="generate-button"
276
  )
277
 
278
- # Model selection with detailed info
279
- with gr.Row():
280
- model_dropdown = gr.Dropdown(
281
- choices=list(MODEL_CONFIGS.keys()),
282
- value="🚀 Tiny SD (Ultra Fast)",
283
- label="🤖 AI Model Selection",
284
- info="Choose based on speed vs quality preference"
285
- )
286
-
287
- # Model info display
288
- model_info_display = gr.Markdown("", elem_classes="model-card")
289
-
290
- # Generated image display
291
- result_image = gr.Image(
292
- label="🖼️ Generated Image",
293
- height=400,
294
- show_label=True
295
  )
296
 
297
- # Status display
298
- status_display = gr.Markdown(
299
- "🚀 Ready to generate! Select a model and enter your prompt.",
300
- elem_classes="status-text status-success"
301
- )
302
 
303
- # Advanced settings in accordion
304
- with gr.Accordion("⚙️ Advanced Settings", open=False):
305
- with gr.Column():
306
- negative_prompt = gr.Textbox(
307
- label="🚫 Negative Prompt (Optional)",
308
- placeholder="What you don't want in the image...",
309
- lines=2
310
- )
311
-
312
- with gr.Row():
313
- seed = gr.Slider(0, MAX_SEED, value=0, label="🎲 Seed")
314
- randomize_seed = gr.Checkbox(label="🔄 Random Seed", value=True)
315
-
316
- with gr.Row():
317
- width = gr.Slider(
318
- 256, 512, value=384, step=64,
319
- label="📏 Width",
320
- info="Lower = faster generation"
321
- )
322
- height = gr.Slider(
323
- 256, 512, value=384, step=64,
324
- label="📐 Height",
325
- info="Lower = faster generation"
326
- )
327
-
328
- with gr.Row():
329
- guidance_scale = gr.Slider(
330
- 1.0, 15.0, value=7.5, step=0.5,
331
- label="🎯 Guidance Scale",
332
- info="How closely to follow the prompt"
333
- )
334
- num_inference_steps = gr.Slider(
335
- 5, 30, value=15, step=1,
336
- label="🔄 Steps",
337
- info="More steps = better quality but slower"
338
- )
339
 
340
- # CPU Performance Tips
341
- with gr.Accordion("💡 CPU Optimization Tips", open=False):
342
- gr.Markdown("""
343
- ### 🚀 For Fastest Generation:
344
- - Use **Tiny SD** model
345
- - Set resolution to **256x256** or **384x384**
346
- - Use **10-15 steps**
347
- - Keep guidance scale around **6-8**
 
 
 
348
 
349
- ### 🎨 For Best Quality:
350
- - Use **Dreamlike** or **SD 1.5** model
351
- - Set resolution to **512x512** (max)
352
- - Use **20-25 steps**
353
- - Guidance scale **7-9**
354
 
355
- ### ⚡ Memory Saving:
356
- - Generate one image at a time
357
- - Use shorter prompts when possible
358
- - Avoid very high guidance scales (>12)
 
 
 
 
 
 
 
 
 
359
  """)
360
 
361
  # Examples
362
- gr.Examples(
363
- examples=examples,
364
- inputs=[prompt],
365
- label="💡 Example Prompts (Click to try!)"
366
- )
367
 
368
- # Footer with helpful info
369
- gr.Markdown("""
370
- ---
371
- <div style="text-align: center; color: #666; font-size: 0.9em;">
372
- 🖥️ Optimized for CPU Basic | Generation time: 30s-3min depending on settings
373
- </div>
374
- """)
375
 
376
- # Function to update model info
377
  def update_model_info(model_name):
378
- config = MODEL_CONFIGS[model_name]
379
- info = f"""
380
- <div class="model-card">
381
- <strong>{model_name}</strong><br>
382
- 📊 <strong>Memory Usage:</strong> {config['memory_usage']} |
383
- ⚡ <strong>Speed:</strong> {config['speed']}<br>
384
- 📏 <strong>Max Resolution:</strong> {config['max_resolution']}px |
385
- 🔄 <strong>Recommended Steps:</strong> {config['default_steps']}<br>
386
- 🎯 <strong>Recommended Guidance:</strong> {config['default_guidance']}
387
- </div>
388
- """
389
- return info, config['default_steps'], config['default_guidance']
390
 
391
  # Event handlers
392
  model_dropdown.change(
393
  update_model_info,
394
  inputs=[model_dropdown],
395
- outputs=[model_info_display, num_inference_steps, guidance_scale]
396
  )
397
 
398
- # Generation handler
399
  generate_btn.click(
400
- generate_image_cpu,
401
  inputs=[
402
  model_dropdown, prompt, negative_prompt,
403
  seed, randomize_seed, width, height,
404
  guidance_scale, num_inference_steps
405
  ],
406
- outputs=[result_image, seed, status_display]
407
  )
408
 
409
- # Auto-trigger model info update on load
410
  demo.load(
411
  update_model_info,
412
  inputs=[model_dropdown],
413
- outputs=[model_info_display, num_inference_steps, guidance_scale]
414
  )
415
 
416
  if __name__ == "__main__":
417
  demo.launch(
418
  share=True,
419
- server_name="0.0.0.0",
420
  server_port=7860,
421
  show_error=True,
422
  quiet=True
 
3
  import random
4
  import torch
5
  import gc
 
6
  import warnings
7
+ from typing import Optional, Tuple
8
+ import os
9
+
10
+ # Suppress warnings
11
  warnings.filterwarnings("ignore")
12
+ os.environ["TRANSFORMERS_VERBOSITY"] = "error"
13
 
14
# Import compatible with older versions of the diffusion stack.
try:
    from diffusers import StableDiffusionPipeline, DiffusionPipeline
    from diffusers import DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler
    DIFFUSERS_AVAILABLE = True
except ImportError:
    DIFFUSERS_AVAILABLE = False

# BUGFIX: probe transformers unconditionally. check_system() reads
# TRANSFORMERS_AVAILABLE regardless of whether diffusers imported, but the
# original only assigned it inside the `if not DIFFUSERS_AVAILABLE` branch,
# raising NameError on the happy path (diffusers present).
try:
    from transformers import CLIPTokenizer, CLIPTextModel
    from transformers import AutoTokenizer, AutoModel
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False

# Force CPU and keep the thread count matched to the 2-vCPU host.
device = "cpu"
torch.set_num_threads(2)
MAX_SEED = np.iinfo(np.int32).max  # upper bound for the seed slider
35
 
36
# Compatible model configurations for older diffusers/torch versions.
# NOTE: the keys are displayed verbatim in the model gr.Dropdown, so each
# key must match the dropdown's choices/default value character-for-character.
MODEL_CONFIGS = {
    "🚀 CompVis SD 1.4 (Fast)": {
        "repo_id": "CompVis/stable-diffusion-v1-4",
        "revision": "main",
        "torch_dtype": torch.float32,
        "max_resolution": 512,
        "default_steps": 15,
        "default_guidance": 7.5,
        "memory_usage": "Low",
        "compatible": True
    },
    # BUGFIX: this key previously lacked the "⚡ " prefix, so the dropdown
    # default "⚡ Runway SD 1.5 (Recommended)" never matched a config entry
    # and the default selection failed to resolve.
    "⚡ Runway SD 1.5 (Recommended)": {
        "repo_id": "runwayml/stable-diffusion-v1-5",
        "revision": "main",
        "torch_dtype": torch.float32,
        "max_resolution": 512,
        "default_steps": 20,
        "default_guidance": 7.5,
        "memory_usage": "Medium",
        "compatible": True
    },
    "🎨 OpenJourney v4 (Artistic)": {
        "repo_id": "prompthero/openjourney-v4",
        "revision": "main",
        "torch_dtype": torch.float32,
        "max_resolution": 512,
        "default_steps": 18,
        "default_guidance": 8.0,
        "memory_usage": "Medium",
        "compatible": True
    },
    "🌟 Anything v3 (Anime Style)": {
        "repo_id": "Linaqruf/anything-v3.0",
        "revision": "main",
        "torch_dtype": torch.float32,
        "max_resolution": 512,
        "default_steps": 20,
        "default_guidance": 8.5,
        "memory_usage": "Medium",
        "compatible": True
    }
}
79
 
80
# Module-level cache: at most one pipeline is kept resident so the app
# fits within the 16 GB RAM of the CPU Basic tier.
current_pipeline = None
current_model_name = None

def clear_memory():
    """Drop any cached pipeline and force a garbage-collection pass."""
    global current_pipeline
    stale = current_pipeline
    current_pipeline = None
    if stale is not None:
        del stale
    gc.collect()
    # No-op on this CPU-only space, but harmless if CUDA ever shows up.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
 
93
 
94
def load_pipeline_safe(model_name: str):
    """Load a Stable Diffusion pipeline with maximum version compatibility.

    Returns a ``(pipeline, status_message)`` tuple; ``pipeline`` is ``None``
    on failure and the message explains why. The previously loaded pipeline
    is reused when the same model name is requested again.
    """
    global current_pipeline, current_model_name

    # Cache hit: same model already resident.
    if current_model_name == model_name and current_pipeline is not None:
        return current_pipeline, "✅ Using cached model"

    clear_memory()

    if not DIFFUSERS_AVAILABLE:
        return None, "❌ Diffusers library not available. Please install compatible versions."

    try:
        config = MODEL_CONFIGS[model_name]

        # Load with maximum compatibility across older diffusers releases.
        pipe = StableDiffusionPipeline.from_pretrained(
            config["repo_id"],
            revision=config.get("revision", "main"),
            torch_dtype=config["torch_dtype"],
            safety_checker=None,
            requires_safety_checker=False,
            use_auth_token=False,
            cache_dir=None,
            local_files_only=False,
            low_cpu_mem_usage=True,
            ignore_mismatched_sizes=True
        )

        # Move to CPU and optimize.
        pipe = pipe.to(device)

        # Enable attention slicing for memory efficiency (guarded: older
        # pipeline classes may not expose the method).
        if hasattr(pipe, 'enable_attention_slicing'):
            pipe.enable_attention_slicing(1)

        # Prefer a DDIM scheduler; fall back to whatever the checkpoint
        # shipped with if the swap fails.
        if hasattr(pipe, 'scheduler'):
            try:
                pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
            except Exception:  # BUGFIX: was a bare `except:` — that would also
                # swallow SystemExit/KeyboardInterrupt.
                pass  # Keep original scheduler if DDIMScheduler fails

        # channels_last layout tends to help conv throughput on CPU.
        if hasattr(pipe.unet, 'to'):
            pipe.unet.to(memory_format=torch.channels_last)
        if hasattr(pipe.vae, 'to'):
            pipe.vae.to(memory_format=torch.channels_last)

        current_pipeline = pipe
        current_model_name = model_name

        return pipe, f"✅ {model_name} loaded successfully!"

    except Exception as e:
        # Classify common failure modes into actionable messages.
        error_msg = str(e)
        if "torch" in error_msg and "float8" in error_msg:
            return None, "❌ Version incompatibility. Please update PyTorch to >= 2.1 or use older Diffusers version."
        elif "out of memory" in error_msg.lower():
            return None, "❌ Out of memory. Try using a different model or restart the space."
        else:
            return None, f"❌ Failed to load model: {error_msg[:200]}..."
156
 
157
+ def generate_image_compatible(
158
  model_name: str,
159
  prompt: str,
160
  negative_prompt: str,
 
170
  if not prompt.strip():
171
  return None, seed, "⚠️ Please enter a prompt"
172
 
173
+ # Load pipeline
174
+ pipe, status = load_pipeline_safe(model_name)
175
  if pipe is None:
176
  return None, seed, status
177
 
 
182
 
183
  generator = torch.Generator().manual_seed(seed)
184
 
185
+ # Apply constraints for CPU
186
  config = MODEL_CONFIGS[model_name]
187
  max_res = config["max_resolution"]
188
  width = min(width, max_res)
 
192
  width = (width // 8) * 8
193
  height = (height // 8) * 8
194
 
195
+ # Limit steps for CPU performance
196
  num_inference_steps = min(num_inference_steps, 30)
197
 
198
+ progress(0, desc="Initializing generation...")
199
 
200
+ # Generate with error handling
201
  with torch.no_grad():
202
+ try:
203
+ result = pipe(
204
+ prompt=prompt,
205
+ negative_prompt=negative_prompt if negative_prompt.strip() else None,
206
+ guidance_scale=guidance_scale,
207
+ num_inference_steps=num_inference_steps,
208
+ width=width,
209
+ height=height,
210
+ generator=generator,
211
+ )
212
+ image = result.images[0]
213
+ except Exception as gen_error:
214
+ # Fallback: try with minimal parameters
215
+ if "memory" in str(gen_error).lower():
216
+ width, height = 256, 256
217
+ num_inference_steps = 10
218
+ result = pipe(
219
+ prompt=prompt,
220
+ guidance_scale=guidance_scale,
221
+ num_inference_steps=num_inference_steps,
222
+ width=width,
223
+ height=height,
224
+ generator=generator,
225
+ )
226
+ image = result.images[0]
227
+ else:
228
+ raise gen_error
229
 
230
+ # Cleanup
231
  del result
232
  gc.collect()
233
 
 
236
  except Exception as e:
237
  error_msg = f"❌ Generation failed: {str(e)}"
238
  if "memory" in str(e).lower() or "out of" in str(e).lower():
239
+ error_msg += "\n💡 Try: 256x256 resolution, 10 steps, or restart the space"
240
+ elif "CUDA" in str(e):
241
+ error_msg += "\n💡 CUDA error detected, using CPU fallback"
242
  return None, seed, error_msg
243
 
244
# Check system compatibility
def check_system():
    """Check system compatibility and return a human-readable status report."""
    status = []

    # Check PyTorch
    status.append(f"🔧 PyTorch: {torch.__version__}")

    # Check diffusers. BUGFIX: read the availability flags via
    # globals().get(...) — TRANSFORMERS_AVAILABLE is only assigned when the
    # diffusers import failed, so referencing it directly raised NameError
    # on the happy path.
    if globals().get("DIFFUSERS_AVAILABLE", False):
        try:
            import diffusers
            status.append(f"✅ Diffusers: {diffusers.__version__}")
        except Exception:  # narrowed from bare `except:`
            status.append("⚠️ Diffusers: Version unknown")
    else:
        status.append("❌ Diffusers: Not available")

    # Check transformers
    if globals().get("TRANSFORMERS_AVAILABLE", False):
        try:
            import transformers
            status.append(f"✅ Transformers: {transformers.__version__}")
        except Exception:  # narrowed from bare `except:`
            status.append("⚠️ Transformers: Version unknown")
    else:
        status.append("❌ Transformers: Not available")

    # Check device ("cpu" default keeps this callable before module init).
    status.append(f"🖥️ Device: {globals().get('device', 'cpu').upper()}")
    status.append(f"🧵 CPU Threads: {torch.get_num_threads()}")

    return "\n".join(status)
278
+
279
# Starter prompts shown under the input box; kept short and generic so they
# render acceptably on every model in MODEL_CONFIGS.
examples = [
    "a beautiful landscape with mountains and lake",
    "portrait of a cat, digital art style",
    "colorful flowers in a garden, painting",
    "medieval castle on a hill, fantasy art",
    "astronaut in space, realistic style",
    "cozy coffee shop interior, warm lighting",
]
288
 
289
# Lightweight stylesheet passed to gr.Blocks(css=...); deliberately minimal
# to stay compatible with older Gradio releases.
css = """
#col-container {
    margin: 0 auto;
    max-width: 800px;
    padding: 20px;
}
.info-box {
    padding: 15px;
    margin: 10px 0;
    border-radius: 8px;
    background-color: #f8f9fa;
    border-left: 4px solid #007bff;
}
.model-info {
    padding: 10px;
    margin: 5px 0;
    border-radius: 5px;
    background-color: #e9ecef;
    font-size: 0.9em;
}
.status-success { background-color: #d4edda; color: #155724; padding: 10px; border-radius: 5px; }
.status-error { background-color: #f8d7da; color: #721c24; padding: 10px; border-radius: 5px; }
"""
313
 
314
+ # Main interface with maximum compatibility
315
+ with gr.Blocks(css=css, title="Compatible AI Image Generator") as demo:
316
  with gr.Column(elem_id="col-container"):
317
+ gr.Markdown("# 🎨 Compatible AI Image Generator")
318
+ gr.Markdown("### Optimized for CPU Basic with older PyTorch versions")
319
+
320
+ # System status
321
+ with gr.Accordion("🔧 System Status", open=False):
322
+ system_status = gr.Markdown(check_system())
323
 
324
+ # Warning for incompatible systems
325
+ if not DIFFUSERS_AVAILABLE:
326
+ gr.Markdown("""
327
+ <div style="background-color: #f8d7da; color: #721c24; padding: 15px; border-radius: 8px; margin: 10px 0;">
328
+ ⚠️ <strong>Compatibility Issue Detected</strong><br>
329
+ Please update your requirements.txt with compatible versions:<br>
330
+ <code>
331
+ torch>=2.1.0<br>
332
+ diffusers>=0.21.0,<0.25.0<br>
333
+ transformers>=4.25.0,<4.35.0<br>
334
+ accelerate>=0.20.0<br>
335
+ </code>
336
+ </div>
337
+ """)
338
 
339
+ # Main interface
340
  with gr.Row():
341
  with gr.Column(scale=3):
342
  prompt = gr.Textbox(
343
+ label="✨ Describe your image",
344
+ placeholder="Enter your creative prompt here...",
345
+ lines=3
 
346
  )
347
  with gr.Column(scale=1):
348
  generate_btn = gr.Button(
349
+ "🎨 Generate",
350
  variant="primary",
351
+ size="lg"
 
352
  )
353
 
354
+ # Model selection
355
+ model_dropdown = gr.Dropdown(
356
+ choices=list(MODEL_CONFIGS.keys()),
357
+ value="⚡ Runway SD 1.5 (Recommended)",
358
+ label="🤖 AI Model",
359
+ info="All models optimized for CPU usage"
 
 
 
 
 
 
 
 
 
 
 
360
  )
361
 
362
+ # Model info
363
+ model_info = gr.Markdown("", elem_classes="model-info")
 
 
 
364
 
365
+ # Result
366
+ result_image = gr.Image(label="Generated Image", height=400)
367
+ status_text = gr.Markdown("🚀 Ready to generate!", elem_classes="status-success")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368
 
369
+ # Settings
370
+ with gr.Accordion("⚙️ Generation Settings", open=False):
371
+ negative_prompt = gr.Textbox(
372
+ label="🚫 Negative Prompt",
373
+ placeholder="What you don't want...",
374
+ lines=2
375
+ )
376
+
377
+ with gr.Row():
378
+ seed = gr.Slider(0, MAX_SEED, value=0, label="🎲 Seed")
379
+ randomize_seed = gr.Checkbox(label="🔄 Random", value=True)
380
 
381
+ with gr.Row():
382
+ width = gr.Slider(256, 512, value=384, step=64, label="Width")
383
+ height = gr.Slider(256, 512, value=384, step=64, label="Height")
 
 
384
 
385
+ with gr.Row():
386
+ guidance_scale = gr.Slider(1.0, 15.0, value=7.5, step=0.5, label="Guidance")
387
+ num_inference_steps = gr.Slider(5, 30, value=15, step=1, label="Steps")
388
+
389
+ # Tips
390
+ with gr.Accordion("💡 Performance Tips", open=False):
391
+ gr.Markdown("""
392
+ ### For Best Results on CPU Basic:
393
+ - **Fast Generation**: Use 256x256, 10-15 steps
394
+ - **Quality Generation**: Use 384x384, 20 steps
395
+ - **Maximum Quality**: Use 512x512, 25 steps (slower)
396
+ - **Memory Issues**: Restart the space if you get memory errors
397
+ - **Compatibility**: Update PyTorch to 2.1+ for best performance
398
  """)
399
 
400
  # Examples
401
+ gr.Examples(examples=examples, inputs=[prompt])
 
 
 
 
402
 
403
+ # Footer
404
+ gr.Markdown("---")
405
+ gr.Markdown("🖥️ **CPU Optimized** | Generation time: 30s-3min depending on settings")
 
 
 
 
406
 
407
+ # Update model info
408
  def update_model_info(model_name):
409
+ if model_name in MODEL_CONFIGS:
410
+ config = MODEL_CONFIGS[model_name]
411
+ info = f"""
412
+ **Memory Usage:** {config['memory_usage']} | **Max Resolution:** {config['max_resolution']}px
413
+ **Recommended:** {config['default_steps']} steps, {config['default_guidance']} guidance
414
+ """
415
+ return info, config['default_steps'], config['default_guidance']
416
+ return "", 15, 7.5
 
 
 
 
417
 
418
  # Event handlers
419
  model_dropdown.change(
420
  update_model_info,
421
  inputs=[model_dropdown],
422
+ outputs=[model_info, num_inference_steps, guidance_scale]
423
  )
424
 
 
425
  generate_btn.click(
426
+ generate_image_compatible,
427
  inputs=[
428
  model_dropdown, prompt, negative_prompt,
429
  seed, randomize_seed, width, height,
430
  guidance_scale, num_inference_steps
431
  ],
432
+ outputs=[result_image, seed, status_text]
433
  )
434
 
435
+ # Initialize model info
436
  demo.load(
437
  update_model_info,
438
  inputs=[model_dropdown],
439
+ outputs=[model_info, num_inference_steps, guidance_scale]
440
  )
441
 
442
  if __name__ == "__main__":
443
  demo.launch(
444
  share=True,
445
+ server_name="0.0.0.0",
446
  server_port=7860,
447
  show_error=True,
448
  quiet=True