alexander00001 committed on
Commit fed79e5 · verified · 1 Parent(s): a94e6e4

Update app.py

Files changed (1)
  1. app.py +205 -315
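
The functional core of this update is the conditional ZeroGPU decorator, whose requested GPU slot shrinks from 120 to 60 seconds. A minimal standalone sketch of that pattern, mirroring the `apply_spaces_decorator` defined in the new app.py (`spaces.GPU` is the Hugging Face Spaces API the file already imports):

    try:
        import spaces              # only available when running on Hugging Face Spaces
        SPACES_AVAILABLE = True
    except ImportError:
        SPACES_AVAILABLE = False

    def apply_spaces_decorator(func):
        """Wrap func with spaces.GPU on ZeroGPU; return it unchanged elsewhere."""
        if SPACES_AVAILABLE:
            # Request a 60-second GPU slot per call, matching the duration introduced by this commit
            return spaces.GPU(duration=60)(func)
        return func

Any function wrapped this way has to finish its GPU work inside the requested duration, which is why the new version also caps steps at 15 and defaults to 768x768.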
app.py CHANGED
@@ -1,13 +1,13 @@
- # ===== spaces must be imported first =====
  try:
      import spaces
      SPACES_AVAILABLE = True
-     print("✅ Spaces available - ZeroGPU mode")
  except ImportError:
      SPACES_AVAILABLE = False
-     print("⚠️ Spaces not available - running in regular mode")

- # ===== Other imports =====
  import os
  from datetime import datetime
  import random
@@ -18,190 +18,126 @@ from PIL import Image
  import traceback
  import numpy as np
  import gc

- # ===== Configuration constants =====
- COMPEL_AVAILABLE = False
- print("⚠️ Compel disabled for FLUX compatibility")

  STYLE_PRESETS = {
      "None": "",
-     "Realistic": "photorealistic, 8k, ultra-detailed, cinematic lighting, masterpiece",
-     "Anime": "anime style, detailed, high quality, masterpiece, best quality",
-     "Comic": "comic book style, bold outlines, vibrant colors, cel shading",
-     "Watercolor": "watercolor illustration, soft gradients, pastel palette"
  }

- FIXED_MODEL = "aoxo/flux.1dev-abliterated"
-
- QUALITY_ENHANCERS = [
-     "detailed anatomy", "perfect anatomy", "soft skin",
-     "high resolution", "masterpiece", "best quality",
-     "professional photography", "natural lighting"
- ]
-
- STYLE_ENHANCERS = {
-     "Realistic": ["photorealistic", "ultra realistic", "natural lighting"],
-     "Anime": ["anime style", "high quality anime", "detailed eyes"],
-     "Comic": ["comic book style", "bold outlines", "vibrant colors"],
-     "Watercolor": ["watercolor style", "artistic", "soft gradients"]
- }
-
- SAVE_DIR = "generated_images"
- os.makedirs(SAVE_DIR, exist_ok=True)
-
  # ===== Global variables =====
  pipeline = None
  device = None
  model_loaded = False


- # ===== Utility functions (must be defined before the decorators) =====
  def cleanup_memory():
-     """Free GPU memory"""
      if torch.cuda.is_available():
          torch.cuda.empty_cache()
          torch.cuda.synchronize()
-     gc.collect()


- def enhance_prompt(prompt: str, style: str) -> str:
-     """Enhance the prompt"""
-     quality_terms = ", ".join(QUALITY_ENHANCERS[:3])
-
-     style_terms = ""
-     if style in STYLE_ENHANCERS:
-         style_terms = ", " + ", ".join(STYLE_ENHANCERS[style][:2])
-
      style_suffix = STYLE_PRESETS.get(style, "")

-     enhanced_parts = [prompt.strip()]
-
      if style_suffix:
-         enhanced_parts.append(style_suffix)
-
-     if style_terms:
-         enhanced_parts.append(style_terms.lstrip(", "))
-
-     enhanced_parts.append(quality_terms)
-
-     enhanced_prompt = ", ".join(filter(None, enhanced_parts))

-     if len(enhanced_prompt) > 800:
-         enhanced_prompt = enhanced_prompt[:800]

-     return enhanced_prompt


- def create_metadata_content(prompt, enhanced_prompt, seed, steps, cfg_scale, width, height, style):
-     """Build the metadata content"""
-     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-     return f"""Generated Image Metadata
- ======================
- Timestamp: {timestamp}
- Original Prompt: {prompt}
- Enhanced Prompt: {enhanced_prompt}
- Seed: {seed}
- Steps: {steps}
- CFG Scale: {cfg_scale}
- Dimensions: {width}x{height}
- Style: {style}
- Model: FLUX.1-dev
- """
-
-
- # ===== Decorator definition (must come before use) =====
- def apply_spaces_decorator(func):
-     """Apply the spaces decorator with a longer timeout"""
-     if SPACES_AVAILABLE:
-         return spaces.GPU(duration=120)(func)
-     return func
-
-
- # ===== Model-related functions =====
  def initialize_model():
-     """Optimized model initialization"""
      global pipeline, device, model_loaded

      if model_loaded and pipeline is not None:
          return True

      try:
-         cleanup_memory()
-
          device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-         print(f"🖥️ Using device: {device}")

-         print(f"📦 Loading fixed model: {FIXED_MODEL}")

          pipeline = AutoPipelineForText2Image.from_pretrained(
              FIXED_MODEL,
-             torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
-             variant=None,
-             use_safetensors=True
          )

          pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
              pipeline.scheduler.config
          )
          pipeline = pipeline.to(device)

          if torch.cuda.is_available():
-             # Key optimization: use sequential instead of model CPU offload
-             pipeline.enable_sequential_cpu_offload()
              pipeline.enable_vae_slicing()
              pipeline.enable_vae_tiling()

-         print("✅ Model initialization complete (Optimized)")
          model_loaded = True
          return True

      except Exception as e:
-         print(f"❌ Critical model loading error: {e}")
-         print(traceback.format_exc())
-         cleanup_memory()
-         model_loaded = False
          return False


  @apply_spaces_decorator
- def generate_image(prompt: str, style: str, negative_prompt: str = "",
-                    steps: int = 15, cfg_scale: float = 3.5,
-                    seed: int = -1, width: int = 1024, height: int = 1024,
-                    progress=gr.Progress()):
-     """Image generation (optimized version)"""
      try:
-         if not prompt or prompt.strip() == "":
-             return None, "", "❌ Please enter a prompt"
-
-         # Optimized parameter limits
-         steps = max(10, min(steps, 25))
-         width = min(width, 1024)
-         height = min(height, 1024)
-
-         progress(0.1, desc="Initializing model...")
-         if not initialize_model():
-             cleanup_memory()
-             return None, "", "❌ Failed to initialize model"
-
-         progress(0.2, desc="Processing prompt...")

          if seed == -1:
-             seed = random.randint(0, np.iinfo(np.int32).max)

-         enhanced_prompt = enhance_prompt(prompt.strip(), style)

-         if not negative_prompt.strip():
-             negative_prompt = "(low quality, worst quality:1.4), (bad anatomy, bad hands:1.2), blurry, deformed"

          generator = torch.Generator("cpu").manual_seed(seed)

-         progress(0.4, desc="Starting generation...")
-         print(f"🔥 Inference: steps={steps}, guidance={cfg_scale}, size={width}x{height}")

          cleanup_memory()

-         # Key change: raise max_sequence_length
-         with torch.no_grad():
              result = pipeline(
                  prompt=enhanced_prompt,
                  negative_prompt=negative_prompt,
@@ -209,236 +145,187 @@ def generate_image(prompt: str, style: str, negative_prompt: str = "",
                  guidance_scale=cfg_scale,
                  width=width,
                  height=height,
-                 max_sequence_length=512,  # raised from 256 to 512
                  generator=generator,
                  output_type="pil"
              )

          image = result.images[0]
-         print("✅ Inference complete")

-         progress(0.9, desc="Finalizing...")

-         del result
          cleanup_memory()

-         filename = f"IMG_{seed}.png"
-         filepath = os.path.join(SAVE_DIR, filename)
-         image.save(filepath, format="PNG", optimize=True)

-         metadata_content = create_metadata_content(
-             prompt, enhanced_prompt, seed, steps, cfg_scale, width, height, style
          )

-         progress(1.0, desc="Complete!")

-         generation_info = f"Prompt: {prompt}\nSeed: {seed} | Size: {width}×{height} | Steps: {steps} | CFG: {cfg_scale}"

-         return image, generation_info, metadata_content

-     except torch.cuda.OutOfMemoryError as e:
-         cleanup_memory()
-         error_msg = "❌ GPU memory insufficient. Try 768x768 or fewer steps."
-         print(f"CUDA OOM: {error_msg}")
-         return None, "", error_msg

      except Exception as e:
          cleanup_memory()
-         error_msg = str(e)
-         print(f"❌ Generation error: {error_msg}")
-         print(traceback.format_exc())
-         return None, "", f"❌ Generation failed: {error_msg}"


- # ===== CSS styles =====
- css = """
- /* keep the original CSS unchanged */
- .gradio-container {
-     max-width: 100% !important;
-     margin: 0 !important;
-     padding: 0 !important;
-     background: linear-gradient(135deg, #e6a4f2 0%, #1197e4 100%) !important;
-     min-height: 100vh !important;
- }
-
- .main-content {
-     background: rgba(255, 255, 255, 0.95) !important;
-     border-radius: 20px !important;
-     padding: 20px !important;
-     margin: 15px !important;
-     box-shadow: 0 10px 25px rgba(0,0,0,0.2) !important;
- }
-
- .title {
-     text-align: center !important;
-     background: linear-gradient(45deg, #bb6ded, #08676b) !important;
-     -webkit-background-clip: text !important;
-     -webkit-text-fill-color: transparent !important;
-     font-size: 2rem !important;
-     margin-bottom: 15px !important;
-     font-weight: bold !important;
- }
-
- .generate-btn {
-     background: linear-gradient(45deg, #bb6ded, #08676b) !important;
-     color: white !important;
-     border: none !important;
-     padding: 15px 25px !important;
-     border-radius: 25px !important;
-     font-size: 16px !important;
-     font-weight: bold !important;
-     width: 100% !important;
- }
- """
-
-
- # ===== Build the UI =====
  def create_interface():
-     with gr.Blocks(css=css, title="NSFW FLUX Image Generator") as interface:
-         with gr.Column(elem_classes=["main-content"]):
-             gr.HTML('<div class="title">NSFW FLUX Image Generator</div>')
-
-             gr.HTML('<div class="warning-box">⚠️ 18+ CONTENT WARNING ⚠️</div>')
-
-             with gr.Row():
-                 with gr.Column(scale=2):
-                     prompt_input = gr.Textbox(
-                         label="Main Prompt",
-                         placeholder="beautiful woman, detailed portrait...",
-                         lines=6,
-                         elem_classes=["prompt-box"]
-                     )
-
-                     gr.HTML('''
-                     <div style="background: rgba(255, 193, 7, 0.1); padding: 10px; border-radius: 8px; margin: 10px 0;">
-                     <small><strong>💡 Optimized Settings:</strong><br>
-                     • Max sequence length: 512 tokens (supports longer prompts)<br>
-                     • Recommended steps: 15-20 for best speed/quality<br>
-                     • Try 768x768 for faster generation</small>
-                     </div>
-                     ''')
-
-                     negative_prompt_input = gr.Textbox(
-                         label="Negative Prompt (Optional)",
-                         placeholder="low quality, blurry...",
-                         lines=3,
-                         elem_classes=["prompt-box"]
-                     )

-                 with gr.Column(scale=1):
-                     with gr.Group():
-                         style_input = gr.Radio(
-                             label="Style Preset",
-                             choices=list(STYLE_PRESETS.keys()),
-                             value="Realistic"
-                         )
-
-                     with gr.Group():
-                         seed_input = gr.Number(
-                             label="Seed (-1 for random)",
-                             value=-1,
-                             precision=0
-                         )
-
-                     with gr.Group():
-                         size_preset = gr.Radio(
-                             label="Size (smaller = faster)",
-                             choices=["768x768", "1024x1024"],
-                             value="768x768"
-                         )
-
-                     with gr.Group():
-                         steps_input = gr.Slider(
-                             label="Steps (15-20 recommended)",
-                             minimum=10,
-                             maximum=25,
-                             value=15,
-                             step=1
-                         )
-
-                         cfg_input = gr.Slider(
-                             label="CFG Scale",
-                             minimum=1.0,
-                             maximum=15.0,
-                             value=3.5,
-                             step=0.1
-                         )
-
-                     generate_button = gr.Button(
-                         "GENERATE",
-                         elem_classes=["generate-btn"],
-                         variant="primary"
-                     )
-
-             image_output = gr.Image(
-                 label="Generated Image",
-                 elem_classes=["image-output"],
-                 show_label=False
-             )
-
-             generation_info = gr.Textbox(
-                 label="Generation Info",
-                 interactive=False,
-                 visible=False
-             )
-
-             metadata_content = gr.Textbox(visible=False)
-             current_seed = gr.Number(visible=False)
-             current_image = gr.Image(visible=False)
-
-             with gr.Row(visible=False) as download_row:
-                 download_image_btn = gr.Button("Save Image", size="sm")
-                 download_metadata_btn = gr.Button("Save Metadata", size="sm")
-
-             def parse_size(size_str):
-                 """Parse the size string"""
-                 size = int(size_str.split('x')[0])
-                 return size, size
-
-             def on_generate(prompt, style, neg_prompt, steps, cfg, seed, size_preset):
-                 width, height = parse_size(size_preset)
-                 image, info, metadata = generate_image(
-                     prompt, style, neg_prompt, steps, cfg, seed, width, height
-                 )

-                 if image is not None:
-                     try:
-                         actual_seed = seed if seed != -1 else int(info.split("Seed:")[1].split("|")[0].strip())
-                     except:
-                         actual_seed = seed if seed != -1 else random.randint(0, 999999)

-                     return (
-                         image, info, metadata, actual_seed, image,
-                         gr.update(visible=True), gr.update(visible=True)
                      )
-                 else:
-                     return (
-                         None, info, "", 0, None,
-                         gr.update(visible=False), gr.update(visible=False)
                      )

          generate_button.click(
-             fn=on_generate,
              inputs=[
-                 prompt_input, style_input, negative_prompt_input,
                  steps_input, cfg_input, seed_input, size_preset
              ],
              outputs=[
-                 image_output, generation_info, metadata_content,
-                 current_seed, current_image, generation_info, download_row
              ],
              show_progress=True
          )

          prompt_input.submit(
-             fn=on_generate,
              inputs=[
-                 prompt_input, style_input, negative_prompt_input,
                  steps_input, cfg_input, seed_input, size_preset
              ],
              outputs=[
-                 image_output, generation_info, metadata_content,
-                 current_seed, current_image, generation_info, download_row
              ],
              show_progress=True
          )
@@ -446,14 +333,17 @@ def create_interface():
      return interface


- # ===== Launch the app =====
  if __name__ == "__main__":
-     print("🎨 Starting NSFW FLUX Image Generator (Optimized)...")
-     print(f"🔧 Fixed Model: {FIXED_MODEL}")
-     print(f"🔧 CUDA: {'✅ Available' if torch.cuda.is_available() else '❌ Not Available'}")

      app = create_interface()
-     app.queue(max_size=5, default_concurrency_limit=1)

      app.launch(
          server_name="0.0.0.0",
 
+ # ===== ZeroGPU timeout optimization, final version =====
+
  try:
      import spaces
      SPACES_AVAILABLE = True
+     print("✅ ZeroGPU mode enabled")
  except ImportError:
      SPACES_AVAILABLE = False
+     print("⚠️ Running in regular mode")

  import os
  from datetime import datetime
  import random

  import traceback
  import numpy as np
  import gc
+ import warnings
+ warnings.filterwarnings('ignore')

+ # ===== Configuration =====
+ FIXED_MODEL = "aoxo/flux.1dev-abliterated"
+ SAVE_DIR = "generated_images"
+ os.makedirs(SAVE_DIR, exist_ok=True)

  STYLE_PRESETS = {
      "None": "",
+     "Realistic": "photorealistic, detailed",
+     "Anime": "anime style, high quality",
+     "Comic": "comic book style",
+     "Watercolor": "watercolor painting"
  }

  # ===== Global variables =====
  pipeline = None
  device = None
  model_loaded = False


  def cleanup_memory():
+     """Aggressive memory cleanup"""
+     gc.collect()
      if torch.cuda.is_available():
          torch.cuda.empty_cache()
          torch.cuda.synchronize()


+ def apply_spaces_decorator(func):
+     """ZeroGPU decorator - 60-second limit"""
+     if SPACES_AVAILABLE:
+         # ZeroGPU actually only grants 60 seconds!
+         return spaces.GPU(duration=60)(func)
+     return func
+
+
+ def enhance_prompt_minimal(prompt: str, style: str) -> str:
+     """Minimal prompt enhancement - keep the length tightly bounded"""
      style_suffix = STYLE_PRESETS.get(style, "")

      if style_suffix:
+         enhanced = f"{prompt}, {style_suffix}, masterpiece"
+     else:
+         enhanced = f"{prompt}, masterpiece"

+     # CLIP hard limit: 77 tokens ≈ 200-250 characters
+     if len(enhanced) > 200:
+         enhanced = prompt[:180] + ", masterpiece"
+         print(f"⚠️ Prompt truncated to fit CLIP limit")

+     return enhanced


+ # ===== Model initialization split out (no GPU decorator) =====
  def initialize_model():
+     """Model initialization - does not consume GPU time"""
      global pipeline, device, model_loaded

      if model_loaded and pipeline is not None:
          return True

      try:
          device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         print(f"🖥️ Device: {device}")

+         print(f"📦 Loading: {FIXED_MODEL}")

          pipeline = AutoPipelineForText2Image.from_pretrained(
              FIXED_MODEL,
+             dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
+             use_safetensors=True,
          )

          pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
              pipeline.scheduler.config
          )
+
+         # Key optimization: no offload, load everything onto the device
          pipeline = pipeline.to(device)

+         # Keep only the most essential optimizations
          if torch.cuda.is_available():
              pipeline.enable_vae_slicing()
              pipeline.enable_vae_tiling()

+         print("✅ Model ready")
          model_loaded = True
          return True

      except Exception as e:
+         print(f"❌ Init failed: {e}")
          return False


  @apply_spaces_decorator
+ def generate_image_fast(prompt: str, style: str, negative_prompt: str,
+                         steps: int, cfg_scale: float, seed: int,
+                         width: int, height: int):
+     """Ultra-fast generation - must finish within 60 seconds"""
      try:
+         print(f"⏱️ GPU timer started (60s limit)")

          if seed == -1:
+             seed = random.randint(0, 999999)

+         enhanced_prompt = enhance_prompt_minimal(prompt, style)

+         if not negative_prompt:
+             negative_prompt = "low quality, blurry"

          generator = torch.Generator("cpu").manual_seed(seed)

+         print(f"🚀 Generating: {steps} steps, {width}x{height}")

          cleanup_memory()

+         # Minimal inference arguments
+         with torch.inference_mode():  # faster than no_grad
              result = pipeline(
                  prompt=enhanced_prompt,
                  negative_prompt=negative_prompt,

                  guidance_scale=cfg_scale,
                  width=width,
                  height=height,
                  generator=generator,
                  output_type="pil"
              )

          image = result.images[0]
+         del result
+         cleanup_memory()

+         print(f"✅ Done in <60s")
+         return image, seed

+     except Exception as e:
          cleanup_memory()
+         print(f"❌ Error: {e}")
+         raise e
+
+
+ def generate_wrapper(prompt, style, neg_prompt, steps, cfg, seed, size_preset, progress=gr.Progress()):
+     """Wrapper function - handles the UI logic"""
+     try:
+         if not prompt.strip():
+             return None, "❌ Enter a prompt", "", None

+         # Parse the size preset
+         if size_preset == "512x512 (Ultra Fast)":
+             width = height = 512
+         elif size_preset == "768x768 (Fast)":
+             width = height = 768
+         else:
+             width = height = 1024

+         # Clamp the step count
+         steps = max(8, min(steps, 15))
+
+         progress(0.1, desc="Initializing...")
+
+         # Pre-load the model (not counted against GPU time)
+         if not initialize_model():
+             return None, "❌ Model init failed", "", None
+
+         progress(0.2, desc="Generating (30-50s)...")
+
+         # Call the GPU function
+         image, actual_seed = generate_image_fast(
+             prompt, style, neg_prompt, steps, cfg, seed, width, height
          )

+         progress(0.9, desc="Saving...")

+         filename = f"IMG_{actual_seed}.png"
+         filepath = os.path.join(SAVE_DIR, filename)
+         image.save(filepath)

+         metadata = f"""Generated: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
+ Prompt: {prompt}
+ Style: {style}
+ Seed: {actual_seed}
+ Steps: {steps} | CFG: {cfg}
+ Size: {width}x{height}
+ """

+         info = f"Seed: {actual_seed} | {width}×{height} | {steps} steps"
+
+         progress(1.0, desc="Complete!")
+
+         return image, info, metadata, image

      except Exception as e:
          cleanup_memory()
+         error_msg = f"Generation failed: {str(e)[:100]}"
+         print(f"❌ {error_msg}")
+         return None, error_msg, "", None


+ # ===== UI =====
  def create_interface():
+     with gr.Blocks(title="Fast FLUX Generator") as interface:
+         gr.HTML('<h1 style="text-align:center">⚡ Fast FLUX Generator</h1>')
+
+         gr.HTML('''
+         <div style="background:#fff3cd;padding:10px;border-radius:8px;margin:10px 0;">
+         <strong>⚠️ ZeroGPU Limits:</strong><br>
+         60 second GPU timeout (hard limit)<br>
+         Recommended: 512x512 or 768x768, 10-15 steps<br>
+         Keep prompts under 200 characters
+         </div>
+         ''')
+
+         with gr.Row():
+             with gr.Column(scale=2):
+                 prompt_input = gr.Textbox(
+                     label="Prompt (keep it short!)",
+                     placeholder="woman, portrait, detailed",
+                     lines=4,
+                     max_lines=4
+                 )

+                 negative_prompt_input = gr.Textbox(
+                     label="Negative Prompt",
+                     placeholder="low quality, blurry",
+                     lines=2
+                 )

+             with gr.Column(scale=1):
+                 style_input = gr.Radio(
+                     label="Style",
+                     choices=list(STYLE_PRESETS.keys()),
+                     value="Realistic"
+                 )

+                 seed_input = gr.Number(
+                     label="Seed (-1 = random)",
+                     value=-1,
+                     precision=0
                  )
+
+                 size_preset = gr.Radio(
+                     label="Size (smaller = faster)",
+                     choices=[
+                         "512x512 (Ultra Fast)",
+                         "768x768 (Fast)",
+                         "1024x1024 (Slow)"
+                     ],
+                     value="768x768 (Fast)"
+                 )
+
+                 steps_input = gr.Slider(
+                     label="Steps (10-15 recommended)",
+                     minimum=8,
+                     maximum=15,
+                     value=12,
+                     step=1
+                 )
+
+                 cfg_input = gr.Slider(
+                     label="CFG Scale",
+                     minimum=1.0,
+                     maximum=10.0,
+                     value=3.5,
+                     step=0.5
+                 )
+
+                 generate_button = gr.Button(
+                     "🚀 GENERATE (30-50s)",
+                     variant="primary",
+                     size="lg"
                  )

+         image_output = gr.Image(label="Result", show_label=False)
+
+         generation_info = gr.Textbox(
+             label="Info",
+             interactive=False,
+             visible=True
+         )
+
+         metadata_content = gr.Textbox(visible=False)
+         current_image = gr.Image(visible=False)
+
          generate_button.click(
+             fn=generate_wrapper,
              inputs=[
+                 prompt_input, style_input, negative_prompt_input,
                  steps_input, cfg_input, seed_input, size_preset
              ],
              outputs=[
+                 image_output, generation_info,
+                 metadata_content, current_image
              ],
              show_progress=True
          )

          prompt_input.submit(
+             fn=generate_wrapper,
              inputs=[
+                 prompt_input, style_input, negative_prompt_input,
                  steps_input, cfg_input, seed_input, size_preset
              ],
              outputs=[
+                 image_output, generation_info,
+                 metadata_content, current_image
              ],
              show_progress=True
          )

      return interface


  if __name__ == "__main__":
+     print("🚀 Starting Fast FLUX Generator")
+     print(f"🔧 Model: {FIXED_MODEL}")
+     print(f"🔧 CUDA: {torch.cuda.is_available()}")
+
+     # Pre-load the model
+     print("📦 Pre-loading model...")
+     initialize_model()

      app = create_interface()
+     app.queue(max_size=3, default_concurrency_limit=1)

      app.launch(
          server_name="0.0.0.0",