alexander00001 committed on
Commit
a94e6e4
·
verified ·
1 Parent(s): 3bf5836

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +340 -59
app.py CHANGED
@@ -1,5 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  def initialize_model():
2
- """优化后的模型初始化函数"""
3
  global pipeline, device, model_loaded
4
 
5
  if model_loaded and pipeline is not None:
@@ -13,12 +133,11 @@ def initialize_model():
13
 
14
  print(f"📦 Loading fixed model: {FIXED_MODEL}")
15
 
16
- # 优化的模型加载
17
  pipeline = AutoPipelineForText2Image.from_pretrained(
18
  FIXED_MODEL,
19
- torch_dtype=torch.bfloat16,
20
- use_safetensors=True,
21
- variant=None
22
  )
23
 
24
  pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
@@ -26,14 +145,11 @@ def initialize_model():
26
  )
27
  pipeline = pipeline.to(device)
28
 
29
- # 关键优化:使用sequential而不是model cpu offload
30
  if torch.cuda.is_available():
31
- pipeline.to(device) # 改这里!
 
32
  pipeline.enable_vae_slicing()
33
  pipeline.enable_vae_tiling()
34
-
35
- # 可选:如果内存充足,可以完全不用offload
36
- # pipeline.to(device) # 全部加载到GPU,最快但吃内存
37
 
38
  print("✅ Model initialization complete (Optimized)")
39
  model_loaded = True
@@ -49,16 +165,16 @@ def initialize_model():
49
 
50
  @apply_spaces_decorator
51
  def generate_image(prompt: str, style: str, negative_prompt: str = "",
52
- steps: int = 20, cfg_scale: float = 3.5,
53
  seed: int = -1, width: int = 1024, height: int = 1024,
54
  progress=gr.Progress()):
55
- """优化后的图像生成函数"""
56
  try:
57
  if not prompt or prompt.strip() == "":
58
  return None, "", "❌ Please enter a prompt"
59
 
60
  # 优化的参数限制
61
- steps = max(10, min(steps, 25)) # 降低最大步数
62
  width = min(width, 1024)
63
  height = min(height, 1024)
64
 
@@ -72,11 +188,10 @@ def generate_image(prompt: str, style: str, negative_prompt: str = "",
72
  if seed == -1:
73
  seed = random.randint(0, np.iinfo(np.int32).max)
74
 
75
- # 不要过度增强提示词
76
  enhanced_prompt = enhance_prompt(prompt.strip(), style)
77
 
78
  if not negative_prompt.strip():
79
- negative_prompt = "(low quality, worst quality:1.4), blurry, deformed"
80
 
81
  generator = torch.Generator("cpu").manual_seed(seed)
82
 
@@ -94,7 +209,7 @@ def generate_image(prompt: str, style: str, negative_prompt: str = "",
94
  guidance_scale=cfg_scale,
95
  width=width,
96
  height=height,
97
- max_sequence_length=512, # 从256改到512!
98
  generator=generator,
99
  output_type="pil"
100
  )
@@ -123,7 +238,7 @@ def generate_image(prompt: str, style: str, negative_prompt: str = "",
123
 
124
  except torch.cuda.OutOfMemoryError as e:
125
  cleanup_memory()
126
- error_msg = "❌ GPU memory insufficient. Try: 768x768 or fewer steps"
127
  print(f"CUDA OOM: {error_msg}")
128
  return None, "", error_msg
129
 
@@ -135,48 +250,214 @@ def generate_image(prompt: str, style: str, negative_prompt: str = "",
135
  return None, "", f"❌ Generation failed: {error_msg}"
136
 
137
 
138
- def enhance_prompt(prompt: str, style: str) -> str:
139
- """优化的提示词增强"""
140
- # 减少质量词,避免提示词过长
141
- quality_terms = ", ".join(QUALITY_ENHANCERS[:3]) # 从5减到3
142
-
143
- style_terms = ""
144
- if style in STYLE_ENHANCERS:
145
- style_terms = ", " + ", ".join(STYLE_ENHANCERS[style][:2]) # 从3减到2
146
-
147
- style_suffix = STYLE_PRESETS.get(style, "")
148
-
149
- enhanced_parts = [prompt.strip()]
150
-
151
- if style_suffix:
152
- enhanced_parts.append(style_suffix)
153
-
154
- if style_terms:
155
- enhanced_parts.append(style_terms.lstrip(", "))
156
-
157
- enhanced_parts.append(quality_terms)
158
-
159
- enhanced_prompt = ", ".join(filter(None, enhanced_parts))
160
-
161
- # 提高长度限制,但不要太长
162
- if len(enhanced_prompt) > 800: # 从500提高到800
163
- enhanced_prompt = enhanced_prompt[:800]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
 
165
- return enhanced_prompt
166
 
167
 
168
- # UI中的步数滑块也要调整
169
- steps_input = gr.Slider(
170
- label="Steps (15-20 recommended for speed)",
171
- minimum=10,
172
- maximum=25, # 从30降到25
173
- value=15, # 从20降到15
174
- step=1
175
- )
176
-
177
- # 添加尺寸预设方便快速选择
178
- size_preset = gr.Radio(
179
- label="Size Preset (smaller = faster)",
180
- choices=["768x768 (Fast)", "1024x1024 (Quality)"],
181
- value="768x768 (Fast)"
182
- )
 
1
# ===== spaces must be imported before anything else (ZeroGPU requirement) =====
# Probe for the Hugging Face `spaces` package; fall back to regular mode
# (no ZeroGPU decorator) when it is not installed.
SPACES_AVAILABLE = False
try:
    import spaces
except ImportError:
    print("⚠️ Spaces not available - running in regular mode")
else:
    SPACES_AVAILABLE = True
    print("✅ Spaces available - ZeroGPU mode")
9
+
10
+ # ===== 其他导入 =====
11
+ import os
12
+ from datetime import datetime
13
+ import random
14
+ import torch
15
+ import gradio as gr
16
+ from diffusers import AutoPipelineForText2Image, FlowMatchEulerDiscreteScheduler
17
+ from PIL import Image
18
+ import traceback
19
+ import numpy as np
20
+ import gc
21
+
22
# ===== Configuration constants =====
# Compel prompt weighting is deliberately disabled: it is not compatible
# with the FLUX pipeline used below.
COMPEL_AVAILABLE = False
print("⚠️ Compel disabled for FLUX compatibility")

# Suffix appended to the user prompt for each style preset ("None" adds nothing).
STYLE_PRESETS = {
    "None": "",
    "Realistic": "photorealistic, 8k, ultra-detailed, cinematic lighting, masterpiece",
    "Anime": "anime style, detailed, high quality, masterpiece, best quality",
    "Comic": "comic book style, bold outlines, vibrant colors, cel shading",
    "Watercolor": "watercolor illustration, soft gradients, pastel palette"
}

# The app is pinned to this single checkpoint; there is no model selector.
FIXED_MODEL = "aoxo/flux.1dev-abliterated"

# Generic quality tags; enhance_prompt() only uses the first three.
QUALITY_ENHANCERS = [
    "detailed anatomy", "perfect anatomy", "soft skin",
    "high resolution", "masterpiece", "best quality",
    "professional photography", "natural lighting"
]

# Extra per-style tags layered on top of STYLE_PRESETS; enhance_prompt()
# only uses the first two of each list.
STYLE_ENHANCERS = {
    "Realistic": ["photorealistic", "ultra realistic", "natural lighting"],
    "Anime": ["anime style", "high quality anime", "detailed eyes"],
    "Comic": ["comic book style", "bold outlines", "vibrant colors"],
    "Watercolor": ["watercolor style", "artistic", "soft gradients"]
}

# Output directory for saved images, created eagerly at import time.
SAVE_DIR = "generated_images"
os.makedirs(SAVE_DIR, exist_ok=True)

# ===== Module-level state (populated lazily by initialize_model) =====
pipeline = None
device = None
model_loaded = False
56
+
57
+
58
# ===== Utility functions (must be defined before the decorator) =====
def cleanup_memory():
    """Release cached GPU memory and run garbage collection.

    ``gc.collect()`` runs FIRST so that Python objects still holding CUDA
    tensors are actually freed before ``torch.cuda.empty_cache()`` returns
    their cached blocks to the driver; the previous order (collect last)
    could leave unreachable tensors pinned in the allocator cache.
    No-ops on the CUDA side when no GPU is available.
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
65
+
66
+
67
def enhance_prompt(prompt: str, style: str) -> str:
    """Augment the user prompt with style and quality tags.

    Joins, in order: the stripped prompt, the STYLE_PRESETS suffix for
    *style* (if any), the first two STYLE_ENHANCERS tags for *style*
    (if any), and the first three QUALITY_ENHANCERS tags. Empty pieces
    are dropped and the result is capped at 800 characters.
    """
    pieces = [prompt.strip()]

    preset_suffix = STYLE_PRESETS.get(style, "")
    if preset_suffix:
        pieces.append(preset_suffix)

    if style in STYLE_ENHANCERS:
        pieces.append(", ".join(STYLE_ENHANCERS[style][:2]))

    pieces.append(", ".join(QUALITY_ENHANCERS[:3]))

    combined = ", ".join(piece for piece in pieces if piece)
    # Hard cap to keep the prompt within a sane length for the encoder.
    return combined[:800]
93
+
94
+
95
def create_metadata_content(prompt, enhanced_prompt, seed, steps, cfg_scale, width, height, style):
    """Build the plain-text metadata sidecar for a generated image.

    Fix: the model line previously hard-coded "FLUX.1-dev", which
    misstated the actual pinned checkpoint; it now reports FIXED_MODEL.
    Returns a multi-line string (keys: Timestamp, Original Prompt,
    Enhanced Prompt, Seed, Steps, CFG Scale, Dimensions, Style, Model).
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f"""Generated Image Metadata
======================
Timestamp: {timestamp}
Original Prompt: {prompt}
Enhanced Prompt: {enhanced_prompt}
Seed: {seed}
Steps: {steps}
CFG Scale: {cfg_scale}
Dimensions: {width}x{height}
Style: {style}
Model: {FIXED_MODEL}
"""
110
+
111
+
112
# ===== Decorator definition (must precede its uses) =====
def apply_spaces_decorator(func):
    """Wrap *func* with ``spaces.GPU`` (120 s budget) when running on Spaces.

    Outside Spaces (SPACES_AVAILABLE is False) the function is returned
    unchanged, so callers need no environment checks of their own.
    """
    if not SPACES_AVAILABLE:
        return func
    return spaces.GPU(duration=120)(func)
118
+
119
+
120
+ # ===== 模型相关函数 =====
121
  def initialize_model():
122
+ """优化的模型初始化函数"""
123
  global pipeline, device, model_loaded
124
 
125
  if model_loaded and pipeline is not None:
 
133
 
134
  print(f"📦 Loading fixed model: {FIXED_MODEL}")
135
 
 
136
  pipeline = AutoPipelineForText2Image.from_pretrained(
137
  FIXED_MODEL,
138
+ torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
139
+ variant=None,
140
+ use_safetensors=True
141
  )
142
 
143
  pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
 
145
  )
146
  pipeline = pipeline.to(device)
147
 
 
148
  if torch.cuda.is_available():
149
+ # 关键优化:使用sequential代替model cpu offload
150
+ pipeline.enable_sequential_cpu_offload()
151
  pipeline.enable_vae_slicing()
152
  pipeline.enable_vae_tiling()
 
 
 
153
 
154
  print("✅ Model initialization complete (Optimized)")
155
  model_loaded = True
 
165
 
166
  @apply_spaces_decorator
167
  def generate_image(prompt: str, style: str, negative_prompt: str = "",
168
+ steps: int = 15, cfg_scale: float = 3.5,
169
  seed: int = -1, width: int = 1024, height: int = 1024,
170
  progress=gr.Progress()):
171
+ """图像生成函数(优化版本)"""
172
  try:
173
  if not prompt or prompt.strip() == "":
174
  return None, "", "❌ Please enter a prompt"
175
 
176
  # 优化的参数限制
177
+ steps = max(10, min(steps, 25))
178
  width = min(width, 1024)
179
  height = min(height, 1024)
180
 
 
188
  if seed == -1:
189
  seed = random.randint(0, np.iinfo(np.int32).max)
190
 
 
191
  enhanced_prompt = enhance_prompt(prompt.strip(), style)
192
 
193
  if not negative_prompt.strip():
194
+ negative_prompt = "(low quality, worst quality:1.4), (bad anatomy, bad hands:1.2), blurry, deformed"
195
 
196
  generator = torch.Generator("cpu").manual_seed(seed)
197
 
 
209
  guidance_scale=cfg_scale,
210
  width=width,
211
  height=height,
212
+ max_sequence_length=512, # 从256改到512
213
  generator=generator,
214
  output_type="pil"
215
  )
 
238
 
239
  except torch.cuda.OutOfMemoryError as e:
240
  cleanup_memory()
241
+ error_msg = "❌ GPU memory insufficient. Try 768x768 or fewer steps."
242
  print(f"CUDA OOM: {error_msg}")
243
  return None, "", error_msg
244
 
 
250
  return None, "", f"❌ Generation failed: {error_msg}"
251
 
252
 
253
# ===== CSS styles =====
# Injected via gr.Blocks(css=...) in create_interface(); selectors below
# must match the elem_classes assigned when the UI components are built.
# NOTE(review): the UI also uses "warning-box", "prompt-box" and
# "image-output" classes that are not styled here — confirm that is intended.
css = """
/* 保持原有CSS不变 */
.gradio-container {
    max-width: 100% !important;
    margin: 0 !important;
    padding: 0 !important;
    background: linear-gradient(135deg, #e6a4f2 0%, #1197e4 100%) !important;
    min-height: 100vh !important;
}

.main-content {
    background: rgba(255, 255, 255, 0.95) !important;
    border-radius: 20px !important;
    padding: 20px !important;
    margin: 15px !important;
    box-shadow: 0 10px 25px rgba(0,0,0,0.2) !important;
}

.title {
    text-align: center !important;
    background: linear-gradient(45deg, #bb6ded, #08676b) !important;
    -webkit-background-clip: text !important;
    -webkit-text-fill-color: transparent !important;
    font-size: 2rem !important;
    margin-bottom: 15px !important;
    font-weight: bold !important;
}

.generate-btn {
    background: linear-gradient(45deg, #bb6ded, #08676b) !important;
    color: white !important;
    border: none !important;
    padding: 15px 25px !important;
    border-radius: 25px !important;
    font-size: 16px !important;
    font-weight: bold !important;
    width: 100% !important;
}
"""
293
+
294
+
295
# ===== Build the UI =====
def create_interface():
    """Assemble the Gradio Blocks UI and wire the generation handlers.

    Returns the (un-launched) gr.Blocks interface. Relies on module-level
    `css`, `STYLE_PRESETS` and `generate_image`.
    """
    with gr.Blocks(css=css, title="NSFW FLUX Image Generator") as interface:
        with gr.Column(elem_classes=["main-content"]):
            gr.HTML('<div class="title">NSFW FLUX Image Generator</div>')

            gr.HTML('<div class="warning-box">⚠️ 18+ CONTENT WARNING ⚠️</div>')

            with gr.Row():
                # Left column: prompt inputs.
                with gr.Column(scale=2):
                    prompt_input = gr.Textbox(
                        label="Main Prompt",
                        placeholder="beautiful woman, detailed portrait...",
                        lines=6,
                        elem_classes=["prompt-box"]
                    )

                    gr.HTML('''
                    <div style="background: rgba(255, 193, 7, 0.1); padding: 10px; border-radius: 8px; margin: 10px 0;">
                    <small><strong>💡 Optimized Settings:</strong><br>
                    • Max sequence length: 512 tokens (supports longer prompts)<br>
                    • Recommended steps: 15-20 for best speed/quality<br>
                    • Try 768x768 for faster generation</small>
                    </div>
                    ''')

                    negative_prompt_input = gr.Textbox(
                        label="Negative Prompt (Optional)",
                        placeholder="low quality, blurry...",
                        lines=3,
                        elem_classes=["prompt-box"]
                    )

                # Right column: generation settings.
                with gr.Column(scale=1):
                    with gr.Group():
                        style_input = gr.Radio(
                            label="Style Preset",
                            choices=list(STYLE_PRESETS.keys()),
                            value="Realistic"
                        )

                    with gr.Group():
                        seed_input = gr.Number(
                            label="Seed (-1 for random)",
                            value=-1,
                            precision=0
                        )

                    with gr.Group():
                        # Square presets only; parse_size() assumes "WxH" with W == H.
                        size_preset = gr.Radio(
                            label="Size (smaller = faster)",
                            choices=["768x768", "1024x1024"],
                            value="768x768"
                        )

                    with gr.Group():
                        steps_input = gr.Slider(
                            label="Steps (15-20 recommended)",
                            minimum=10,
                            maximum=25,
                            value=15,
                            step=1
                        )

                        cfg_input = gr.Slider(
                            label="CFG Scale",
                            minimum=1.0,
                            maximum=15.0,
                            value=3.5,
                            step=0.1
                        )

            generate_button = gr.Button(
                "GENERATE",
                elem_classes=["generate-btn"],
                variant="primary"
            )

            image_output = gr.Image(
                label="Generated Image",
                elem_classes=["image-output"],
                show_label=False
            )

            # Hidden until a generation succeeds (toggled by on_generate).
            generation_info = gr.Textbox(
                label="Generation Info",
                interactive=False,
                visible=False
            )

            # Hidden state carriers for the download buttons.
            metadata_content = gr.Textbox(visible=False)
            current_seed = gr.Number(visible=False)
            current_image = gr.Image(visible=False)

            # NOTE(review): these buttons have no .click handlers wired up
            # anywhere in this function — saving appears to be unimplemented.
            with gr.Row(visible=False) as download_row:
                download_image_btn = gr.Button("Save Image", size="sm")
                download_metadata_btn = gr.Button("Save Metadata", size="sm")

        def parse_size(size_str):
            """Parse a "WxH" preset string; presets are square, so W == H."""
            size = int(size_str.split('x')[0])
            return size, size

        def on_generate(prompt, style, neg_prompt, steps, cfg, seed, size_preset):
            # Dispatch to generate_image and shape the outputs for the UI.
            width, height = parse_size(size_preset)
            image, info, metadata = generate_image(
                prompt, style, neg_prompt, steps, cfg, seed, width, height
            )

            if image is not None:
                # Recover the actual seed from the info string when the user
                # asked for a random one (-1).
                # NOTE(review): bare except silently falls back to a fresh
                # random value, which may not match the seed actually used.
                try:
                    actual_seed = seed if seed != -1 else int(info.split("Seed:")[1].split("|")[0].strip())
                except:
                    actual_seed = seed if seed != -1 else random.randint(0, 999999)

                return (
                    image, info, metadata, actual_seed, image,
                    gr.update(visible=True), gr.update(visible=True)
                )
            else:
                return (
                    None, info, "", 0, None,
                    gr.update(visible=False), gr.update(visible=False)
                )

        # NOTE(review): `generation_info` appears TWICE in the outputs list
        # (index 1 receives the info string, index 5 a visibility update).
        # Some Gradio versions reject duplicate output components — confirm,
        # and consider a single gr.update(value=..., visible=...) instead.
        generate_button.click(
            fn=on_generate,
            inputs=[
                prompt_input, style_input, negative_prompt_input,
                steps_input, cfg_input, seed_input, size_preset
            ],
            outputs=[
                image_output, generation_info, metadata_content,
                current_seed, current_image, generation_info, download_row
            ],
            show_progress=True
        )

        # Same handler on Enter in the prompt box.
        prompt_input.submit(
            fn=on_generate,
            inputs=[
                prompt_input, style_input, negative_prompt_input,
                steps_input, cfg_input, seed_input, size_preset
            ],
            outputs=[
                image_output, generation_info, metadata_content,
                current_seed, current_image, generation_info, download_row
            ],
            show_progress=True
        )

    return interface
447
 
448
 
449
# ===== Application entry point =====
if __name__ == "__main__":
    print("🎨 Starting NSFW FLUX Image Generator (Optimized)...")
    print(f"🔧 Fixed Model: {FIXED_MODEL}")
    print(f"🔧 CUDA: {'✅ Available' if torch.cuda.is_available() else '❌ Not Available'}")

    # Build the UI and serialize requests through a small queue.
    app = create_interface()
    app.queue(max_size=5, default_concurrency_limit=1)

    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "show_error": True,
        "share": False,
    }
    app.launch(**launch_options)
+ )