kines9661 commited on
Commit
ceb4da3
·
verified ·
1 Parent(s): ae43774

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -56
app.py CHANGED
@@ -24,6 +24,8 @@ os.makedirs(MODEL_CACHE_DIR, exist_ok=True)
24
  os.makedirs(LORA_CACHE_DIR, exist_ok=True)
25
 
26
  SPACE_ID = os.getenv("SPACE_ID")
 
 
27
 
28
  pipe = None
29
  current_model_path = ""
@@ -36,8 +38,11 @@ PRESET_MODELS = {
36
  "Dreamlike Anime 1.0 (動漫)": "dreamlike-art/dreamlike-anime-1.0",
37
  "Kernel NSFW (寫實/成人)": "Kernel/sd-nsfw",
38
  "Realistic Vision V5.1 (高畫質寫實)": "SG161222/Realistic_Vision_V5.1_noVAE",
39
- # 【修復】:改為載入官方 SDXL 基礎型,並在生成時配合 Lightning LoRA 加速
40
- "SDXL 1.0 Base (高畫質)": "stabilityai/stable-diffusion-xl-base-1.0",
 
 
 
41
  }
42
 
43
  RESOLUTION_CHOICES = [
@@ -46,7 +51,7 @@ RESOLUTION_CHOICES = [
46
 
47
  def get_model_choices():
48
  local_models = [f for f in os.listdir(MODEL_CACHE_DIR) if f.endswith(".safetensors")]
49
- return list(PRESET_MODELS.keys()) + local_models
50
 
51
  def get_lora_choices():
52
  return [f for f in os.listdir(LORA_CACHE_DIR) if f.endswith(".safetensors")]
@@ -88,7 +93,7 @@ def download_and_backup(url, folder, progress, civit_token="", hf_token=""):
88
  f.write(data)
89
  downloaded += len(data)
90
  if total_size > 0:
91
- progress(downloaded / total_size, desc=f"下載 {fname[:15]}: {downloaded/1024/1024:.1f}MB")
92
 
93
  if os.path.getsize(local_filepath) < 1024 * 100:
94
  os.remove(local_filepath)
@@ -131,18 +136,32 @@ def load_pipeline(model_source, is_local_file=False):
131
  active_loras = {}
132
  gc.collect()
133
 
 
 
 
 
 
 
134
  try:
135
  if is_local_file:
136
- try:
137
  p = StableDiffusionXLPipeline.from_single_file(
138
  model_source, torch_dtype=torch.float32,
139
  safety_checker=None, requires_safety_checker=False, use_safetensors=True
140
  )
141
- except Exception:
142
- p = StableDiffusionPipeline.from_single_file(
143
- model_source, torch_dtype=torch.float32,
144
- safety_checker=None, requires_safety_checker=False, use_safetensors=True
145
- )
 
 
 
 
 
 
 
 
146
  else:
147
  p = AutoPipelineForText2Image.from_pretrained(
148
  model_source,
@@ -154,8 +173,13 @@ def load_pipeline(model_source, is_local_file=False):
154
  p.to("cpu")
155
  p.enable_attention_slicing()
156
 
157
- current_model_is_sdxl = "SDXL" in p.__class__.__name__
158
- model_type_str = "SDXL" if current_model_is_sdxl else "SD 1.5"
 
 
 
 
 
159
 
160
  pipe = p
161
  current_model_path = model_source
@@ -167,16 +191,31 @@ def load_pipeline(model_source, is_local_file=False):
167
 
168
  # ── 4. UI 互動事件處理 ─────────────────────────────────────────────
169
 
170
- def handle_model_dropdown(choice):
171
  if choice in PRESET_MODELS:
172
  source = PRESET_MODELS[choice]
173
- is_local = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
  else:
175
  source = os.path.join(MODEL_CACHE_DIR, choice)
176
- is_local = True
177
-
178
- yield "⏳ 載入模型中 (若為 SDXL 可能需約 2 分鐘,請耐心等待)..."
179
- yield load_pipeline(source, is_local)
180
 
181
  def handle_civitai_model_download(url, civit_token, hf_token, progress=gr.Progress()):
182
  if not url:
@@ -186,7 +225,6 @@ def handle_civitai_model_download(url, civit_token, hf_token, progress=gr.Progre
186
  try:
187
  path, fname, backup_msg = download_and_backup(url, MODEL_CACHE_DIR, progress, civit_token, hf_token)
188
  yield f"⏳ 載入模型中... ({backup_msg})", gr.update()
189
-
190
  status = load_pipeline(path, True)
191
  choices = get_model_choices()
192
  yield f"{status} | {backup_msg}", gr.update(choices=choices, value=fname)
@@ -201,10 +239,8 @@ def handle_lora_dropdown(lora_filename, scale):
201
  global pipe, active_loras
202
  if pipe is None: return "⚠️ 請先載入主模型", update_lora_list_text()
203
  if not lora_filename: return "⚠️ 未選擇 LoRA", update_lora_list_text()
204
-
205
  path = os.path.join(LORA_CACHE_DIR, lora_filename)
206
  adapter_name = lora_filename.replace(".", "_")
207
-
208
  try:
209
  pipe.load_lora_weights(path, adapter_name=adapter_name)
210
  active_loras[adapter_name] = float(scale)
@@ -212,13 +248,12 @@ def handle_lora_dropdown(lora_filename, scale):
212
  except Exception as e:
213
  error_msg = str(e)
214
  if "size mismatch" in error_msg or "No modules were targeted" in error_msg:
215
- return f"❌ 載入失敗:架構不符!LoRA 與當前主模型不相容。", update_lora_list_text()
216
  return f"❌ LoRA 載入失敗: {error_msg}", update_lora_list_text()
217
 
218
  def handle_lora_download(url, scale, civit_token, hf_token, progress=gr.Progress()):
219
  global pipe, active_loras
220
  if pipe is None: return "⚠️ 請先載入主模型", update_lora_list_text(), gr.update()
221
-
222
  try:
223
  path, fname, backup_msg = download_and_backup(url, LORA_CACHE_DIR, progress, civit_token, hf_token)
224
  adapter_name = fname.replace(".", "_")
@@ -228,11 +263,10 @@ def handle_lora_download(url, scale, civit_token, hf_token, progress=gr.Progress
228
  choices = get_lora_choices()
229
  return f"✅ 已套用 {fname} | {backup_msg}", update_lora_list_text(), gr.update(choices=choices, value=fname)
230
  except Exception as e:
231
- if adapter_name in active_loras:
232
- del active_loras[adapter_name]
233
  error_msg = str(e)
234
  if "size mismatch" in error_msg or "No modules were targeted" in error_msg:
235
- return f"❌ 載入失敗:架構不符!LoRA 與當前主模型不相容。", update_lora_list_text(), gr.update()
236
  return f"❌ LoRA 載入失敗: {error_msg}", update_lora_list_text(), gr.update()
237
  except Exception as e:
238
  return f"❌ 錯誤: {e}", update_lora_list_text(), gr.update()
@@ -254,40 +288,38 @@ def generate_image(prompt, neg, steps, cfg, seed, width, height, use_lcm):
254
 
255
  adapters_to_use = []
256
  weights_to_use = []
257
-
258
  pipe.unload_lora_weights()
259
  pipe.disable_lora()
260
  warning_msg = ""
261
 
262
- # 【更新重點】:智慧處理極模式 (LCM 或 SDXL Lightning)
263
  if use_lcm:
264
  if current_model_is_sdxl:
265
- # 針對 SDXL,掛載 ByteDance Lightning 4-Step LoRA
266
  try:
267
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
268
  lightning_ckpt = hf_hub_download("ByteDance/SDXL-Lightning", "sdxl_lightning_4step_lora.safetensors")
269
  pipe.load_lora_weights(lightning_ckpt, adapter_name="lightning")
270
  adapters_to_use.append("lightning")
271
  weights_to_use.append(1.0)
272
- warning_msg = "⚡ SDXL Lightning 啟動。建議 Steps=4~8CFG=1.0~2.0。 "
273
- except Exception:
274
- warning_msg = "⚠️ Lightning 載入失敗,退回一般模式。 "
275
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
276
  else:
277
- # 針對 SD1.5,掛載 LCM-LoRA
278
  try:
279
  pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
280
  pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5", adapter_name="lcm")
281
  adapters_to_use.append("lcm")
282
  weights_to_use.append(1.0)
283
- warning_msg = "⚡ LCM 啟動。建議 Steps=4~8CFG=1.0~2.0。 "
284
- except Exception:
285
- warning_msg = "⚠️ LCM 載入失敗,退回一般模式。 "
286
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
287
  else:
288
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
289
 
290
- # 重新掛載使用者自訂的 LoRA
291
  for k, v in active_loras.items():
292
  try:
293
  lora_filename = k.replace("_", ".")
@@ -305,7 +337,7 @@ def generate_image(prompt, neg, steps, cfg, seed, width, height, use_lcm):
305
  # 生成影像
306
  image = pipe(
307
  prompt=prompt,
308
- negative_prompt=neg if not use_lcm else None, # 極速模式下忽略反向詞
309
  num_inference_steps=int(steps),
310
  guidance_scale=float(cfg),
311
  width=int(width), height=int(height),
@@ -313,34 +345,31 @@ def generate_image(prompt, neg, steps, cfg, seed, width, height, use_lcm):
313
  ).images[0]
314
 
315
  cost_time = time.time() - start_time
316
- return image, warning_msg + f"✅ 完成 | 解析度: {width}x{height} | 耗時: {cost_time:.1f} | Seed: {seed}"
317
 
318
 
319
  # ── 6. Gradio UI 介面設計 ──────────────────────────────────────────
320
 
321
- ENV_CIVITAI = os.getenv("CIVITAI_TOKEN", "")
322
- ENV_HF = os.getenv("HF_TOKEN", "")
323
-
324
  with gr.Blocks(title="Turbo CPU SD + 永久圖庫") as demo:
325
- gr.Markdown("# ⚡ Turbo CPU SD (無限制 + SDXL 極速版)")
326
 
327
  with gr.Row():
328
  with gr.Column(scale=1):
329
  with gr.Accordion("⚙️ 授權金鑰設定 (已自動帶入)", open=False):
330
  civit_token = gr.Textbox(label="Civitai API Token", value=ENV_CIVITAI, placeholder="下載 NSFW 模型用", type="password")
331
- hf_token = gr.Textbox(label="HF Write Token", value=ENV_HF, placeholder="永久備份檔案用", type="password")
332
 
333
  gr.Markdown("### 1. 主模型管理")
334
  with gr.Tabs():
335
  with gr.TabItem("🗂️ 選擇圖庫模型"):
336
- model_dropdown = gr.Dropdown(choices=get_model_choices(), value=get_model_choices()[0], label="選擇本機模型", interactive=True)
337
  load_model_btn = gr.Button("載入選擇的模型", variant="primary")
338
  with gr.TabItem("🌐 下載新模型"):
339
- civit_ckpt_url = gr.Textbox(label="Checkpoint 網址 / HF Repo ID", placeholder="輸入 Civitai 直連 或 HF 模型ID...")
340
  download_model_btn = gr.Button("下載、備份並載入")
341
 
342
  model_status = gr.Textbox(label="系統狀態", value="未載入", interactive=False)
343
-
344
  gr.Markdown("### 2. LoRA 管理")
345
  lora_scale = gr.Slider(0.1, 2.0, value=0.8, step=0.05, label="LoRA 權重 (Scale)")
346
  with gr.Tabs():
@@ -355,26 +384,28 @@ with gr.Blocks(title="Turbo CPU SD + 永久圖庫") as demo:
355
  lora_status = gr.Textbox(label="目前已套用清單", value="無", lines=2, interactive=False)
356
 
357
  with gr.Column(scale=2):
358
- use_lcm = gr.Checkbox(label="⚡ 啟用極速出圖 (SD1.5 將套用 LCM / SDXL 將套用 Lightning)", value=True)
359
- prompt = gr.Textbox(label="Prompt", value="a beautiful landscape painting, masterpiece", lines=3)
360
- neg = gr.Textbox(label="Negative Prompt (極速模式下將忽略)", value="low quality, bad anatomy, worst quality, text, watermark", lines=1)
 
 
361
 
362
  with gr.Row():
363
  steps = gr.Slider(1, 30, value=5, step=1, label="Steps (極速模式建議 4~8)")
364
- cfg = gr.Slider(1.0, 10.0, value=1.5, step=0.5, label="CFG (極速模式建議 1.0~2.0)")
365
  seed = gr.Number(-1, label="Seed (-1=隨機)", precision=0)
366
 
367
- gr.Markdown("*(提示:SD 1.5 建議 512~768;SDXL 建議 1024。解析度過高會導致 CPU 崩潰!)*")
368
  with gr.Row():
369
- width = gr.Dropdown(RESOLUTION_CHOICES, value=512, label="Width")
370
- height = gr.Dropdown(RESOLUTION_CHOICES, value=512, label="Height")
371
 
372
- gen_btn = gr.Button("✨ 生成圖片 (SDXL 需較長時間)", variant="primary", size="lg")
373
  gen_status = gr.Textbox(label="生成狀態", interactive=False)
374
  out_img = gr.Image(label="生成結果", type="pil")
375
 
376
  # ── 7. 綁定按鈕事件 ──
377
- load_model_btn.click(fn=handle_model_dropdown, inputs=[model_dropdown], outputs=[model_status])
378
  download_model_btn.click(fn=handle_civitai_model_download, inputs=[civit_ckpt_url, civit_token, hf_token], outputs=[model_status, model_dropdown])
379
 
380
  load_lora_btn.click(fn=handle_lora_dropdown, inputs=[lora_dropdown, lora_scale], outputs=[model_status, lora_status])
 
24
  os.makedirs(LORA_CACHE_DIR, exist_ok=True)
25
 
26
  SPACE_ID = os.getenv("SPACE_ID")
27
+ ENV_CIVITAI = os.getenv("CIVITAI_TOKEN", "")
28
+ ENV_HF = os.getenv("HF_TOKEN", "")
29
 
30
  pipe = None
31
  current_model_path = ""
 
38
  "Dreamlike Anime 1.0 (動漫)": "dreamlike-art/dreamlike-anime-1.0",
39
  "Kernel NSFW (寫實/成人)": "Kernel/sd-nsfw",
40
  "Realistic Vision V5.1 (高畫質寫實)": "SG161222/Realistic_Vision_V5.1_noVAE",
41
+ "SDXL 1.0 Base (高畫質底)": "stabilityai/stable-diffusion-xl-base-1.0",
42
+ }
43
+
44
+ HF_FILE_MODELS = {
45
+ "HomoSimile XL Pony v6 (你的模型 🔑)": ("kines9661/HomoSimile", "homosimileXLPony_v60NAIXLEPSV11.safetensors"),
46
  }
47
 
48
  RESOLUTION_CHOICES = [
 
51
 
52
  def get_model_choices():
53
  local_models = [f for f in os.listdir(MODEL_CACHE_DIR) if f.endswith(".safetensors")]
54
+ return list(PRESET_MODELS.keys()) + list(HF_FILE_MODELS.keys()) + local_models
55
 
56
  def get_lora_choices():
57
  return [f for f in os.listdir(LORA_CACHE_DIR) if f.endswith(".safetensors")]
 
93
  f.write(data)
94
  downloaded += len(data)
95
  if total_size > 0:
96
+ progress(downloaded / total_size, desc=f"下載 {fname[:20]}: {downloaded/1024/1024:.1f}MB")
97
 
98
  if os.path.getsize(local_filepath) < 1024 * 100:
99
  os.remove(local_filepath)
 
136
  active_loras = {}
137
  gc.collect()
138
 
139
+ # 【修復重點 1】:強制判定是否為 SDXL (從檔名或 Repo 屬性雙重驗證)
140
+ is_sdxl_target = False
141
+ source_lower = model_source.lower()
142
+ if "xl" in source_lower or "pony" in source_lower:
143
+ is_sdxl_target = True
144
+
145
  try:
146
  if is_local_file:
147
+ if is_sdxl_target:
148
  p = StableDiffusionXLPipeline.from_single_file(
149
  model_source, torch_dtype=torch.float32,
150
  safety_checker=None, requires_safety_checker=False, use_safetensors=True
151
  )
152
+ else:
153
+ # 若不是 XL 名字,先試 SD1.5,失敗再用 SDXL
154
+ try:
155
+ p = StableDiffusionPipeline.from_single_file(
156
+ model_source, torch_dtype=torch.float32,
157
+ safety_checker=None, requires_safety_checker=False, use_safetensors=True
158
+ )
159
+ except Exception:
160
+ p = StableDiffusionXLPipeline.from_single_file(
161
+ model_source, torch_dtype=torch.float32,
162
+ safety_checker=None, requires_safety_checker=False, use_safetensors=True
163
+ )
164
+ is_sdxl_target = True
165
  else:
166
  p = AutoPipelineForText2Image.from_pretrained(
167
  model_source,
 
173
  p.to("cpu")
174
  p.enable_attention_slicing()
175
 
176
+ # 【修復重點 2】:根據最終載入的 Pipeline 類型嚴格判定架構
177
+ if isinstance(p, StableDiffusionXLPipeline) or is_sdxl_target:
178
+ current_model_is_sdxl = True
179
+ model_type_str = "SDXL/Pony XL"
180
+ else:
181
+ current_model_is_sdxl = False
182
+ model_type_str = "SD 1.5"
183
 
184
  pipe = p
185
  current_model_path = model_source
 
191
 
192
  # ── 4. UI 互動事件處理 ─────────────────────────────────────────────
193
 
194
+ def handle_model_dropdown(choice, hf_token_val):
195
  if choice in PRESET_MODELS:
196
  source = PRESET_MODELS[choice]
197
+ yield "⏳ 載入模型中 (若為 SDXL 可能需 2 分鐘,請耐心等待)..."
198
+ yield load_pipeline(source, is_local_file=False)
199
+
200
+ elif choice in HF_FILE_MODELS:
201
+ repo_id, filename = HF_FILE_MODELS[choice]
202
+ yield f"⏳ 正在從 HF Hub 下載 {filename}... (首次需時較長)"
203
+ try:
204
+ token = hf_token_val.strip() if hf_token_val and hf_token_val.strip() else None
205
+ local_path = hf_hub_download(
206
+ repo_id=repo_id,
207
+ filename=filename,
208
+ token=token,
209
+ local_dir=MODEL_CACHE_DIR
210
+ )
211
+ yield "⏳ 下載完成!正在載入模型..."
212
+ yield load_pipeline(local_path, is_local_file=True)
213
+ except Exception as e:
214
+ yield f"❌ 下載失敗: {str(e)}。若為私人倉庫請確認 HF Token 已填入。"
215
  else:
216
  source = os.path.join(MODEL_CACHE_DIR, choice)
217
+ yield "⏳ 載入模型中..."
218
+ yield load_pipeline(source, is_local_file=True)
 
 
219
 
220
  def handle_civitai_model_download(url, civit_token, hf_token, progress=gr.Progress()):
221
  if not url:
 
225
  try:
226
  path, fname, backup_msg = download_and_backup(url, MODEL_CACHE_DIR, progress, civit_token, hf_token)
227
  yield f"⏳ 載入模型中... ({backup_msg})", gr.update()
 
228
  status = load_pipeline(path, True)
229
  choices = get_model_choices()
230
  yield f"{status} | {backup_msg}", gr.update(choices=choices, value=fname)
 
239
  global pipe, active_loras
240
  if pipe is None: return "⚠️ 請先載入主模型", update_lora_list_text()
241
  if not lora_filename: return "⚠️ 未選擇 LoRA", update_lora_list_text()
 
242
  path = os.path.join(LORA_CACHE_DIR, lora_filename)
243
  adapter_name = lora_filename.replace(".", "_")
 
244
  try:
245
  pipe.load_lora_weights(path, adapter_name=adapter_name)
246
  active_loras[adapter_name] = float(scale)
 
248
  except Exception as e:
249
  error_msg = str(e)
250
  if "size mismatch" in error_msg or "No modules were targeted" in error_msg:
251
+ return f"❌ 架構不符!LoRA 與主模型不相容。", update_lora_list_text()
252
  return f"❌ LoRA 載入失敗: {error_msg}", update_lora_list_text()
253
 
254
  def handle_lora_download(url, scale, civit_token, hf_token, progress=gr.Progress()):
255
  global pipe, active_loras
256
  if pipe is None: return "⚠️ 請先載入主模型", update_lora_list_text(), gr.update()
 
257
  try:
258
  path, fname, backup_msg = download_and_backup(url, LORA_CACHE_DIR, progress, civit_token, hf_token)
259
  adapter_name = fname.replace(".", "_")
 
263
  choices = get_lora_choices()
264
  return f"✅ 已套用 {fname} | {backup_msg}", update_lora_list_text(), gr.update(choices=choices, value=fname)
265
  except Exception as e:
266
+ if adapter_name in active_loras: del active_loras[adapter_name]
 
267
  error_msg = str(e)
268
  if "size mismatch" in error_msg or "No modules were targeted" in error_msg:
269
+ return f"❌ 架構不符!LoRA 與主模型不相容。", update_lora_list_text(), gr.update()
270
  return f"❌ LoRA 載入失敗: {error_msg}", update_lora_list_text(), gr.update()
271
  except Exception as e:
272
  return f"❌ 錯誤: {e}", update_lora_list_text(), gr.update()
 
288
 
289
  adapters_to_use = []
290
  weights_to_use = []
 
291
  pipe.unload_lora_weights()
292
  pipe.disable_lora()
293
  warning_msg = ""
294
 
295
+ # 【修復重點 3】:更精準的加速 LoRA 分配邏輯
296
  if use_lcm:
297
  if current_model_is_sdxl:
298
+ # 確認為 SDXL / Pony 模型,掛載 SDXL 專用 Lightning LoRA
299
  try:
300
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
301
  lightning_ckpt = hf_hub_download("ByteDance/SDXL-Lightning", "sdxl_lightning_4step_lora.safetensors")
302
  pipe.load_lora_weights(lightning_ckpt, adapter_name="lightning")
303
  adapters_to_use.append("lightning")
304
  weights_to_use.append(1.0)
305
+ warning_msg = "⚡ SDXL Lightning 啟動。建議 Steps=4~8, CFG=1.0~2.0。 "
306
+ except Exception as e:
307
+ warning_msg = f"⚠️ Lightning 載入失敗 ({str(e)[:50]}),退回一般模式。 "
308
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
309
  else:
310
+ # 確認為 SD1.5 模型,掛載 SD1.5 專用 LCM LoRA
311
  try:
312
  pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
313
  pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5", adapter_name="lcm")
314
  adapters_to_use.append("lcm")
315
  weights_to_use.append(1.0)
316
+ warning_msg = "⚡ LCM 啟動。建議 Steps=4~8, CFG=1.0~2.0。 "
317
+ except Exception as e:
318
+ warning_msg = f"⚠️ LCM 載入失敗 ({str(e)[:50]}),退回一般模式。 "
319
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
320
  else:
321
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
322
 
 
323
  for k, v in active_loras.items():
324
  try:
325
  lora_filename = k.replace("_", ".")
 
337
  # 生成影像
338
  image = pipe(
339
  prompt=prompt,
340
+ negative_prompt=neg if not use_lcm else None,
341
  num_inference_steps=int(steps),
342
  guidance_scale=float(cfg),
343
  width=int(width), height=int(height),
 
345
  ).images[0]
346
 
347
  cost_time = time.time() - start_time
348
+ return image, warning_msg + f"✅ 完成 | {width}x{height} | 耗時: {cost_time:.1f}s | Seed: {seed}"
349
 
350
 
351
  # ── 6. Gradio UI 介面設計 ──────────────────────────────────────────
352
 
 
 
 
353
  with gr.Blocks(title="Turbo CPU SD + 永久圖庫") as demo:
354
+ gr.Markdown("# ⚡ Turbo CPU SD (NSFW + SDXL/Pony 支援)")
355
 
356
  with gr.Row():
357
  with gr.Column(scale=1):
358
  with gr.Accordion("⚙️ 授權金鑰設定 (已自動帶入)", open=False):
359
  civit_token = gr.Textbox(label="Civitai API Token", value=ENV_CIVITAI, placeholder="下載 NSFW 模型用", type="password")
360
+ hf_token = gr.Textbox(label="HF Write Token", value=ENV_HF, placeholder="永久備份 + 私人模型用", type="password")
361
 
362
  gr.Markdown("### 1. 主模型管理")
363
  with gr.Tabs():
364
  with gr.TabItem("🗂️ 選擇圖庫模型"):
365
+ model_dropdown = gr.Dropdown(choices=get_model_choices(), value=get_model_choices()[0], label="選擇模型", interactive=True)
366
  load_model_btn = gr.Button("載入選擇的模型", variant="primary")
367
  with gr.TabItem("🌐 下載新模型"):
368
+ civit_ckpt_url = gr.Textbox(label="Checkpoint 網址", placeholder="輸入 Civitai 直連...")
369
  download_model_btn = gr.Button("下載、備份並載入")
370
 
371
  model_status = gr.Textbox(label="系統狀態", value="未載入", interactive=False)
372
+
373
  gr.Markdown("### 2. LoRA 管理")
374
  lora_scale = gr.Slider(0.1, 2.0, value=0.8, step=0.05, label="LoRA 權重 (Scale)")
375
  with gr.Tabs():
 
384
  lora_status = gr.Textbox(label="目前已套用清單", value="無", lines=2, interactive=False)
385
 
386
  with gr.Column(scale=2):
387
+ use_lcm = gr.Checkbox(label="⚡ 啟用極速模式 (SD1.5→LCM / SDXL→Lightning)", value=True)
388
+
389
+ gr.Markdown("💡 **Pony XL 使用提示**:Prompt 開頭請加 `score_9, score_8_up, score_7_up,`")
390
+ prompt = gr.Textbox(label="Prompt", value="score_9, score_8_up, score_7_up, a beautiful woman, masterpiece", lines=3)
391
+ neg = gr.Textbox(label="Negative Prompt (極速模式下將忽略)", value="score_1, score_2, score_3, low quality, bad anatomy, worst quality", lines=1)
392
 
393
  with gr.Row():
394
  steps = gr.Slider(1, 30, value=5, step=1, label="Steps (極速模式建議 4~8)")
395
+ cfg = gr.Slider(1.0, 10.0, value=5.0, step=0.5, label="CFG (Pony 建議 5~7)")
396
  seed = gr.Number(-1, label="Seed (-1=隨機)", precision=0)
397
 
398
+ gr.Markdown("*(SD 1.5 建議 512~768;SDXL/Pony 建議 1024)*")
399
  with gr.Row():
400
+ width = gr.Dropdown(RESOLUTION_CHOICES, value=1024, label="Width")
401
+ height = gr.Dropdown(RESOLUTION_CHOICES, value=1024, label="Height")
402
 
403
+ gen_btn = gr.Button("✨ 生成圖片", variant="primary", size="lg")
404
  gen_status = gr.Textbox(label="生成狀態", interactive=False)
405
  out_img = gr.Image(label="生成結果", type="pil")
406
 
407
  # ── 7. 綁定按鈕事件 ──
408
+ load_model_btn.click(fn=handle_model_dropdown, inputs=[model_dropdown, hf_token], outputs=[model_status])
409
  download_model_btn.click(fn=handle_civitai_model_download, inputs=[civit_ckpt_url, civit_token, hf_token], outputs=[model_status, model_dropdown])
410
 
411
  load_lora_btn.click(fn=handle_lora_dropdown, inputs=[lora_dropdown, lora_scale], outputs=[model_status, lora_status])