Surae007 commited on
Commit
4b327fa
·
verified ·
1 Parent(s): 78205ef
Files changed (1) hide show
  1. app.py +431 -0
app.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# ================= ZeroGPU-Optimized =================
# - Cache lives in /tmp (wiped on every restart)
# - Lazy loading + LRU (keep only the 1-2 most recent pipelines)
# - Light models as defaults (SD 1.5 / SD-Turbo)
# - ControlNet limited to Canny (small and fast)
# - "Clear cache" button in the UI
# - Auto-retry with reduced size/steps on OOM or long stalls
# =====================================================
import gc
import io
import json
import os
import shutil
import time
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple

# 1) Redirect every HF cache to /tmp so disk usage never accumulates.
#    These MUST be set before transformers/diffusers are imported below,
#    since those libraries read the variables at import time.
os.environ["HF_HOME"] = "/tmp/hf"
os.environ["HF_HUB_CACHE"] = "/tmp/hf/hub"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf/transformers"
os.environ["DIFFUSERS_CACHE"] = "/tmp/hf/diffusers"

import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageDraw
from diffusers import (
    ControlNetModel,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    HeunDiscreteScheduler,
    StableDiffusionControlNetPipeline,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipelineLegacy,
    StableDiffusionPipeline,
)
33
# ---------- Optional dependency (skipped when unavailable) ----------
try:
    from rembg import remove as rembg_remove
except Exception:
    # rembg is optional; when absent, background removal becomes a no-op.
    rembg_remove = None
39
# ---------- Runtime ----------
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

# ---------- Model registry (small/fast models suited to ZeroGPU) ----------
# *SDXL is deliberately absent from the defaults (heavy to load), but it is
# still supported when entered as a custom model ID.
MODELS_TXT = [
    ("runwayml/stable-diffusion-v1-5", "SD 1.5 (base, fast)"),
    ("stabilityai/sd-turbo", "SD-Turbo (ultra-fast)"),
    ("stabilityai/stable-diffusion-2-1", "SD 2.1 (landscape)"),
]
MODEL_IMG2IMG_DEFAULT = "runwayml/stable-diffusion-v1-5"
MODEL_INPAINT_DEFAULT = "runwayml/stable-diffusion-inpainting"  # legacy inpaint (small/stable)

# ControlNet: Canny only (small and sufficient for edge guidance).
CONTROLNETS = [
    ("lllyasviel/sd-controlnet-canny", "Canny (edges)"),
]

# Style suffixes appended to the user prompt when a preset is chosen.
PRESETS = {
    "Cinematic": ", cinematic lighting, bokeh, film grain",
    "Studio": ", studio photo, softbox lighting, sharp focus",
    "Anime": ", anime style, clean lines, vibrant colors",
}
NEG_DEFAULT = "lowres, blurry, bad anatomy, extra fingers, watermark, jpeg artifacts, text"

# UI scheduler name -> diffusers scheduler class.
SCHEDULERS = {
    "DPM-Solver (Karras)": DPMSolverMultistepScheduler,
    "Euler": EulerDiscreteScheduler,
    "Euler a": EulerAncestralDiscreteScheduler,
    "Heun": HeunDiscreteScheduler,
}

# ---------- Pipeline caches with LRU eviction ----------
MAX_PIPE_CACHE = 2  # keep at most this many pipelines resident
PIPE_CACHE: "OrderedDict[str, object]" = OrderedDict()
CONTROL_CACHE: Dict[str, ControlNetModel] = {}
77
def _lru_put(key: str, pipe) -> None:
    """Insert *pipe* into PIPE_CACHE as most-recently-used and evict overflow.

    Keeps at most MAX_PIPE_CACHE pipelines alive. Fixes two small defects in
    the original: the `try: del old_pipe except: pass` was dead weight (`del`
    of a local cannot raise here and does not free the object by itself), and
    gc.collect()/empty_cache() ran once per evicted item instead of once
    after eviction.
    """
    PIPE_CACHE[key] = pipe
    PIPE_CACHE.move_to_end(key)
    evicted = False
    while len(PIPE_CACHE) > MAX_PIPE_CACHE:
        # Drop the least-recently-used entry; releasing the reference lets
        # the pipeline (and its GPU tensors) be collected.
        PIPE_CACHE.popitem(last=False)
        evicted = True
    if evicted:
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
90
# ---------- Utils ----------
def set_scheduler(pipe, name: str) -> None:
    """Swap the pipeline's scheduler for the one registered under *name*.

    Unknown names silently fall back to DPM-Solver.
    """
    scheduler_cls = SCHEDULERS.get(name, DPMSolverMultistepScheduler)
    pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
95
def seed_gen(seed: int):
    """Return a torch.Generator seeded with *seed*, or None for "random".

    None and negative seeds both mean "no fixed seed".
    """
    if seed is None:
        return None
    seed = int(seed)
    if seed < 0:
        return None
    generator = torch.Generator(device="cuda" if DEVICE == "cuda" else "cpu")
    generator.manual_seed(seed)
    return generator
101
def _speed_tweaks(pipe) -> None:
    """Apply best-effort memory/stability optimizations for the device.

    Fix: the original wrapped ALL tweaks in one try/except, so the first
    unsupported feature (e.g. xformers not installed) silently skipped every
    remaining tweak. Each tweak is now attempted independently; failures are
    still ignored to keep the original best-effort semantics.
    """
    if DEVICE == "cuda":
        tweak_names = (
            "enable_xformers_memory_efficient_attention",
            "enable_vae_tiling",
            "enable_vae_slicing",
        )
    else:
        tweak_names = (
            "enable_sequential_cpu_offload",
            "enable_attention_slicing",
        )
    for name in tweak_names:
        try:
            getattr(pipe, name)()
        except Exception:
            pass  # best-effort: unsupported tweaks are simply skipped
114
# ---------- Lazy loaders ----------
def get_controlnet(model_id: str):
    """Load (or fetch from cache) the ControlNet weights for *model_id*."""
    cached = CONTROL_CACHE.get(model_id)
    if cached is not None:
        return cached
    cn = ControlNetModel.from_pretrained(model_id, torch_dtype=DTYPE, use_safetensors=True)
    cn.to(DEVICE)
    CONTROL_CACHE[model_id] = cn
    return cn
123
def get_txt2img_pipe(model_id: str, use_control: bool, control_id: Optional[str]):
    """Return a cached or freshly loaded text-to-image pipeline.

    When *use_control* is set (and *control_id* is given) a ControlNet
    pipeline is built; otherwise a plain StableDiffusionPipeline.

    Fix: the cache key now includes the ControlNet id. The original keyed
    only on 'cn'/'none', so two different ControlNets for the same base
    model would have collided in the cache and returned the wrong pipeline.
    """
    with_control = bool(use_control and control_id)
    key = f"t2i|{model_id}|{control_id if with_control else 'none'}"
    if key in PIPE_CACHE:
        PIPE_CACHE.move_to_end(key)
        return PIPE_CACHE[key]

    if with_control:
        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            model_id,
            controlnet=get_controlnet(control_id),
            torch_dtype=DTYPE,
            safety_checker=None,
            feature_extractor=None,
            use_safetensors=True,
        )
    else:
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            torch_dtype=DTYPE,
            safety_checker=None,
            feature_extractor=None,
            use_safetensors=True,
        )
    pipe.to(DEVICE)
    _speed_tweaks(pipe)
    _lru_put(key, pipe)
    return pipe
145
def get_img2img_pipe(model_id: str):
    """Return a cached or freshly loaded img2img pipeline for *model_id*."""
    key = f"i2i|{model_id}"
    cached = PIPE_CACHE.get(key)
    if cached is not None:
        PIPE_CACHE.move_to_end(key)
        return cached
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        model_id,
        torch_dtype=DTYPE,
        safety_checker=None,
        feature_extractor=None,
        use_safetensors=True,
    )
    pipe.to(DEVICE)
    _speed_tweaks(pipe)
    _lru_put(key, pipe)
    return pipe
158
def get_inpaint_pipe(model_id: str):
    """Return a cached or freshly loaded legacy inpainting pipeline."""
    key = f"inpaint|{model_id}"
    cached = PIPE_CACHE.get(key)
    if cached is not None:
        PIPE_CACHE.move_to_end(key)
        return cached
    pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained(
        model_id,
        torch_dtype=DTYPE,
        safety_checker=None,
        feature_extractor=None,
        use_safetensors=True,
    )
    pipe.to(DEVICE)
    _speed_tweaks(pipe)
    _lru_put(key, pipe)
    return pipe
171
# ---------- Post process ----------
def remove_bg(img: Image.Image) -> Image.Image:
    """Best-effort background removal; returns *img* unchanged on failure.

    Bug fix: the original called rembg on an ndarray and then tried
    Image.open(io.BytesIO(...)) on the result. rembg mirrors its input type,
    so it returned raw ndarray pixel data — not an encoded image file — and
    Image.open always failed, making the except clause silently return the
    original image (background removal was effectively a no-op). Passing the
    PIL image straight through makes rembg return a PIL image directly.
    """
    if rembg_remove is None:
        return img
    try:
        return rembg_remove(img)
    except Exception:
        # Keep best-effort semantics: never crash the generation over this.
        return img
179
# ---------- Auto-retry wrapper ----------
def run_with_retry(func, *, width: int, height: int, steps: int, max_time: float = 280.0):
    """Call ``func(w, h, s)``; on CUDA OOM shrink the job and try again.

    Up to three attempts are made. After each out-of-memory failure the
    resolution is scaled to 75% (snapped down to a multiple of 64, floor
    384 px) and the step count is reduced by 4 (floor 10). Non-memory
    RuntimeErrors propagate unchanged; exceeding *max_time* aborts with a
    user-facing error.
    """
    started = time.time()
    w, h, s = width, height, steps
    remaining = 3
    while remaining:
        remaining -= 1
        if time.time() - started > max_time:
            raise gr.Error("งานนานเกินกำหนด โปรดลองลดขนาดภาพหรือจำนวนสเต็ป")
        try:
            return func(w, h, s)
        except RuntimeError as exc:
            text = str(exc).lower()
            is_oom = ("out of memory" in text) or ("cuda oom" in text) or ("alloc" in text)
            if not is_oom:
                raise
            # Shrink and retry: 75% size snapped to /64, slightly fewer steps.
            w = max(384, int(w * 0.75) // 64 * 64)
            h = max(384, int(h * 0.75) // 64 * 64)
            s = max(10, s - 4)
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
    raise gr.Error("หน่วยความจำไม่พอ แม้จะลดขนาดแล้ว — ลองลดพารามิเตอร์เพิ่มเติม")
203
# ---------- Generators ----------
def txt2img(
    model_id, custom_model, prompt, preset, negative,
    steps, cfg, width, height, scheduler, seed,
    use_control, control_choice, control_image,
    do_rembg
):
    """Text-to-image generation with optional Canny ControlNet guidance.

    Returns (PIL image, JSON metadata string); raises gr.Error on missing
    prompt. Runs through run_with_retry so OOM shrinks the job automatically.
    """
    if not prompt or not str(prompt).strip():
        raise gr.Error("กรุณากรอก prompt")

    # Custom model ID wins over the dropdown; fall back to the first default.
    model = (custom_model.strip() or model_id or MODELS_TXT[0][0]).strip()
    if preset and preset in PRESETS:
        prompt = prompt + PRESETS[preset]
    if not negative or not str(negative).strip():
        negative = NEG_DEFAULT
    width, height = int(width), int(height)
    # ControlNet is active only when enabled AND an edge image was provided.
    use_control = bool(use_control and control_choice and control_image is not None)

    def _run(w, h, s):
        pipe = get_txt2img_pipe(model, use_control, CONTROLNETS[0][0] if use_control else None)
        set_scheduler(pipe, scheduler)
        call_kwargs = dict(
            prompt=prompt,
            negative_prompt=negative,
            width=w,
            height=h,
            num_inference_steps=int(s),
            guidance_scale=float(cfg),
            generator=seed_gen(seed),
        )
        if use_control:
            call_kwargs["image"] = control_image
        image = pipe(**call_kwargs).images[0]
        if do_rembg:
            image = remove_bg(image)
        meta = {
            "mode": "txt2img",
            "model": model,
            "control": "canny" if use_control else None,
            "prompt": prompt,
            "neg": negative,
            "size": f"{w}x{h}",
            "steps": int(s),
            "cfg": float(cfg),
            "scheduler": scheduler,
            "seed": seed,
        }
        return image, json.dumps(meta, ensure_ascii=False, indent=2)

    return run_with_retry(_run, width=width, height=height, steps=int(steps))
247
def img2img(
    model_id, custom_model, init_img, strength,
    prompt, preset, negative, steps, cfg, width, height, scheduler, seed,
    do_rembg
):
    """Image-to-image generation driven by *init_img* and *strength*.

    Returns (PIL image, JSON metadata string); raises gr.Error when no
    initial image is supplied.
    """
    if init_img is None:
        raise gr.Error("โปรดอัปโหลดภาพเริ่มต้น")
    model = (custom_model.strip() or model_id or MODEL_IMG2IMG_DEFAULT).strip()
    if preset and preset in PRESETS:
        prompt = prompt + PRESETS[preset]
    if not negative or not str(negative).strip():
        negative = NEG_DEFAULT
    width, height = int(width), int(height)

    def _run(w, h, s):
        # NOTE: w/h participate in the retry protocol, but diffusers derives
        # the output size from init_img in img2img mode.
        pipe = get_img2img_pipe(model)
        set_scheduler(pipe, scheduler)
        result = pipe(
            prompt=prompt,
            negative_prompt=negative,
            image=init_img,
            strength=float(strength),
            num_inference_steps=int(s),
            guidance_scale=float(cfg),
            generator=seed_gen(seed),
        ).images[0]
        if do_rembg:
            result = remove_bg(result)
        meta = {
            "mode": "img2img",
            "model": model,
            "prompt": prompt,
            "neg": negative,
            "steps": int(s),
            "cfg": float(cfg),
            "seed": seed,
            "strength": float(strength),
        }
        return result, json.dumps(meta, ensure_ascii=False, indent=2)

    return run_with_retry(_run, width=width, height=height, steps=int(steps))
274
def expand_canvas_for_outpaint(img: Image.Image, expand_px: int, direction: str) -> Tuple[Image.Image, Image.Image]:
    """Grow *img* by *expand_px* in *direction*; return (canvas, mask).

    The mask is white (255) exactly over the newly added strip, which is the
    region the inpainting pipeline will fill. Any direction other than
    "left"/"right"/"top" is treated as "bottom".
    """
    w, h = img.size
    if direction in ("left", "right"):
        size = (w + expand_px, h)
    else:
        size = (w, h + expand_px)
    # Where the original image lands on the enlarged canvas.
    paste_at = {"left": (expand_px, 0), "top": (0, expand_px)}.get(direction, (0, 0))
    # The strip of new pixels to be filled in, per direction.
    strips = {
        "left": [0, 0, expand_px, h],
        "right": [w, 0, w + expand_px, h],
        "top": [0, 0, w, expand_px],
    }
    strip = strips.get(direction, [0, h, w, h + expand_px])

    canvas = Image.new("RGBA", size, (0, 0, 0, 0))
    canvas.paste(img, paste_at)
    mask = Image.new("L", size, 0)
    ImageDraw.Draw(mask).rectangle(strip, fill=255)
    return canvas.convert("RGB"), mask
290
def inpaint_outpaint(
    model_id, custom_model, base_img, mask_img, mode, expand_px, expand_dir,
    prompt, preset, negative, steps, cfg, width, height, scheduler, seed,
    strength, do_rembg
):
    """Inpaint the masked region, or outpaint by first growing the canvas.

    In "Outpaint" mode the base image and mask are replaced by an enlarged
    canvas whose mask covers only the new strip. Returns (PIL image, JSON
    metadata string); raises gr.Error when no base image is supplied.
    """
    if base_img is None:
        raise gr.Error("โปรดอัปโหลดภาพฐาน")
    model = (custom_model.strip() or model_id or MODEL_INPAINT_DEFAULT).strip()
    if preset and preset in PRESETS:
        prompt = prompt + PRESETS[preset]
    if not negative or not str(negative).strip():
        negative = NEG_DEFAULT
    width, height = int(width), int(height)

    if mode == "Outpaint":
        base_img, mask_img = expand_canvas_for_outpaint(base_img, int(expand_px), expand_dir)

    def _run(w, h, s):
        pipe = get_inpaint_pipe(model)
        set_scheduler(pipe, scheduler)
        image = pipe(
            prompt=prompt,
            negative_prompt=negative,
            image=base_img,
            mask_image=mask_img,
            strength=float(strength),
            num_inference_steps=int(s),
            guidance_scale=float(cfg),
            generator=seed_gen(seed),
        ).images[0]
        if do_rembg:
            image = remove_bg(image)
        meta = {
            "mode": mode,
            "model": model,
            "prompt": prompt,
            "steps": int(s),
            "cfg": float(cfg),
            "seed": seed,
        }
        return image, json.dumps(meta, ensure_ascii=False, indent=2)

    return run_with_retry(_run, width=width, height=height, steps=int(steps))
320
# ---------- Clear cache ----------
def clear_runtime_caches():
    """Drop all in-memory pipelines and delete the on-disk HF cache.

    Returns a short status string for display in the UI.
    """
    # In-memory pipelines first; they will be lazily reloaded on demand.
    PIPE_CACHE.clear()
    CONTROL_CACHE.clear()

    # Then the on-disk download cache under HF_HOME.
    cache_root = os.environ.get("HF_HOME", "/tmp/hf")
    try:
        if os.path.isdir(cache_root):
            shutil.rmtree(cache_root, ignore_errors=True)
    except Exception as exc:
        # rmtree with ignore_errors rarely raises, but never let cleanup crash.
        print("[ClearCache] remove cache failed:", exc)

    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return "✅ Cache cleared. Pipelines will be reloaded on demand."
335
# ---------- UI ----------
def build_ui():
    """Assemble the Gradio Blocks app and wire up all event handlers."""
    with gr.Blocks(theme=gr.themes.Soft(), title="ZeroGPU SD Studio") as demo:
        gr.Markdown("## 🖼️ ZeroGPU SD Studio — เบา เร็ว เสถียร (CPU/ZeroGPU)")

        # --- Shared model / prompt settings ---
        with gr.Row():
            model_dd = gr.Dropdown(
                choices=[m[0] for m in MODELS_TXT],
                value=MODELS_TXT[0][0],
                label="Base model",
            )
            model_custom = gr.Textbox(
                label="Custom model ID (optional)",
                placeholder="เช่น stabilityai/stable-diffusion-xl-base-1.0 (หนัก)",
            )

        preset = gr.Dropdown(choices=list(PRESETS.keys()), value=None, label="Style Preset")
        negative = gr.Textbox(value=NEG_DEFAULT, label="Negative Prompt")

        # --- Shared sampling settings ---
        with gr.Row():
            steps = gr.Slider(minimum=10, maximum=40, value=18, step=1,
                              label="Steps (แนะนำ ≤20 บน ZeroGPU)")
            cfg = gr.Slider(minimum=1.0, maximum=12.0, value=6.5, step=0.1, label="CFG")
        with gr.Row():
            width = gr.Slider(minimum=384, maximum=768, value=512, step=64, label="Width")
            height = gr.Slider(minimum=384, maximum=768, value=512, step=64, label="Height")
        scheduler = gr.Dropdown(choices=list(SCHEDULERS.keys()),
                                value="DPM-Solver (Karras)", label="Scheduler")
        seed = gr.Number(value=-1, precision=0, label="Seed (-1=random)")

        # --- ControlNet (Canny only) ---
        with gr.Accordion("ControlNet (Canny)", open=False):
            use_control = gr.Checkbox(value=False, label="Enable Canny ControlNet")
            control_choice = gr.Dropdown(choices=[CONTROLNETS[0][1]],
                                         value=CONTROLNETS[0][1], label="Type")
            control_image = gr.Image(type="pil", label="Edge image")

        with gr.Row():
            do_rembg = gr.Checkbox(value=False, label="Remove background (ถ้ามี rembg)")

        # --- Task tabs ---
        with gr.Tab("Text → Image"):
            prompt_txt = gr.Textbox(lines=3, label="Prompt")
            btn_txt = gr.Button("🚀 Generate")
            out_img_txt = gr.Image(type="pil", label="Result")
            out_meta_txt = gr.Textbox(label="Metadata", lines=10)

        with gr.Tab("Image → Image"):
            init_img = gr.Image(type="pil", label="Init image")
            strength = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.05,
                                 label="Strength")
            prompt_i2i = gr.Textbox(lines=3, label="Prompt")
            btn_i2i = gr.Button("🚀 Img2Img")
            out_img_i2i = gr.Image(type="pil", label="Result")
            out_meta_i2i = gr.Textbox(label="Metadata", lines=10)

        with gr.Tab("Inpaint / Outpaint"):
            base_img = gr.Image(type="pil", label="Base image")
            mask_img = gr.Image(type="pil", label="Mask (white = edit)")
            mode_io = gr.Radio(["Inpaint", "Outpaint"], value="Inpaint", label="Mode")
            expand_px = gr.Slider(minimum=64, maximum=512, value=192, step=64,
                                  label="Outpaint pixels")
            expand_dir = gr.Radio(["left", "right", "top", "bottom"], value="right",
                                  label="Outpaint direction")
            prompt_io = gr.Textbox(lines=3, label="Prompt")
            btn_io = gr.Button("🚀 Inpaint/Outpaint")
            out_img_io = gr.Image(type="pil", label="Result")
            out_meta_io = gr.Textbox(label="Metadata", lines=10)

        with gr.Row():
            btn_clear = gr.Button("🧹 Clear cache (runtime)")
            msg_clear = gr.Markdown()

        # --- Event bindings ---
        btn_txt.click(
            fn=txt2img,
            inputs=[model_dd, model_custom, prompt_txt, preset, negative,
                    steps, cfg, width, height, scheduler, seed,
                    use_control, control_choice, control_image,
                    do_rembg],
            outputs=[out_img_txt, out_meta_txt],
            api_name="txt2img",
        )

        btn_i2i.click(
            fn=img2img,
            inputs=[model_dd, model_custom, init_img, strength,
                    prompt_i2i, preset, negative, steps, cfg, width, height,
                    scheduler, seed, do_rembg],
            outputs=[out_img_i2i, out_meta_i2i],
            api_name="img2img",
        )

        btn_io.click(
            fn=inpaint_outpaint,
            inputs=[model_dd, model_custom, base_img, mask_img, mode_io,
                    expand_px, expand_dir,
                    prompt_io, preset, negative, steps, cfg, width, height,
                    scheduler, seed, strength, do_rembg],
            outputs=[out_img_io, out_meta_io],
            api_name="inpaint_outpaint",
        )

        btn_clear.click(fn=clear_runtime_caches, outputs=[msg_clear])

        gr.Markdown("ℹ️ โหมดนี้ออกแบบมาสำหรับ ZeroGPU/CPU: ถ้าต้องการ SDXL ให้กรอก Custom ID (จะช้าหนักขึ้น)")

    return demo
428
# Keep `demo` at module level so platforms that import this module (e.g.
# HF Spaces) can find the app object.
demo = build_ui()
# A single-worker queue keeps the lone ZeroGPU slot from being oversubscribed
# and reduces dropped connections under load.
demo.queue(concurrency_count=1, max_size=8)

if __name__ == "__main__":
    # Bug fix: the original passed prevent_thread_lock=True, which makes
    # launch() return immediately — run as a plain script, the process would
    # then fall off the end of the file and the server would die at startup.
    # The default blocking launch keeps the app alive.
    demo.launch(share=False, show_api=False, max_threads=1)