# PanCreator_1 / app.py
import os, io, time, json, math
from typing import List, Dict, Optional, Tuple
import gradio as gr
import numpy as np
from PIL import Image, ImageOps, ImageDraw
import torch
from diffusers import (
StableDiffusionXLPipeline,
StableDiffusionXLImg2ImgPipeline,
StableDiffusionXLInpaintPipeline,
StableDiffusionXLControlNetPipeline,
ControlNetModel,
StableDiffusionUpscalePipeline,
DPMSolverMultistepScheduler, EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler, HeunDiscreteScheduler,
)
# ---------- Optional deps ----------
try:
from rembg import remove as rembg_remove
except Exception:
rembg_remove = None
# face restore
try:
from gfpgan import GFPGANer
_HAS_GFP = True
except Exception:
_HAS_GFP = False
# realesrgan (fallback upscaler); the .load_weights()/.predict() API used below matches
# the standalone RealESRGAN package, so this import may fail with the PyPI "realesrgan"
# distribution (which exposes RealESRGANer instead), in which case the feature is skipped.
try:
    from realesrgan import RealESRGAN
_HAS_REALESRGAN = True
except Exception:
_HAS_REALESRGAN = False
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
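# fp16 roughly halves GPU memory use for the pipelines below; most CPU backends only
# support fp32, hence the fallback.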
# ---------------- Registry (edit/extend as needed) ----------------
# Note: entries that are not SDXL checkpoints (FLUX, SD-Turbo, SD 2.1, SD 1.5,
# Instruct-Pix2Pix) will not load through the SDXL pipelines used below.
MODELS: List[Tuple[str, str, str]] = [
    # (id, label, note)
    ("stabilityai/stable-diffusion-xl-base-1.0",    "SDXL Base 1.0",   "general purpose, balanced"),
    ("stabilityai/stable-diffusion-xl-refiner-1.0", "SDXL Refiner",    "detail refinement (pass 2)"),
    ("SG161222/RealVisXL_V4.0",                     "RealVis XL v4",   "photoreal people/products"),
    ("Lykon/dreamshaper-xl-v2",                     "DreamShaper XL",  "fantasy to realistic, versatile styles"),
    ("RunDiffusion/Juggernaut-XL",                  "Juggernaut XL",   "strong contrast, heavy detail"),
    ("emilianJR/epiCRealismXL",                     "EpicRealism XL",  "fashion/portraits, good contrast"),
    ("black-forest-labs/FLUX.1-dev",                "FLUX.1-dev",      "modern, good style control (not SDXL)"),
    ("stabilityai/sd-turbo",                        "SD-Turbo",        "very fast, good for drafts"),
    ("stabilityai/stable-diffusion-2-1",            "SD 2.1",          "landscapes/wide compositions"),
    ("runwayml/stable-diffusion-v1-5",              "SD 1.5",          "classic, many community resources"),
    ("timbrooks/instruct-pix2pix",                  "Instruct-Pix2Pix","instruction-based edits (Img2Img)"),
]
LORAS: List[Tuple[str, str, str]] = [
    # Note: popular LoRA repos change quickly; if one fails to load, the app keeps running
    ("ByteDance/SDXL-Lightning",             "SDXL-Lightning",     "fast sampling (LoRA)"),
    ("ostris/epicrealism-xl-lora",           "EpicrealismXL-LoRA", "realistic tone"),
    ("XLabs-AI/flux-prompt-lora",            "FLUX Prompt LoRA",   "prompt-style tuning (FLUX)"),
    ("XLabs-AI/realvisxl-v4-lora",           "RealVisXL LoRA",     "portraits/products"),
    ("alpha-diffusion/sdxl-anime-lora",      "Anime-Style XL",     "anime, clean lines"),
    ("alpha-diffusion/sdxl-cinematic-lora",  "Cinematic-Drama",    "film-like lighting"),
    ("alpha-diffusion/sdxl-watercolor-lora", "Watercolor-Soft",    "watercolor/pastel"),
    ("alpha-diffusion/sdxl-fashion-lora",    "Fashion-Editorial",  "fashion/editorial shoots"),
    ("alpha-diffusion/sdxl-product-lora",    "Product-Studio",     "products, studio lighting"),
    ("alpha-diffusion/sdxl-interior-lora",   "Interior-Archi",     "interiors/architecture"),
    ("alpha-diffusion/sdxl-food-lora",       "Food-Tasty",         "food, juicy and glossy"),
]
CONTROLNETS: List[Tuple[str, str, str, str]] = [
    # (id, label, note, key); repos not published on the Hub will raise at load time
    ("diffusers/controlnet-canny-sdxl-1.0",         "Canny",         "edge control",               "canny"),
    ("diffusers/controlnet-openpose-sdxl-1.0",      "OpenPose",      "human pose control",         "pose"),
    ("diffusers/controlnet-depth-sdxl-1.0",         "Depth",         "perspective/depth control",  "depth"),
    ("diffusers/controlnet-softedge-sdxl-1.0",      "SoftEdge",      "soft edges, fewer artifacts","softedge"),
    ("diffusers/controlnet-lineart-sdxl-1.0",       "Lineart",       "line art/cartoon",           "lineart"),
    ("diffusers/controlnet-anime-lineart-sdxl-1.0", "Anime Lineart", "anime line art",             "anime_lineart"),
    ("diffusers/controlnet-normal-sdxl-1.0",        "Normal",        "surface normals",            "normal"),
    ("diffusers/controlnet-mlsd-sdxl-1.0",          "MLSD",          "straight lines/architecture","mlsd"),
    ("diffusers/controlnet-scribble-sdxl-1.0",      "Scribble",      "rough sketch to real",       "scribble"),
    ("diffusers/controlnet-seg-sdxl-1.0",           "Segmentation",  "segmentation/color regions", "seg"),
    ("diffusers/controlnet-tile-sdxl-1.0",          "Tile",          "tiled upscaling",            "tile"),
]
PRESETS = {
"Cinematic": ", cinematic lighting, 50mm, bokeh, film grain, high dynamic range",
"Studio": ", studio photo, softbox lighting, sharp focus, high detail",
"Product": ", product photography, seamless background, diffused light, reflections",
"Anime": ", anime style, clean lineart, vibrant colors, high quality",
}
NEG_DEFAULT = "lowres, blurry, bad anatomy, extra fingers, watermark, jpeg artifacts, text"
SCHEDULERS = {
"DPM-Solver (Karras)": DPMSolverMultistepScheduler,
"Euler": EulerDiscreteScheduler,
"Euler a": EulerAncestralDiscreteScheduler,
"Heun": HeunDiscreteScheduler,
}
# ---------------- Cache & helpers ----------------
PIPE_CACHE: Dict[str, object] = {}
CONTROL_CACHE: Dict[str, ControlNetModel] = {}
UPSCALE_PIPE: Optional[StableDiffusionUpscalePipeline] = None
GFP: Optional["GFPGANer"] = None      # string annotations keep the module importable when gfpgan is absent
REALSR: Optional["RealESRGAN"] = None # likewise for realesrgan
def set_sched(pipe, name: str):
    cls = SCHEDULERS.get(name, DPMSolverMultistepScheduler)
    # honour the "(Karras)" label on the DPM-Solver entry
    extra = {"use_karras_sigmas": True} if cls is DPMSolverMultistepScheduler else {}
    pipe.scheduler = cls.from_config(pipe.scheduler.config, **extra)
def seed_gen(sd: int):
if sd is None or sd < 0: return None
    g = torch.Generator(device=device)
g.manual_seed(int(sd))
return g
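# Usage sketch: pipe(..., generator=seed_gen(1234)) makes a run reproducible for the
# same model/scheduler/settings; a seed of -1 (the UI default) means "random".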
def prep_pipe(model_id: str, control_ids: List[str]):
key = f"{model_id}|{'-'.join(control_ids) if control_ids else 'none'}"
if key in PIPE_CACHE: return PIPE_CACHE[key]
if control_ids:
cns = []
for cid in control_ids:
if cid not in CONTROL_CACHE:
CONTROL_CACHE[cid] = ControlNetModel.from_pretrained(cid, torch_dtype=dtype, use_safetensors=True)
cns.append(CONTROL_CACHE[cid])
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model_id, controlnet=cns, torch_dtype=dtype, use_safetensors=True)
else:
pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=dtype, use_safetensors=True)
if device == "cuda":
pipe.to("cuda")
pipe.enable_vae_tiling(); pipe.enable_vae_slicing()
try: pipe.enable_xformers_memory_efficient_attention()
except Exception: pass
else:
pipe.to("cpu"); pipe.enable_attention_slicing()
PIPE_CACHE[key] = pipe
return pipe
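# Note: cached pipelines are never evicted, so switching between several large models can
# exhaust GPU memory. A minimal eviction policy (illustrative sketch, not part of the
# original design) would drop the oldest entry before caching a new one:
#   if len(PIPE_CACHE) >= 2:
#       PIPE_CACHE.pop(next(iter(PIPE_CACHE)))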
def apply_loras(pipe, ids: List[str], scales: List[float]):
for i, rid in enumerate([x for x in ids if x]):
try:
pipe.load_lora_weights(rid)
try:
sc = scales[i] if i < len(scales) else 0.7
pipe.fuse_lora(lora_scale=float(sc))
except Exception: pass
except Exception as e:
print(f"[LoRA] load failed {rid}: {e}")
def to_png_info(meta: dict) -> str:
return json.dumps(meta, ensure_ascii=False, indent=2)
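# Optional helper (not wired into the UI): embed the metadata JSON into the PNG itself
# via a tEXt chunk so the settings travel with the file; the "parameters" key name is
# an arbitrary choice.
def save_with_metadata(img: Image.Image, meta: dict, path: str) -> None:
    from PIL.PngImagePlugin import PngInfo
    info = PngInfo()
    info.add_text("parameters", to_png_info(meta))
    img.save(path, pnginfo=info)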
# ---------------- Optional Post-process ----------------
def ensure_upscalers():
global UPSCALE_PIPE, GFP, REALSR
if UPSCALE_PIPE is None:
try:
UPSCALE_PIPE = StableDiffusionUpscalePipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler",
torch_dtype=torch.float16 if device=="cuda" else torch.float32,
use_safetensors=True
).to(device)
except Exception as e:
print("[Upscaler] SD x4 not available:", e)
if _HAS_GFP and GFP is None:
try:
GFP = GFPGANer(model_path=None, upscale=1, arch="clean", channel_multiplier=2)
except Exception as e:
print("[GFPGAN] init failed:", e)
if _HAS_REALESRGAN and REALSR is None and device == "cuda":
try:
REALSR = RealESRGAN(torch.device("cuda"), scale=4)
            REALSR.load_weights("weights/RealESRGAN_x4plus.pth")  # only works if the weights file exists; otherwise this init is skipped
except Exception as e:
print("[RealESRGAN] init failed:", e)
def post_process(img: Image.Image, do_upscale: bool, do_face: bool, do_rembg: bool):
ensure_upscalers()
out = img
# Upscale priority: RealESRGAN > SD x4 > none
if do_upscale:
try:
if REALSR is not None:
out = Image.fromarray(REALSR.predict(np.array(out)))
elif UPSCALE_PIPE is not None:
if device == "cuda":
with torch.autocast("cuda"):
out = UPSCALE_PIPE(prompt="", image=out).images[0]
else:
out = UPSCALE_PIPE(prompt="", image=out).images[0]
except Exception as e:
print("[Upscale] skipped:", e)
if do_face and _HAS_GFP and GFP is not None:
try:
_, _, out = GFP.enhance(np.array(out), has_aligned=False, only_center_face=False, paste_back=True)
out = Image.fromarray(out)
except Exception as e:
print("[GFPGAN] skipped:", e)
if do_rembg and rembg_remove is not None:
try:
            out = rembg_remove(out)  # rembg returns the same type as its input, so a PIL image stays a PIL image
except Exception as e:
print("[rembg] skipped:", e)
return out
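# Example (assumed defaults): post_process(img, do_upscale=True, do_face=False, do_rembg=False)
# silently skips any stage whose backend (RealESRGAN, SD x4, GFPGAN, rembg) is unavailable.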
# ---------------- Core generate ----------------
def run_txt2img(model_id, custom_model, prompt, preset, negative,
steps, cfg, width, height, scheduler_name, seed,
lora_list, lora_custom_csv, lora_s1, lora_s2, lora_s3,
ctrl_selected, ctrl_images, use_refiner, refine_strength,
do_upscale, do_face, do_rembg):
    if not prompt.strip(): raise gr.Error("Please enter a prompt")
model = (custom_model.strip() or model_id).strip()
if preset in PRESETS: prompt = prompt + PRESETS[preset]
if not negative.strip(): negative = NEG_DEFAULT
# Collect control images
    cond_imgs, ctrl_ids = [], []
    selected_labels = {s.split(" (")[0] for s in (ctrl_selected or [])}  # UI strings look like "Canny (edge control)"
    for (cid, label, note, key) in CONTROLNETS:
        if label in selected_labels and ctrl_images.get(key) is not None:
            ctrl_ids.append(cid); cond_imgs.append(ctrl_images[key])
pipe = prep_pipe(model, ctrl_ids)
set_sched(pipe, scheduler_name)
loras = []
if lora_list: loras += lora_list
if lora_custom_csv.strip():
loras += [x.strip() for x in lora_custom_csv.split(",") if x.strip()]
apply_loras(pipe, loras, [lora_s1, lora_s2, lora_s3])
width = int(max(512, min(1024, width)))
height = int(max(512, min(1024, height)))
    gen = seed_gen(seed)
    call_kwargs = dict(prompt=prompt, negative_prompt=negative,
                       width=width, height=height,
                       num_inference_steps=int(steps), guidance_scale=float(cfg),
                       generator=gen)
    if ctrl_ids:
        # SDXL ControlNet pipelines take the conditioning image(s) via the `image` argument
        call_kwargs["image"] = cond_imgs if len(cond_imgs) > 1 else cond_imgs[0]
    if device == "cuda":
        with torch.autocast("cuda"):
            image = pipe(**call_kwargs).images[0]
    else:
        image = pipe(**call_kwargs).images[0]
# Refiner (GPU)
if use_refiner and device == "cuda":
try:
ref = StableDiffusionXLImg2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
torch_dtype=torch.float16, use_safetensors=True
).to("cuda")
set_sched(ref, scheduler_name)
with torch.autocast("cuda"):
image = ref(prompt=prompt, negative_prompt=negative,
image=image, strength=float(refine_strength),
num_inference_steps=max(10, int(steps)//2),
guidance_scale=float(cfg), generator=gen).images[0]
except Exception as e:
print("[Refiner] skipped:", e)
# Post-process
image = post_process(image, do_upscale, do_face, do_rembg)
meta = {
"model": model, "loras": loras, "controlnets": ctrl_selected,
"prompt": prompt, "negative": negative, "size": f"{width}x{height}",
"steps": steps, "cfg": cfg, "scheduler": scheduler_name, "seed": seed,
"post": {"upscale": do_upscale, "face_restore": do_face, "remove_bg": do_rembg}
}
return image, to_png_info(meta)
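# Programmatic call sketch (illustrative values only, mirroring the UI defaults):
#   img, meta = run_txt2img(MODELS[0][0], "", "a cat in a spacesuit", "Cinematic", "",
#                           30, 7.0, 832, 832, "DPM-Solver (Karras)", 42,
#                           [], "", 0.7, 0.5, 0.5, [], {}, False, 0.2,
#                           False, False, False)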
def run_img2img(model_id, custom_model, init_image, strength,
                prompt, preset, negative, steps, cfg, scheduler_name, seed,
                do_upscale, do_face, do_rembg):
    if init_image is None: raise gr.Error("Please upload an init image")
    model = (custom_model.strip() or model_id).strip()
    pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(model, torch_dtype=dtype, use_safetensors=True)
    pipe = pipe.to(device)
    try:
        if device == "cuda": pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pass
    set_sched(pipe, scheduler_name); gen = seed_gen(seed)
    prompt = prompt + (PRESETS.get(preset, "") if preset else "")
    negative = (negative or "").strip() or NEG_DEFAULT
    call_kwargs = dict(prompt=prompt, negative_prompt=negative, image=init_image,
                       strength=float(strength), num_inference_steps=int(steps),
                       guidance_scale=float(cfg), generator=gen)
    if device == "cuda":
        with torch.autocast("cuda"):
            img = pipe(**call_kwargs).images[0]
    else:
        img = pipe(**call_kwargs).images[0]
    img = post_process(img, do_upscale, do_face, do_rembg)
    meta = {"mode": "img2img", "model": model, "prompt": prompt, "neg": negative,
            "steps": steps, "cfg": cfg, "seed": seed, "strength": strength}
    return img, to_png_info(meta)
def expand_canvas_for_outpaint(img: Image.Image, expand_px: int, direction: str) -> Tuple[Image.Image, Image.Image]:
    # Grow the canvas in one direction; the mask is white over the newly added area.
    w, h = img.size
    if direction == "left":
        new = Image.new("RGBA", (w + expand_px, h), (0, 0, 0, 0)); new.paste(img, (expand_px, 0))
        mask = Image.new("L", (w + expand_px, h), 0); ImageDraw.Draw(mask).rectangle([0, 0, expand_px, h], fill=255)
    elif direction == "right":
        new = Image.new("RGBA", (w + expand_px, h), (0, 0, 0, 0)); new.paste(img, (0, 0))
        mask = Image.new("L", (w + expand_px, h), 0); ImageDraw.Draw(mask).rectangle([w, 0, w + expand_px, h], fill=255)
    elif direction == "top":
        new = Image.new("RGBA", (w, h + expand_px), (0, 0, 0, 0)); new.paste(img, (0, expand_px))
        mask = Image.new("L", (w, h + expand_px), 0); ImageDraw.Draw(mask).rectangle([0, 0, w, expand_px], fill=255)
    else:  # bottom
        new = Image.new("RGBA", (w, h + expand_px), (0, 0, 0, 0)); new.paste(img, (0, 0))
        mask = Image.new("L", (w, h + expand_px), 0); ImageDraw.Draw(mask).rectangle([0, h, w, h + expand_px], fill=255)
    return new.convert("RGB"), mask
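# Example (illustrative): grow an image 256 px to the right before inpainting,
#   base, mask = expand_canvas_for_outpaint(img, 256, "right")
# then pass base/mask to the inpaint pipeline with a prompt describing the new area.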
def run_inpaint_outpaint(model_id, custom_model, base_image, mask_image, mode, expand_px, expand_dir,
                         prompt, preset, negative, steps, cfg, scheduler_name, seed,
                         strength, do_upscale, do_face, do_rembg):
    if base_image is None: raise gr.Error("Please upload a base image")
    if mode == "Inpaint" and mask_image is None: raise gr.Error("Please upload a mask image")
    model = (custom_model.strip() or model_id).strip()
    pipe = StableDiffusionXLInpaintPipeline.from_pretrained(model, torch_dtype=dtype, use_safetensors=True)
    pipe = pipe.to(device)
    try:
        if device == "cuda": pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pass
    set_sched(pipe, scheduler_name); gen = seed_gen(seed)
    prompt = prompt + (PRESETS.get(preset, "") if preset else "")
    negative = (negative or "").strip() or NEG_DEFAULT
    if mode == "Outpaint":
        base_image, mask_image = expand_canvas_for_outpaint(base_image, int(expand_px), expand_dir)
    call_kwargs = dict(prompt=prompt, negative_prompt=negative,
                       image=base_image, mask_image=mask_image,
                       strength=float(strength), num_inference_steps=int(steps),
                       guidance_scale=float(cfg), generator=gen)
    if device == "cuda":
        with torch.autocast("cuda"):
            img = pipe(**call_kwargs).images[0]
    else:
        img = pipe(**call_kwargs).images[0]
    img = post_process(img, do_upscale, do_face, do_rembg)
    meta = {"mode": mode, "model": model, "prompt": prompt, "steps": steps, "cfg": cfg, "seed": seed}
    return img, to_png_info(meta)
# ---------------- UI ----------------
def build_ui():
with gr.Blocks(theme=gr.themes.Soft(), title="Masterpiece SDXL Studio Pro") as demo:
gr.Markdown("# 🖼️ Masterpiece SDXL Studio Pro")
gr.Markdown("เลือก **Models/LoRA/ControlNet** ได้หลายรายการ + **Img2Img / Inpaint / Outpaint** + **Upscale/FaceRestore/RemoveBG**")
# Common widgets
        model_dd = gr.Dropdown(choices=[m[0] for m in MODELS], value=MODELS[0][0], label="Model (select)")
        model_custom = gr.Textbox(label="Custom Model ID (e.g. username/my-model)", placeholder="(optional)")
preset = gr.Dropdown(choices=list(PRESETS.keys()), value=None, label="Style Preset (optional)")
negative = gr.Textbox(value=NEG_DEFAULT, label="Negative Prompt")
steps = gr.Slider(10, 60, 30, step=1, label="Steps")
cfg = gr.Slider(1.0, 12.0, 7.0, step=0.1, label="CFG")
width = gr.Slider(512, 1024, 832, step=64, label="Width")
height= gr.Slider(512, 1024, 832, step=64, label="Height")
scheduler = gr.Dropdown(list(SCHEDULERS.keys()), value="DPM-Solver (Karras)", label="Scheduler")
seed = gr.Number(value=-1, precision=0, label="Seed (-1=random)")
# LoRA
        lora_group = gr.CheckboxGroup(choices=[f"{rid} — {lbl} ({note})" for rid, lbl, note in LORAS], label="LoRA (multiple allowed)")
        lora_custom = gr.Textbox(label="Custom LoRA IDs (comma-separated)")
        lora_s1 = gr.Slider(0.0, 1.2, 0.7, step=0.05, label="LoRA scale #1")
        lora_s2 = gr.Slider(0.0, 1.2, 0.5, step=0.05, label="LoRA scale #2")
        lora_s3 = gr.Slider(0.0, 1.2, 0.5, step=0.05, label="LoRA scale #3")
# ControlNet
        ctrl_group = gr.CheckboxGroup(choices=[c[1] + " (" + c[2] + ")" for c in CONTROLNETS], label="ControlNet (select types)")
imgs = {
"canny": gr.Image(type="pil", label="Canny"),
"pose": gr.Image(type="pil", label="OpenPose"),
"depth": gr.Image(type="pil", label="Depth"),
"softedge": gr.Image(type="pil", label="SoftEdge"),
"lineart": gr.Image(type="pil", label="Lineart"),
"anime_lineart": gr.Image(type="pil", label="Anime Lineart"),
"normal": gr.Image(type="pil", label="Normal"),
"mlsd": gr.Image(type="pil", label="MLSD"),
"scribble": gr.Image(type="pil", label="Scribble"),
"seg": gr.Image(type="pil", label="Segmentation"),
"tile": gr.Image(type="pil", label="Tile"),
}
# Post-process
        with gr.Row():
            do_upscale = gr.Checkbox(False, label="Upscale x4 (if available)")
            do_face = gr.Checkbox(False, label="Face Restore (if available)")
            do_rembg = gr.Checkbox(False, label="Remove Background (if available)")
        with gr.Row():
            use_refiner = gr.Checkbox(False, label="SDXL Refiner pass (GPU only)")
            refine_strength = gr.Slider(0.05, 0.5, 0.2, step=0.05, label="Refiner strength")
with gr.Tab("Text → Image"):
prompt_txt = gr.Textbox(lines=3, label="Prompt")
btn_txt = gr.Button("🚀 Generate")
out_img_txt = gr.Image(type="pil", label="Result")
out_meta_txt = gr.Textbox(label="Metadata", lines=10)
with gr.Tab("Image → Image"):
init_img = gr.Image(type="pil", label="Init Image (img2img)")
            strength = gr.Slider(0.1, 1.0, 0.7, step=0.05, label="Strength")
prompt_i2i = gr.Textbox(lines=3, label="Prompt")
btn_i2i = gr.Button("🚀 Img2Img")
out_img_i2i = gr.Image(type="pil", label="Result")
out_meta_i2i = gr.Textbox(label="Metadata", lines=10)
with gr.Tab("Inpaint / Outpaint"):
base_img = gr.Image(type="pil", label="Base Image")
            mask_img = gr.Image(type="pil", label="Mask (white = repaint, black = keep)")
mode_io = gr.Radio(["Inpaint","Outpaint"], value="Inpaint", label="Mode")
            expand_px = gr.Slider(64, 1024, 256, step=64, label="Outpaint pixels")
expand_dir = gr.Radio(["left","right","top","bottom"], value="right", label="Outpaint direction")
prompt_io = gr.Textbox(lines=3, label="Prompt")
btn_io = gr.Button("🚀 Inpaint/Outpaint")
out_img_io = gr.Image(type="pil", label="Result")
out_meta_io = gr.Textbox(label="Metadata", lines=10)
def parse_lora_list(selected: List[str]) -> List[str]:
if not selected: return []
out = []
for s in selected:
rid = s.split(" — ")[0].strip()
out.append(rid)
return out
        def _txt2img(model_id_v, custom_model_v, prompt_v, preset_v, negative_v,
                     steps_v, cfg_v, width_v, height_v, scheduler_v, seed_v,
                     lora_sel, lora_csv, s1, s2, s3, ctrl_sel,
                     canny, pose, depth, softedge, lineart, anime_lineart,
                     normal, mlsd, scribble, seg, tile,
                     use_ref, ref_strength, up, face, bg):
            # Re-pack the per-type ControlNet images into the dict that run_txt2img expects
            ctrl_images = {"canny": canny, "pose": pose, "depth": depth, "softedge": softedge,
                           "lineart": lineart, "anime_lineart": anime_lineart, "normal": normal,
                           "mlsd": mlsd, "scribble": scribble, "seg": seg, "tile": tile}
            return run_txt2img(model_id_v, custom_model_v, prompt_v, preset_v, negative_v,
                               steps_v, cfg_v, width_v, height_v, scheduler_v, seed_v,
                               parse_lora_list(lora_sel), lora_csv, s1, s2, s3,
                               ctrl_sel, ctrl_images, use_ref, ref_strength, up, face, bg)
        btn_txt.click(
            fn=_txt2img,
            inputs=[
                model_dd, model_custom, prompt_txt, preset, negative,
                steps, cfg, width, height, scheduler, seed,
                lora_group, lora_custom, lora_s1, lora_s2, lora_s3,
                ctrl_group,
                imgs["canny"], imgs["pose"], imgs["depth"], imgs["softedge"],
                imgs["lineart"], imgs["anime_lineart"], imgs["normal"], imgs["mlsd"],
                imgs["scribble"], imgs["seg"], imgs["tile"],
                use_refiner, refine_strength,
                do_upscale, do_face, do_rembg
            ],
            outputs=[out_img_txt, out_meta_txt],
            api_name="txt2img"
        )
        btn_i2i.click(
            fn=run_img2img,
            inputs=[model_dd, model_custom, init_img, strength,
                    prompt_i2i, preset, negative, steps, cfg, scheduler, seed,
                    do_upscale, do_face, do_rembg],
            outputs=[out_img_i2i, out_meta_i2i],
            api_name="img2img"
        )
        btn_io.click(
            fn=run_inpaint_outpaint,
            inputs=[model_dd, model_custom, base_img, mask_img, mode_io, expand_px, expand_dir,
                    prompt_io, preset, negative, steps, cfg, scheduler, seed,
                    strength, do_upscale, do_face, do_rembg],
            outputs=[out_img_io, out_meta_io],
            api_name="inpaint_outpaint"
        )
gr.Markdown("ℹ️ **หมายเหตุ**: ถ้า LoRA/ControlNet/โพสต์โปรเซสบางตัวไม่มีในสภาพแวดล้อม โปรแกรมจะข้ามให้อัตโนมัติและแจ้งใน Console")
return demo
demo = build_ui()
demo.queue(max_size=8).launch()