Update app.py
app.py CHANGED
@@ -26,6 +26,18 @@ from PIL import Image
 from gradio_client import Client, handle_file
 import uuid
 
+import gradio as gr
+import spaces
+import torch
+from diffusers import AutoencoderKL, TCDScheduler
+from diffusers.models.model_loading_utils import load_state_dict
+from gradio_imageslider import ImageSlider
+from huggingface_hub import hf_hub_download
+from transformers import pipeline
+
+from controlnet_union import ControlNetModel_Union
+from pipeline_fill_sd_xl import StableDiffusionXLFillPipeline
+
 
 def clear_memory():
     """Memory cleanup function"""
@@ -119,8 +131,94 @@ try:
 except Exception as e:
     print(f"Warning: Could not move pipeline to CUDA: {str(e)}")
 
+
+#------------------------------- Image inpainting ----------------------
+
 client = Client("NabeelShar/BiRefNet_for_text_writing")
 
+MODELS = {
+    "RealVisXL V5.0 Lightning": "SG161222/RealVisXL_V5.0_Lightning",
+}
+
+config_file = hf_hub_download(
+    "xinsir/controlnet-union-sdxl-1.0",
+    filename="config_promax.json",
+)
+
+config = ControlNetModel_Union.load_config(config_file)
+controlnet_model = ControlNetModel_Union.from_config(config)
+model_file = hf_hub_download(
+    "xinsir/controlnet-union-sdxl-1.0",
+    filename="diffusion_pytorch_model_promax.safetensors",
+)
+state_dict = load_state_dict(model_file)
+model, _, _, _, _ = ControlNetModel_Union._load_pretrained_model(
+    controlnet_model, state_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
+)
+model.to(device="cuda", dtype=torch.float16)
+
+vae = AutoencoderKL.from_pretrained(
+    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+).to("cuda")
+
+pipe = StableDiffusionXLFillPipeline.from_pretrained(
+    "SG161222/RealVisXL_V5.0_Lightning",
+    torch_dtype=torch.float16,
+    vae=vae,
+    controlnet=model,
+    variant="fp16",
+).to("cuda")
+
+pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
+
+def translate_if_korean(text):
+    # Check whether the input text contains Hangul characters
+    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in text):
+        # If it contains Hangul, translate it
+        translated = translator(text)[0]['translation_text']
+        print(f"Translated prompt: {translated}")  # output for debugging
+        return translated
+    return text
+
+@spaces.GPU
+def fill_image(prompt, image, model_selection):
+    # Translate the prompt
+    translated_prompt = translate_if_korean(prompt)
+
+    (
+        prompt_embeds,
+        negative_prompt_embeds,
+        pooled_prompt_embeds,
+        negative_pooled_prompt_embeds,
+    ) = pipe.encode_prompt(translated_prompt, "cuda", True)
+
+    source = image["background"]
+    mask = image["layers"][0]
+
+    alpha_channel = mask.split()[3]
+    binary_mask = alpha_channel.point(lambda p: p > 0 and 255)
+    cnet_image = source.copy()
+    cnet_image.paste(0, (0, 0), binary_mask)
+
+    for image in pipe(
+        prompt_embeds=prompt_embeds,
+        negative_prompt_embeds=negative_prompt_embeds,
+        pooled_prompt_embeds=pooled_prompt_embeds,
+        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+        image=cnet_image,
+    ):
+        yield image, cnet_image
+
+    image = image.convert("RGBA")
+    cnet_image.paste(image, (0, 0), binary_mask)
+
+    yield source, cnet_image
+
+def clear_result():
+    return gr.update(value=None)
+
+#--------------- End of image inpainting ----------------
+
 class timer:
     def __init__(self, method_name="timed process"):
         self.method = method_name
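A note on the Hangul check in translate_if_korean: the two code-point ranges cover Hangul Compatibility Jamo (U+3131 through U+318E) and precomposed Hangul syllables (U+AC00 through U+D7A3); the translator object itself, presumably a transformers translation pipeline, is defined outside these hunks. A minimal standalone sketch of the same test (contains_korean is an illustrative name, not part of the commit):

    # Returns True if text contains Hangul jamo or precomposed syllables.
    def contains_korean(text: str) -> bool:
        return any(
            '\u3131' <= ch <= '\u318E'     # Hangul Compatibility Jamo
            or '\uAC00' <= ch <= '\uD7A3'  # precomposed Hangul syllables
            for ch in text
        )

    assert contains_korean("안녕하세요")    # Hangul -> True
    assert not contains_korean("hello")  # ASCII -> False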
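The masking step in fill_image converts the editor layer's alpha channel into a hard 0/255 mask and blacks out the region to be repainted before the image reaches the pipeline. A self-contained PIL sketch of just that step, with placeholder file names standing in for Gradio's ImageEditor output:

    from PIL import Image

    # Placeholder inputs; in app.py these come from image["background"]
    # and image["layers"][0].
    source = Image.open("source.png").convert("RGB")
    layer = Image.open("drawn_layer.png").convert("RGBA")

    alpha = layer.split()[3]                                  # alpha of the drawn strokes
    binary_mask = alpha.point(lambda p: 255 if p > 0 else 0)  # hard 0/255 mask
    cnet_image = source.copy()
    cnet_image.paste(0, (0, 0), binary_mask)                  # masked pixels become black

The commit writes the same mapping as alpha_channel.point(lambda p: p > 0 and 255), using short-circuit and, where False is treated as 0.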
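The commit imports ImageSlider and defines clear_result, but the UI wiring lies outside these hunks. Since fill_image is a generator yielding (intermediate, composite) pairs, it streams naturally into an ImageSlider output. A hypothetical Blocks sketch, with every component name assumed rather than taken from app.py:

    import gradio as gr
    from gradio_imageslider import ImageSlider

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt")
        editor = gr.ImageEditor(type="pil", image_mode="RGBA", label="Draw the region to fill")
        model_choice = gr.Dropdown(choices=list(MODELS.keys()), value="RealVisXL V5.0 Lightning")
        result = ImageSlider(label="Result")
        run = gr.Button("Fill")

        # clear_result blanks the slider first, then fill_image streams each
        # yielded (image, cnet_image) pair into it as denoising progresses.
        run.click(clear_result, outputs=result).then(
            fill_image, inputs=[prompt, editor, model_choice], outputs=result
        )

    demo.launch()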