Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -11,7 +11,11 @@ from diffusers import (
     DDIMScheduler,
     UNet2DConditionModel,
 )
-from modules.
+from modules.model_pww import (
+    CrossAttnProcessor,
+    StableDiffusionPipeline,
+    load_lora_attn_procs,
+)
 from torchvision import transforms
 from transformers import CLIPTokenizer, CLIPTextModel
 from PIL import Image
@@ -20,11 +24,15 @@ from safetensors.torch import load_file
 import modules.safe as _
 
 models = [
-    ("
+    ("AbyssOrangeMix2", "Korakoe/AbyssOrangeMix2-HF"),
+    ("Anything 4.0", "andite/anything-v4.0"),
+    ("Open Journey", "prompthero/openjourney"),
+    ("Basil Mix", "nuigurumi/basil_mix"),
+    ("ACertainModel", "JosephusCheung/ACertainModel"),
 ]
 
-base_name =
-
+base_name, base_model = models[0]
+clip_skip = 2
 
 samplers_k_diffusion = [
     ("Euler a", "sample_euler_ancestral", {}),
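Each entry in the new `models` list pairs a display name with a Hugging Face repo id, and the line after the list unpacks the first entry as the default. A worked example of what that unpacking yields for the list above:

```python
models = [
    ("AbyssOrangeMix2", "Korakoe/AbyssOrangeMix2-HF"),
    ("Anything 4.0", "andite/anything-v4.0"),
]

# Unpacking the first (name, repo_id) pair selects the default model:
base_name, base_model = models[0]
# base_name  == "AbyssOrangeMix2"
# base_model == "Korakoe/AbyssOrangeMix2-HF"
```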
@@ -36,24 +44,20 @@ samplers_k_diffusion = [
     ("DPM++ 2S a", "sample_dpmpp_2s_ancestral", {}),
     ("DPM++ 2M", "sample_dpmpp_2m", {}),
     ("DPM++ SDE", "sample_dpmpp_sde", {}),
-    ("DPM fast", "sample_dpm_fast", {}),
-    ("DPM adaptive", "sample_dpm_adaptive", {}),
     ("LMS Karras", "sample_lms", {"scheduler": "karras"}),
-    (
-        "DPM2 Karras",
-        "sample_dpm_2",
-        {"scheduler": "karras", "discard_next_to_last_sigma": True},
-    ),
-    (
-        "DPM2 a Karras",
-        "sample_dpm_2_ancestral",
-        {"scheduler": "karras", "discard_next_to_last_sigma": True},
-    ),
+    ("DPM2 Karras", "sample_dpm_2", {"scheduler": "karras", "discard_next_to_last_sigma": True}),
+    ("DPM2 a Karras", "sample_dpm_2_ancestral", {"scheduler": "karras", "discard_next_to_last_sigma": True}),
     ("DPM++ 2S a Karras", "sample_dpmpp_2s_ancestral", {"scheduler": "karras"}),
     ("DPM++ 2M Karras", "sample_dpmpp_2m", {"scheduler": "karras"}),
     ("DPM++ SDE Karras", "sample_dpmpp_sde", {"scheduler": "karras"}),
 ]
 
+# samplers_diffusers = [
+#     ("DDIMScheduler", "diffusers.schedulers.DDIMScheduler", {})
+#     ("DDPMScheduler", "diffusers.schedulers.DDPMScheduler", {})
+#     ("DEISMultistepScheduler", "diffusers.schedulers.DEISMultistepScheduler", {})
+# ]
+
 start_time = time.time()
 
 scheduler = DDIMScheduler.from_pretrained(
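Each sampler entry above pairs a UI label with the name of a sampling function in k-diffusion's `sampling` module plus extra options. A minimal sketch of how such a tuple can be resolved at call time (the `resolve_sampler` helper is illustrative, not part of the commit):

```python
import k_diffusion.sampling as K

samplers_k_diffusion = [
    ("Euler a", "sample_euler_ancestral", {}),
    ("DPM++ 2M Karras", "sample_dpmpp_2m", {"scheduler": "karras"}),
]

def resolve_sampler(label):
    # Map a UI label to the actual sampling function via getattr.
    for name, funcname, options in samplers_k_diffusion:
        if name == label:
            return getattr(K, funcname), options
    raise KeyError(f"unknown sampler: {label}")
```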
@@ -62,22 +66,22 @@ scheduler = DDIMScheduler.from_pretrained(
 )
 vae = AutoencoderKL.from_pretrained(
     "stabilityai/sd-vae-ft-ema",
-    torch_dtype=torch.
+    torch_dtype=torch.float16
 )
 text_encoder = CLIPTextModel.from_pretrained(
     base_model,
     subfolder="text_encoder",
-    torch_dtype=torch.
+    torch_dtype=torch.float16,
 )
 tokenizer = CLIPTokenizer.from_pretrained(
     base_model,
     subfolder="tokenizer",
-    torch_dtype=torch.
+    torch_dtype=torch.float16,
 )
 unet = UNet2DConditionModel.from_pretrained(
     base_model,
     subfolder="unet",
-    torch_dtype=torch.
+    torch_dtype=torch.float16,
 )
 pipe = StableDiffusionPipeline(
     text_encoder=text_encoder,
@@ -88,15 +92,21 @@ pipe = StableDiffusionPipeline(
 )
 
 unet.set_attn_processor(CrossAttnProcessor)
+pipe.set_clip_skip(clip_skip)
 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
-
-def get_model_list():
-    return models
-
 
-unet_cache = dict()
 
+def get_model_list():
+    model_available = []
+    for model in models:
+        if Path(model[1]).is_dir():
+            model_available.append(model)
+    return model_available
+
+unet_cache = {
+    base_name: unet
+}
 
 def get_model(name):
     keys = [k[0] for k in models]
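`get_model_list` now keeps only models whose repo id also exists as a local directory, i.e. whose weights are already on disk; this relies on `Path` from `pathlib` being imported elsewhere in app.py, which this diff does not show. The same filter in isolation (the `get_available` name is illustrative):

```python
from pathlib import Path

models = [("Anything 4.0", "andite/anything-v4.0")]

def get_available(models):
    # Keep only entries whose second field resolves to a local directory.
    return [m for m in models if Path(m[1]).is_dir()]
```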
@@ -107,15 +117,14 @@ def get_model(name):
         unet = UNet2DConditionModel.from_pretrained(
             models[keys.index(name)][1],
             subfolder="unet",
-            torch_dtype=torch.
+            torch_dtype=torch.float16,
         )
         unet_cache[name] = unet
-
+
     g_unet = unet_cache[name]
     g_unet.set_attn_processor(None)
     return g_unet
 
-
 def error_str(error, title="Error"):
     return (
         f"""#### {title}
@@ -132,7 +141,7 @@ def restore_all():
     global te_base_weight, tokenizer
     text_encoder.get_input_embeddings().weight.data = te_base_weight
     tokenizer = CLIPTokenizer.from_pretrained(
-
+        base_model,
         subfolder="tokenizer",
         torch_dtype=torch.float16,
     )
@@ -163,11 +172,8 @@ def inference(
     global pipe, unet, tokenizer, text_encoder
     if seed is None or seed == 0:
         seed = random.randint(0, 2147483647)
-
-
-    else:
-        generator = torch.Generator().manual_seed(int(seed))
-
+    generator = torch.Generator("cuda").manual_seed(int(seed))
+
     local_unet = get_model(model)
     if lora_state is not None and lora_state != "":
         load_lora_attn_procs(lora_state, local_unet, lora_scale)
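The rewritten seeding always builds a CUDA-pinned `torch.Generator`, even though `pipe.to("cuda")` earlier in the file is conditional, so this line would fail on a CPU-only host. A device-aware variant, as an illustrative sketch rather than part of the commit:

```python
import torch

def make_generator(seed: int) -> torch.Generator:
    # Pinning the generator to a device makes sampling reproducible for
    # a given seed; fall back to CPU when CUDA is absent.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.Generator(device).manual_seed(int(seed))
```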
@@ -189,15 +195,16 @@
         loaded_learned_embeds = load_file(file, device="cpu")
         loaded_learned_embeds = loaded_learned_embeds["string_to_param"]["*"]
         added_length = tokenizer.add_tokens(name)
-
+
         assert added_length == loaded_learned_embeds.shape[0]
         delta_weight.append(loaded_learned_embeds)
 
     delta_weight = torch.cat(delta_weight, dim=0)
     text_encoder.resize_token_embeddings(len(tokenizer))
-    text_encoder.get_input_embeddings().weight.data[
+    text_encoder.get_input_embeddings().weight.data[
+        -delta_weight.shape[0] :
+    ] = delta_weight
 
-
     config = {
         "negative_prompt": neg_prompt,
         "num_inference_steps": int(steps),
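The embedding hunk above grows the tokenizer, resizes the text encoder's embedding matrix, and writes the learned vectors into the freshly added tail rows. The same steps in isolation, as a sketch against the stock CLIP checkpoint (the random tensor stands in for a loaded embedding file):

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

repo = "openai/clip-vit-large-patch14"
tokenizer = CLIPTokenizer.from_pretrained(repo)
text_encoder = CLIPTextModel.from_pretrained(repo)

learned = torch.randn(1, 768)              # stand-in for load_file(...)["string_to_param"]["*"]
added = tokenizer.add_tokens("<concept>")  # returns how many tokens were actually new
assert added == learned.shape[0]

# Grow the embedding matrix, then overwrite the new tail rows.
text_encoder.resize_token_embeddings(len(tokenizer))
with torch.no_grad():
    text_encoder.get_input_embeddings().weight.data[-learned.shape[0]:] = learned
```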
@@ -275,6 +282,9 @@ def apply_new_res(w, h, state):
 
 
 def detect_text(text, state, width, height):
+
+    if text is None or text == "":
+        return None, None, None, None
 
     t = text.split(",")
     new_state = {}
@@ -287,11 +297,13 @@ def detect_text(text, state, width, height):
             new_state[item] = {
                 "map": state[item]["map"],
                 "weight": state[item]["weight"],
+                "mask_outsides": state[item]["weight"],
             }
         else:
             new_state[item] = {
                 "map": None,
                 "weight": 0.5,
+                "mask_outsides": False
             }
     update = gr.Radio.update(choices=[key for key in new_state.keys()], value=None)
     update_img = gr.update(value=create_mixed_img("", new_state, width, height))
@@ -314,28 +326,43 @@ def resize(img, w, h):
 def switch_canvas(entry, state, width, height):
     if entry == None:
         return None, 0.5, create_mixed_img("", state, width, height)
+
     return (
         gr.update(value=None, interactive=True),
-        gr.update(value=state[entry]["weight"]),
+        gr.update(value=state[entry]["weight"] if entry in state else 0.5),
+        gr.update(value=state[entry]["mask_outsides"] if entry in state else False),
         create_mixed_img(entry, state, width, height),
     )
 
 
 def apply_canvas(selected, draw, state, w, h):
-
-
+    if selected in state:
+        w, h = int(w), int(h)
+        state[selected]["map"] = resize(draw, w, h)
     return state, gr.Image.update(value=create_mixed_img(selected, state, w, h))
 
 
 def apply_weight(selected, weight, state):
-
+    if selected in state:
+        state[selected]["weight"] = weight
+    return state
+
+
+def apply_option(selected, mask, state):
+    if selected in state:
+        state[selected]["mask_outsides"] = mask
     return state
 
 
 # sp2, radio, width, height, global_stats
-def apply_image(image, selected, w, h, strgength, state):
-    if selected
-        state[selected] = {
+def apply_image(image, selected, w, h, strgength, mask, state):
+    if selected in state:
+        state[selected] = {
+            "map": resize(image, w, h),
+            "weight": strgength,
+            "mask_outsides": mask
+        }
+
     return state, gr.Image.update(value=create_mixed_img(selected, state, w, h))
 
 
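With this hunk, every tracked token in the shared state carries three fields, and `apply_image` now fills all of them at once. The shape of one entry, with illustrative values:

```python
# One entry in the region state dict after apply_image runs:
state = {
    "1girl": {
        "map": None,             # resized canvas drawing or uploaded image
        "weight": 0.5,           # "Token strength" slider value
        "mask_outsides": False,  # the new "Mask other areas" checkbox
    }
}
```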
@@ -356,11 +383,24 @@ def add_net(files, ti_state, lora_state):
     else:
         ti_state[stripedname] = file.name
 
-    return
+    return (
+        ti_state,
+        lora_state,
+        gr.Text.update(f"{[key for key in ti_state.keys()]}"),
+        gr.Text.update(f"{lora_state}"),
+        gr.Files.update(value=None),
+    )
+
 
 # [ti_state, lora_state, ti_vals, lora_vals, uploads]
 def clean_states(ti_state, lora_state):
-    return
+    return (
+        dict(),
+        None,
+        gr.Text.update(f""),
+        gr.Text.update(f""),
+        gr.File.update(value=None),
+    )
 
 
 latent_upscale_modes = {
@@ -564,15 +604,15 @@ with gr.Blocks(css=css) as demo:
             with gr.Row():
                 with gr.Column(scale=90):
                     ti_vals = gr.Text(label="Loaded embeddings")
-
+
             with gr.Row():
                 with gr.Column(scale=90):
                     lora_vals = gr.Text(label="Loaded loras")
 
             with gr.Row():
-
+
                 uploads = gr.Files(label="Upload new embeddings/lora")
-
+
                 with gr.Column():
                     lora_scale = gr.Slider(
                         label="Lora scale",
@@ -583,12 +623,16 @@ with gr.Blocks(css=css) as demo:
                     )
                 btn = gr.Button(value="Upload")
                 btn_del = gr.Button(value="Reset")
-
+
         btn.click(
-            add_net,
+            add_net,
+            inputs=[uploads, ti_state, lora_state],
+            outputs=[ti_state, lora_state, ti_vals, lora_vals, uploads],
         )
         btn_del.click(
-            clean_states,
+            clean_states,
+            inputs=[ti_state, lora_state],
+            outputs=[ti_state, lora_state, ti_vals, lora_vals, uploads],
         )
 
     # error_output = gr.Markdown()
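The wiring above follows Gradio's usual pattern: handlers receive `gr.State` values as plain Python objects and return updated values matching `outputs` positionally. A minimal self-contained sketch of that round-trip (names are illustrative, not from the commit):

```python
import gradio as gr

with gr.Blocks() as demo:
    state = gr.State({})          # like ti_state / lora_state above
    shown = gr.Text(label="Keys")
    btn = gr.Button(value="Add")

    def add_entry(s):
        # Mutate the state and return it so Gradio stores the new value.
        s[f"item{len(s)}"] = len(s)
        return s, f"{list(s.keys())}"

    btn.click(add_entry, inputs=[state], outputs=[state, shown])
```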
@@ -653,6 +697,11 @@ with gr.Blocks(css=css) as demo:
                         interactive=False,
                     )
 
+                    mask_outsides = gr.Checkbox(
+                        label="Mask other areas",
+                        value=False
+                    )
+
                     strength = gr.Slider(
                         label="Token strength",
                         minimum=0,
@@ -660,6 +709,7 @@ with gr.Blocks(css=css) as demo:
                         step=0.01,
                         value=0.5,
                     )
+
 
                 sk_update.click(
                     detect_text,
@@ -669,7 +719,7 @@ with gr.Blocks(css=css) as demo:
                 radio.change(
                     switch_canvas,
                     inputs=[radio, global_stats, width, height],
-                    outputs=[sp, strength, rendered],
+                    outputs=[sp, strength, mask_outsides, rendered],
                 )
                 sp.edit(
                     apply_canvas,
@@ -681,6 +731,11 @@ with gr.Blocks(css=css) as demo:
                     inputs=[radio, strength, global_stats],
                     outputs=[global_stats],
                 )
+                mask_outsides.change(
+                    apply_option,
+                    inputs=[radio, mask_outsides, global_stats],
+                    outputs=[global_stats],
+                )
 
                 with gr.Tab("UploadFile"):
 
@@ -689,6 +744,11 @@ with gr.Blocks(css=css) as demo:
                         source="upload",
                         shape=(512, 512),
                     )
+
+                    mask_outsides2 = gr.Checkbox(
+                        label="Mask other areas",
+                        value=False
+                    )
 
                     strength2 = gr.Slider(
                         label="Token strength",
@@ -701,7 +761,7 @@ with gr.Blocks(css=css) as demo:
                     apply_style = gr.Button(value="Apply")
                     apply_style.click(
                         apply_image,
-                        inputs=[sp2, radio, width, height, strength2, global_stats],
+                        inputs=[sp2, radio, width, height, strength2, mask_outsides2, global_stats],
                         outputs=[global_stats, rendered],
                     )
 
@@ -740,7 +800,7 @@ with gr.Blocks(css=css) as demo:
         ti_state,
         model,
         lora_state,
-        lora_scale
+        lora_scale,
     ]
     outputs = [image_out]
     prompt.submit(inference, inputs=inputs, outputs=outputs)