Update app.py
app.py
CHANGED
@@ -12,6 +12,7 @@ from diffusers import (
     LCMScheduler,
     AutoPipelineForText2Image,
     DPMSolverMultistepScheduler,
+    AutoencoderKL,
 )
 from transformers import pipeline
 from diffusers.utils import load_image, make_image_grid
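The AutoencoderKL import added here is what lets the app hand a replacement VAE to the pipeline constructor. A minimal sketch of that pattern, assuming a generic SD 1.5 base model for illustration (the app itself passes its own model_default):

    import torch
    from diffusers import AutoencoderKL, StableDiffusionPipeline

    # Load a standalone VAE and pass it to the pipeline at construction time.
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # assumed base model, not from this app
        vae=vae,
        torch_dtype=torch.float16,
    ).to("cuda")
    image = pipe("a lighthouse at dawn").images[0]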
@@ -134,6 +135,7 @@ def infer(
     use_LCM_adapter=False,      # Parameter to enable the LCM adapter
     LCM_adapter=None,           # Parameter to select the LCM adapter type
     use_DDIMScheduler=False,    # Parameter to enable DDIMScheduler
+    use_Tiny_VAE=False,         # Parameter to enable Tiny VAE
     progress=gr.Progress(track_tqdm=True)
 ):
 
@@ -557,40 +559,68 @@ def infer(
 
         image = pipe_DDIMS(**params).images[0]
     else:
-        # Image generation with LoRA without ControlNet and IP_Adapter ---------------------------------------------------------------------------------------------
-
-        print('Image generation with LoRA without ControlNet and IP_Adapter')
-
-        # Initialize ControlNet
-        controlnet_model_path = "lllyasviel/sd-controlnet-openpose"
-        controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch_dtype)
-
-        generator = torch.Generator(device).manual_seed(seed)
-
-        if model != model_default:
-            pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype).to(device)
-            prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
-            negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
-            prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
-        else:
-            pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
-            pipe = pipe_default
-            prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
-            negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
-            prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
-            pipe.fuse_lora(lora_scale=lora_scale)
-
-        params = {
-            'prompt_embeds': prompt_embeds,
-            'negative_prompt_embeds': negative_prompt_embeds,
-            'guidance_scale': guidance_scale,
-            'num_inference_steps': num_inference_steps,
-            'width': width,
-            'height': height,
-            'generator': generator,
-        }
-
-        image = pipe(**params).images[0]
+        # Image generation with Tiny_VAE ---------------------------------------------------------------------------------------------
+
+        if use_Tiny_VAE:
+
+            print('use_Tiny_VAE = ', use_Tiny_VAE)
+
+            generator = torch.Generator(device).manual_seed(seed)
+
+            vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch_dtype)
+
+            pipe_Tiny_VAE = StableDiffusionPipeline.from_pretrained(model_default, vae=vae, torch_dtype=torch_dtype).to(device)
+
+            prompt_embeds = long_prompt_encoder(prompt, pipe_Tiny_VAE.tokenizer, pipe_Tiny_VAE.text_encoder)
+            negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_Tiny_VAE.tokenizer, pipe_Tiny_VAE.text_encoder)
+            prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+
+            params = {
+                'prompt_embeds': prompt_embeds,
+                'negative_prompt_embeds': negative_prompt_embeds,
+                'guidance_scale': guidance_scale,
+                'num_inference_steps': num_inference_steps,
+                'width': width,
+                'height': height,
+                'generator': generator,
+            }
+
+            image = pipe_Tiny_VAE(**params).images[0]
+        else:
+            # Image generation with LoRA without ControlNet and IP_Adapter ---------------------------------------------------------------------------------------------
+
+            print('Image generation with LoRA without ControlNet and IP_Adapter')
+
+            # Initialize ControlNet
+            controlnet_model_path = "lllyasviel/sd-controlnet-openpose"
+            controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch_dtype)
+
+            generator = torch.Generator(device).manual_seed(seed)
+
+            if model != model_default:
+                pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype).to(device)
+                prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
+                negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+                prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+            else:
+                pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
+                pipe = pipe_default
+                prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
+                negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+                prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+                pipe.fuse_lora(lora_scale=lora_scale)
+
+            params = {
+                'prompt_embeds': prompt_embeds,
+                'negative_prompt_embeds': negative_prompt_embeds,
+                'guidance_scale': guidance_scale,
+                'num_inference_steps': num_inference_steps,
+                'width': width,
+                'height': height,
+                'generator': generator,
+            }
+
+            image = pipe(**params).images[0]
 
         # If background removal is selected
         if remove_bg:
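A note on naming: despite use_Tiny_VAE, the branch above loads stabilityai/sd-vae-ft-mse, which is a full-size fine-tuned AutoencoderKL rather than a distilled "tiny" autoencoder. If a genuinely tiny VAE is the goal, diffusers ships AutoencoderTiny with the madebyollin/taesd weights; a hedged sketch of that swap (model IDs and prompt here are illustrative, not taken from this app):

    import torch
    from diffusers import AutoencoderTiny, StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    )
    # TAESD is a distilled VAE: much faster latent decoding at a small
    # quality cost, useful for fast previews.
    pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = pipe("an astronaut riding a horse").images[0]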
@@ -847,6 +877,15 @@ with gr.Blocks(css=css) as demo:
             interactive=True
         )
 
+        # Tiny_VAE -----------------------------------------------------------------------------------------------------
+        # Checkbox for Tiny_VAE
+        with gr.Blocks():
+            use_Tiny_VAE = gr.Checkbox(
+                label="Use Tiny_VAE",
+                value=False,
+                interactive=True
+            )
+
         # Background removal ------------------------------------------------------------------------------------------
         # Checkbox for background removal
         with gr.Blocks():
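The new checkbox follows the same pattern as the existing toggles: a gr.Checkbox whose boolean state is later forwarded to infer through the event's inputs list. A self-contained sketch of that wiring (names are illustrative):

    import gradio as gr

    def infer(use_tiny_vae):
        # The checkbox state arrives as a plain bool.
        return f"use_tiny_vae = {use_tiny_vae}"

    with gr.Blocks() as demo:
        use_tiny_vae = gr.Checkbox(label="Use Tiny_VAE", value=False, interactive=True)
        run_btn = gr.Button("Run")
        out = gr.Textbox()
        run_btn.click(fn=infer, inputs=[use_tiny_vae], outputs=[out])

    demo.launch()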
@@ -893,6 +932,7 @@ with gr.Blocks(css=css) as demo:
             use_LCM_adapter,        # Parameter to enable the LCM adapter
             LCM_adapter,            # Parameter to select the LCM adapter type
             use_DDIMScheduler,      # Parameter to enable DDIMScheduler
+            use_Tiny_VAE,           # Parameter to enable Tiny VAE
         ],
         outputs=[result],
     )
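Since Gradio binds the inputs list to the target function's parameters by position, the new use_Tiny_VAE entry here must occupy the same slot as the use_Tiny_VAE parameter added to infer's signature above; both are appended directly after use_DDIMScheduler, so the ordering stays consistent.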