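"""Gradio Space: Stable Diffusion text-to-image with an optional LoRA adapter,
ControlNet conditioning, IP-Adapter image prompting, and background removal."""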
import gradio as gr
import numpy as np
import random
import cv2
import PIL
from controlnet_aux import OpenposeDetector
from transformers import pipeline
from rembg import remove
from diffusers.models import AutoencoderKL
#import spaces #[uncomment to use ZeroGPU]
from diffusers import (
    DiffusionPipeline, StableDiffusionPipeline,
    StableDiffusionControlNetPipeline, ControlNetModel,
    DPMSolverMultistepScheduler
)
from peft import PeftModel, LoraConfig
import torch
import gc
from huggingface_hub import HfApi
# Create a Hugging Face Hub API client.
api = HfApi()
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
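# Models selectable in the UI; the first entry is a LoRA adapter that is
# applied on top of Stable Diffusion 1.5 (see is_lora() and infer() below).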
model_names = [
    "sand74/changpu_lora",
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    "stabilityai/sd-turbo",
]
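# --- ControlNet preprocessors: turn the uploaded image into a conditioning map ---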
def get_canny_image(image):
    """Detect Canny edges and return them as a 3-channel PIL image."""
    image = np.array(image)
    low_threshold = 100
    high_threshold = 200
    image = cv2.Canny(image, low_threshold, high_threshold)
    image = image[:, :, None]
    image = np.concatenate([image, image, image], axis=2)
    return PIL.Image.fromarray(image)
def get_openpose_image(image):
    """Extract a human-pose map with the OpenPose detector."""
    pil_image = PIL.Image.fromarray(image)
    processor = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
    pil_image = processor(pil_image, hand_and_face=False)
    return pil_image
def get_depth_image(image):
    """Estimate a depth map and return it as a 3-channel PIL image."""
    pil_image = PIL.Image.fromarray(image)
    depth_estimator = pipeline('depth-estimation')
    pil_image = depth_estimator(pil_image)['depth']
    pil_image = np.array(pil_image)
    pil_image = pil_image[:, :, None]
    pil_image = np.concatenate([pil_image, pil_image, pil_image], axis=2)
    return PIL.Image.fromarray(pil_image)
# Map each ControlNet checkpoint to its matching preprocessor.
control_net_modes = {
    "lllyasviel/sd-controlnet-canny": get_canny_image,
    "lllyasviel/control_v11p_sd15_openpose": get_openpose_image,
    "lllyasviel/control_v11f1p_sd15_depth": get_depth_image,
}
def preview_control_net_image(controlnet_image, controlnet_mode):
    # Skip the preview until an image has actually been uploaded.
    if controlnet_image is None:
        return None
    return control_net_modes[controlnet_mode](controlnet_image)
def is_lora(model_name):
    return model_name == "sand74/changpu_lora"
def remove_background(image):
    image = remove(image)
    return image
#@spaces.GPU #[uncomment to use ZeroGPU]
def infer(
    model_id,
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    lora_scale,
    use_controlnet=False,
    controlnet_image=None,
    controlnet_strength=None,
    controlnet_mode=None,
    use_ip_adapter=False,
    ip_adapter_image=None,
    ip_adapter_scale=None,
    rm_background=True,
    progress=gr.Progress(track_tqdm=True),
):
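    """Build the requested pipeline (base model, ControlNet, LoRA, IP-Adapter),
    run inference, optionally strip the background, and return (image, seed)."""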
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    pipe_params = dict(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    )
    # The LoRA is trained on top of SD 1.5, so use that as the base model.
    if is_lora(model_id):
        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
    else:
        base_model_id = model_id
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch_dtype)
    if not use_controlnet:
        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch_dtype, vae=vae)
    else:
        controlnet_image = cv2.resize(controlnet_image, (width, height), interpolation=cv2.INTER_AREA)
        controlnet = ControlNetModel.from_pretrained(
            controlnet_mode, torch_dtype=torch_dtype)
        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            base_model_id,
            torch_dtype=torch_dtype,
            controlnet=controlnet,
            vae=vae)  # pass the fine-tuned VAE here too; it was loaded but unused in this branch
        pipe_params["image"] = control_net_modes[controlnet_mode](controlnet_image)
        pipe_params["controlnet_conditioning_scale"] = controlnet_strength
    if is_lora(model_id):
        # Wrap the UNet with the PEFT LoRA adapter and set its weight.
        lora = PeftModel.from_pretrained(pipe.unet, model_id, adapter_name="panda_hqwh")
        pipe.set_adapters(["panda_hqwh"], adapter_weights=[lora_scale])
    if use_ip_adapter:
        # Condition generation on a reference image via IP-Adapter.
        ip_adapter_image = cv2.resize(ip_adapter_image, (width, height), interpolation=cv2.INTER_AREA)
        pipe.load_ip_adapter(
            "h94/IP-Adapter",
            subfolder="models",
            weight_name="ip-adapter-plus_sd15.bin",
        )
        pipe_params["ip_adapter_image"] = ip_adapter_image
        pipe.set_ip_adapter_scale(ip_adapter_scale)
    pipe.safety_checker = None
    if torch_dtype in (torch.float16, torch.bfloat16):
        pipe.unet.half()
        pipe.text_encoder.half()
    pipe.to(device)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    image = pipe(**pipe_params).images[0]
    if rm_background:
        image = remove_background(image)
    return image, seed
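# Example prompts; "panda_hqwh" appears to be the LoRA's trigger token.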
examples = [
    "Sad panda_hqwh drinking beer",
    "panda_hqwh walk in a field",
    "panda_hqwh play with ball",
]
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""
result = None
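# Build the UI (layout follows the Hugging Face text-to-image Space template).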
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        title = gr.Markdown(" # Text-to-Image Gradio Template")
        model_id = gr.Dropdown(model_names, value=model_names[0], label="Select model")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
                value="Sad panda_hqwh drinking beer",
            )
            run_button = gr.Button("Run", scale=0, variant="primary")
        result = gr.Image(label="Result", show_label=False)
        rm_background = gr.Checkbox(label="Remove background?", scale=1, value=True)
        with gr.Group(visible=True) as lora_section:
            title = gr.Markdown(" ### LoRA section")
            with gr.Row():
                lora_scale = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    value=0.9,
                    step=0.1,
                    label="LoRA Strength"
                )
        # Show or hide the LoRA section depending on the selected model.
        model_id.change(
            fn=lambda x: gr.update(visible=is_lora(x)),
            inputs=model_id,
            outputs=lora_section
        )
        with gr.Group():
            title = gr.Markdown(" ### ControlNet section")
            with gr.Column():
                use_controlnet = gr.Checkbox(label="Use ControlNet", value=False)
                # ControlNet controls (hidden initially).
                with gr.Column(visible=False) as controlnet_section:
                    controlnet_strength = gr.Slider(
                        minimum=0.1, maximum=1.0, value=0.8, step=0.1,
                        label="ControlNet Strength",
                        interactive=True
                    )
                    controlnet_mode = gr.Dropdown(
                        list(control_net_modes.keys()),
                        value=next(iter(control_net_modes.keys())),
                        label="ControlNet mode",
                        interactive=True
                    )
                    with gr.Row():
                        controlnet_image = gr.Image(
                            label="ControlNet image",
                            interactive=True
                        )
                        controlnet_view = gr.Image(
                            label="ControlNet preview",
                            interactive=False
                        )
                    controlnet_image.change(
                        fn=preview_control_net_image,
                        inputs=[controlnet_image, controlnet_mode],
                        outputs=controlnet_view
                    )
                    controlnet_mode.change(
                        fn=preview_control_net_image,
                        inputs=[controlnet_image, controlnet_mode],
                        outputs=controlnet_view
                    )
        # Show or hide the ControlNet section depending on the checkbox.
        use_controlnet.change(
            fn=lambda x: gr.update(visible=x),
            inputs=use_controlnet,
            outputs=controlnet_section
        )
        with gr.Group():
            title = gr.Markdown(" ### IP-adapter section")
            with gr.Column():
                use_ip_adapter = gr.Checkbox(label="Use IP-adapter", value=False)
                # IP-adapter controls (hidden initially).
                with gr.Column(visible=False) as ip_adapter_section:
                    ip_adapter_scale = gr.Slider(
                        minimum=0.1, maximum=1.0, value=0.5, step=0.1,
                        label="IP-adapter Scale",
                        interactive=True
                    )
                    ip_adapter_image = gr.Image(
                        label="IP-adapter image",
                    )
        # Show or hide the IP-adapter section depending on the checkbox.
        use_ip_adapter.change(
            fn=lambda x: gr.update(visible=x),
            inputs=use_ip_adapter,
            outputs=ip_adapter_section
        )
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=True,
                value="low quality, blurry, unfinished, text",
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=42,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,  # Replace with defaults that work for your model
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,  # Replace with defaults that work for your model
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0,
                    maximum=20,
                    step=1,
                    value=7,  # Replace with defaults that work for your model
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=500,
                    step=1,
                    value=30,  # Replace with defaults that work for your model
                )
        gr.Examples(examples=examples, inputs=[prompt])
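    # Wire both the Run button and prompt submission to the inference function.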
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            model_id,
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
            use_controlnet,
            controlnet_image,
            controlnet_strength,
            controlnet_mode,
            use_ip_adapter,
            ip_adapter_image,
            ip_adapter_scale,
            rm_background,
        ],
        outputs=[result, seed],
    )
if __name__ == "__main__":
    demo.launch()