Update app.py

app.py CHANGED
```diff
@@ -9,16 +9,12 @@ import torchvision.transforms.functional as TF
 
 from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
 from diffusers import DDIMScheduler, EulerAncestralDiscreteScheduler
-from controlnet_aux import PidiNetDetector, HEDdetector
+from controlnet_aux import OpenposeDetector, MidasDetector, CannyDetector, LineartDetector, LineartAnimeDetector, MLSDdetector, HEDdetector, PidiNetDetector, NormalBaeDetector, SamDetector
 from diffusers.utils import load_image
 from huggingface_hub import HfApi
 from pathlib import Path
 from PIL import Image, ImageOps
-import torch
-import numpy as np
 import cv2
-import os
-import random
 import spaces
 from gradio_imageslider import ImageSlider
 
```
```diff
@@ -27,6 +23,7 @@ function refresh() {
     const url = new URL(window.location);
 }
 """
+
 def nms(x, t, s):
     x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
 
```
```diff
@@ -78,72 +75,38 @@ style_list = [
         "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
         "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
     },
-    {
-        "name": "3D Model",
-        "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
-        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
-    },
-    {
-        "name": "Anime",
-        "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
-        "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
-    },
-    {
-        "name": "Digital Art",
-        "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
-        "negative_prompt": "photo, photorealistic, realism, ugly",
-    },
-    {
-        "name": "Photographic",
-        "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
-        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
-    },
-    {
-        "name": "Pixel art",
-        "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
-        "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
-    },
-    {
-        "name": "Fantasy art",
-        "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
-        "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
-    },
-    {
-        "name": "Neonpunk",
-        "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
-        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
-    },
-    {
-        "name": "Manga",
-        "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
-        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
-    },
+    # ... (other styles)
 ]
 
 styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
 STYLE_NAMES = list(styles.keys())
 DEFAULT_STYLE_NAME = "(No style)"
 
-
 def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
     p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
     return p.replace("{prompt}", positive), n + negative
 
-
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")
 
+# Initialize all detectors
+openpose_detector = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
+midas_detector = MidasDetector.from_pretrained('lllyasviel/ControlNet')
+canny_detector = CannyDetector()
+lineart_detector = LineartDetector.from_pretrained("lllyasviel/Annotators")
+anime_lineart_detector = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
+mlsd_detector = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
+hed_detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
+pidi_detector = PidiNetDetector.from_pretrained('lllyasviel/Annotators')
+normal_detector = NormalBaeDetector.from_pretrained('lllyasviel/Annotators')
+sam_detector = SamDetector.from_pretrained('ybelkada/segment-anything', subfolder='checkpoints')
 
 controlnet = ControlNetModel.from_pretrained(
     "xinsir/controlnet-union-sdxl-1.0",
     torch_dtype=torch.float16
 )
-controlnet_canny = ControlNetModel.from_pretrained(
-    "xinsir/controlnet-canny-sdxl-1.0",
-    torch_dtype=torch.float16
-)
-# when test with other base model, you need to change the vae also.
+
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
 pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
```
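Alongside the style-list truncation, this hunk swaps the dedicated Canny ControlNet for the single xinsir/controlnet-union-sdxl-1.0 checkpoint and pre-loads one controlnet_aux detector per condition type. The detectors share a simple call pattern, PIL image in, PIL control image out; a minimal sketch, assuming controlnet_aux is installed and using a hypothetical input file:

```python
from controlnet_aux import CannyDetector, HEDdetector
from PIL import Image

img = Image.open("input.png").convert("RGB")  # hypothetical input file

canny = CannyDetector()  # stateless; no pretrained weights needed
hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")  # downloads weights on first use

canny_map = canny(img)  # PIL image: white Canny edges on black
hed_map = hed(img)      # PIL image: soft HED edge map
```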
```diff
@@ -154,36 +117,8 @@ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     scheduler=eulera_scheduler,
 )
 pipe.to(device)
-# Load model.
-pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0",
-    controlnet=controlnet_canny,
-    vae=vae,
-    safety_checker=None,
-    torch_dtype=torch.float16,
-    scheduler=eulera_scheduler,
-)
-
-pipe_canny.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
-processor = HEDdetector.from_pretrained('lllyasviel/Annotators')
-def nms(x, t, s):
-    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
-
-    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
-    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
-    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
-    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
-
-    y = np.zeros_like(x)
-
-    for f in [f1, f2, f3, f4]:
-        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
-
-    z = np.zeros_like(y, dtype=np.uint8)
-    z[y > t] = 255
-    return z
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
```
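The deleted block removes the standalone pipe_canny pipeline and a second copy of the nms helper, which duplicated the definition kept near the top of the file. For reference, that helper thins an edge map by blurring it, keeping only pixels that survive dilation along four line directions, and binarizing; a self-contained sketch, with the function reproduced verbatim from the file and stand-in data in place of a real HED output:

```python
import cv2
import numpy as np

def nms(x, t, s):
    # Blur the edge map, keep only pixels that are local maxima under
    # dilation along four line directions (horizontal, vertical, and
    # the two diagonals), then binarize at threshold t.
    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)

    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)

    y = np.zeros_like(x)

    for f in [f1, f2, f3, f4]:
        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)

    z = np.zeros_like(y, dtype=np.uint8)
    z[y > t] = 255
    return z

# Stand-in data: a random array in place of a real HED edge map
edge_map = (np.random.rand(64, 64) * 255).astype(np.float32)
scribble = nms(edge_map, t=127, s=3.0)  # binary {0, 255} thinned edges
```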
```diff
@@ -200,71 +135,116 @@ def run(
     guidance_scale: float = 5,
     controlnet_conditioning_scale: float = 1.0,
     seed: int = 0,
-    use_hed: bool = False,
+    use_openpose: bool = False,
+    use_depth: bool = False,
     use_canny: bool = False,
+    use_lineart: bool = False,
+    use_anime_lineart: bool = False,
+    use_mlsd: bool = False,
+    use_scribble: bool = False,
+    use_hed: bool = False,
+    use_softedge: bool = False,
+    use_teed: bool = False,
+    use_segment: bool = False,
+    use_normal: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
-    # Get the composite image from the EditorValue dict
     composite_image = image['composite']
     width, height = composite_image.size
 
-    # Calculate new dimensions to fit within 1024x1024 while maintaining aspect ratio
     max_size = 1024
     ratio = min(max_size / width, max_size / height)
     new_width = int(width * ratio)
     new_height = int(height * ratio)
 
-    # Resize the image
     resized_image = composite_image.resize((new_width, new_height), Image.LANCZOS)
 
+    control_images = []
+    control_type = [0, 0, 0, 0, 0, 0]  # Initialize control type
+
+    if use_openpose:
+        openpose_image = openpose_detector(resized_image)
+        control_images.append(openpose_image)
+        control_type[0] = 1
+
+    if use_depth:
+        depth_image = midas_detector(resized_image)
+        control_images.append(depth_image)
+        control_type[1] = 1
+
     if use_canny:
-        # [16 deleted lines lost in the page extraction]
+        canny_image = canny_detector(resized_image)
+        control_images.append(canny_image)
+        control_type[3] = 1
+
+    if use_lineart:
+        lineart_image = lineart_detector(resized_image)
+        control_images.append(lineart_image)
+        control_type[3] = 1
+
+    if use_anime_lineart:
+        anime_lineart_image = anime_lineart_detector(resized_image)
+        control_images.append(anime_lineart_image)
+        control_type[3] = 1
+
+    if use_mlsd:
+        mlsd_image = mlsd_detector(resized_image)
+        control_images.append(mlsd_image)
+        control_type[3] = 1
+
+    if use_scribble:
+        # Assuming scribble is the same as the input image
+        control_images.append(resized_image)
+        control_type[2] = 1
+
+    if use_hed:
+        hed_image = hed_detector(resized_image)
+        control_images.append(hed_image)
+        control_type[2] = 1
 
+    if use_softedge:
+        softedge_image = pidi_detector(resized_image)
+        control_images.append(softedge_image)
+        control_type[2] = 1
+
+    if use_teed:
+        # Assuming TEED is similar to HED but with thinner lines
+        teed_image = hed_detector(resized_image, detect_resolution=512)
+        control_images.append(teed_image)
+        control_type[2] = 1
+
+    if use_segment:
+        segment_image = sam_detector(resized_image)
+        control_images.append(segment_image)
+        control_type[5] = 1
+
+    if use_normal:
+        normal_image = normal_detector(resized_image)
+        control_images.append(normal_image)
+        control_type[4] = 1
+
+    if not control_images:
+        control_images.append(resized_image)
+        control_type[2] = 1  # Default to thick line type if no specific control is used
+
     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
 
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    # [14 deleted lines lost in the page extraction]
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        image=image,
-        num_inference_steps=num_steps,
-        generator=generator,
-        controlnet_conditioning_scale=controlnet_conditioning_scale,
-        guidance_scale=guidance_scale,
-        width=new_width,
-        height=new_height,
-    ).images[0]
-
-    return (controlnet_img, out)
+    out = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        image=control_images,
+        num_inference_steps=num_steps,
+        generator=generator,
+        controlnet_conditioning_scale=[controlnet_conditioning_scale] * len(control_images),
+        control_type=control_type,
+        guidance_scale=guidance_scale,
+        width=new_width,
+        height=new_height,
+    ).images[0]
+
+    return (control_images[0], out)
 
 with gr.Blocks(css="style.css", js=js_func) as demo:
     gr.Markdown(DESCRIPTION, elem_id="description")
```
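The rewritten run() gathers one control image per checked option and flags a six-slot control_type vector, with related conditions sharing a slot: canny, lineart, anime lineart, and MLSD all mark slot 3, while scribble, HED, soft edge, and TEED mark slot 2. Two details worth noting: the function is still annotated as returning PIL.Image.Image even though it now returns a (control image, output) pair for the slider, and control_type is not a documented argument of diffusers' stock StableDiffusionXLControlNetPipeline, so the call presumably relies on a patched or custom pipeline that accepts it. A sketch of the slot mapping as used above (the dict and helper are hypothetical, not part of app.py):

```python
# Assumed slot semantics, inferred from the branches in the hunk above.
CONTROL_SLOTS = {
    "openpose": 0,   # pose
    "depth": 1,      # depth map
    "scribble": 2,   # also hed, softedge (pidi), teed
    "canny": 3,      # also lineart, anime lineart, mlsd
    "normal": 4,     # normal map
    "segment": 5,    # segmentation
}

def build_control_type(active: list[str]) -> list[int]:
    vec = [0] * 6
    for name in active:
        vec[CONTROL_SLOTS[name]] = 1
    return vec

print(build_control_type(["openpose", "canny"]))  # [1, 0, 0, 1, 0, 0]
```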
```diff
@@ -280,8 +260,19 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
             image = gr.ImageEditor(type="pil", label="Sketch your image or upload one", width=512, height=512)
             prompt = gr.Textbox(label="Prompt")
             style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
-            # [2 deleted lines lost in the page extraction]
+            with gr.Accordion("Control options", open=False):
+                use_openpose = gr.Checkbox(label="Use Openpose", value=False)
+                use_depth = gr.Checkbox(label="Use Depth", value=False)
+                use_canny = gr.Checkbox(label="Use Canny", value=False)
+                use_lineart = gr.Checkbox(label="Use Lineart", value=False)
+                use_anime_lineart = gr.Checkbox(label="Use Anime Lineart", value=False)
+                use_mlsd = gr.Checkbox(label="Use MLSD", value=False)
+                use_scribble = gr.Checkbox(label="Use Scribble", value=False)
+                use_hed = gr.Checkbox(label="Use HED", value=False)
+                use_softedge = gr.Checkbox(label="Use Softedge (PIDI)", value=False)
+                use_teed = gr.Checkbox(label="Use TEED", value=False)
+                use_segment = gr.Checkbox(label="Use Segment", value=False)
+                use_normal = gr.Checkbox(label="Use Normal", value=False)
             run_button = gr.Button("Run")
             with gr.Accordion("Advanced options", open=False):
                 negative_prompt = gr.Textbox(
```
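These checkboxes feed run() through the inputs list further down, and Gradio passes component values to the callback positionally, so that list's order must mirror the keyword order in run()'s signature. A tiny self-contained illustration of the wiring (a hypothetical mini-demo, not part of app.py):

```python
import gradio as gr

def report(a: bool, b: bool) -> str:
    # Values arrive in the order listed in `inputs`, not by parameter name
    return f"a={a}, b={b}"

with gr.Blocks() as toy:
    a_box = gr.Checkbox(label="a")
    b_box = gr.Checkbox(label="b")
    result = gr.Textbox(label="result")
    gr.Button("Go").click(report, inputs=[a_box, b_box], outputs=result)
```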
```diff
@@ -303,7 +294,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
                     value=5,
                 )
                 controlnet_conditioning_scale = gr.Slider(
-                    label="  [rest of the deleted line truncated in the page]
+                    label="ControlNet conditioning scale",
                     minimum=0.5,
                     maximum=5.0,
                     step=0.1,
```
```diff
@@ -322,6 +313,14 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
         with gr.Group():
             image_slider = ImageSlider(position=0.5)
 
+            gr.Markdown("""
+            ## Multi Control Visual Examples
+            - Openpose + Canny
+            - Openpose + Depth
+            - Openpose + Scribble
+            - Openpose + Normal
+            - Openpose + Segment
+            """)
 
     inputs = [
         image,
```
```diff
@@ -332,8 +331,18 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
         guidance_scale,
         controlnet_conditioning_scale,
         seed,
+        use_openpose,
+        use_depth,
+        use_canny,
+        use_lineart,
+        use_anime_lineart,
+        use_mlsd,
+        use_scribble,
         use_hed,
-        use_canny,
+        use_softedge,
+        use_teed,
+        use_segment,
+        use_normal
     ]
     outputs = [image_slider]
     run_button.click(
```
```diff
@@ -345,7 +354,5 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
     ).then(lambda x: None, inputs=None, outputs=image_slider).then(
         fn=run, inputs=inputs, outputs=outputs
     )
-
-
 
-demo.queue().launch
+demo.queue().launch
```
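One last detail worth flagging: as committed, the final line only references the bound method, so launch is never actually invoked when the file is run directly. The standard Gradio call is presumably what was intended:

```python
demo.queue().launch()
```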