Update app.py
app.py CHANGED
@@ -201,7 +201,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 
 @spaces.GPU
 def run(
-    image:
+    image: dict,
     prompt: str,
     negative_prompt: str,
     style_name: str = DEFAULT_STYLE_NAME,
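The `image: dict` signature follows from swapping the plain image input for gr.ImageEditor: with type="pil", Gradio passes the editor's value to the event handler as an EditorValue dict rather than a single PIL image. A minimal sketch of what run() now receives (the empty-canvas guard is an assumption, not part of this commit):

# Sketch: the dict that gr.ImageEditor(type="pil") hands to the event handler.
# "composite" is the background plus all sketch layers flattened into one PIL image.
import gradio as gr

def unpack_editor_value(value: dict):
    background = value["background"]   # uploaded image or blank canvas
    layers = value["layers"]           # list of PIL images, one per drawing layer
    composite = value["composite"]     # the image run() actually uses
    if composite is None:              # assumption: guard against an empty editor
        raise gr.Error("Please sketch or upload an image first.")
    return composite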
@@ -213,59 +213,65 @@ def run(
     use_canny: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
-
-
-
-
+    # Get the composite image from the EditorValue dict
+    composite_image = image['composite']
+    width, height = composite_image.size
+
+    # Calculate new dimensions to fit within 1024x1024 while maintaining aspect ratio
+    max_size = 1024
+    ratio = min(max_size / width, max_size / height)
+    new_width = int(width * ratio)
+    new_height = int(height * ratio)
+
+    # Resize the image
+    resized_image = composite_image.resize((new_width, new_height), Image.LANCZOS)
 
     if use_canny:
-        controlnet_img = np.array(
+        controlnet_img = np.array(resized_image)
         controlnet_img = cv2.Canny(controlnet_img, 100, 200)
         controlnet_img = HWC3(controlnet_img)
         image = Image.fromarray(controlnet_img)
-
     elif not use_hed:
-
+        controlnet_img = resized_image
     else:
-        controlnet_img = processor(
-
+        controlnet_img = processor(resized_image, scribble=False)
+        # Process controlnet_img as before...
         controlnet_img = np.array(controlnet_img)
         controlnet_img = nms(controlnet_img, 127, 3)
         controlnet_img = cv2.GaussianBlur(controlnet_img, (0, 0), 3)
-
-        # higher threshold, thiner line
         random_val = int(round(random.uniform(0.01, 0.10), 2) * 255)
         controlnet_img[controlnet_img > random_val] = 255
         controlnet_img[controlnet_img < 255] = 0
         image = Image.fromarray(controlnet_img)
 
-
     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
 
     generator = torch.Generator(device=device).manual_seed(seed)
+
     if use_canny:
         out = pipe_canny(
-
-
-
-
-
-
-
-
-
-
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            num_inference_steps=num_steps,
+            generator=generator,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            guidance_scale=guidance_scale,
+            width=new_width,
+            height=new_height,
+        ).images[0]
     else:
         out = pipe(
-
-
-
-
-
-
-
-
-
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            num_inference_steps=num_steps,
+            generator=generator,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            guidance_scale=guidance_scale,
+            width=new_width,
+            height=new_height,
+        ).images[0]
 
     return (controlnet_img, out)
 
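The new preamble fits the composite into a 1024x1024 box while keeping the aspect ratio, then resizes with LANCZOS. Stable Diffusion pipelines generally expect width and height to be multiples of 8 (the VAE downsampling factor), which the straight int() conversion above does not guarantee; a sketch of the same resize with that extra rounding, as an assumption on top of this commit:

# Sketch: fit-within-1024 resize, plus snapping to multiples of 8.
# The snapping step is an assumption, not something this commit does.
from PIL import Image

def fit_to_max(img: Image.Image, max_size: int = 1024, multiple: int = 8) -> Image.Image:
    width, height = img.size
    ratio = min(max_size / width, max_size / height)
    new_w = max(multiple, int(width * ratio) // multiple * multiple)
    new_h = max(multiple, int(height * ratio) // multiple * multiple)
    return img.resize((new_w, new_h), Image.LANCZOS)

One thing worth double-checking in this hunk: in the elif not use_hed: branch only controlnet_img is assigned, so the image=image argument passed to the pipelines further down would still hold the EditorValue dict rather than a PIL image in that branch.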
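processor and nms are defined earlier in app.py and are not shown in this diff. A typical setup for this scribble-style path, assuming processor is the HED detector from controlnet_aux (an assumption about this Space, not something the hunk shows):

# Sketch (assumption): HED edge detector commonly used for scribble ControlNet input.
from controlnet_aux import HEDdetector

processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
# processor(resized_image, scribble=False) returns a soft edge map; the nms,
# GaussianBlur and random-threshold steps in run() then binarize it into a
# thin, sketch-like control image.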
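Likewise, pipe and pipe_canny come from earlier in app.py. A rough sketch of how such a pair is usually built, assuming diffusers' SDXL ControlNet pipeline and the xinsir scribble/canny checkpoints (both assumptions; the diff does not show which models this Space loads):

# Sketch (assumptions: pipeline class and model IDs; not shown in this diff).
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline

controlnet_scribble = ControlNetModel.from_pretrained(
    "xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=torch.float16
)
controlnet_canny = ControlNetModel.from_pretrained(
    "xinsir/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
)

pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet_scribble,
    torch_dtype=torch.float16,
).to("cuda")
pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet_canny,
    torch_dtype=torch.float16,
).to("cuda")

With explicit width=new_width and height=new_height, the output now matches the resized control image instead of the pipeline's default resolution.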
@@ -281,7 +287,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
     with gr.Row():
         with gr.Column():
             with gr.Group():
-                image = gr.ImageEditor(type="pil",
+                image = gr.ImageEditor(type="pil",label="Sketch your image or upload one", crop_size="1:1", width=1024, height=1024,)
                 prompt = gr.Textbox(label="Prompt")
                 style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
                 use_hed = gr.Checkbox(label="use HED detector", value=False, info="check this box if you upload an image and want to turn it to a sketch")
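On the UI side, the upload-only image input becomes an editor with a 1:1 crop and a 1024x1024 canvas area. A minimal, self-contained sketch of how the editor value reaches a callback of this shape (the stub run() and the button are placeholders, not the Space's real wiring):

# Sketch (assumption): minimal wiring of the ImageEditor into a run() callback.
import gradio as gr

def run(image: dict, prompt: str):
    # stand-in for the Space's run(); just echo the flattened sketch back
    return image["composite"]

with gr.Blocks() as demo:
    image = gr.ImageEditor(type="pil", label="Sketch your image or upload one",
                           crop_size="1:1", width=1024, height=1024)
    prompt = gr.Textbox(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Image(label="Result")
    run_button.click(fn=run, inputs=[image, prompt], outputs=result)

demo.launch()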