Bobby committed on
Commit
19def46
·
1 Parent(s): b85b7f4
Files changed (1) hide show
  1. app.py +31 -36
app.py CHANGED
@@ -106,13 +106,6 @@ if gr.NO_RELOAD:
106
  "runwayml/stable-diffusion-inpainting",
107
  torch_dtype=torch.float16,
108
  ).to("cuda")
109
-
110
- print('loading controlnet inpainting pipe')
111
- controlnet_inpaint_pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
112
- "runwayml/stable-diffusion-inpainting",
113
- controlnet=controlnet,
114
- torch_dtype=torch.float16,
115
- ).to("cuda")
116
 
117
  print("loading preprocessor")
118
  preprocessor = Preprocessor()
@@ -432,35 +425,21 @@ def process_image(
432
  guidance_scale,
433
  seed,
434
  ):
435
- preprocess_start = time.time()
436
- print("processing image")
437
-
438
  seed = random.randint(0, MAX_SEED)
439
  generator = torch.cuda.manual_seed(seed)
 
440
  preprocessor.load("NormalBae")
441
  control_image = preprocessor(
442
  image=image,
443
  image_resolution=image_resolution,
444
  detect_resolution=preprocess_resolution,
445
  )
446
- preprocess_time = time.time() - preprocess_start
447
- if style_selection is not None or style_selection != "None":
448
- prompt = "Photo from Pinterest of " + apply_style(style_selection) + " " + prompt + "," + a_prompt
449
  else:
450
- prompt=str(get_prompt(prompt, a_prompt))
451
- negative_prompt=str(n_prompt)
452
- print(prompt)
453
- print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
454
- start = time.time()
455
- # results = pipe(
456
- # prompt=prompt,
457
- # negative_prompt=negative_prompt,
458
- # guidance_scale=guidance_scale,
459
- # num_images_per_prompt=num_images,
460
- # num_inference_steps=num_steps,
461
- # generator=generator,
462
- # image=control_image,
463
- # ).images[0]
464
 
465
  initial_result = pipe(
466
  prompt=prompt,
@@ -479,28 +458,44 @@ def process_image(
479
  if furniture_type != "None":
480
  furniture_mask = generate_furniture_mask(initial_result, furniture_type)
481
  furniture_prompt = f"A {furniture_type} in the style of {style_selection}"
482
- inpainted_image = controlnet_inpaint_pipe(
 
 
 
 
 
 
 
 
 
483
  prompt=furniture_prompt,
484
  image=initial_result,
485
  mask_image=furniture_mask,
486
- control_image=control_image,
487
  negative_prompt=negative_prompt,
488
  num_inference_steps=num_steps,
489
  guidance_scale=guidance_scale,
490
  generator=generator,
491
  ).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
492
  else:
493
- inpainted_image = initial_result
494
-
495
- print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
496
- torch.cuda.empty_cache()
497
 
498
- # upload block
499
  timestamp = int(time.time())
500
  img_path = f"{timestamp}.jpg"
501
  results_path = f"{timestamp}_out.jpg"
502
  imageio.imsave(img_path, image)
503
- imageio.imsave(results_path, results)
504
  api.upload_file(
505
  path_or_fileobj=img_path,
506
  path_in_repo=img_path,
@@ -517,7 +512,7 @@ def process_image(
517
  token=API_KEY,
518
  run_as_future=True,
519
  )
520
- return inpainted_image
521
 
522
  if prod:
523
  demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
 
106
  "runwayml/stable-diffusion-inpainting",
107
  torch_dtype=torch.float16,
108
  ).to("cuda")
 
 
 
 
 
 
 
109
 
110
  print("loading preprocessor")
111
  preprocessor = Preprocessor()
 
425
  guidance_scale,
426
  seed,
427
  ):
 
 
 
428
  seed = random.randint(0, MAX_SEED)
429
  generator = torch.cuda.manual_seed(seed)
430
+
431
  preprocessor.load("NormalBae")
432
  control_image = preprocessor(
433
  image=image,
434
  image_resolution=image_resolution,
435
  detect_resolution=preprocess_resolution,
436
  )
437
+
438
+ if style_selection is not None and style_selection != "None":
439
+ prompt = f"Photo from Pinterest of {apply_style(style_selection)} {prompt},{a_prompt}"
440
  else:
441
+ prompt = str(get_prompt(prompt, a_prompt))
442
+ negative_prompt = str(n_prompt)
 
 
 
 
 
 
 
 
 
 
 
 
443
 
444
  initial_result = pipe(
445
  prompt=prompt,
 
458
  if furniture_type != "None":
459
  furniture_mask = generate_furniture_mask(initial_result, furniture_type)
460
  furniture_prompt = f"A {furniture_type} in the style of {style_selection}"
461
+
462
+ # Apply ControlNet to get the control image for inpainting
463
+ control_image_inpaint = preprocessor(
464
+ image=initial_result,
465
+ image_resolution=image_resolution,
466
+ detect_resolution=preprocess_resolution,
467
+ )
468
+
469
+ # Use the regular inpainting pipeline with ControlNet guidance
470
+ inpainted_image = inpaint_pipe(
471
  prompt=furniture_prompt,
472
  image=initial_result,
473
  mask_image=furniture_mask,
 
474
  negative_prompt=negative_prompt,
475
  num_inference_steps=num_steps,
476
  guidance_scale=guidance_scale,
477
  generator=generator,
478
  ).images[0]
479
+
480
+ # Apply ControlNet guidance on the inpainted result
481
+ final_result = pipe(
482
+ prompt=furniture_prompt,
483
+ negative_prompt=negative_prompt,
484
+ guidance_scale=guidance_scale,
485
+ num_images_per_prompt=1,
486
+ num_inference_steps=num_steps,
487
+ generator=generator,
488
+ image=control_image_inpaint,
489
+ controlnet_conditioning_scale=0.5, # Adjust this value as needed
490
+ ).images[0]
491
  else:
492
+ final_result = initial_result
 
 
 
493
 
 
494
  timestamp = int(time.time())
495
  img_path = f"{timestamp}.jpg"
496
  results_path = f"{timestamp}_out.jpg"
497
  imageio.imsave(img_path, image)
498
+ imageio.imsave(results_path, final_result)
499
  api.upload_file(
500
  path_or_fileobj=img_path,
501
  path_in_repo=img_path,
 
512
  token=API_KEY,
513
  run_as_future=True,
514
  )
515
+ return final_result
516
 
517
  if prod:
518
  demo.queue(max_size=20).launch(server_name="localhost", server_port=port)