oshita-n committed on
Commit
17a1c6f
·
1 Parent(s): 05ba6bb
Files changed (1) hide show
  1. app.py +10 -4
app.py CHANGED
@@ -7,6 +7,8 @@ from io import BytesIO
7
  from rembg import remove
8
  from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
9
  import torch
 
 
10
 
11
  def remove_background(input_image: Image.Image, to_grayscale: bool) -> Image.Image:
12
  output_image = remove(input_image)
@@ -27,20 +29,24 @@ def canny_image(image: Image.Image) -> Image.Image:
27
  return Image.fromarray(np_image)
28
 
29
  def process_image(input_image: Image.Image, to_grayscale: bool, prompt: str) -> Image.Image:
30
- canny_output = canny_image(input_image)
 
31
 
32
  controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32)
33
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
34
  "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float32
35
  )
36
  pipe.enable_model_cpu_offload()
 
 
 
37
  generator = torch.manual_seed(2)
38
  output = pipe(
39
- [prompt],
40
  canny_output,
41
- negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"],
42
  generator=generator,
43
- num_inference_steps=20,
44
  )
45
 
46
  return output.images[0]
 
7
  from rembg import remove
8
  from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
9
  import torch
10
+ from diffusers import UniPCMultistepScheduler
11
+
12
 
13
  def remove_background(input_image: Image.Image, to_grayscale: bool) -> Image.Image:
14
  output_image = remove(input_image)
 
29
  return Image.fromarray(np_image)
30
 
31
  def process_image(input_image: Image.Image, to_grayscale: bool, prompt: str) -> Image.Image:
32
+ output_image = remove_background(input_image, to_grayscale)
33
+ canny_output = canny_image(output_image)
34
 
35
  controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32)
36
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
37
  "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float32
38
  )
39
  pipe.enable_model_cpu_offload()
40
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
41
+ pipe.enable_xformers_memory_efficient_attention()
42
+
43
  generator = torch.manual_seed(2)
44
  output = pipe(
45
+ prompt,
46
  canny_output,
47
+ negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
48
  generator=generator,
49
+ num_inference_steps=2,
50
  )
51
 
52
  return output.images[0]