Update handler.py
Browse files- handler.py +4 -3
handler.py
CHANGED
|
@@ -116,14 +116,14 @@ class EndpointHandler():
|
|
| 116 |
#pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
|
| 117 |
|
| 118 |
# run inference pipeline
|
| 119 |
-
out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image)
|
| 120 |
|
| 121 |
print("1st pipeline part successful!")
|
| 122 |
|
| 123 |
image = out.images[0].resize((1024, 1024))
|
| 124 |
|
| 125 |
print("image resizing successful!")
|
| 126 |
-
|
| 127 |
image = self.pipe2(
|
| 128 |
prompt=prompt,
|
| 129 |
negative_prompt=negative_prompt,
|
|
@@ -146,9 +146,10 @@ class EndpointHandler():
|
|
| 146 |
).images[0]
|
| 147 |
|
| 148 |
print("3rd pipeline part successful!")
|
|
|
|
| 149 |
|
| 150 |
# return first generated PIL image
|
| 151 |
-
return
|
| 152 |
|
| 153 |
"""
|
| 154 |
control_image = self.make_inpaint_condition(image, mask_image)
|
|
|
|
| 116 |
#pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
|
| 117 |
|
| 118 |
# run inference pipeline
|
| 119 |
+
out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale)
|
| 120 |
|
| 121 |
print("1st pipeline part successful!")
|
| 122 |
|
| 123 |
image = out.images[0].resize((1024, 1024))
|
| 124 |
|
| 125 |
print("image resizing successful!")
|
| 126 |
+
"""
|
| 127 |
image = self.pipe2(
|
| 128 |
prompt=prompt,
|
| 129 |
negative_prompt=negative_prompt,
|
|
|
|
| 146 |
).images[0]
|
| 147 |
|
| 148 |
print("3rd pipeline part successful!")
|
| 149 |
+
"""
|
| 150 |
|
| 151 |
# return first generated PIL image
|
| 152 |
+
return image
|
| 153 |
|
| 154 |
"""
|
| 155 |
control_image = self.make_inpaint_condition(image, mask_image)
|