Spaces:
Build error
Build error
Commit ·
51312b1
1
Parent(s): 2b3d7c6
Fix NoneType error, remove negative_prompt, use Blocks for layout
Browse files
app.py
CHANGED
|
@@ -26,62 +26,60 @@ else:
|
|
| 26 |
|
| 27 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 28 |
|
| 29 |
-
# Load the
|
| 30 |
pipe = FluxFillPipeline.from_pretrained(
|
| 31 |
"black-forest-labs/FLUX.1-Fill-dev",
|
| 32 |
torch_dtype=torch.bfloat16,
|
| 33 |
token=hf_token,
|
| 34 |
).to(device)
|
| 35 |
-
|
| 36 |
-
# Enable CPU offloading only if GPU is available
|
| 37 |
if torch.cuda.is_available():
|
| 38 |
pipe.enable_model_cpu_offload()
|
| 39 |
|
| 40 |
|
| 41 |
-
def inpaint(
|
| 42 |
-
image
|
| 43 |
-
|
| 44 |
-
image = image.convert("RGB")
|
| 45 |
-
mask = mask.convert("RGB")
|
| 46 |
-
|
| 47 |
-
mask_np = np.array(mask)
|
| 48 |
-
mask_np = cv2.cvtColor(mask_np, cv2.COLOR_RGB2GRAY)
|
| 49 |
-
_, mask_np = cv2.threshold(mask_np, 127, 255, cv2.THRESH_BINARY)
|
| 50 |
-
mask = Image.fromarray(mask_np)
|
| 51 |
|
| 52 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
output = pipe(
|
| 54 |
prompt=prompt + ", highly detailed, realistic, sepia tone",
|
| 55 |
-
negative_prompt=negative_prompt + ", blurry, low quality",
|
| 56 |
image=image,
|
| 57 |
-
mask_image=
|
| 58 |
num_inference_steps=50,
|
| 59 |
guidance_scale=7.5,
|
| 60 |
max_sequence_length=512,
|
| 61 |
).images[0]
|
| 62 |
-
return output
|
| 63 |
except Exception as e:
|
| 64 |
-
|
| 65 |
-
raise
|
| 66 |
|
| 67 |
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
gr.Image(label="
|
| 72 |
-
gr.
|
| 73 |
-
label="Mask (white=edit, black=keep)", type="pil"
|
| 74 |
-
), # Removed tool="sketch"
|
| 75 |
-
gr.Textbox(
|
| 76 |
label="Prompt",
|
| 77 |
value="add a golden crescent moon on the forehead, glowing red cat eyes",
|
| 78 |
-
)
|
| 79 |
-
gr.
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
if __name__ == "__main__":
|
| 87 |
-
|
|
|
|
# Select the GPU when one is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the FLUX.1 Fill inpainting pipeline.
# bfloat16 weights halve memory use; `token` presumably authenticates
# against a gated model repo — confirm `hf_token` is set upstream.
pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev",
    torch_dtype=torch.bfloat16,
    token=hf_token,
).to(device)

# Enable CPU offloading only when a GPU exists (offload is meaningless on
# CPU-only hosts).
# NOTE(review): diffusers recommends NOT calling .to("cuda") before
# enable_model_cpu_offload() — the offload hooks manage device placement
# themselves. Consider skipping .to(device) on CUDA hosts; verify against
# the diffusers docs for the installed version.
if torch.cuda.is_available():
    pipe.enable_model_cpu_offload()
def inpaint(image: "Image.Image", mask: "Image.Image", prompt: str):
    """Inpaint `image` where `mask` is white, guided by `prompt`.

    Args:
        image: Base picture to edit (any PIL mode; converted to RGB).
        mask: Mask image; white pixels are regenerated, black pixels kept.
        prompt: Text description of the desired edit.

    Returns:
        A 3-tuple ``(output, processed_mask, error)``: the inpainted image
        and the binarized mask on success (``error`` is None), or
        ``(None, None, message)`` when an input is missing or the pipeline
        raises.
    """
    # Guard against missing uploads so the pipeline never sees None
    # (the NoneType error this revision fixes).
    if image is None or mask is None:
        return None, None, "Please upload both a base image and a mask."

    try:
        image = image.convert("RGB")
        mask = mask.convert("RGB")

        # Binarize the mask: grayscale it, then hard-threshold at 127 so
        # every pixel is either fully kept (0) or fully regenerated (255).
        mask_np = np.array(mask)
        mask_np = cv2.cvtColor(mask_np, cv2.COLOR_RGB2GRAY)
        _, mask_np = cv2.threshold(mask_np, 127, 255, cv2.THRESH_BINARY)
        processed_mask = Image.fromarray(mask_np)

        output = pipe(
            prompt=prompt + ", highly detailed, realistic, sepia tone",
            image=image,
            mask_image=processed_mask,
            num_inference_steps=50,
            guidance_scale=7.5,
            max_sequence_length=512,
        ).images[0]
        return output, processed_mask, None
    except Exception as e:
        # Surface the failure in the UI's error label instead of crashing
        # the Gradio app.
        return None, None, f"Error during inpainting: {str(e)}"
# --- Gradio UI (Blocks layout) ---
# Inputs in a row up top, prompt + submit below, outputs grouped in a column.
with gr.Blocks() as demo:
    with gr.Row():
        base_image = gr.Image(label="Base Image", type="pil")
        mask_image = gr.Image(label="Mask (white=edit, black=keep)", type="pil")
    prompt = gr.Textbox(
        label="Prompt",
        value="add a golden crescent moon on the forehead, glowing red cat eyes",
    )
    submit = gr.Button("Submit")
    with gr.Column():
        output_image = gr.Image(label="Inpainted Output")
        processed_mask_display = gr.Image(label="Processed Mask")
        # Markdown label doubles as the error channel for inpaint()'s
        # third return value.
        error_label = gr.Markdown()
    submit.click(
        inpaint,
        inputs=[base_image, mask_image, prompt],
        outputs=[output_image, processed_mask_display, error_label],
    )
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()