import torch
import gradio as gr
from diffusers import FluxKontextPipeline
from optimum.quanto import freeze, qfloat8, quantize
from PIL import Image
# Load the FLUX.1 Kontext image-editing pipeline in bfloat16, then quantize
# its two largest components (the DiT transformer and the T5 text encoder)
# to 8-bit floats so the model fits in consumer GPU memory.
print("Loading FLUX Kontext with 8-bit quantization...")
pipe = FluxKontextPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Kontext-dev",
torch_dtype=torch.bfloat16,
)
print("Quantizing transformer to 8-bit...")
# quantize() replaces weights with qfloat8 equivalents; freeze() makes the
# quantization permanent (weights can no longer be re-calibrated).
quantize(pipe.transformer, weights=qfloat8)
freeze(pipe.transformer)
print("Quantizing text encoder to 8-bit...")
# text_encoder_2 is the large T5 encoder; the small CLIP encoder is left as-is.
quantize(pipe.text_encoder_2, weights=qfloat8)
freeze(pipe.text_encoder_2)
# Move to GPU only after quantization so the full-precision weights never
# need to fit in VRAM at once.
pipe.to("cuda")
print("Model ready!")
def edit_image(input_image, prompt, steps, guidance, seed, progress=gr.Progress()):
    """Edit an uploaded image according to a natural-language instruction.

    Args:
        input_image: PIL image from the upload widget, or None if absent.
        prompt: natural-language edit instruction.
        steps: number of diffusion inference steps (sliders may deliver floats).
        guidance: classifier-free guidance scale.
        seed: seed value for reproducible generation.
        progress: Gradio progress tracker, injected automatically by Gradio.

    Returns:
        Tuple of (edited PIL image or None, status message string).
    """
    # Validate inputs up front and return a friendly message instead of raising.
    if input_image is None:
        return None, "Please upload an image."
    # Guard against None as well as empty/whitespace-only text: a cleared
    # textbox component can deliver None, and None.strip() would raise.
    if not prompt or not prompt.strip():
        return None, "Please enter an edit instruction."
    # gr.Slider can produce floats; num_inference_steps must be an int.
    steps = int(steps)
    progress(0.1, desc="Preparing...")
    input_image = input_image.convert("RGB")
    # CPU generator: seeding is reproducible independent of CUDA device state.
    generator = torch.Generator().manual_seed(int(seed))

    def step_cb(pipe, i, t, kwargs):
        # callback_on_step_end fires after step i completes, so report i + 1
        # steps done; maps onto the remaining 90% of the progress bar and
        # reaches 100% on the final step (the original showed steps-1/steps).
        done = i + 1
        progress(0.1 + 0.9 * (done / steps), desc=f"Step {done}/{steps}")
        return kwargs

    result = pipe(
        image=input_image,
        prompt=prompt.strip(),
        num_inference_steps=steps,
        guidance_scale=guidance,
        generator=generator,
        callback_on_step_end=step_cb,
    ).images[0]
    progress(1.0, desc="Done!")
    return result, "Done!"
# Example edit instructions surfaced in the UI via gr.Examples; each entry is
# a one-element row because the examples feed a single input (the prompt box).
EXAMPLES = [
["Make the sky look like a sunset"],
["Remove the background and make it white"],
["Make it look like a watercolor painting"],
["Add snow to the ground"],
["Change the style to anime"],
]
# Build the two-column Gradio UI: inputs + controls on the left, the edited
# image and a status line on the right. Layout is defined by the nesting and
# order of these context managers.
with gr.Blocks(title="FLUX Kontext Image Editor") as demo:
gr.Markdown("# FLUX.1 Kontext Image Editor")
gr.Markdown("Edit images with natural language. Powered by FLUX.1 Kontext running locally.")
with gr.Row():
with gr.Column():
input_img = gr.Image(type="pil", label="Upload Image")
prompt = gr.Textbox(
label="Edit Instruction",
placeholder="e.g. remove the person and smooth the background",
lines=2,
)
# Tuning knobs hidden by default; values match edit_image's expectations.
with gr.Accordion("Advanced Settings", open=False):
steps = gr.Slider(10, 50, value=28, step=1, label="Steps (less = faster)")
guidance = gr.Slider(1, 10, value=2.5, step=0.5, label="Guidance Scale")
seed = gr.Number(value=42, label="Seed")
run_btn = gr.Button("Edit Image", variant="primary")
# Clicking an example only fills the prompt box; the user still uploads an image.
gr.Examples(examples=EXAMPLES, inputs=[prompt], label="Example Prompts")
with gr.Column():
output_img = gr.Image(label="Edited Image")
status = gr.Textbox(label="Status", interactive=False)
# Wire the button to the handler; input order must match edit_image's signature.
run_btn.click(
fn=edit_image,
inputs=[input_img, prompt, steps, guidance, seed],
outputs=[output_img, status],
)
# Start the local web server when run as a script; share=False keeps the app
# private (no public Gradio tunnel URL).
if __name__ == "__main__":
demo.launch(share=False)