import torch
import gradio as gr
from diffusers import FluxKontextPipeline
from optimum.quanto import freeze, qfloat8, quantize
from PIL import Image
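# Assumed dependencies (the original does not pin them):
#   pip install torch gradio diffusers transformers sentencepiece optimum-quanto pillow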
| print("Loading FLUX Kontext with 8-bit quantization...") | |
| pipe = FluxKontextPipeline.from_pretrained( | |
| "black-forest-labs/FLUX.1-Kontext-dev", | |
| torch_dtype=torch.bfloat16, | |
| ) | |
| print("Quantizing transformer to 8-bit...") | |
| quantize(pipe.transformer, weights=qfloat8) | |
| freeze(pipe.transformer) | |
| print("Quantizing text encoder to 8-bit...") | |
| quantize(pipe.text_encoder_2, weights=qfloat8) | |
| freeze(pipe.text_encoder_2) | |
| pipe.to("cuda") | |
| print("Model ready!") | |
def edit_image(input_image, prompt, steps, guidance, seed, progress=gr.Progress()):
    if input_image is None:
        return None, "Please upload an image."
    if not prompt.strip():
        return None, "Please enter an edit instruction."

    progress(0.1, desc="Preparing...")
    input_image = input_image.convert("RGB")
    generator = torch.Generator().manual_seed(int(seed))

    # diffusers invokes this after each denoising step; map the step index onto
    # the remaining 90% of the progress bar and return the kwargs unchanged.
    def step_cb(pipe, i, t, kwargs):
        progress(0.1 + 0.9 * ((i + 1) / steps), desc=f"Step {i + 1}/{int(steps)}")
        return kwargs

    result = pipe(
        image=input_image,
        prompt=prompt.strip(),
        num_inference_steps=int(steps),  # sliders may yield floats; the pipeline expects an int
        guidance_scale=guidance,
        generator=generator,
        callback_on_step_end=step_cb,
    ).images[0]

    progress(1.0, desc="Done!")
    return result, "Done!"
EXAMPLES = [
    ["Make the sky look like a sunset"],
    ["Remove the background and make it white"],
    ["Make it look like a watercolor painting"],
    ["Add snow to the ground"],
    ["Change the style to anime"],
]
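# Note: gr.Examples below feeds these into the prompt textbox only
# (inputs=[prompt]); the image itself still has to be uploaded by the user.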
with gr.Blocks(title="FLUX Kontext Image Editor") as demo:
    gr.Markdown("# FLUX.1 Kontext Image Editor")
    gr.Markdown("Edit images with natural language. Powered by FLUX.1 Kontext running locally.")

    with gr.Row():
        with gr.Column():
            input_img = gr.Image(type="pil", label="Upload Image")
            prompt = gr.Textbox(
                label="Edit Instruction",
                placeholder="e.g. remove the person and smooth the background",
                lines=2,
            )
            with gr.Accordion("Advanced Settings", open=False):
                steps = gr.Slider(10, 50, value=28, step=1, label="Steps (fewer = faster)")
                guidance = gr.Slider(1, 10, value=2.5, step=0.5, label="Guidance Scale")
                seed = gr.Number(value=42, label="Seed")
            run_btn = gr.Button("Edit Image", variant="primary")
            gr.Examples(examples=EXAMPLES, inputs=[prompt], label="Example Prompts")
        with gr.Column():
            output_img = gr.Image(label="Edited Image")
            status = gr.Textbox(label="Status", interactive=False)

    run_btn.click(
        fn=edit_image,
        inputs=[input_img, prompt, steps, guidance, seed],
        outputs=[output_img, status],
    )
if __name__ == "__main__":
    demo.launch(share=False)
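# Launch variants (deployment assumptions, not part of the original app):
#   demo.launch(server_name="0.0.0.0", server_port=7860)  # reachable over the LAN
#   demo.launch(share=True)                               # temporary public gradio.live URL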