pikapool1994 committed on
Commit
48a2fd6
·
1 Parent(s): a38bfc0

Switch to local FLUX Kontext for A100

Browse files
Files changed (3) hide show
  1. app.py +36 -54
  2. app_local.py +85 -0
  3. requirements.txt +5 -1
app.py CHANGED
@@ -1,39 +1,40 @@
1
- import io
2
  import gradio as gr
3
- from huggingface_hub import InferenceClient
4
  from PIL import Image
5
 
6
- def edit_image(hf_token, input_image, prompt, progress=gr.Progress()):
7
- if not hf_token.strip():
8
- return None, "Please enter your HF token."
 
 
 
 
 
9
  if input_image is None:
10
  return None, "Please upload an image."
11
  if not prompt.strip():
12
  return None, "Please enter an edit instruction."
13
 
14
- try:
15
- client = InferenceClient(
16
- provider="fal-ai",
17
- api_key=hf_token.strip(),
18
- )
19
-
20
- progress(0.2, desc="Uploading image...")
21
- img_bytes = io.BytesIO()
22
- input_image.save(img_bytes, format="PNG")
23
- img_bytes = img_bytes.getvalue()
24
 
25
- progress(0.4, desc="Running FLUX Kontext...")
26
- result = client.image_to_image(
27
- img_bytes,
28
- prompt=prompt.strip(),
29
- model="black-forest-labs/FLUX.1-Kontext-dev",
30
- )
31
 
32
- progress(1.0, desc="Done!")
33
- return result, "Done!"
 
 
 
 
 
 
34
 
35
- except Exception as e:
36
- return None, f"Error: {str(e)}"
37
 
38
  EXAMPLES = [
39
  ["Make the sky look like a sunset"],
@@ -41,52 +42,33 @@ EXAMPLES = [
41
  ["Make it look like a watercolor painting"],
42
  ["Add snow to the ground"],
43
  ["Change the style to anime"],
44
- ["Make it look like it was taken at night"],
45
  ]
46
 
47
  with gr.Blocks(title="FLUX Kontext Image Editor") as demo:
48
  gr.Markdown("# FLUX.1 Kontext Image Editor")
49
- gr.Markdown("Edit any image using natural language. Uses your own HF token your credits, your privacy.")
50
-
51
- with gr.Accordion("Setup: Enter your HF Token", open=True):
52
- gr.Markdown("""
53
- 1. Get your free token at [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)
54
- 2. Make sure **Inference Providers** permission is enabled
55
- 3. Paste it below — it stays in your browser only
56
- """)
57
- hf_token = gr.Textbox(
58
- label="HF Token",
59
- placeholder="hf_...",
60
- type="password",
61
- )
62
-
63
  with gr.Row():
64
  with gr.Column():
65
- input_img = gr.Image(type="pil", label="Input Image")
66
  prompt = gr.Textbox(
67
  label="Edit Instruction",
68
- placeholder="e.g. make the sky look like a sunset",
69
  lines=2,
70
  )
 
 
 
 
71
  run_btn = gr.Button("Edit Image", variant="primary")
72
  gr.Examples(examples=EXAMPLES, inputs=[prompt], label="Example Prompts")
73
  with gr.Column():
74
  output_img = gr.Image(label="Edited Image")
75
- status = gr.Textbox(label="Status", interactive=False)
76
-
77
  run_btn.click(
78
  fn=edit_image,
79
- inputs=[hf_token, input_img, prompt],
80
  outputs=[output_img, status],
81
  )
82
 
83
- gr.Markdown("""
84
- ### Tips
85
- - Be specific: "make the sky orange and purple at sunset"
86
- - Object removal: "remove the car on the left and fill with grass"
87
- - Style: "make it look like a Van Gogh painting"
88
- - Your HF token is never stored or sent anywhere except HF servers
89
- """)
90
-
91
  if __name__ == "__main__":
92
- demo.launch(share=False)
 
import torch
import gradio as gr
from diffusers import FluxKontextPipeline
from PIL import Image

# The FLUX.1 Kontext editing pipeline is loaded once at import time and
# shared (as module-global `pipe`) by every request handled below.
MODEL_ID = "black-forest-labs/FLUX.1-Kontext-dev"

print("Loading FLUX.1 Kontext model...")
pipe = FluxKontextPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # bf16 weights; the full model targets an A100
).to("cuda")
print("Model ready!")
def edit_image(input_image, prompt, steps, guidance, seed, progress=gr.Progress()):
    """Edit `input_image` with FLUX.1 Kontext according to `prompt`.

    Args:
        input_image: PIL image to edit, or None.
        prompt: natural-language edit instruction.
        steps: number of diffusion steps (Gradio may deliver this as a float).
        guidance: classifier-free guidance scale.
        seed: seed for reproducible generation (Gradio Number -> float).
        progress: Gradio progress tracker, injected by Gradio.

    Returns:
        Tuple of (edited PIL image or None, status message string).
    """
    if input_image is None:
        return None, "Please upload an image."
    if not prompt.strip():
        return None, "Please enter an edit instruction."

    progress(0.1, desc="Preparing...")
    input_image = input_image.convert("RGB")
    # Gradio sliders/number boxes can hand back floats; the diffusers
    # pipeline requires an integer step count (and the callback divides by it).
    steps = int(steps)
    generator = torch.Generator().manual_seed(int(seed))

    def step_cb(pipe, i, t, kwargs):
        # Map step i onto the remaining 90% of the progress bar;
        # diffusers expects the callback to return its kwargs dict.
        progress(0.1 + 0.9 * (i / steps), desc=f"Step {i}/{steps}")
        return kwargs

    result = pipe(
        image=input_image,
        prompt=prompt.strip(),
        num_inference_steps=steps,
        guidance_scale=guidance,
        generator=generator,
        callback_on_step_end=step_cb,
    ).images[0]

    progress(1.0, desc="Done!")
    return result, "Done!"
# Prompt suggestions surfaced under the edit box via gr.Examples.
EXAMPLES = [
    ["Make the sky look like a sunset"],
    ["Remove the background and make it white"],
    ["Make it look like a watercolor painting"],
    ["Add snow to the ground"],
    ["Change the style to anime"],
]

with gr.Blocks(title="FLUX Kontext Image Editor") as demo:
    gr.Markdown("# FLUX.1 Kontext Image Editor")
    gr.Markdown("Edit images with natural language. Powered by FLUX.1 Kontext dev on A100 GPU.")
    with gr.Row():
        # Left column: inputs and controls.
        with gr.Column():
            input_img = gr.Image(type="pil", label="Upload Image")
            prompt = gr.Textbox(
                label="Edit Instruction",
                placeholder="e.g. remove the person and smooth the background",
                lines=2,
            )
            with gr.Accordion("Advanced Settings", open=False):
                steps = gr.Slider(10, 50, value=28, step=1, label="Steps")
                guidance = gr.Slider(1, 10, value=2.5, step=0.5, label="Guidance Scale")
                seed = gr.Number(value=42, label="Seed")
            run_btn = gr.Button("Edit Image", variant="primary")
            gr.Examples(examples=EXAMPLES, inputs=[prompt], label="Example Prompts")
        # Right column: result and status.
        with gr.Column():
            output_img = gr.Image(label="Edited Image")
            status = gr.Textbox(label="Status", interactive=False)
    run_btn.click(
        fn=edit_image,
        inputs=[input_img, prompt, steps, guidance, seed],
        outputs=[output_img, status],
    )

if __name__ == "__main__":
    demo.launch()
app_local.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import torch
import gradio as gr
from diffusers import FluxKontextPipeline
from optimum.quanto import freeze, qfloat8, quantize
from PIL import Image

# Load the pipeline in bf16, then quantize its two largest components
# (the transformer and the T5 text encoder) to 8-bit so the model fits on
# smaller GPUs. freeze() locks the quantized weights in place.
print("Loading FLUX Kontext with 8-bit quantization...")
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16,
)

print("Quantizing transformer to 8-bit...")
quantize(pipe.transformer, weights=qfloat8)
freeze(pipe.transformer)

print("Quantizing text encoder to 8-bit...")
quantize(pipe.text_encoder_2, weights=qfloat8)
freeze(pipe.text_encoder_2)

pipe.to("cuda")
print("Model ready!")
def edit_image(input_image, prompt, steps, guidance, seed, progress=gr.Progress()):
    """Edit `input_image` with the quantized FLUX.1 Kontext pipeline.

    Args:
        input_image: PIL image to edit, or None.
        prompt: natural-language edit instruction.
        steps: number of diffusion steps (Gradio may deliver this as a float).
        guidance: classifier-free guidance scale.
        seed: seed for reproducible generation (Gradio Number -> float).
        progress: Gradio progress tracker, injected by Gradio.

    Returns:
        Tuple of (edited PIL image or None, status message string).
    """
    if input_image is None:
        return None, "Please upload an image."
    if not prompt.strip():
        return None, "Please enter an edit instruction."

    progress(0.1, desc="Preparing...")
    input_image = input_image.convert("RGB")
    # Gradio sliders/number boxes can hand back floats; the diffusers
    # pipeline requires an integer step count (and the callback divides by it).
    steps = int(steps)
    generator = torch.Generator().manual_seed(int(seed))

    def step_cb(pipe, i, t, kwargs):
        # Map step i onto the remaining 90% of the progress bar;
        # diffusers expects the callback to return its kwargs dict.
        progress(0.1 + 0.9 * (i / steps), desc=f"Step {i}/{steps}")
        return kwargs

    result = pipe(
        image=input_image,
        prompt=prompt.strip(),
        num_inference_steps=steps,
        guidance_scale=guidance,
        generator=generator,
        callback_on_step_end=step_cb,
    ).images[0]

    progress(1.0, desc="Done!")
    return result, "Done!"
# Prompt suggestions surfaced under the edit box via gr.Examples.
EXAMPLES = [
    ["Make the sky look like a sunset"],
    ["Remove the background and make it white"],
    ["Make it look like a watercolor painting"],
    ["Add snow to the ground"],
    ["Change the style to anime"],
]

with gr.Blocks(title="FLUX Kontext Image Editor") as demo:
    gr.Markdown("# FLUX.1 Kontext Image Editor")
    gr.Markdown("Edit images with natural language. Powered by FLUX.1 Kontext running locally.")
    with gr.Row():
        # Left column: inputs and controls.
        with gr.Column():
            input_img = gr.Image(type="pil", label="Upload Image")
            prompt = gr.Textbox(
                label="Edit Instruction",
                placeholder="e.g. remove the person and smooth the background",
                lines=2,
            )
            with gr.Accordion("Advanced Settings", open=False):
                steps = gr.Slider(10, 50, value=28, step=1, label="Steps (less = faster)")
                guidance = gr.Slider(1, 10, value=2.5, step=0.5, label="Guidance Scale")
                seed = gr.Number(value=42, label="Seed")
            run_btn = gr.Button("Edit Image", variant="primary")
            gr.Examples(examples=EXAMPLES, inputs=[prompt], label="Example Prompts")
        # Right column: result and status.
        with gr.Column():
            output_img = gr.Image(label="Edited Image")
            status = gr.Textbox(label="Status", interactive=False)
    run_btn.click(
        fn=edit_image,
        inputs=[input_img, prompt, steps, guidance, seed],
        outputs=[output_img, status],
    )

if __name__ == "__main__":
    demo.launch(share=False)
requirements.txt CHANGED
@@ -1,3 +1,7 @@
 
 
 
 
1
  gradio
2
- huggingface_hub
3
  Pillow
 
 
1
+ torch
2
+ diffusers
3
+ transformers
4
+ accelerate
5
  gradio
 
6
  Pillow
7
+ huggingface_hub
+ optimum-quanto