Update README.md
Browse files
README.md
CHANGED
|
@@ -23,7 +23,7 @@ library_name: diffusers
|
|
| 23 |
|
| 24 |
---
|
| 25 |
|
| 26 |
-
## Sample Inferences : Demo
|
| 27 |
|
| 28 |
<table style="width:100%; border-collapse:collapse;">
|
| 29 |
<tr>
|
|
@@ -66,7 +66,147 @@ library_name: diffusers
|
|
| 66 |
|
| 67 |
---
|
| 68 |
|
| 69 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |

|
| 72 |
|
|
|
|
| 23 |
|
| 24 |
---
|
| 25 |
|
| 26 |
+
## **Sample Inferences : Demo**
|
| 27 |
|
| 28 |
<table style="width:100%; border-collapse:collapse;">
|
| 29 |
<tr>
|
|
|
|
| 66 |
|
| 67 |
---
|
| 68 |
|
| 69 |
+
# **Quick start with diffusers**
|
| 70 |
+
|
| 71 |
+
## Required Packages
|
| 72 |
+
|
| 73 |
+
> [!NOTE]
> `diffusers torch gradio transformers pillow gradio-imageslider huggingface_hub sentencepiece spaces peft torchvision`
|
| 75 |
+
|
| 76 |
+
## Run Demo
|
| 77 |
+
|
| 78 |
+
```py
|
| 79 |
+
import os
|
| 80 |
+
import gradio as gr
|
| 81 |
+
import numpy as np
|
| 82 |
+
import spaces
|
| 83 |
+
import torch
|
| 84 |
+
import random
|
| 85 |
+
from PIL import Image
|
| 86 |
+
from typing import Iterable
|
| 87 |
+
|
| 88 |
+
from diffusers import FluxKontextPipeline
|
| 89 |
+
from diffusers.utils import load_image
|
| 90 |
+
from huggingface_hub import hf_hub_download
|
| 91 |
+
from gradio_imageslider import ImageSlider
|
| 92 |
+
from gradio.themes import Soft
|
| 93 |
+
from gradio.themes.utils import colors, fonts, sizes
|
| 94 |
+
|
| 95 |
+
# Pick the best available device once; everything below follows it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# --- Main Model Initialization ---
# Largest value the seed slider can produce / randomize over.
MAX_SEED = np.iinfo(np.int32).max

# Use the computed `device` instead of a hard-coded "cuda" so the script
# still loads on CPU-only machines (inference will be slow there, but it
# no longer crashes at import time when no GPU is present).
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16,
).to(device)

# --- Load New Adapter ---
# LoRA adapter specialized for watermark removal; it is (re-)activated
# per request inside infer() via pipe.set_adapters().
pipe.load_lora_weights(
    "prithivMLmods/Kontext-Watermark-Remover",
    weight_name="Kontext-Watermark-Remover.safetensors",
    adapter_name="watermark_remover",
)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@spaces.GPU
def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
    """Run the watermark-removal edit and return a before/after pair.

    Returns a 3-tuple: ``(original, edited)`` for the ImageSlider, the
    seed actually used, and a button update that reveals the reuse button.
    Raises ``gr.Error`` when no image was uploaded.
    """
    if not input_image:
        raise gr.Error("Please upload an image for editing.")

    # Make sure only the watermark-removal LoRA is active for this call.
    pipe.set_adapters(["watermark_remover"], adapter_weights=[1.0])

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Work on an RGB copy so the uploaded image is never mutated.
    source = input_image.copy().convert("RGB")
    width, height = source.size

    result = pipe(
        image=source,
        prompt=prompt,
        guidance_scale=guidance_scale,
        width=width,
        height=height,
        num_inference_steps=steps,
        generator=torch.Generator().manual_seed(seed),
    ).images[0]

    return (source, result), seed, gr.Button(visible=True)
|
| 131 |
+
|
| 132 |
+
# Page-level styling: center the column and enlarge the main title.
css="""
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
#main-title h1 {font-size: 2.1em !important;}
"""


def _pick_edited(images):
    """Return the edited (right-hand) image from an ImageSlider pair."""
    return images[1] if isinstance(images, (list, tuple)) and len(images) > 1 else images


with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Header.
        gr.Markdown("# **Photo-Mate-i2i: Watermark Remover**", elem_id="main-title")
        gr.Markdown("Image manipulation with FLUX.1 Kontext. This demo focuses on watermark removal.")

        with gr.Row():
            # Left column: input image, prompt, and generation controls.
            with gr.Column():
                input_image = gr.Image(label="Upload Image with Watermark", type="pil", height="300")
                with gr.Row():
                    prompt = gr.Text(
                        label="Edit Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="e.g., 'Remove the watermark'",
                        container=False,
                        value="[photo content], remove any watermark text or logos from the image while preserving the background, texture, lighting, and overall realism. Ensure the edited areas blend seamlessly with surrounding details, leaving no visible traces of watermark removal."
                    )
                    run_button = gr.Button("Run", variant="primary", scale=0)

                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=2.5,
                    )
                    steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=30,
                        value=28,
                        step=1
                    )

            # Right column: before/after slider and the reuse shortcut.
            with gr.Column():
                output_slider = ImageSlider(label="Before / After", show_label=False, interactive=False)
                reuse_button = gr.Button("Reuse this image", visible=False)

    # Run inference on button click or prompt submit.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_slider, seed, reuse_button]
    )

    # Feed the edited result back in as the next input image.
    reuse_button.click(
        fn=_pick_edited,
        inputs=[output_slider],
        outputs=[input_image]
    )

demo.launch(mcp_server=True, ssr_mode=False, show_error=True)
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
---
|
| 208 |
+
|
| 209 |
+
## **Sample Inferences : Inference Providers**
|
| 210 |
|
| 211 |

|
| 212 |
|