add lora + updates (#9)

Commits:
- add lora + updates (50f36be8d82a4b3bc31705e604a1fe4e373ff835)
- Update flux_loras.json (ab82c1e04ee0145b5fe4ab436c5634103b3cd63b)
- Update app.py (a192e5461e61648bae16c264938af551bc2b36df)
- Update flux_loras.json (01159528470a26c85d78b37b8cddcf78e2b0fea4)

Files changed:
- README.md +2 -2
- app.py +16 -10
- flux_loras.json +11 -0
README.md CHANGED

@@ -1,5 +1,5 @@
 ---
-title: FLUX.1 Kontext
+title: FLUX.1 Kontext LoRA the Explorer
 emoji: 👩🏻‍🎤
 colorFrom: green
 colorTo: gray
@@ -8,7 +8,7 @@ sdk_version: 5.34.2
 app_file: app.py
 pinned: true
 license: mit
-short_description:
+short_description: edit images with Kontext and LoRAs
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED

@@ -31,6 +31,8 @@ with open("flux_loras.json", "r") as file:
         "trigger_position": item.get("trigger_position", "prepend"),
         "weights": item.get("weights", "pytorch_lora_weights.safetensors"),
         "lora_type": item.get("lora_type", "flux"),
+        "lora_scale_config": item.get("lora_scale", 0),
+        "prompt_placeholder": item.get("prompt_placeholder", ""),
     }
     for item in data
 ]
@@ -59,7 +61,11 @@ def update_selection(selected_state: gr.SelectData, flux_loras):
     trigger_word = flux_loras[selected_state.index]["trigger_word"]
 
     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo})"
-    new_placeholder = f"opt - describe the person/subject, e.g. 'a man with glasses and a beard'"
+    config_placeholder = flux_loras[selected_state.index]["prompt_placeholder"]
+    if config_placeholder:
+        new_placeholder = config_placeholder
+    else:
+        new_placeholder = f"opt - describe the person/subject, e.g. 'a man with glasses and a beard'"
 
     return updated_text, gr.update(placeholder=new_placeholder), selected_state.index
 
@@ -154,6 +160,9 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
     pipe.unload_lora_weights()
 
     # Load new LoRA
+    if lora_to_use["lora_scale_config"]:
+        lora_scale = lora_to_use["lora_scale_config"]
+        print("lora scale loaded from config", lora_scale)
     lora_path = load_lora_weights(lora_to_use["repo"], lora_to_use["weights"])
     if lora_path:
         pipe.load_lora_weights(lora_path, adapter_name="selected_lora")
@@ -190,19 +199,19 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
     prompt = f"{trigger_word}. {prompt}."
     try:
         image = pipe(
-            image=input_image,
-            prompt=prompt,
-            guidance_scale=guidance_scale,
+            image=input_image,
             width=input_image.size[0],
             height=input_image.size[1],
+            prompt=prompt,
+            guidance_scale=guidance_scale,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
 
-        return image, seed, gr.update(visible=True)
+        return image, seed, gr.update(visible=True), lora_scale
 
     except Exception as e:
         print(f"Error during inference: {e}")
-        return None, seed, gr.update(visible=False)
+        return None, seed, gr.update(visible=False), lora_scale
 
 # CSS styling
 css = """
@@ -217,9 +226,6 @@ css = """
     color: #2563eb;
     font-weight: bold;
 }
-#title{text-align: center}
-#title h1{font-size: 3em; display:inline-flex; align-items:center}
-#title img{width: 100px; margin-right: 0.5em}
 #prompt {
     flex-grow: 1;
 }
@@ -347,7 +353,7 @@ with gr.Blocks(css=css, theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Lexend
         triggers=[run_button.click, prompt.submit],
         fn=infer_with_lora_wrapper,
         inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, portrait_mode, gr_flux_loras],
-        outputs=[result, seed, reuse_button]
+        outputs=[result, seed, reuse_button, lora_scale]
     )
 
     reuse_button.click(
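For reference, here is a minimal standalone sketch of the new per-LoRA config flow in app.py: catalog entries may now carry an optional `lora_scale` (stored as `lora_scale_config`, default 0) and `prompt_placeholder` (default `""`), and a truthy config scale overrides the value coming from the UI slider. The `effective_lora_scale` helper below is hypothetical; app.py inlines this logic inside `infer_with_lora` and also returns the resolved scale as an extra output so the `lora_scale` slider reflects it.

```python
import json

# Load the catalog and apply the same defaults app.py uses for missing keys.
with open("flux_loras.json", "r") as file:
    data = json.load(file)

flux_loras = [
    {
        "repo": item["repo"],
        "weights": item.get("weights", "pytorch_lora_weights.safetensors"),
        "lora_type": item.get("lora_type", "flux"),
        # 0 is falsy, so entries without "lora_scale" keep the UI slider value.
        "lora_scale_config": item.get("lora_scale", 0),
        "prompt_placeholder": item.get("prompt_placeholder", ""),
    }
    for item in data
]

def effective_lora_scale(lora_to_use: dict, ui_lora_scale: float) -> float:
    """Hypothetical helper: a config scale (e.g. 0.5 for the Youtube
    Thumbnails entry) wins over the slider value when it is set."""
    if lora_to_use["lora_scale_config"]:
        return lora_to_use["lora_scale_config"]
    return ui_lora_scale
```

One consequence of using 0 as the "unset" sentinel is that a config scale of exactly 0 cannot be expressed; entries that want the slider value simply omit the key.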
flux_loras.json CHANGED

@@ -1,4 +1,15 @@
 [
+    {
+        "image": "https://huggingface.co/fal/Youtube-Thumbnails-Kontext-Dev-LoRA/resolve/main/images/2.png",
+        "title": "Youtube Thumbnails",
+        "repo": "fal/Youtube-Thumbnails-Kontext-Dev-LoRA",
+        "weights": "thumbnails_lora_rank_32.safetensors",
+        "trigger_word": "Generate youtube thumbnails using text ",
+        "trigger_position": "prepend",
+        "lora_type": "kontext",
+        "lora_scale": 0.5,
+        "prompt_placeholder": "text for thumbnail, e.g. 'MIND BLOWN!'"
+    },
     {
         "image": "https://huggingface.co/ostris/kontext_big_head_lora/resolve/main/imgs/06_big.png",
         "title": "Big Head LoRA",