Update app.py
Browse files
app.py
CHANGED
|
@@ -11,9 +11,30 @@ from diffusers.utils import load_image
|
|
| 11 |
|
| 12 |
from huggingface_hub import hf_hub_download
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
MAX_SEED = np.iinfo(np.int32).max
|
| 19 |
|
|
@@ -286,14 +307,18 @@ def infer(input_image, prompt, illumination_dropdown, direction_dropdown, seed=4
|
|
| 286 |
prompt_with_template = f"Relight the image{prompt_prefix}. {prompt} Maintain the identity of the foreground subjects."
|
| 287 |
|
| 288 |
print(prompt_with_template)
|
| 289 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 290 |
image = pipe(
|
| 291 |
-
image=input_image,
|
| 292 |
prompt=prompt_with_template,
|
| 293 |
guidance_scale=guidance_scale,
|
| 294 |
width=input_image.size[0],
|
| 295 |
height=input_image.size[1],
|
| 296 |
-
generator=
|
| 297 |
).images[0]
|
| 298 |
return [input_image, image], seed, prompt_with_template
|
| 299 |
|
|
|
|
| 11 |
|
| 12 |
from huggingface_hub import hf_hub_download
|
| 13 |
|
| 14 |
+
# --- Lazy pipeline init to avoid CUDA errors on Zero GPU ---
PIPE = None

def get_pipe():
    """Build the shared FluxKontext pipeline once and return it.

    On Hugging Face Zero GPU, CUDA only exists inside @spaces.GPU
    functions, so construction is deferred until first use instead of
    happening at import time.
    """
    global PIPE
    if PIPE is not None:
        return PIPE

    # Pick the target device first; half precision on GPU, bfloat16 on CPU.
    use_cuda = torch.cuda.is_available()
    target_device = "cuda" if use_cuda else "cpu"
    target_dtype = torch.float16 if use_cuda else torch.bfloat16

    candidate = FluxKontextPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=target_dtype
    )
    # Attach the relighting LoRA and blend it at 0.75 strength.
    candidate.load_lora_weights(
        "kontext-community/relighting-kontext-dev-lora-v3",
        weight_name="relighting-kontext-dev-lora-v3.safetensors",
        adapter_name="lora",
    )
    candidate.set_adapters(["lora"], adapter_weights=[0.75])
    candidate.to(target_device)

    PIPE = candidate
    return PIPE
# ----------------------------------------------------------
|
| 38 |
|
| 39 |
MAX_SEED = np.iinfo(np.int32).max
|
| 40 |
|
|
|
|
| 307 |
prompt_with_template = f"Relight the image{prompt_prefix}. {prompt} Maintain the identity of the foreground subjects."
|
| 308 |
|
| 309 |
print(prompt_with_template)
|
| 310 |
+
|
| 311 |
+
pipe = get_pipe()
|
| 312 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 313 |
+
gen = torch.Generator(device=device).manual_seed(seed)
|
| 314 |
+
|
| 315 |
image = pipe(
|
| 316 |
+
image=input_image,
|
| 317 |
prompt=prompt_with_template,
|
| 318 |
guidance_scale=guidance_scale,
|
| 319 |
width=input_image.size[0],
|
| 320 |
height=input_image.size[1],
|
| 321 |
+
generator=gen,
|
| 322 |
).images[0]
|
| 323 |
return [input_image, image], seed, prompt_with_template
|
| 324 |
|