Update app.py
Browse files
app.py
CHANGED
|
@@ -10,7 +10,7 @@ from huggingface_hub import InferenceClient
|
|
| 10 |
|
| 11 |
# Project by Nymbo
|
| 12 |
|
| 13 |
-
def query_with_auto_routing(prompt, model, custom_lora,
|
| 14 |
"""
|
| 15 |
Generate images using HF's automatic provider routing
|
| 16 |
"""
|
|
@@ -62,14 +62,39 @@ def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps
|
|
| 62 |
num_inference_steps=steps,
|
| 63 |
guidance_scale=cfg_scale,
|
| 64 |
seed=seed if seed != -1 else None,
|
|
|
|
| 65 |
)
|
| 66 |
|
| 67 |
print(f'Generation {key} completed with automatic routing!')
|
| 68 |
return image
|
| 69 |
|
| 70 |
except Exception as e:
|
| 71 |
-
|
| 72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
|
| 74 |
def get_model_id_from_name(model_name):
|
| 75 |
"""
|
|
@@ -331,11 +356,11 @@ def apply_model_prompt_enhancements(model_name, prompt):
|
|
| 331 |
|
| 332 |
return prompt
|
| 333 |
|
| 334 |
-
def query(prompt, model, custom_lora,
|
| 335 |
"""
|
| 336 |
Main query function - now uses automatic provider routing
|
| 337 |
"""
|
| 338 |
-
return query_with_auto_routing(prompt, model, custom_lora,
|
| 339 |
|
| 340 |
# Custom CSS to hide the footer in the interface
|
| 341 |
css = """
|
|
|
|
| 10 |
|
| 11 |
# Project by Nymbo
|
| 12 |
|
| 13 |
+
def query_with_auto_routing(prompt, model, custom_lora, negative_prompt: str | None = None, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
|
| 14 |
"""
|
| 15 |
Generate images using HF's automatic provider routing
|
| 16 |
"""
|
|
|
|
| 62 |
num_inference_steps=steps,
|
| 63 |
guidance_scale=cfg_scale,
|
| 64 |
seed=seed if seed != -1 else None,
|
| 65 |
+
negative_prompt=negative_prompt if negative_prompt else None,
|
| 66 |
)
|
| 67 |
|
| 68 |
print(f'Generation {key} completed with automatic routing!')
|
| 69 |
return image
|
| 70 |
|
| 71 |
except Exception as e:
|
| 72 |
+
# Sanitize payment/credit-related errors so users don't see sensitive details
|
| 73 |
+
err_text = str(e)
|
| 74 |
+
is_payment_err = False
|
| 75 |
+
try:
|
| 76 |
+
# Try to detect HTTP status code 402 if available (e.g., httpx.HTTPStatusError)
|
| 77 |
+
status = getattr(getattr(e, "response", None), "status_code", None)
|
| 78 |
+
if status == 402:
|
| 79 |
+
is_payment_err = True
|
| 80 |
+
except Exception:
|
| 81 |
+
pass
|
| 82 |
+
# Fallback string heuristics
|
| 83 |
+
payment_markers = (
|
| 84 |
+
"402",
|
| 85 |
+
"Payment Required",
|
| 86 |
+
"exceeded your monthly included credits",
|
| 87 |
+
"credits for Inference Providers",
|
| 88 |
+
)
|
| 89 |
+
if any(m.lower() in err_text.lower() for m in payment_markers):
|
| 90 |
+
is_payment_err = True
|
| 91 |
+
|
| 92 |
+
if is_payment_err:
|
| 93 |
+
print("Generation failed due to billing/credit limits (details hidden from user).")
|
| 94 |
+
raise gr.Error("Generation temporarily unavailable. Please try again later.")
|
| 95 |
+
else:
|
| 96 |
+
print(f"Error with automatic routing: {err_text}")
|
| 97 |
+
raise gr.Error("Failed to generate image. Please try again.")
|
| 98 |
|
| 99 |
def get_model_id_from_name(model_name):
|
| 100 |
"""
|
|
|
|
| 356 |
|
| 357 |
return prompt
|
| 358 |
|
| 359 |
+
def query(prompt, model, custom_lora, negative_prompt=None, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    """
    Main query function - now uses automatic provider routing
    """
    # Thin wrapper: forward every argument to the auto-routing
    # implementation, spelled out as keywords for readability.
    return query_with_auto_routing(
        prompt,
        model,
        custom_lora,
        negative_prompt=negative_prompt,
        steps=steps,
        cfg_scale=cfg_scale,
        sampler=sampler,
        seed=seed,
        strength=strength,
        width=width,
        height=height,
    )
|
| 364 |
|
| 365 |
# Custom CSS to hide the footer in the interface
|
| 366 |
css = """
|