Spaces:
Running
Running
adding sign-in with HF
Browse files
app.py
CHANGED
|
@@ -6,11 +6,12 @@ import os
|
|
| 6 |
import time
|
| 7 |
from PIL import Image
|
| 8 |
import json
|
|
|
|
| 9 |
from huggingface_hub import InferenceClient
|
| 10 |
|
| 11 |
# Project by Nymbo
|
| 12 |
|
| 13 |
-
def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
|
| 14 |
"""
|
| 15 |
Generate images using HF's automatic provider routing
|
| 16 |
"""
|
|
@@ -35,12 +36,20 @@ def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps
|
|
| 35 |
|
| 36 |
try:
|
| 37 |
# Use automatic provider routing
|
| 38 |
-
api_key =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
if not api_key:
|
| 40 |
-
raise gr.Error("
|
| 41 |
-
|
| 42 |
# Initialize client with automatic provider selection (default is "auto")
|
| 43 |
-
client = InferenceClient(
|
| 44 |
|
| 45 |
# Determine the model to use
|
| 46 |
if custom_lora.strip() != "":
|
|
@@ -331,11 +340,31 @@ def apply_model_prompt_enhancements(model_name, prompt):
|
|
| 331 |
|
| 332 |
return prompt
|
| 333 |
|
| 334 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 335 |
"""
|
| 336 |
Main query function - now uses automatic provider routing
|
| 337 |
"""
|
| 338 |
-
return query_with_auto_routing(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 339 |
|
| 340 |
# Custom CSS to hide the footer in the interface
|
| 341 |
css = """
|
|
@@ -347,6 +376,10 @@ print("Initializing Gradio interface...") # Debug log
|
|
| 347 |
|
| 348 |
# Define the Gradio interface
|
| 349 |
with gr.Blocks(theme='Nymbo/Nymbo_Theme') as dalle:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 350 |
# Tab for basic settings
|
| 351 |
with gr.Tab("Basic Settings"):
|
| 352 |
with gr.Row():
|
|
@@ -603,6 +636,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme') as dalle:
|
|
| 603 |
# Set up button click event to call the main query function
|
| 604 |
text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
|
| 605 |
|
|
|
|
|
|
|
|
|
|
| 606 |
print("Launching Gradio interface...") # Debug log
|
| 607 |
# Launch the Gradio interface without showing the API or sharing externally
|
| 608 |
dalle.launch(show_api=False, share=False, ssr_mode=False)
|
|
|
|
| 6 |
import time
|
| 7 |
from PIL import Image
|
| 8 |
import json
|
| 9 |
+
from typing import Optional
|
| 10 |
from huggingface_hub import InferenceClient
|
| 11 |
|
| 12 |
# Project by Nymbo
|
| 13 |
|
| 14 |
+
def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024, oauth_token: Optional[gr.OAuthToken] = None):
|
| 15 |
"""
|
| 16 |
Generate images using HF's automatic provider routing
|
| 17 |
"""
|
|
|
|
| 36 |
|
| 37 |
try:
|
| 38 |
# Use automatic provider routing
|
| 39 |
+
api_key: Optional[str] = None
|
| 40 |
+
if oauth_token is not None and getattr(oauth_token, "token", None):
|
| 41 |
+
api_key = oauth_token.token
|
| 42 |
+
print("Using OAuth token from signed-in user for inference.")
|
| 43 |
+
else:
|
| 44 |
+
api_key = os.getenv("HF_READ_TOKEN")
|
| 45 |
+
if api_key:
|
| 46 |
+
print("Using HF_READ_TOKEN environment variable for inference.")
|
| 47 |
+
|
| 48 |
if not api_key:
|
| 49 |
+
raise gr.Error("No Hugging Face token available. Sign in with Hugging Face or set HF_READ_TOKEN.")
|
| 50 |
+
|
| 51 |
# Initialize client with automatic provider selection (default is "auto")
|
| 52 |
+
client = InferenceClient(token=api_key)
|
| 53 |
|
| 54 |
# Determine the model to use
|
| 55 |
if custom_lora.strip() != "":
|
|
|
|
| 340 |
|
| 341 |
return prompt
|
| 342 |
|
| 343 |
+
def render_profile(profile: Optional[gr.OAuthProfile] = None) -> str:
    """Return a Markdown status line describing the current HF auth state.

    Args:
        profile: OAuth profile injected by Gradio when a user is signed in,
            or ``None`` when nobody is authenticated. Defaults to ``None``
            so the function can also be invoked directly with no arguments
            (e.g. for the initial placeholder, or via ``dalle.load`` with
            ``inputs=None``).

    Returns:
        A Markdown-formatted message for display in the UI.
    """
    if profile is None:
        return (
            "Not signed in. Use the button above to sign in with Hugging Face "
            "and run inference with your own account."
        )
    # Prefer the human-readable name; fall back to the username, then to a
    # generic label if neither attribute is present on the profile object.
    display_name = getattr(profile, "name", None) or getattr(profile, "username", "Hugging Face user")
    return f"Signed in as **{display_name}**."
|
| 349 |
+
|
| 350 |
+
def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024, oauth_token: Optional[gr.OAuthToken] = None):
    """Generate an image for *prompt* via HF's automatic provider routing.

    Thin wrapper kept for backward compatibility with the UI wiring: every
    argument is forwarded unchanged to ``query_with_auto_routing``, which
    performs token selection and the actual inference call.
    """
    return query_with_auto_routing(
        prompt=prompt,
        model=model,
        custom_lora=custom_lora,
        is_negative=is_negative,
        steps=steps,
        cfg_scale=cfg_scale,
        sampler=sampler,
        seed=seed,
        strength=strength,
        width=width,
        height=height,
        oauth_token=oauth_token,
    )
|
| 368 |
|
| 369 |
# Custom CSS to hide the footer in the interface
|
| 370 |
css = """
|
|
|
|
| 376 |
|
| 377 |
# Define the Gradio interface
|
| 378 |
with gr.Blocks(theme='Nymbo/Nymbo_Theme') as dalle:
|
| 379 |
+
with gr.Row(elem_id="oauth-row"):
|
| 380 |
+
login_button = gr.LoginButton()
|
| 381 |
+
auth_status = gr.Markdown(render_profile(None), elem_id="oauth-status")
|
| 382 |
+
|
| 383 |
# Tab for basic settings
|
| 384 |
with gr.Tab("Basic Settings"):
|
| 385 |
with gr.Row():
|
|
|
|
| 636 |
# Set up button click event to call the main query function
|
| 637 |
text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
|
| 638 |
|
| 639 |
+
# Update authentication status once the app loads (and after OAuth redirects)
|
| 640 |
+
dalle.load(render_profile, inputs=None, outputs=auth_status)
|
| 641 |
+
|
| 642 |
print("Launching Gradio interface...") # Debug log
|
| 643 |
# Launch the Gradio interface without showing the API or sharing externally
|
| 644 |
dalle.launch(show_api=False, share=False, ssr_mode=False)
|