Nymbo committed on
Commit
fc20ab2
·
verified ·
1 Parent(s): d513ed7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -7
app.py CHANGED
@@ -1,10 +1,10 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
- import json
5
  import base64
6
  from PIL import Image
7
  import io
 
8
 
9
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
10
  print("Access token loaded.")
@@ -39,6 +39,15 @@ def encode_image(image_path):
39
  print(f"Error encoding image: {e}")
40
  return None
41
 
 
 
 
 
 
 
 
 
 
42
  def respond(
43
  message,
44
  image_files, # Changed parameter name and structure
@@ -51,7 +60,8 @@ def respond(
51
  seed,
52
  custom_model,
53
  model_search_term,
54
- selected_model
 
55
  ):
56
  print(f"Received message: {message}")
57
  print(f"Received {len(image_files) if image_files else 0} images")
@@ -63,9 +73,22 @@ def respond(
63
  print(f"Model search term: {model_search_term}")
64
  print(f"Selected model from radio: {selected_model}")
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  # Initialize the Inference Client with default HF inference
67
- client = InferenceClient(token=ACCESS_TOKEN)
68
- print(f"Hugging Face Inference Client initialized with standard HF inference.")
69
 
70
  # Convert seed to None if -1 (meaning random)
71
  if seed == -1:
@@ -201,6 +224,10 @@ def respond(
201
 
202
  # GRADIO UI
203
  with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
 
 
 
204
  # Create the chatbot component
205
  chatbot = gr.Chatbot(
206
  height=600,
@@ -386,7 +413,19 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
386
  return history
387
 
388
  # Define bot response function
389
- def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, custom_model, search_term, selected_model):
 
 
 
 
 
 
 
 
 
 
 
 
390
  # Check if history is valid
391
  if not history or len(history) == 0:
392
  print("No history to process")
@@ -435,7 +474,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
435
  seed,
436
  custom_model,
437
  search_term,
438
- selected_model
 
439
  ):
440
  history[-1][1] = response
441
  yield history
@@ -453,7 +493,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
453
  seed,
454
  custom_model,
455
  search_term,
456
- selected_model
 
457
  ):
458
  history[-1][1] = response
459
  yield history
@@ -492,6 +533,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
492
  )
493
  print("Featured model radio button change event linked.")
494
 
 
 
495
  print("Gradio interface initialized.")
496
 
497
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
 
4
  import base64
5
  from PIL import Image
6
  import io
7
+ from typing import Optional
8
 
9
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
10
  print("Access token loaded.")
 
39
  print(f"Error encoding image: {e}")
40
  return None
41
 
42
+
43
def render_profile(profile: Optional[gr.OAuthProfile]) -> str:
    """Return a Markdown snippet describing the current sign-in state.

    Shown next to the login button; a ``None`` profile means the visitor
    has not authenticated with Hugging Face yet.
    """
    if profile is not None:
        # Prefer the full display name; fall back to the username, then a
        # generic label if neither attribute is present on the profile.
        display = getattr(profile, "name", None)
        if not display:
            display = getattr(profile, "username", "Hugging Face user")
        return f"Signed in as **{display}**."
    return "Not signed in. Use the button above to sign in with Hugging Face and run inference with your own account."
50
+
51
  def respond(
52
  message,
53
  image_files, # Changed parameter name and structure
 
60
  seed,
61
  custom_model,
62
  model_search_term,
63
+ selected_model,
64
+ oauth_token: Optional[gr.OAuthToken] = None
65
  ):
66
  print(f"Received message: {message}")
67
  print(f"Received {len(image_files) if image_files else 0} images")
 
73
  print(f"Model search term: {model_search_term}")
74
  print(f"Selected model from radio: {selected_model}")
75
 
76
+ api_token: Optional[str] = None
77
+
78
+ if oauth_token is not None and getattr(oauth_token, "token", None):
79
+ api_token = oauth_token.token
80
+ print("Using OAuth token from signed-in user for inference.")
81
+ elif ACCESS_TOKEN:
82
+ api_token = ACCESS_TOKEN
83
+ print("Using server-configured Hugging Face token for inference.")
84
+ else:
85
+ raise gr.Error(
86
+ "No Hugging Face session detected. Please sign in with your Hugging Face account before running the chat."
87
+ )
88
+
89
  # Initialize the Inference Client with default HF inference
90
+ client = InferenceClient(token=api_token)
91
+ print("Hugging Face Inference Client initialized with available token.")
92
 
93
  # Convert seed to None if -1 (meaning random)
94
  if seed == -1:
 
224
 
225
  # GRADIO UI
226
  with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
227
+ with gr.Row(elem_id="oauth-row"):
228
+ login_button = gr.LoginButton()
229
+ auth_status = gr.Markdown(render_profile(None), elem_id="oauth-status")
230
+
231
  # Create the chatbot component
232
  chatbot = gr.Chatbot(
233
  height=600,
 
413
  return history
414
 
415
  # Define bot response function
416
+ def bot(
417
+ history,
418
+ system_msg,
419
+ max_tokens,
420
+ temperature,
421
+ top_p,
422
+ freq_penalty,
423
+ seed,
424
+ custom_model,
425
+ search_term,
426
+ selected_model,
427
+ oauth_token: Optional[gr.OAuthToken] = None,
428
+ ):
429
  # Check if history is valid
430
  if not history or len(history) == 0:
431
  print("No history to process")
 
474
  seed,
475
  custom_model,
476
  search_term,
477
+ selected_model,
478
+ oauth_token
479
  ):
480
  history[-1][1] = response
481
  yield history
 
493
  seed,
494
  custom_model,
495
  search_term,
496
+ selected_model,
497
+ oauth_token
498
  ):
499
  history[-1][1] = response
500
  yield history
 
533
  )
534
  print("Featured model radio button change event linked.")
535
 
536
+ demo.load(render_profile, inputs=None, outputs=auth_status)
537
+
538
  print("Gradio interface initialized.")
539
 
540
  if __name__ == "__main__":