w1r4 committed on
Commit
f726ac6
·
verified ·
1 Parent(s): 837be14

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -2,26 +2,33 @@ import gradio as gr
2
  import os
3
  from huggingface_hub import InferenceClient
4
 
 
5
  model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
6
 
7
  def respond(message, history, system_message, temperature, request: gr.Request):
8
- # 1. Try to get the user's token (if they logged in via the button)
9
  token = None
 
 
10
  if request:
11
- token = request.token
 
12
 
13
  # 2. If no user token, fall back to the Space's secret token (HF_TOKEN)
14
  if token is None:
15
  token = os.getenv("HF_TOKEN")
16
 
 
17
  if token is None:
18
- yield "Error: No authentication token found. Please add 'HF_TOKEN' to Space Secrets."
19
  return
20
 
21
- # Initialize client with the found token
22
  client = InferenceClient(model_id, token=token)
23
 
 
24
  messages = [{"role": "system", "content": system_message}]
 
 
25
  for user_msg, bot_msg in history:
26
  messages.append({"role": "user", "content": user_msg})
27
  messages.append({"role": "assistant", "content": bot_msg})
@@ -49,7 +56,6 @@ with gr.Blocks(fill_height=True) as demo:
49
  with gr.Sidebar():
50
  gr.Markdown("# AI Coding Assistant")
51
  gr.Markdown(f"Running **{model_id}**")
52
- # This button allows users to use their OWN token if they want
53
  gr.LoginButton("Sign in")
54
 
55
  gr.ChatInterface(
 
2
  import os
3
  from huggingface_hub import InferenceClient
4
 
5
+ # Use Qwen 2.5 Coder
6
  model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
7
 
8
  def respond(message, history, system_message, temperature, request: gr.Request):
 
9
  token = None
10
+
11
+ # 1. Safely try to get the user's token (works on HF Spaces if logged in)
12
  if request:
13
+ # We use getattr() to avoid the "AttributeError" if running locally
14
+ token = getattr(request, "token", None)
15
 
16
  # 2. If no user token, fall back to the Space's secret token (HF_TOKEN)
17
  if token is None:
18
  token = os.getenv("HF_TOKEN")
19
 
20
+ # 3. If still no token, stop.
21
  if token is None:
22
+ yield "Error: No authentication token found. Please add 'HF_TOKEN' to Space Secrets or run locally with an env variable."
23
  return
24
 
25
+ # Initialize client
26
  client = InferenceClient(model_id, token=token)
27
 
28
+ # Build the message format
29
  messages = [{"role": "system", "content": system_message}]
30
+
31
+ # 'history' comes as tuples [(user, bot), (user, bot)...]
32
  for user_msg, bot_msg in history:
33
  messages.append({"role": "user", "content": user_msg})
34
  messages.append({"role": "assistant", "content": bot_msg})
 
56
  with gr.Sidebar():
57
  gr.Markdown("# AI Coding Assistant")
58
  gr.Markdown(f"Running **{model_id}**")
 
59
  gr.LoginButton("Sign in")
60
 
61
  gr.ChatInterface(