os-odyssey commited on
Commit
a9d3da2
·
verified ·
1 Parent(s): 22a1588

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -52
app.py CHANGED
@@ -1,8 +1,8 @@
1
  # app.py
2
- # GPT Chatbot powered by HuggingFace Inference API
3
- # Clean, safe, production-ready for Hugging Face Spaces
 
4
 
5
- import os
6
  import time
7
  import gradio as gr
8
  from huggingface_hub import InferenceClient
@@ -10,86 +10,93 @@ from huggingface_hub import InferenceClient
10
  # -----------------------
11
  # Configuration
12
  # -----------------------
13
- HF_TOKEN = os.getenv("HF_API_TOKEN")
14
- GPT_MODEL_ID = os.getenv("GPT_MODEL_ID", "HuggingFaceH4/zephyr-7b-beta")
15
-
16
- if HF_TOKEN is None:
17
- raise ValueError("❌ Please define HF_API_TOKEN in Hugging Face Secrets.")
18
-
19
- # Create inference client
20
- client = InferenceClient(
21
- model=GPT_MODEL_ID,
22
- token=HF_TOKEN,
23
- )
24
 
25
  # -----------------------
26
  # Chatbot Core
27
  # -----------------------
28
- def gpt_chat(prompt: str, history, thinking_mode=False, thinking_time=2.0):
29
  """
30
- Generate a chat response using GPT model on HuggingFace Inference API.
31
- Clean, without chain-of-thought exposure.
32
  """
 
 
33
 
34
  if not prompt.strip():
35
- return history, "لطفاً پیام بنویسید."
 
 
 
 
 
 
36
 
37
- # Optional "thinking" simulation
38
- if thinking_mode:
39
- time.sleep(thinking_time)
 
 
 
40
 
41
  try:
42
- # Build clean conversation prompt
43
  conversation_text = ""
44
  for user_msg, bot_msg in history:
45
- conversation_text += f"User: {user_msg}\nAssistant: {bot_msg}\n"
 
46
  conversation_text += f"User: {prompt}\nAssistant:"
47
 
48
- # Query HuggingFace API
49
  response = client.text_generation(
50
  prompt=conversation_text,
51
  max_new_tokens=200,
52
  temperature=0.7,
53
- do_sample=True,
54
  )
55
 
56
- # Clean output
57
  answer = response.replace(conversation_text, "").strip()
58
- answer = answer.split("Assistant:")[-1].strip()
 
59
 
60
- # Update history
61
- history.append((prompt, answer))
62
  return history, "OK"
63
 
64
  except Exception as e:
65
- print("Error:", e)
66
- history.append((prompt, "⚠ خطا در ارتباط با مدل."))
67
  return history, str(e)
68
 
69
  # -----------------------
70
  # Gradio UI
71
  # -----------------------
72
- with gr.Blocks(title="GPT Chatbot") as demo:
73
  gr.Markdown("""
74
- # 🤖 GPT Chatbot (HuggingFace Inference API)
75
- مدل: **{GPT_MODEL_ID}**
76
-
77
- این چت‌بات از API رسمی هاگینگ‌فیس استفاده می‌کند.
78
  """)
79
 
80
- chatbot = gr.Chatbot(label="Chat")
81
- user_input = gr.Textbox(placeholder="پیام خود را بنویسید...", show_label=False)
82
- thinking_mode = gr.Checkbox(label="حالت فکر کردن", value=True)
83
- thinking_time = gr.Slider(0, 5, value=2, step=0.5, label="مدت فکر کردن (ثانیه)")
84
- status_box = gr.Textbox(label="وضعیت")
85
-
86
- def respond(history, msg, thinking, thinking_t):
87
- return gpt_chat(msg, history, thinking, thinking_t)
88
-
89
- user_input.submit(
90
- respond,
91
- inputs=[chatbot, user_input, thinking_mode, thinking_time],
92
- outputs=[chatbot, status_box]
93
- )
94
-
95
- demo.launch()
 
 
 
 
 
 
 
 
1
  # app.py
2
+ # GPT Chatbot with thinking mode
3
+ # Users enter their HF_API_TOKEN in the UI
4
+ # Ready for Hugging Face Spaces
5
 
 
6
  import time
7
  import gradio as gr
8
  from huggingface_hub import InferenceClient
 
10
  # -----------------------
11
  # Configuration
12
  # -----------------------
13
# Text-generation model used for every request; change the ID to use another model.
GPT_MODEL_ID = "HuggingFaceH4/zephyr-7b-beta"  # default model, can change
 
 
 
 
 
 
 
 
 
 
14
 
15
  # -----------------------
16
  # Chatbot Core
17
  # -----------------------
18
def gpt_chat(prompt, history, thinking, thinking_time, hf_token):
    """
    Generate one chat turn via the Hugging Face Inference API.

    Parameters:
        prompt: the user's new message; empty/whitespace/None is rejected.
        history: list of (user, assistant) tuples from the Gradio Chatbot.
        thinking: when True, sleep for ``thinking_time`` seconds to simulate
            a "thinking" pause before querying the model.
        thinking_time: pause duration in seconds (clamped to >= 0).
        hf_token: the user's Hugging Face API token entered in the UI;
            surrounding whitespace (common when pasting) is stripped.

    Returns:
        (updated_history, status) — status is "OK" on success, otherwise a
        human-readable error description shown in the status box.
    """
    # Validate the token first: without it the API call cannot succeed.
    # `or ""` guards hf_token=None; strip() tolerates pasted whitespace.
    token = (hf_token or "").strip()
    if not token:
        return history, "❌ Please enter your Hugging Face API Token."

    # Reject empty / whitespace-only messages; `not prompt` also guards
    # prompt=None, which would otherwise raise AttributeError on .strip().
    if not prompt or not prompt.strip():
        return history, "Please write a message."

    # Create the client here so a bad token surfaces as a status message
    # rather than an unhandled exception at import time.
    try:
        client = InferenceClient(model=GPT_MODEL_ID, token=token)
    except Exception as e:
        return history, f"Error creating HF client: {e}"

    # Tentatively append the user turn; the None placeholder is replaced
    # with the model's answer (or an error) before returning.
    history = history + [(prompt, None)]

    # Optional simulated "thinking" pause (never negative).
    if thinking:
        time.sleep(max(0.0, thinking_time))

    try:
        # Flatten prior *completed* turns (bot_msg not None) into a
        # plain-text transcript, then add the new user turn.
        conversation_text = ""
        for user_msg, bot_msg in history:
            if bot_msg is not None:
                conversation_text += f"User: {user_msg}\nAssistant: {bot_msg}\n"
        conversation_text += f"User: {prompt}\nAssistant:"

        response = client.text_generation(
            prompt=conversation_text,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
        )

        # Defensive cleanup: some models echo the prompt or emit extra
        # "Assistant:" markers; keep only the final answer text.
        answer = response.replace(conversation_text, "").strip()
        if "Assistant:" in answer:
            answer = answer.split("Assistant:")[-1].strip()

        # Replace the placeholder turn with the real answer.
        history = history[:-1] + [(prompt, answer)]
        return history, "OK"

    except Exception as e:
        # Keep the user's message visible, paired with a generic error text,
        # and surface the exception details in the status box.
        history = history[:-1] + [(prompt, "Error generating response.")]
        return history, str(e)
69
 
70
  # -----------------------
71
  # Gradio UI
72
  # -----------------------
73
# Build the Gradio interface. NOTE(review): component creation order defines
# the rendered layout, so statements here must not be reordered.
with gr.Blocks(title="NovaTalk - Chatbot") as demo:
    gr.Markdown("""
    # 🤖 NovaTalk Chatbot
    Enter your Hugging Face API Token below to start chatting.
    Built with ❤️ by [JumpLander](https://jumplander.org)

    """)

    with gr.Row():
        with gr.Column(scale=2):
            # Conversation display and the message entry box.
            chatbot = gr.Chatbot(label="Chat")
            user_input = gr.Textbox(show_label=False, placeholder="Write a message and press Send")
            # Token is entered per-session by the user; type="password" masks it on screen.
            hf_token_input = gr.Textbox(
                label="Hugging Face API Token",
                placeholder="Paste your HF_API_TOKEN here",
                type="password"
            )
            # Controls for the simulated "thinking" delay in gpt_chat.
            thinking_mode = gr.Checkbox(label="Thinking mode", value=True)
            thinking_time = gr.Slider(0, 5, value=2.0, step=0.5, label="Thinking time (seconds)")
            # Shows "OK" or the error text returned by gpt_chat.
            status_box = gr.Textbox(label="Status")
            send_btn = gr.Button("Send")

    # handle send
    def respond(history, msg, thinking, thinking_t, hf_token):
        # `history or []` guards the first turn, when the Chatbot value may be None.
        return gpt_chat(msg, history or [], thinking, thinking_t, hf_token)

    # Both clicking Send and pressing Enter in the textbox submit the message.
    send_btn.click(respond, inputs=[chatbot, user_input, thinking_mode, thinking_time, hf_token_input], outputs=[chatbot, status_box])
    user_input.submit(respond, inputs=[chatbot, user_input, thinking_mode, thinking_time, hf_token_input], outputs=[chatbot, status_box])

if __name__ == "__main__":
    demo.launch()