sudo-soldier committed on
Commit
31c9373
·
verified ·
1 Parent(s): c94fd44

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -1,10 +1,10 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Load the Hugging Face model
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
7
- # Chatbot response function
8
  def respond(
9
  message,
10
  history: list[tuple[str, str]],
@@ -13,20 +13,20 @@ def respond(
13
  temperature,
14
  top_p,
15
  ):
16
- # Initialize message list with system prompt
17
  messages = [{"role": "system", "content": system_message}]
18
 
19
- # Add previous chat history
20
  for user_msg, assistant_msg in history:
21
  if user_msg:
22
  messages.append({"role": "user", "content": user_msg})
23
  if assistant_msg:
24
  messages.append({"role": "assistant", "content": assistant_msg})
25
 
26
- # Append latest user message
27
  messages.append({"role": "user", "content": message})
28
 
29
- # Generate response using streaming
30
  response = ""
31
  for message in client.chat_completion(
32
  messages,
@@ -39,7 +39,7 @@ def respond(
39
  response += token
40
  yield response
41
 
42
- # Gradio UI with Jesse's professional identity
43
  demo = gr.ChatInterface(
44
  fn=respond,
45
  additional_inputs=[
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
7
+
8
  def respond(
9
  message,
10
  history: list[tuple[str, str]],
 
13
  temperature,
14
  top_p,
15
  ):
16
+
17
  messages = [{"role": "system", "content": system_message}]
18
 
19
+
20
  for user_msg, assistant_msg in history:
21
  if user_msg:
22
  messages.append({"role": "user", "content": user_msg})
23
  if assistant_msg:
24
  messages.append({"role": "assistant", "content": assistant_msg})
25
 
26
+
27
  messages.append({"role": "user", "content": message})
28
 
29
+
30
  response = ""
31
  for message in client.chat_completion(
32
  messages,
 
39
  response += token
40
  yield response
41
 
42
+
43
  demo = gr.ChatInterface(
44
  fn=respond,
45
  additional_inputs=[