sudo-soldier committed on
Commit
5c89cc9
·
verified ·
1 Parent(s): 64e69e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -16
app.py CHANGED
@@ -1,8 +1,10 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
 
4
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
5
 
 
6
  def respond(
7
  message,
8
  history: list[tuple[str, str]],
@@ -11,18 +13,21 @@ def respond(
11
  temperature,
12
  top_p,
13
  ):
 
14
  messages = [{"role": "system", "content": system_message}]
15
 
16
- for val in history:
17
- if val[0]:
18
- messages.append({"role": "user", "content": val[0]})
19
- if val[1]:
20
- messages.append({"role": "assistant", "content": val[1]})
 
21
 
 
22
  messages.append({"role": "user", "content": message})
23
 
 
24
  response = ""
25
-
26
  for message in client.chat_completion(
27
  messages,
28
  max_tokens=max_tokens,
@@ -34,23 +39,28 @@ def respond(
34
  response += token
35
  yield response
36
 
37
- # Customized system message for 'Jesse'
38
  demo = gr.ChatInterface(
39
- respond,
40
  additional_inputs=[
41
- gr.Textbox(value="You are Jesse, a CompTIA certified Network+ Genius. You are knowledgeable in all things networking and provide expert advice.", label="System message"),
 
 
 
 
 
 
 
42
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
43
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
44
- gr.Slider(
45
- minimum=0.1,
46
- maximum=1.0,
47
- value=0.95,
48
- step=0.05,
49
- label="Top-p (nucleus sampling)",
50
- ),
51
  ],
 
 
 
52
  )
53
 
54
  if __name__ == "__main__":
55
  demo.launch()
56
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ # Load the Hugging Face model
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
# Chatbot response function
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, conditioned on prior history.

    Parameters
    ----------
    message : str
        Latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) exchange pairs; empty sides are skipped.
    system_message : str
        System prompt placed first in the conversation.
    max_tokens, temperature, top_p
        Sampling controls forwarded to the inference endpoint.

    Yields
    ------
    str
        The accumulated partial response, growing as tokens stream in.
    """
    # Initialize message list with system prompt
    messages = [{"role": "system", "content": system_message}]

    # Add previous chat history (skip empty user/assistant turns)
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Append latest user message
    messages.append({"role": "user", "content": message})

    # Generate response using streaming. NOTE: the loop variable is named
    # `chunk` (the original reused `message`, shadowing the parameter).
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Fix: streamed deltas can carry None content (e.g. role-only or
        # terminal chunks); concatenating None to a str raises TypeError.
        if token:
            response += token
            yield response
# Gradio chat UI configured with Jesse's identity and sampling controls.
# The additional inputs are passed positionally to `respond` after
# (message, history): system prompt, max tokens, temperature, top-p.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(
            value=(
                "Your name is Jesse. You are a friendly, helpful, and knowledgeable AI assistant "
                "who is CompTIA certified in Network+. Always refer to yourself as Jesse when responding. "
                "You are an expert in networking, troubleshooting, IT support, and network security."
            ),
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    title="Jesse - CompTIA Network+ Genius",
    description="Talk to Jesse, your certified networking expert. Ask anything about networking, security, IT support, and more.",
    theme="default",
)
62
 
# Start the Gradio server only when this file is run as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()