accesscreate012 committed on
Commit
f35e7cd
·
verified ·
1 Parent(s): 764489a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -25
app.py CHANGED
@@ -4,64 +4,85 @@ from datasets import load_dataset
4
  import threading
5
  import time
6
 
7
- # Initialize the InferenceClient for Zephyr model
 
 
8
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
 
10
- # Function to load the latest dataset from Hugging Face
11
  def load_data():
12
  dataset = load_dataset("accesscreate012/abhinav-academy-chatbot", split="train")
13
  return {entry["instruction"]: entry["response"] for entry in dataset}
14
 
 
15
  # Global variable to store the dataset
16
  data = load_data()
17
 
18
- # Function to update dataset every 24 hours
19
  def auto_update():
20
  global data
21
  while True:
22
- time.sleep(86400) # Wait for 24 hours before refreshing dataset
23
  data = load_data() # Reload the dataset
24
  print("Dataset updated.")
25
 
 
26
  # Start the auto-update thread (runs in the background)
27
  threading.Thread(target=auto_update, daemon=True).start()
28
 
29
- # Chatbot response function using the InferenceClient
30
- def respond(message, system_message, max_tokens, temperature, top_p):
31
- # Prepare the message history for the model
 
 
 
 
 
 
32
  messages = [{"role": "system", "content": system_message}]
33
-
34
- # Add the new user message
 
 
 
 
 
35
  messages.append({"role": "user", "content": message})
36
 
37
  response = ""
38
 
39
- # Generate the response using the model
40
- for msg in client.chat_completion(
41
  messages,
42
  max_tokens=max_tokens,
43
  stream=True,
44
  temperature=temperature,
45
  top_p=top_p,
46
  ):
47
- token = msg.choices[0].delta.content
 
48
  response += token
49
- yield response # Streaming response
50
-
51
- # Gradio interface (using gr.Interface instead of gr.ChatInterface)
52
- demo = gr.Interface(
53
- fn=respond,
54
- inputs=[
55
- gr.Textbox(label="User Message"),
56
- gr.Textbox(value="You are a helpful chatbot.", label="System Message"),
57
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"),
 
 
58
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
59
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
 
 
 
 
 
60
  ],
61
- outputs=gr.Textbox(label="Chatbot Response"), # Changed to Textbox to capture the response
62
- live=True # Enable live responses
63
  )
64
 
65
- # Launch the interface
66
  if __name__ == "__main__":
67
  demo.launch()
 
4
  import threading
5
  import time
6
 
7
+ """
8
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
9
+ """
10
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
11
 
12
+
13
def load_data():
    """Fetch the chatbot Q&A dataset and index it by instruction.

    Returns:
        dict: maps each training entry's "instruction" text to its
        "response" text. Later duplicate instructions overwrite earlier
        ones, matching plain dict insertion semantics.
    """
    rows = load_dataset("accesscreate012/abhinav-academy-chatbot", split="train")
    lookup = {}
    for row in rows:
        lookup[row["instruction"]] = row["response"]
    return lookup
16
 
17
+
18
  # Global variable to store the dataset
19
  data = load_data()
20
 
21
+
22
def auto_update():
    """Background loop: refresh the global ``data`` dict every 24 hours.

    Runs forever; intended to be started on a daemon thread so it exits
    with the main process.
    """
    global data
    while True:
        time.sleep(86400)  # Wait for 24 hours before refreshing the dataset
        # A transient failure (e.g. a Hub/network outage) would otherwise
        # propagate out of the thread and silently kill it, so the dataset
        # would never refresh again. Log and keep looping instead.
        try:
            data = load_data()  # Reload the dataset
            print("Dataset updated.")
        except Exception as exc:
            print(f"Dataset update failed, will retry next cycle: {exc}")
28
 
29
+
30
  # Start the auto-update thread (runs in the background)
31
  threading.Thread(target=auto_update, daemon=True).start()
32
 
33
+
34
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for ``message`` given the chat ``history``.

    Args:
        message: the new user message.
        history: prior (user, assistant) turn pairs from the ChatInterface;
            either element may be falsy and is then skipped.
        system_message: text for the leading "system" role message.
        max_tokens: generation cap passed to the model.
        temperature: sampling temperature.
        top_p: nucleus-sampling cutoff.

    Yields:
        str: the accumulated response text, re-yielded after each streamed
        token so the UI can render progressively.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Use a distinct loop name: the original reused `message`, shadowing the
    # user-message parameter during streaming.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Some stream chunks (role-only or final chunks) carry no content;
        # `response += None` would raise TypeError, so skip empty deltas.
        if token:
            response += token
            yield response
65
+
66
+
67
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Build the chat UI around the streaming `respond` generator. The widgets
# below appear as "additional inputs" so users can tweak the system prompt
# and sampling parameters without code changes.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
85
 
86
+
87
# Launch the Gradio server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()