abakerdp committed on
Commit
b718109
·
verified ·
1 Parent(s): 8ec1d9b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -8
app.py CHANGED
@@ -1,11 +1,11 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
9
 
10
  def respond(
11
  message,
@@ -15,6 +15,8 @@ def respond(
15
  temperature,
16
  top_p,
17
  ):
 
 
18
  messages = [{"role": "system", "content": system_message}]
19
 
20
  for val in history:
@@ -25,6 +27,12 @@ def respond(
25
 
26
  messages.append({"role": "user", "content": message})
27
 
 
 
 
 
 
 
28
  response = ""
29
 
30
  for message in client.chat_completion(
@@ -37,11 +45,25 @@ def respond(
37
  token = message.choices[0].delta.content
38
 
39
  response += token
 
 
 
 
40
  yield response
41
 
42
- """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
 
 
 
 
 
 
 
 
 
 
45
  demo = gr.ChatInterface(
46
  respond,
47
  additional_inputs=[
@@ -58,6 +80,5 @@ demo = gr.ChatInterface(
58
  ],
59
  )
60
 
61
-
62
  if __name__ == "__main__":
63
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import random
4
 
 
 
 
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
7
+ # Initialize conversation state
8
+ conversation_state = {"ask_question": False, "last_message": ""}
9
 
10
  def respond(
11
  message,
 
15
  temperature,
16
  top_p,
17
  ):
18
+ global conversation_state
19
+
20
  messages = [{"role": "system", "content": system_message}]
21
 
22
  for val in history:
 
27
 
28
  messages.append({"role": "user", "content": message})
29
 
30
+ # Check if the chatbot should pose a question based on the user's previous response
31
+ if conversation_state["ask_question"]:
32
+ conversation_state["ask_question"] = False
33
+ question = pose_follow_up_question(conversation_state["last_message"])
34
+ messages.append({"role": "assistant", "content": question})
35
+
36
  response = ""
37
 
38
  for message in client.chat_completion(
 
45
  token = message.choices[0].delta.content
46
 
47
  response += token
48
+
49
+ # Update conversation state with the last user message
50
+ conversation_state["last_message"] = message.choices[0].delta.content
51
+
52
  yield response
53
 
54
def pose_follow_up_question(user_response):
    """Return a canned follow-up question keyed on the user's exact reply.

    Looks the reply up verbatim in a small table of known responses and
    falls back to a generic prompt when no exact match exists.

    Args:
        user_response: The user's previous message, matched literally.

    Returns:
        A follow-up question string.
    """
    # NOTE: matching is exact (case- and whitespace-sensitive) — any
    # variation of a known reply falls through to the default prompt.
    canned = {
        "I believe scientists should prioritize ethical considerations in their research":
            "That's a commendable perspective! What measures do you think scientists should take to ensure ethical research practices?",
        "I'm not sure about the ethical implications of genetic engineering":
            "It's a complex topic indeed! What aspects of genetic engineering do you find most concerning?",
        "I think technology has the potential to both benefit and harm society":
            "You raise an interesting point! How do you think society can mitigate the risks associated with emerging technologies?",
    }
    if user_response in canned:
        return canned[user_response]
    return "I'd love to hear more about your thoughts on this topic."
65
+
66
+ # Gradio interface definition
67
  demo = gr.ChatInterface(
68
  respond,
69
  additional_inputs=[
 
80
  ],
81
  )
82
 
 
83
  if __name__ == "__main__":
84
+ demo.launch()