shreyasa09 commited on
Commit
e12b770
·
verified ·
1 Parent(s): de82702

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -27
app.py CHANGED
@@ -1,33 +1,21 @@
1
  import gradio as gr
2
- import random
3
  from huggingface_hub import InferenceClient
4
-
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
-
7
  def respond(message, history):
8
-
9
- messages = [{"role": "system", "content": "You are a friendly chatbot."}]
10
- if history:
11
  messages.extend(history)
12
-
13
- messages.append({"role" : "user", "content" : message})
14
-
15
- response = client.chat_completion(
16
- messages,
17
- max_tokens = 30,
18
- temperature =.9,
19
- top_p=.7
20
-
21
- )
22
-
23
-
24
- return response["choices"][0]["message"]["content"].strip()
25
-
26
- def echo(messaged, history):
27
- my_list = ["sure", "ask again", "maybe"]
28
- return random.choice(my_list)
29
- chatbot = gr.ChatInterface(respond, type = "messages")
30
-
31
-
32
-
33
  chatbot.launch()
 
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
 
3
# Shared inference client for the hosted Zephyr-7B chat model (used by respond).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
4
def respond(message, history):
    """Stream a chat reply for *message* from the Zephyr-7B model.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list | None
        Prior turns as ``{"role", "content"}`` dicts (Gradio "messages"
        format) — presumably; verify against the ChatInterface caller.

    Yields
    ------
    str
        The accumulated assistant response so far, once per received token.
    """
    # Fixed: the system-message string had a stray doubled quote (`.""}]`),
    # which was a SyntaxError.
    messages = [{"role": "system", "content": "You are a friendly chatbot."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Loop variable renamed from `message` — it shadowed the user-message
    # parameter above.
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        temperature=0.2,
        top_p=0.9,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas may carry ``content=None`` (e.g. role-only frames);
        # guard so the concatenation never raises TypeError.
        if token:
            response += token
            yield response
20
# Build the chat UI around the streaming `respond` generator.
# Fixed: the original used typographic quotes (type=“messages”), which is a
# SyntaxError in Python; "messages" selects the openai-style dict history
# format that respond expects.
chatbot = gr.ChatInterface(respond, type="messages")

# Start the Gradio server (blocking call).
chatbot.launch()