limeiz committed on
Commit
1f23eea
·
verified ·
1 Parent(s): b07a1d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -6
app.py CHANGED
@@ -7,24 +7,33 @@ client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
7
 
8
  def respond(message, history):
9
 
10
- messages = [{"role": "system", "content": "You are a wise 80 year old chatbot."}] #change the personality
11
 
12
  if history:
13
  messages.extend(history)
14
 
15
  messages.append({"role":"user", "content": message})
16
 
17
- response = client.chat_completion(
 
 
 
 
 
 
18
  messages,
19
  max_tokens=500,
20
- temperature = 0.3,
21
- top_p = 0.3
22
- )
 
 
 
23
  # change length using max_tokens
24
 
25
  print(response['choices'][0]['message']['content'].strip())
26
 
27
- return response['choices'][0]['message']['content'].strip()
28
 
29
  def random_message(message, history):
30
  choices = ["yes.", "no.", "it is certain","without a doubt","outlook good","ask again later", "better not tell you now","very doubtful","don't count on it","my sources say no","outlook not so good","very doubtful","reply hazy, try again", "cannot predict now"]
 
7
 
8
  def respond(message, history):
9
 
10
+ messages = [{"role": "system", "content": "You are a sassy chatbot."}] #change the personality
11
 
12
  if history:
13
  messages.extend(history)
14
 
15
  messages.append({"role":"user", "content": message})
16
 
17
+ response = ""
18
+ client.chat_completion(stream=True)
19
+ for message in response:
20
+
21
+
22
+ response = ""
23
+ for message in client.chat_completion(
24
  messages,
25
  max_tokens=500,
26
+ stream=True,
27
+ #temperature = 0.3,
28
+ #top_p = 0.3
29
+ ):
30
+ token = messages.choices[0].delta.content
31
+ response += token
32
  # change length using max_tokens
33
 
34
  print(response['choices'][0]['message']['content'].strip())
35
 
36
+ yield response['choices'][0]['message']['content'].strip()
37
 
38
  def random_message(message, history):
39
  choices = ["yes.", "no.", "it is certain","without a doubt","outlook good","ask again later", "better not tell you now","very doubtful","don't count on it","my sources say no","outlook not so good","very doubtful","reply hazy, try again", "cannot predict now"]