mmargg committed on
Commit
08e328d
·
verified ·
1 Parent(s): cbbb32c

update temperature and content of chatbot

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -9,14 +9,14 @@ client= InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
9
  #defining role of AI and user
10
  def respond(message,history):
11
 
12
- messages = [{"role": "assistant", "content": "You are a friendly chatbot."}]
13
 
14
  if history:
15
  messages.extend(history) #keep adding history
16
 
17
  messages.append({"role":"user", "content": message})
18
 
19
- response=client.chat_completion(messages, temperature=1.5, max_tokens=300) #capping how many words the LLM is allowed to generate as a respond (300 words)
20
 
21
  return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
22
 
 
9
  #defining role of AI and user
10
  def respond(message,history):
11
 
12
+ messages = [{"role": "system", "content": "You are acting like a comforting, guiding parent helping their child navigate academia.."}]
13
 
14
  if history:
15
  messages.extend(history) #keep adding history
16
 
17
  messages.append({"role":"user", "content": message})
18
 
19
+ response=client.chat_completion(messages, temperature=0.8, max_tokens=300) #capping how many words the LLM is allowed to generate as a respond (300 words)
20
 
21
  return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
22