Spaces:
Sleeping
Sleeping
update temperature and content of chatbot
Browse files
app.py
CHANGED
|
@@ -9,14 +9,14 @@ client= InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
|
|
| 9 |
#defining role of AI and user
|
| 10 |
def respond(message,history):
|
| 11 |
|
| 12 |
-
messages = [{"role": "
|
| 13 |
|
| 14 |
if history:
|
| 15 |
messages.extend(history) #keep adding history
|
| 16 |
|
| 17 |
messages.append({"role":"user", "content": message})
|
| 18 |
|
| 19 |
-
response=client.chat_completion(messages, temperature=
|
| 20 |
|
| 21 |
return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
|
| 22 |
|
|
|
|
| 9 |
#defining role of AI and user
|
| 10 |
def respond(message,history):
|
| 11 |
|
| 12 |
+
messages = [{"role": "system", "content": "You are acting like a comforting, guiding parent helping their child navigate academia."}]
|
| 13 |
|
| 14 |
if history:
|
| 15 |
messages.extend(history) #keep adding history
|
| 16 |
|
| 17 |
messages.append({"role":"user", "content": message})
|
| 18 |
|
| 19 |
+
response=client.chat_completion(messages, temperature=0.8, max_tokens=300) #capping how many tokens the LLM is allowed to generate as a response (300 tokens, not words)
|
| 20 |
|
| 21 |
return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
|
| 22 |
|