Spaces:
Sleeping
Sleeping
temp and changed max tokens
Browse files
app.py
CHANGED
|
@@ -133,7 +133,7 @@ def respond(message,history):
|
|
| 133 |
|
| 134 |
messages.append({"role":"user","content": message})
|
| 135 |
|
| 136 |
-
response=client.chat_completion(messages, max_tokens=
|
| 137 |
|
| 138 |
return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
|
| 139 |
|
|
|
|
| 133 |
|
| 134 |
messages.append({"role":"user","content": message})
|
| 135 |
|
| 136 |
+
response=client.chat_completion(messages, temperature=0.2, max_tokens=250)#capping the response at 250 tokens (tokens, not words) that the LLM may generate as a response
|
| 137 |
|
| 138 |
return response['choices'][0]['message']['content'].strip() #storing value of response in a readable format to display
|
| 139 |
|