Spaces:
Sleeping
Sleeping
adding temperature and top_p
Browse files
app.py
CHANGED
|
@@ -5,12 +5,12 @@ from huggingface_hub import InferenceClient
|
|
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")  # change the LLM

def respond(message, history):
    """Send `message` (plus any prior `history`) to the model and return its reply.

    Parameters
    ----------
    message : str
        The new user message.
    history : list | None
        Prior conversation turns; assumed to be OpenAI-style
        {"role": ..., "content": ...} dicts — TODO confirm against the caller
        (likely a Gradio ChatInterface).

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped.
    """
    messages = [{"role" : "system", "content" : "You are a chatbot."}]  # change personality
    if history:
        messages.extend(history)
    messages.append({"role" : "user", "content" : message})
    response = client.chat_completion(messages, max_tokens=10)  # change length
    # Extract the reply once; the original evaluated the full
    # response["choices"][0]["message"]["content"].strip() chain twice
    # (once for print, once for return).
    reply = response["choices"][0]["message"]["content"].strip()
    print(reply)  # debug echo of the model output
    return reply
|
|
|
|
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")  # change the LLM

def respond(message, history):
    """Send `message` (plus any prior `history`) to the model and return its reply.

    Uses a higher-temperature, nucleus-sampled generation (temperature=0.9,
    top_p=0.7) for more varied output.

    Parameters
    ----------
    message : str
        The new user message.
    history : list | None
        Prior conversation turns; assumed to be OpenAI-style
        {"role": ..., "content": ...} dicts — TODO confirm against the caller
        (likely a Gradio ChatInterface).

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped.
    """
    messages = [{"role" : "system", "content" : "You are a sassy chatbot."}]  # change personality
    if history:
        messages.extend(history)
    messages.append({"role" : "user", "content" : message})
    response = client.chat_completion(
        messages, max_tokens=10, temperature=0.9, top_p=0.7
    )  # change length
    # Extract the reply once; the original evaluated the full
    # response["choices"][0]["message"]["content"].strip() chain twice
    # (once for print, once for return).
    reply = response["choices"][0]["message"]["content"].strip()
    print(reply)  # debug echo of the model output
    return reply