Spaces:
Sleeping
Sleeping
Switch model to llama
Browse files
app.py
CHANGED
|
@@ -52,7 +52,8 @@ def handle_submit():
|
|
| 52 |
completion = client.chat.completions.create(
|
| 53 |
#model="Qwen/Qwen2.5-72B-Instruct",
|
| 54 |
#model="Qwen/Qwen2.5-Coder-32B-Instruct",
|
| 55 |
-
model="mistralai/Mistral-7B-Instruct-v0.3",
|
|
|
|
| 56 |
messages=messages,
|
| 57 |
max_tokens=1000
|
| 58 |
)
|
|
|
|
| 52 |
completion = client.chat.completions.create(
|
| 53 |
#model="Qwen/Qwen2.5-72B-Instruct",
|
| 54 |
#model="Qwen/Qwen2.5-Coder-32B-Instruct",
|
| 55 |
+
#model="mistralai/Mistral-7B-Instruct-v0.3",
|
| 56 |
+
model="meta-llama/Llama-3.2-3B",
|
| 57 |
messages=messages,
|
| 58 |
max_tokens=1000
|
| 59 |
)
|