Commit
·
88cb2f4
1
Parent(s):
cba8124
changed Llama model due to rate limit
Browse files
agent.py
CHANGED
|
@@ -19,7 +19,7 @@ from langgraph.checkpoint.memory import MemorySaver
|
|
| 19 |
def get_llm():
|
| 20 |
"""Get Groq LLM instance"""
|
| 21 |
return ChatGroq(
|
| 22 |
-        model="llama-3.
|
| 23 |
temperature=0,
|
| 24 |
max_tokens=8000,
|
| 25 |
timeout=60,
|
|
|
|
| 19 |
def get_llm():
|
| 20 |
"""Get Groq LLM instance"""
|
| 21 |
return ChatGroq(
|
| 22 |
+        model="llama-3.1-8b-instant",
|
| 23 |
temperature=0,
|
| 24 |
max_tokens=8000,
|
| 25 |
timeout=60,
|