AheedTahir committed on
Commit
88cb2f4
·
1 Parent(s): cba8124

changed Llama model due to rate limit

Browse files
Files changed (1) hide show
  1. agent.py +1 -1
agent.py CHANGED
@@ -19,7 +19,7 @@ from langgraph.checkpoint.memory import MemorySaver
19
  def get_llm():
20
  """Get Groq LLM instance"""
21
  return ChatGroq(
22
- model="llama-3.3-70b-versatile",
23
  temperature=0,
24
  max_tokens=8000,
25
  timeout=60,
 
19
  def get_llm():
20
  """Get Groq LLM instance"""
21
  return ChatGroq(
22
+ model="llama-3.1-8b-instant",
23
  temperature=0,
24
  max_tokens=8000,
25
  timeout=60,