mause123 committed on
Commit
ce370fe
·
1 Parent(s): 8ef6562

Fix Groq model - update to current llama3-8b-8192

Browse files
Files changed (1) hide show
  1. agent.py +1 -1
agent.py CHANGED
@@ -169,7 +169,7 @@ def build_graph(provider: str = "google"):
169
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
170
  elif provider == "groq":
171
  # Groq https://console.groq.com/docs/models
172
- llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
173
  else:
174
  raise ValueError("Invalid provider. Choose 'google' or 'groq'.")
175
 
 
169
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
170
  elif provider == "groq":
171
  # Groq https://console.groq.com/docs/models
172
+ llm = ChatGroq(model="llama3-8b-8192", temperature=0) # Current stable model
173
  else:
174
  raise ValueError("Invalid provider. Choose 'google' or 'groq'.")
175