mause123 committed on
Commit ·
ce370fe
1
Parent(s): 8ef6562
Fix Groq model - update to current llama3-8b-8192
Browse files
agent.py
CHANGED
|
@@ -169,7 +169,7 @@ def build_graph(provider: str = "google"):
|
|
| 169 |
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
|
| 170 |
elif provider == "groq":
|
| 171 |
# Groq https://console.groq.com/docs/models
|
| 172 |
-
llm = ChatGroq(model="
|
| 173 |
else:
|
| 174 |
raise ValueError("Invalid provider. Choose 'google' or 'groq'.")
|
| 175 |
|
|
|
|
| 169 |
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
|
| 170 |
elif provider == "groq":
|
| 171 |
# Groq https://console.groq.com/docs/models
|
| 172 |
+
llm = ChatGroq(model="llama3-8b-8192", temperature=0) # Current stable model
|
| 173 |
else:
|
| 174 |
raise ValueError("Invalid provider. Choose 'google' or 'groq'.")
|
| 175 |
|