Commit: Update llm_model.py (llm_model.py changed, +12 −13)
@@ -1,14 +1,13 @@
|
|
| 1 |
-
from langchain_groq import ChatGroq
|
| 2 |
-
import os
|
| 3 |
-
from dotenv import load_dotenv
|
| 4 |
-
|
| 5 |
-
load_dotenv()
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
max_retries=2,
|
| 14 |
)
|
|
|
|
"""Shared Groq-backed chat model for the project.

Loads credentials from a local ``.env`` file (expects ``GROQ_API_KEY``)
and exposes a single configured ``ChatGroq`` instance as ``LLM_MODEL``.
"""

import os  # NOTE(review): unused here, but kept — other modules may rely on it being imported.

from dotenv import load_dotenv
from langchain_groq import ChatGroq

# Pull GROQ_API_KEY (and any other settings) from .env into the environment
# before ChatGroq is constructed, since ChatGroq reads the key at init time.
load_dotenv()

# Module-level singleton used by the rest of the application.
# FIX: langchain_groq's ChatGroq sends the model name straight to the Groq
# API, which expects a bare model id ("llama-3.3-70b-versatile"). The
# "groq/" provider prefix is a LiteLLM/CrewAI convention and makes the
# Groq API reject the request with a model-not-found error.
LLM_MODEL = ChatGroq(
    model="llama-3.3-70b-versatile",  # Groq-hosted Llama 3.3 70B
    temperature=0,   # deterministic outputs
    max_tokens=1024, # cap on generated tokens per response
    timeout=30,      # seconds before an API call is abandoned
    max_retries=2,   # retry transient API failures up to twice
)