"""Shared Groq-backed chat model configuration.

Loads settings from a local ``.env`` file (``GROQ_API_KEY`` is read from the
environment by ``ChatGroq``) and exposes a single module-level LLM instance.
"""

import os

from dotenv import load_dotenv
from langchain_groq import ChatGroq

# Pull GROQ_API_KEY (and any other settings) from a .env file into os.environ.
load_dotenv()

# Module-level, deterministic chat model shared by the rest of the package.
# FIX: langchain_groq expects the bare Groq model id ("llama-3.3-70b-versatile").
# The "groq/<model>" prefix is a LiteLLM/CrewAI routing convention; sent
# directly to the Groq API it produces a model-not-found error.
LLM_MODEL = ChatGroq(
    model="llama-3.3-70b-versatile",
    temperature=0,   # deterministic outputs
    max_tokens=1024,  # cap completion length
    timeout=30,       # seconds per request
    max_retries=2,    # retry transient API failures
)