Spaces:
Running
Running
import os

from dotenv import load_dotenv
from langchain_groq import ChatGroq

# Pull variables from a local .env file into the process environment
# before Config reads them at class-definition time.
load_dotenv()
class Config:
    """Central application configuration, read from environment variables.

    All values are captured once, at class-definition (import) time.
    """

    # API credentials; None when the variable is absent from the environment.
    GROQ_API_KEY = os.getenv("GROQ_API_KEY")
    TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

    # Groq model - using compound-beta for best results
    # Other options: llama-3.3-70b-versatile, llama-3.1-8b-instant, mixtral-8x7b-32768
    LLM_MODEL = os.getenv("LLM_MODEL", "compound-beta")

    # Text-splitting parameters (characters/tokens per chunk and overlap
    # between consecutive chunks — presumably consumed by a splitter
    # elsewhere in the project; confirm against callers).
    CHUNK_SIZE = 400
    CHUNK_OVERLAP = 80

    # Debug flag: enabled by default; only the literal (case-insensitive)
    # string "true" in DEBUG enables it.
    DEBUG = os.getenv("DEBUG", "true").lower() == "true"

    # BUG FIX: the original defined get_llm(cls) without @classmethod, so
    # Config.get_llm() raised TypeError (missing positional argument 'cls').
    @classmethod
    def get_llm(cls):
        """Return chat LLM instance.

        Returns:
            ChatGroq: a chat model configured with LLM_MODEL at temperature 0.7.

        Raises:
            RuntimeError: if GROQ_API_KEY is not set in the environment.
        """
        if not cls.GROQ_API_KEY:
            print("⚠️ WARNING: GROQ_API_KEY not found in environment!")
            raise RuntimeError("GROQ_API_KEY missing - please set it in environment variables")
        print(f"🤖 Initializing LLM with model: {cls.LLM_MODEL}")
        return ChatGroq(
            groq_api_key=cls.GROQ_API_KEY,
            model_name=cls.LLM_MODEL,
            temperature=0.7
        )