import os

from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI

load_dotenv()


def get_llm(model_type="text", temperature=0.1):
    """Factory returning a chat LLM for the given task type.

    Prefers Gemini (which serves both text and vision) whenever
    GOOGLE_API_KEY is set; otherwise falls back to DeepSeek, which is
    available for text only.

    Args:
        model_type: "text" (DeepSeek fallback available) or "vision"
            (requires a Google API key).
        temperature: Sampling temperature passed through to the model.

    Returns:
        A configured LangChain chat model instance.

    Raises:
        ValueError: If ``model_type`` is unknown, or if no API key
            usable for the requested type is configured.
    """
    # Validate up front so an unknown model_type always raises, even when
    # a Google key is present (previously it silently returned Gemini).
    if model_type not in ("text", "vision"):
        raise ValueError(f"Unknown model_type: {model_type}")

    # Prioritize Gemini as requested for the demo; it handles both
    # text and vision, so a single dispatch covers either model_type.
    google_key = os.getenv("GOOGLE_API_KEY")
    if google_key:
        return ChatGoogleGenerativeAI(
            model="gemini-2.5-flash",
            temperature=temperature,
            google_api_key=google_key,
        )

    if model_type == "vision":
        # Only Gemini supports vision in this setup; no fallback exists.
        raise ValueError("Google API Key required for vision")

    # Text fallback: DeepSeek via its OpenAI-compatible endpoint.
    deepseek_key = os.getenv("DEEPSEEK_API_KEY")
    if not deepseek_key:
        raise ValueError("No API keys found (DeepSeek or Google)")
    return ChatOpenAI(
        model="deepseek-chat",
        api_key=deepseek_key,
        base_url="https://api.deepseek.com",
        temperature=temperature,
    )