Spaces:
Sleeping
Sleeping
import json

from langchain_openai import ChatOpenAI

from config import Settings, settings as default_settings
class SafeChatOpenAI(ChatOpenAI):
    """ChatOpenAI variant tolerant of OpenAI-compatible providers that
    return the completion payload as a raw JSON string instead of a
    parsed object."""

    def _create_chat_result(self, response, generation_info=None):
        # Decode a string payload into the structure the parent class
        # expects; anything already parsed is passed through untouched.
        payload = json.loads(response) if isinstance(response, str) else response
        return super()._create_chat_result(payload, generation_info)
def get_llm(s: Settings | None = None, temperature: float = 0.1) -> SafeChatOpenAI:
    """Build a ``SafeChatOpenAI`` client from *s*, falling back to the
    module-level default settings when *s* is not supplied.

    Raises:
        RuntimeError: if the resolved settings report the LLM as
            unconfigured (missing base URL / model).
    """
    cfg = s or default_settings
    # Fail fast with an actionable message rather than letting the client
    # error out later on a missing endpoint.
    if not cfg.is_llm_configured:
        raise RuntimeError(
            "LLM not configured. Set LLM_BASE_URL and LLM_MODEL in .env "
            "or via the Settings panel in the UI."
        )
    return SafeChatOpenAI(
        model=cfg.llm_model,
        base_url=cfg.llm_base_url,
        # Some local/self-hosted endpoints ignore the key but the OpenAI
        # client requires a non-empty value.
        api_key=cfg.llm_api_key or "not-needed",
        temperature=temperature,
    )