| from __future__ import annotations | |
| from langchain_mistralai import ChatMistralAI | |
| from memory_agent.config import AppConfig | |
class MistralLLMFactory:
    """Factory that builds configured ``ChatMistralAI`` clients from an ``AppConfig``."""

    def __init__(self, config: AppConfig) -> None:
        # Keep the whole config object rather than copying individual fields,
        # so new Mistral-related settings only need changes in create().
        self._config = config

    def create(self, *, temperature: float = 0) -> ChatMistralAI:
        """Create a ``ChatMistralAI`` client wired from the stored config.

        Args:
            temperature: Sampling temperature for the model. Defaults to ``0``
                (deterministic output), preserving the previous hard-coded
                behavior for existing callers.

        Returns:
            A ``ChatMistralAI`` instance configured with the API key, model
            name, retry limit, concurrency cap, and request timeout taken
            from the application config.
        """
        return ChatMistralAI(
            api_key=self._config.mistral_api_key,
            model=self._config.mistral_model,
            temperature=temperature,
            max_retries=self._config.mistral_max_retries,
            max_concurrent_requests=self._config.mistral_max_concurrent_requests,
            # NOTE(review): assumes mistral_timeout_seconds is in seconds, the
            # unit ChatMistralAI's timeout expects — confirm in AppConfig.
            timeout=self._config.mistral_timeout_seconds,
        )