"""
ماژول مدیریت یکپارچه LLM ها
پشتیبانی از: ChatGPT, Grok, Gemini, DeepSeek
"""
import os
import requests
import logging
from typing import Optional

logger = logging.getLogger(__name__)
| class UnifiedLLMSender: | |
| """کلاس یکپارچه برای ارسال به LLM های مختلف""" | |
| MODELS = { | |
| 'gpt-4o-mini': { | |
| 'provider': 'openai', | |
| 'api_base': 'https://api.openai.com/v1/chat/completions', | |
| 'env_key': 'OPENAI_API_KEY' | |
| }, | |
| 'gpt-4o': { | |
| 'provider': 'openai', | |
| 'api_base': 'https://api.openai.com/v1/chat/completions', | |
| 'env_key': 'OPENAI_API_KEY' | |
| }, | |
| 'o1': { | |
| 'provider': 'openai', | |
| 'api_base': 'https://api.openai.com/v1/chat/completions', | |
| 'env_key': 'OPENAI_API_KEY' | |
| }, | |
| 'o1-mini': { | |
| 'provider': 'openai', | |
| 'api_base': 'https://api.openai.com/v1/chat/completions', | |
| 'env_key': 'OPENAI_API_KEY' | |
| }, | |
| 'grok-beta': { | |
| 'provider': 'xai', | |
| 'api_base': 'https://api.x.ai/v1/chat/completions', | |
| 'env_key': 'XAI_API_KEY' | |
| }, | |
| 'grok-2-latest': { | |
| 'provider': 'xai', | |
| 'api_base': 'https://api.x.ai/v1/chat/completions', | |
| 'env_key': 'XAI_API_KEY' | |
| }, | |
| 'gemini-2.0-flash-exp': { | |
| 'provider': 'google', | |
| 'api_base': 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent', | |
| 'env_key': 'GOOGLE_API_KEY' | |
| }, | |
| 'gemini-1.5-pro': { | |
| 'provider': 'google', | |
| 'api_base': 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent', | |
| 'env_key': 'GOOGLE_API_KEY' | |
| }, | |
| 'deepseek-chat': { | |
| 'provider': 'deepseek', | |
| 'api_base': 'https://api.deepseek.com/v1/chat/completions', | |
| 'env_key': 'DEEPSEEK_API_KEY' | |
| }, | |
| 'deepseek-reasoner': { | |
| 'provider': 'deepseek', | |
| 'api_base': 'https://api.deepseek.com/v1/chat/completions', | |
| 'env_key': 'DEEPSEEK_API_KEY' | |
| } | |
| } | |
| def __init__(self, model: str = 'gpt-4o-mini'): | |
| """ | |
| مقداردهی اولیه | |
| Args: | |
| model: نام مدل (مثلاً 'gpt-4o-mini', 'grok-beta', 'gemini-2.0-flash-exp', 'deepseek-chat') | |
| """ | |
| self.model = model | |
| if model not in self.MODELS: | |
| raise ValueError(f"مدل {model} پشتیبانی نمیشود. مدلهای موجود: {list(self.MODELS.keys())}") | |
| self.config = self.MODELS[model] | |
| self.provider = self.config['provider'] | |
| self.api_key = os.getenv(self.config['env_key']) | |
| if not self.api_key: | |
| logger.warning(f"⚠️ API Key برای {model} تنظیم نشده: {self.config['env_key']}") | |
| def send(self, | |
| text: str, | |
| system_msg: Optional[str] = None, | |
| max_tokens: int = 4096, | |
| temperature: float = 0.3, | |
| lang: str = 'fa') -> str: | |
| """ | |
| ارسال پیام به LLM | |
| Args: | |
| text: متن ورودی | |
| system_msg: پیام سیستم | |
| max_tokens: حداکثر توکن خروجی | |
| temperature: دما | |
| lang: زبان | |
| Returns: | |
| پاسخ مدل | |
| """ | |
| if not self.api_key: | |
| return f"❌ API Key برای {self.model} موجود نیست. لطفاً {self.config['env_key']} را تنظیم کنید." | |
| try: | |
| if self.provider == 'google': | |
| return self._send_google(text, system_msg, max_tokens, temperature) | |
| else: | |
| # OpenAI-compatible APIs (OpenAI, xAI, DeepSeek) | |
| return self._send_openai_compatible(text, system_msg, max_tokens, temperature) | |
| except Exception as e: | |
| logger.error(f"❌ خطا در ارسال به {self.model}: {e}") | |
| return f"❌ خطا: {str(e)}" | |
| def _send_openai_compatible(self, text: str, system_msg: Optional[str], | |
| max_tokens: int, temperature: float) -> str: | |
| """ارسال به API های سازگار با OpenAI""" | |
| messages = [] | |
| if system_msg: | |
| messages.append({"role": "system", "content": system_msg}) | |
| messages.append({"role": "user", "content": text}) | |
| headers = { | |
| "Authorization": f"Bearer {self.api_key}", | |
| "Content-Type": "application/json" | |
| } | |
| payload = { | |
| "model": self.model, | |
| "messages": messages, | |
| "max_tokens": max_tokens, | |
| "temperature": temperature | |
| } | |
| logger.info(f"📤 ارسال به {self.model}...") | |
| response = requests.post( | |
| self.config['api_base'], | |
| headers=headers, | |
| json=payload, | |
| timeout=120 | |
| ) | |
| if response.status_code == 200: | |
| result = response.json() | |
| content = result['choices'][0]['message']['content'] | |
| logger.info(f"✅ پاسخ از {self.model} دریافت شد ({len(content)} کاراکتر)") | |
| return content | |
| else: | |
| error_msg = f"خطای API {response.status_code}: {response.text}" | |
| logger.error(f"❌ {error_msg}") | |
| return f"❌ {error_msg}" | |
| def _send_google(self, text: str, system_msg: Optional[str], | |
| max_tokens: int, temperature: float) -> str: | |
| """ارسال به Google Gemini API""" | |
| # ترکیب system message با user message | |
| full_text = text | |
| if system_msg: | |
| full_text = f"{system_msg}\n\n{text}" | |
| url = f"{self.config['api_base']}?key={self.api_key}" | |
| payload = { | |
| "contents": [{ | |
| "parts": [{ | |
| "text": full_text | |
| }] | |
| }], | |
| "generationConfig": { | |
| "maxOutputTokens": max_tokens, | |
| "temperature": temperature | |
| } | |
| } | |
| logger.info(f"📤 ارسال به {self.model}...") | |
| response = requests.post( | |
| url, | |
| headers={"Content-Type": "application/json"}, | |
| json=payload, | |
| timeout=120 | |
| ) | |
| if response.status_code == 200: | |
| result = response.json() | |
| content = result['candidates'][0]['content']['parts'][0]['text'] | |
| logger.info(f"✅ پاسخ از {self.model} دریافت شد ({len(content)} کاراکتر)") | |
| return content | |
| else: | |
| error_msg = f"خطای API {response.status_code}: {response.text}" | |
| logger.error(f"❌ {error_msg}") | |
| return f"❌ {error_msg}" | |
| # تابع کمکی برای گرفتن لیست مدلهای موجود | |
| def get_available_models() -> dict: | |
| """دریافت لیست مدلهای موجود و وضعیت API key های آنها""" | |
| available = {} | |
| for model_name, config in UnifiedLLMSender.MODELS.items(): | |
| env_key = config['env_key'] | |
| has_key = bool(os.getenv(env_key)) | |
| available[model_name] = { | |
| 'provider': config['provider'], | |
| 'has_key': has_key, | |
| 'env_key': env_key | |
| } | |
| return available | |
| def get_model_display_names() -> dict: | |
| """دریافت نامهای نمایشی مدلها""" | |
| return { | |
| 'gpt-4o-mini': '🤖 ChatGPT 4o-mini', | |
| 'gpt-4o': '🤖 ChatGPT 4o', | |
| 'o1': '🤖 ChatGPT o1', | |
| 'o1-mini': '🤖 ChatGPT o1-mini', | |
| 'grok-beta': '🚀 Grok Beta', | |
| 'grok-2-latest': '🚀 Grok 2', | |
| 'gemini-2.0-flash-exp': '✨ Gemini 2.0 Flash', | |
| 'gemini-1.5-pro': '✨ Gemini 1.5 Pro', | |
| 'deepseek-chat': '🧠 DeepSeek Chat', | |
| 'deepseek-reasoner': '🧠 DeepSeek Reasoner' | |
| } | |