import os
from typing import Optional, Dict, Any
from dataclasses import dataclass
import openai
import anthropic
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Force reload environment variables
load_dotenv(override=True)

@dataclass
class LLMResponse:
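    """Uniform result wrapper returned by every provider branch in query()."""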
    content: str
    model: str
    success: bool
    error: Optional[str] = None

class LLMClientManager:
    """Manages connections to different LLM providers."""
    
    def __init__(self):
        self.clients = {}
        self._initialize_clients()
    
    def _initialize_clients(self):
        """Initialize available LLM clients based on API keys."""
        # Debug: Print available API keys
        print("πŸ” Initializing LLM clients...")
        
        # OpenAI
        openai_key = os.getenv("OPENAI_API_KEY")
        if openai_key:
            print(f"βœ… OpenAI API key found: {openai_key[:8]}...{openai_key[-4:]}")
            self.clients["openai"] = openai.OpenAI(api_key=openai_key)
        else:
            print("❌ OpenAI API key not found")
        
        # Anthropic
        anthropic_key = os.getenv("ANTHROPIC_API_KEY")
        if anthropic_key:
            print(f"βœ… Anthropic API key found: {anthropic_key[:8]}...{anthropic_key[-4:]}")
            self.clients["anthropic"] = anthropic.Anthropic(api_key=anthropic_key)
        else:
            print("❌ Anthropic API key not found")
        
        # DeepSeek (uses OpenAI-compatible API)
        deepseek_key = os.getenv("DEEPSEEK_API_KEY")
        if deepseek_key:
            print(f"βœ… DeepSeek API key found: {deepseek_key[:8]}...{deepseek_key[-4:]}")
            self.clients["deepseek"] = openai.OpenAI(
                api_key=deepseek_key,
                base_url="https://api.deepseek.com/v1"
            )
        else:
            print("❌ DeepSeek API key not found")

        # Mercury API (OpenAI-compatible via Inception Labs)
        # Support both MERCURY_API_KEY and INCEPTION_API_KEY
        mercury_key = os.getenv("MERCURY_API_KEY") or os.getenv("INCEPTION_API_KEY")
        if mercury_key:
            print(f"βœ… Mercury API key found: {mercury_key[:8]}...{mercury_key[-4:]}")
            try:
                # Prefer explicit base-URL env vars; default to Inception Labs' documented endpoint
                base_url = (
                    os.getenv("MERCURY_BASE_URL")
                    or os.getenv("INCEPTION_BASE_URL")
                    or "https://api.inceptionlabs.ai/v1"
                )
                self.clients["mercury"] = openai.OpenAI(api_key=mercury_key, base_url=base_url)
                print("βœ… Mercury client initialized successfully")
            except Exception as e:
                print(f"⚠️  Mercury client initialization failed: {e}")
                # Still add to clients so it appears in UI, but will show error when used
                self.clients["mercury"] = None
        else:
            print("❌ Mercury API key not found")

        # Hugging Face: check for the API token under its common variable names
        hf_token = os.getenv("HUGGINGFACE_API_KEY") or os.getenv("HUGGINGFACEHUB_API_TOKEN") or os.getenv("HF_TOKEN")
        if hf_token:
            print(f"βœ… Hugging Face token found: {hf_token[:8]}...{hf_token[-4:]}")
            self.clients["huggingface"] = InferenceClient(token=hf_token)
        else:
            print("❌ Hugging Face token not found")
    
    def get_available_models(self) -> Dict[str, str]:
        """Return available models with display names."""
        models = {}
        if "openai" in self.clients:
            models["openai"] = "OpenAI GPT-4o-mini"
        if "anthropic" in self.clients:
            models["anthropic"] = "Claude 4.5 Haiku"
        if "deepseek" in self.clients:
            models["deepseek"] = "DeepSeek Coder V2"
        if "mercury" in self.clients:
            models["mercury"] = "Mercury Fast LLM"
        if "huggingface" in self.clients:
            models["huggingface"] = "Hugging Face (Mixtral)"
        return models
    
    def query(self, model: str, prompt: str, temperature: float = 0.1) -> LLMResponse:
        """Query a specific LLM model."""
        try:
            if model == "openai" and "openai" in self.clients:
                response = self.clients["openai"].chat.completions.create(
                    model="gpt-4o-mini",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=temperature
                )
                return LLMResponse(
                    content=response.choices[0].message.content,
                    model="OpenAI GPT-4o-mini",
                    success=True
                )
            
            elif model == "anthropic" and "anthropic" in self.clients:
                response = self.clients["anthropic"].messages.create(
                    model="claude-3-5-haiku-20241022",
                    messages=[{"role": "user", "content": prompt}],
                    max_tokens=2000,
                    temperature=temperature
                )
                return LLMResponse(
                    content=response.content[0].text,
                    model="Claude 4.5 Haiku",
                    success=True
                )
            
            elif model == "deepseek" and "deepseek" in self.clients:
                try:
                    response = self.clients["deepseek"].chat.completions.create(
                        model="deepseek-coder-v2",
                        messages=[{"role": "user", "content": prompt}],
                        temperature=temperature
                    )
                    return LLMResponse(
                        content=response.choices[0].message.content,
                        model="DeepSeek Coder V2",
                        success=True
                    )
                except Exception as deepseek_error:
                    # Try with alternative model name if the first one fails
                    try:
                        response = self.clients["deepseek"].chat.completions.create(
                            model="deepseek-coder",
                            messages=[{"role": "user", "content": prompt}],
                            temperature=temperature
                        )
                        return LLMResponse(
                            content=response.choices[0].message.content,
                            model="DeepSeek Coder V2",
                            success=True
                        )
                    except Exception as second_error:
                        return LLMResponse(
                            content="",
                            model="DeepSeek Coder V2",
                            success=False,
                            error=f"DeepSeek API Error: {str(deepseek_error)}. Also tried alternative model: {str(second_error)}"
                        )

            elif model == "mercury" and "mercury" in self.clients:
                # Check if Mercury client is properly initialized
                if self.clients["mercury"] is None:
                    return LLMResponse(
                        content="",
                        model="Mercury Fast LLM",
                        success=False,
                        error="Mercury API client not properly initialized. Check your API key and endpoint configuration."
                    )
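
                # Note: the pre-initialized self.clients["mercury"] entry serves only as
                # an availability flag; each attempt below constructs a fresh client so
                # that every candidate endpoint can be probed independently.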

                # Build candidate base URLs (env first, then known defaults)
                candidate_base_urls = []
                if os.getenv("MERCURY_BASE_URL"):
                    candidate_base_urls.append(os.getenv("MERCURY_BASE_URL"))
                if os.getenv("INCEPTION_BASE_URL"):
                    candidate_base_urls.append(os.getenv("INCEPTION_BASE_URL"))
                candidate_base_urls.extend([
                    "https://api.inceptionlabs.ai/v1",
                    "https://api.mercury.ai/v1",
                    "https://api.mercury.ai",
                ])

                # Candidate model names (env first, then fallbacks)
                candidate_models = []
                env_model = os.getenv("MERCURY_MODEL_NAME") or os.getenv("INCEPTION_MODEL_NAME")
                if env_model:
                    candidate_models.append(env_model)
                candidate_models.extend(["mercury", "mercury-fast", "mercury-pro", "gpt-4", "gpt-3.5-turbo"])

                last_error: Optional[str] = None

                for base_url in candidate_base_urls:
                    try:
                        client = openai.OpenAI(
                            api_key=(os.getenv("MERCURY_API_KEY") or os.getenv("INCEPTION_API_KEY")),
                            base_url=base_url,
                        )
                        for mercury_model in candidate_models:
                            try:
                                response = client.chat.completions.create(
                                    model=mercury_model,
                                    messages=[{"role": "user", "content": prompt}],
                                    temperature=temperature,
                                    max_tokens=2000,
                                )
                                return LLMResponse(
                                    content=response.choices[0].message.content,
                                    model="Mercury Fast LLM",
                                    success=True,
                                )
                            except Exception as model_error:
                                last_error = f"{type(model_error).__name__}: {str(model_error)}"
                                continue
                    except Exception as client_error:
                        last_error = f"{type(client_error).__name__}: {str(client_error)}"
                        continue

                # If all attempts failed, provide a consolidated error
                if last_error and "503" in last_error:
                    return LLMResponse(
                        content="",
                        model="Mercury Fast LLM",
                        success=False,
                        error=(
                            "Mercury/Inception API returned 503 across endpoints. Service may be down. "
                            "Tried endpoints: " + ", ".join(candidate_base_urls)
                        ),
                    )
                return LLMResponse(
                    content="",
                    model="Mercury Fast LLM",
                    success=False,
                    error=(
                        "Mercury API request failed after trying multiple endpoints and models. "
                        f"Last error: {last_error or 'unknown error'}"
                    ),
                )

            elif model == "huggingface" and "huggingface" in self.clients:
                try:
                    # Use chat completion API for Mixtral model (most compatible)
                    response = self.clients["huggingface"].chat_completion(
                        messages=[{"role": "user", "content": prompt}],
                        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
                        max_tokens=2000,
                        temperature=temperature if temperature > 0 else 0.1,
                    )
                    return LLMResponse(
                        content=response.choices[0].message.content,
                        model="Hugging Face (Mixtral)",
                        success=True
                    )
                except Exception as hf_error:
                    # Fallback to text generation with a simpler model
                    try:
                        response = self.clients["huggingface"].text_generation(
                            prompt,
                            model="microsoft/DialoGPT-medium",
                            max_new_tokens=2000,
                            temperature=temperature if temperature > 0 else 0.1,
                        )
                        return LLMResponse(
                            content=response,
                            model="Hugging Face (DialoGPT)",
                            success=True
                        )
                    except Exception as fallback_error:
                        return LLMResponse(
                            content="",
                            model="Hugging Face (Mixtral)",
                            success=False,
                            error=f"Hugging Face API Error: {str(hf_error)}. Fallback also failed: {str(fallback_error)}"
                        )
            
            else:
                return LLMResponse(
                    content="",
                    model=model,
                    success=False,
                    error=f"Model {model} not available or not configured"
                )
                
        except Exception as e:
            return LLMResponse(
                content="",
                model=model,
                success=False,
                error=str(e)
            )
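

# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of how LLMClientManager might be exercised, assuming the
# relevant API keys are present in the environment or a .env file.
if __name__ == "__main__":
    manager = LLMClientManager()
    available = manager.get_available_models()
    print(f"Available models: {available}")
    for key in available:
        result = manager.query(key, "Reply with a one-sentence greeting.")
        if result.success:
            print(f"[{result.model}] {(result.content or '')[:100]}")
        else:
            print(f"[{result.model}] failed: {result.error}")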