Alibrown committed on
Commit
891cc60
·
verified ·
1 Parent(s): 92368cc

Update app/providers.py

Browse files
Files changed (1) hide show
  1. app/providers.py +115 -26
app/providers.py CHANGED
@@ -1,5 +1,6 @@
1
  # =============================================================================
2
  # app/providers.py
 
3
  # LLM + Search Provider Registry + Fallback Chain
4
  # Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
5
  # Copyright 2026 - Volkan Kücükbudak
@@ -18,7 +19,7 @@
18
  #
19
  # FALLBACK CHAIN:
20
  # Defined in .pyfun per provider via fallback_to field.
21
- # anthropic → fails → openrouter → fails → RuntimeError
22
  # Visited set prevents infinite loops.
23
  #
24
  # SECURITY NOTE:
@@ -26,6 +27,12 @@
26
  # All errors are sanitized before propagation — only HTTP status codes
27
  # and safe_url (query params stripped) are ever exposed in logs.
28
  #
 
 
 
 
 
 
29
  # DEPENDENCY CHAIN (app/* only, no fundaments!):
30
  # config.py → parses app/.pyfun — single source of truth
31
  # providers.py → LLM + Search registry + fallback chain
@@ -187,10 +194,109 @@ class HuggingFaceProvider(BaseProvider):
187
  return data["choices"][0]["message"]["content"]
188
 
189
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
190
  # =============================================================================
191
  # SECTION 3 — Provider Registry
192
  # Built from .pyfun [LLM_PROVIDERS] at initialize().
193
- # Maps provider names to classes — add new providers here.
 
194
  # =============================================================================
195
 
196
  _PROVIDER_CLASSES = {
@@ -198,6 +304,9 @@ _PROVIDER_CLASSES = {
198
  "gemini": GeminiProvider,
199
  "openrouter": OpenRouterProvider,
200
  "huggingface": HuggingFaceProvider,
 
 
 
201
  }
202
 
203
  _registry: dict = {}
@@ -250,7 +359,6 @@ async def llm_complete(
250
  Returns:
251
  Model response as plain text string.
252
  """
253
- # Default provider from .pyfun [TOOL.llm_complete] → default_provider
254
  if not provider_name:
255
  tools_cfg = config.get_active_tools()
256
  provider_name = tools_cfg.get("llm_complete", {}).get("default_provider", "anthropic")
@@ -276,7 +384,6 @@ async def llm_complete(
276
  f"Provider '{current}' failed: {type(e).__name__}: {e} — trying fallback."
277
  )
278
 
279
- # Next in fallback chain from .pyfun
280
  cfg = config.get_active_llm_providers().get(current, {})
281
  current = cfg.get("fallback_to", "")
282
 
@@ -322,39 +429,21 @@ async def search(
322
  # =============================================================================
323
 
324
  def list_active_llm() -> list:
325
- """
326
- List all active LLM provider names.
327
- Used by mcp.py to decide whether to register llm_complete tool.
328
-
329
- Returns:
330
- List of active LLM provider name strings.
331
- """
332
  return list(_registry.keys())
333
 
334
 
335
  def list_active_search() -> list:
336
  """
337
- List all active search provider names.
338
- Used by mcp.py to decide whether to register web_search tool.
339
- Returns empty list until search providers are implemented.
340
-
341
- Returns:
342
- List of active search provider name strings.
343
  """
344
  # TODO: return list(_search_registry.keys()) when search providers are ready
345
  return []
346
 
347
 
348
  def get(name: str) -> BaseProvider:
349
- """
350
- Get a specific provider instance by name.
351
-
352
- Args:
353
- name: Provider name (e.g. 'anthropic', 'huggingface').
354
-
355
- Returns:
356
- Provider instance, or None if not registered.
357
- """
358
  return _registry.get(name)
359
 
360
 
 
1
  # =============================================================================
2
  # app/providers.py
3
+ # 09.03.2026
4
  # LLM + Search Provider Registry + Fallback Chain
5
  # Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
6
  # Copyright 2026 - Volkan Kücükbudak
 
19
  #
20
  # FALLBACK CHAIN:
21
  # Defined in .pyfun per provider via fallback_to field.
22
+ # anthropic → fails → gemini → fails → openrouter → fails → RuntimeError
23
  # Visited set prevents infinite loops.
24
  #
25
  # SECURITY NOTE:
 
27
  # All errors are sanitized before propagation — only HTTP status codes
28
  # and safe_url (query params stripped) are ever exposed in logs.
29
  #
30
+ # HOW TO ADD A NEW LLM PROVIDER — 3 steps, nothing else to touch:
31
+ # 1. Add class below (copy a dummy, implement complete())
32
+ # 2. Register name → class in _PROVIDER_CLASSES dict
33
+ # 3. Add [LLM_PROVIDER.yourprovider] block in app/.pyfun
34
+ # → env_key, base_url, default_model, fallback_to
35
+ #
36
  # DEPENDENCY CHAIN (app/* only, no fundaments!):
37
  # config.py → parses app/.pyfun — single source of truth
38
  # providers.py → LLM + Search registry + fallback chain
 
194
  return data["choices"][0]["message"]["content"]
195
 
196
 
197
+ # =============================================================================
198
+ # DUMMY PROVIDERS — copy, uncomment, adapt
199
+ # Steps: (1) uncomment class (2) add to _PROVIDER_CLASSES (3) add to .pyfun
200
+ # =============================================================================
201
+
202
+ # --- OpenAI -------------------------------------------------------------------
203
+ # .pyfun block to add:
204
+ #
205
+ # [LLM_PROVIDER.openai]
206
+ # active = "true"
207
+ # base_url = "https://api.openai.com/v1"
208
+ # env_key = "OPENAI_API_KEY"
209
+ # default_model = "gpt-4o-mini"
210
+ # models = "gpt-4o, gpt-4o-mini, gpt-3.5-turbo"
211
+ # fallback_to = ""
212
+ # [LLM_PROVIDER.openai_END]
213
+ #
214
+ # class OpenAIProvider(BaseProvider):
215
+ # """OpenAI API — OpenAI-compatible chat completions endpoint."""
216
+ #
217
+ # async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
218
+ # data = await self._post(
219
+ # f"{self.base_url}/chat/completions",
220
+ # headers={
221
+ # "Authorization": f"Bearer {self.key}",
222
+ # "content-type": "application/json",
223
+ # },
224
+ # payload={
225
+ # "model": model or self.model,
226
+ # "max_tokens": max_tokens,
227
+ # "messages": [{"role": "user", "content": prompt}],
228
+ # },
229
+ # )
230
+ # return data["choices"][0]["message"]["content"]
231
+
232
+
233
+ # --- Mistral ------------------------------------------------------------------
234
+ # .pyfun block to add:
235
+ #
236
+ # [LLM_PROVIDER.mistral]
237
+ # active = "true"
238
+ # base_url = "https://api.mistral.ai/v1"
239
+ # env_key = "MISTRAL_API_KEY"
240
+ # default_model = "mistral-large-latest"
241
+ # models = "mistral-large-latest, mistral-small-latest, codestral-latest"
242
+ # fallback_to = ""
243
+ # [LLM_PROVIDER.mistral_END]
244
+ #
245
+ # class MistralProvider(BaseProvider):
246
+ # """Mistral AI API — OpenAI-compatible chat completions endpoint."""
247
+ #
248
+ # async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
249
+ # data = await self._post(
250
+ # f"{self.base_url}/chat/completions",
251
+ # headers={
252
+ # "Authorization": f"Bearer {self.key}",
253
+ # "content-type": "application/json",
254
+ # },
255
+ # payload={
256
+ # "model": model or self.model,
257
+ # "max_tokens": max_tokens,
258
+ # "messages": [{"role": "user", "content": prompt}],
259
+ # },
260
+ # )
261
+ # return data["choices"][0]["message"]["content"]
262
+
263
+
264
+ # --- xAI (Grok) ---------------------------------------------------------------
265
+ # .pyfun block to add:
266
+ #
267
+ # [LLM_PROVIDER.xai]
268
+ # active = "true"
269
+ # base_url = "https://api.x.ai/v1"
270
+ # env_key = "XAI_API_KEY"
271
+ # default_model = "grok-3-mini"
272
+ # models = "grok-3, grok-3-mini, grok-3-fast"
273
+ # fallback_to = ""
274
+ # [LLM_PROVIDER.xai_END]
275
+ #
276
+ # class XAIProvider(BaseProvider):
277
+ # """xAI Grok API — OpenAI-compatible chat completions endpoint."""
278
+ #
279
+ # async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
280
+ # data = await self._post(
281
+ # f"{self.base_url}/chat/completions",
282
+ # headers={
283
+ # "Authorization": f"Bearer {self.key}",
284
+ # "content-type": "application/json",
285
+ # },
286
+ # payload={
287
+ # "model": model or self.model,
288
+ # "max_tokens": max_tokens,
289
+ # "messages": [{"role": "user", "content": prompt}],
290
+ # },
291
+ # )
292
+ # return data["choices"][0]["message"]["content"]
293
+
294
+
295
  # =============================================================================
296
  # SECTION 3 — Provider Registry
297
  # Built from .pyfun [LLM_PROVIDERS] at initialize().
298
+ # Maps provider names to classes.
299
+ # To activate a dummy: uncomment class above + add entry here.
300
  # =============================================================================
301
 
302
  _PROVIDER_CLASSES = {
 
304
  "gemini": GeminiProvider,
305
  "openrouter": OpenRouterProvider,
306
  "huggingface": HuggingFaceProvider,
307
+ # "openai": OpenAIProvider, # ← uncomment to activate
308
+ # "mistral": MistralProvider, # ← uncomment to activate
309
+ # "xai": XAIProvider, # ← uncomment to activate
310
  }
311
 
312
  _registry: dict = {}
 
359
  Returns:
360
  Model response as plain text string.
361
  """
 
362
  if not provider_name:
363
  tools_cfg = config.get_active_tools()
364
  provider_name = tools_cfg.get("llm_complete", {}).get("default_provider", "anthropic")
 
384
  f"Provider '{current}' failed: {type(e).__name__}: {e} — trying fallback."
385
  )
386
 
 
387
  cfg = config.get_active_llm_providers().get(current, {})
388
  current = cfg.get("fallback_to", "")
389
 
 
429
  # =============================================================================
430
 
431
  def list_active_llm() -> list:
432
+ """Returns list of active LLM provider names."""
 
 
 
 
 
 
433
  return list(_registry.keys())
434
 
435
 
436
  def list_active_search() -> list:
437
  """
438
+ Returns list of active search provider names.
439
+ Empty until search providers are implemented.
 
 
 
 
440
  """
441
  # TODO: return list(_search_registry.keys()) when search providers are ready
442
  return []
443
 
444
 
445
  def get(name: str) -> BaseProvider:
446
+ """Get a specific provider instance by name."""
 
 
 
 
 
 
 
 
447
  return _registry.get(name)
448
 
449