Upload core/llm.py with huggingface_hub
core/llm.py ADDED (+144, -0)

@@ -0,0 +1,144 @@
"""
LLM Connector - Multi-Provider Intelligence
=============================================
Connects to available LLM APIs for content generation and reasoning.
Falls back gracefully between providers.
"""
import json
import logging
import urllib.request
import urllib.error
from typing import Optional

logger = logging.getLogger("openclaw.llm")


class LLMConnector:
    """Multi-provider LLM connector."""

    PROVIDERS = {
        "groq": {
            "url": "https://api.groq.com/openai/v1/chat/completions",
            "model": "llama-3.3-70b-versatile",
            "header_key": "Authorization",
            "header_prefix": "Bearer ",
        },
        "gemini": {
            "url": "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent",
            "model": "gemini-2.0-flash",
            "header_key": "x-goog-api-key",
            "header_prefix": "",
        },
        "nvidia": {
            "url": "https://integrate.api.nvidia.com/v1/chat/completions",
            "model": "meta/llama-3.1-70b-instruct",
            "header_key": "Authorization",
            "header_prefix": "Bearer ",
        },
    }

    def __init__(self, provider: str, api_key: str):
        self.provider = provider
        self.api_key = api_key
        self.config = self.PROVIDERS.get(provider, {})

    def generate(self, prompt: str, system: str = "", max_tokens: int = 1024, temperature: float = 0.7) -> Optional[str]:
        """Generate text using the configured LLM."""
        if not self.api_key or not self.config:
            logger.warning(f"LLM provider '{self.provider}' not configured")
            return None

        try:
            if self.provider == "gemini":
                return self._generate_gemini(prompt, system, max_tokens, temperature)
            else:
                return self._generate_openai_compat(prompt, system, max_tokens, temperature)
        except Exception as e:
            logger.error(f"LLM generation failed ({self.provider}): {e}")
            return None

    def _generate_openai_compat(self, prompt: str, system: str, max_tokens: int, temperature: float) -> Optional[str]:
        """Generate using OpenAI-compatible API (Groq, NVIDIA)."""
        messages = []
        if system:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})

        data = json.dumps({
            "model": self.config["model"],
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }).encode()

        headers = {
            "Content-Type": "application/json",
            self.config["header_key"]: f"{self.config['header_prefix']}{self.api_key}",
        }

        req = urllib.request.Request(self.config["url"], data=data, headers=headers, method="POST")

        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read().decode())

        return result["choices"][0]["message"]["content"]

    def _generate_gemini(self, prompt: str, system: str, max_tokens: int, temperature: float) -> Optional[str]:
        """Generate using Google Gemini API."""
        url = f"{self.config['url']}?key={self.api_key}"

        parts = []
        if system:
            parts.append({"text": f"System: {system}\n\nUser: {prompt}"})
        else:
            parts.append({"text": prompt})

        data = json.dumps({
            "contents": [{"parts": parts}],
            "generationConfig": {
                "maxOutputTokens": max_tokens,
                "temperature": temperature,
            }
        }).encode()

        headers = {"Content-Type": "application/json"}
        req = urllib.request.Request(url, data=data, headers=headers, method="POST")

        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read().decode())

        return result["candidates"][0]["content"]["parts"][0]["text"]


class MultiLLM:
    """Try multiple LLM providers in order, with key rotation."""

    def __init__(self, providers: dict[str, str]):
        """providers: dict of {provider_name: api_key} or {provider_name: 'key1,key2,key3'}"""
        self.connectors = []
        # Priority order: nvidia (working), groq (fast), gemini (free)
        for name in ["nvidia", "groq", "gemini"]:
            if name in providers and providers[name]:
                # Support comma-separated multiple keys
                keys = [k.strip() for k in providers[name].split(",") if k.strip()]
                for key in keys:
                    self.connectors.append(LLMConnector(name, key))

    def generate(self, prompt: str, system: str = "", max_tokens: int = 1024, temperature: float = 0.7) -> str:
        """Try each provider until one works."""
        for connector in self.connectors:
            try:
                result = connector.generate(prompt, system, max_tokens, temperature)
                if result:
                    logger.info(f"LLM response from {connector.provider}")
                    return result
            except Exception as e:
                logger.warning(f"Provider {connector.provider} failed: {e}")
                continue

        logger.warning("All LLM providers failed, using template")
        return ""

    @property
    def available(self) -> bool:
        return len(self.connectors) > 0
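Usage note (not part of the commit): a minimal sketch of how MultiLLM might be wired up. The core.llm import path and the environment-variable names are assumptions for illustration; only MultiLLM, available, and generate come from the file above.

import os

from core.llm import MultiLLM  # assumed import path for the uploaded file

# Each value is a single API key, or a comma-separated list for key rotation.
llm = MultiLLM({
    "nvidia": os.environ.get("NVIDIA_API_KEY", ""),
    "groq": os.environ.get("GROQ_API_KEY", ""),
    "gemini": os.environ.get("GEMINI_API_KEY", ""),
})

if llm.available:
    text = llm.generate(
        "Explain provider fallback in two sentences.",
        system="You are a concise assistant.",
        max_tokens=256,
    )
    # An empty string means every configured provider failed, so the
    # caller is expected to fall back to a template.
    print(text or "(all providers failed)")

Because the connector uses only urllib from the standard library, this runs with no third-party dependencies; a failure on one provider simply advances the loop to the next configured key.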