# LLM/llm.py
from __future__ import annotations

import json
from typing import Any, Dict, List

import requests


class LLMError(RuntimeError):
    """Raised when an LLM request fails or the response cannot be parsed."""


def chat_completion(
    api_key: str,
    messages: List[Dict[str, str]],
    model: str = "gpt-4o-mini",
    base_url: str = "https://api.openai.com",
    timeout_s: int = 45,
) -> str:
"""
Minimal OpenAI-compatible Chat Completions call.
Expects provider that supports:
POST {base_url}/v1/chat/completions
"""
    if not api_key or not api_key.strip():
        raise LLMError("LLM API key is empty.")

    url = base_url.rstrip("/") + "/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key.strip()}",
        "Content-Type": "application/json",
    }
    payload: Dict[str, Any] = {
        "model": model,
        "messages": messages,
        "temperature": 0.3,
    }
    try:
        r = requests.post(url, headers=headers, json=payload, timeout=timeout_s)
    except requests.RequestException as e:
        raise LLMError(f"Request failed: {e}") from e

    if r.status_code >= 400:
        raise LLMError(f"LLM HTTP {r.status_code}: {r.text[:400]}")

    # The body may not be JSON at all (e.g. an HTML error page from a proxy).
    try:
        data = r.json()
    except ValueError as e:
        raise LLMError(f"Non-JSON LLM response: {r.text[:400]}") from e

    try:
        return data["choices"][0]["message"]["content"]
    except (KeyError, IndexError, TypeError) as e:
        raise LLMError(f"Unexpected LLM response shape: {json.dumps(data)[:600]}") from e