// videoscriber-backend/lib/llm-provider.js
import { getConfig } from './config.js';
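
// Error type that carries the upstream HTTP status and, when available, the
// decoded response payload so callers can report provider failures precisely.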
export class LlmProviderError extends Error {
  constructor(message, status = 500, data = null) {
    super(message);
    this.name = 'LlmProviderError';
    this.status = status;
    this.data = data;
  }
}
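
// Join a base URL and a path without doubling or dropping slashes, e.g.
// joinUrl('https://api.example.com/v1/', '/chat/completions')
//   -> 'https://api.example.com/v1/chat/completions'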
function joinUrl(baseUrl, path) {
  if (!baseUrl) return '';
  return `${baseUrl.replace(/\/+$/, '')}${path.startsWith('/') ? '' : '/'}${path}`;
}
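
// Extract the completion text from an OpenAI-compatible payload. Supports
// the chat shape (choices[0].message.content, either a string or an array
// of { text } content parts) and the legacy completion shape (choices[0].text).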
function normalizeResponseText(payload) {
  const first = payload?.choices?.[0];
  const text = first?.message?.content || first?.text || '';
  if (Array.isArray(text)) {
    return text.map((part) => part?.text || '').join(' ').trim();
  }
  return String(text || '').trim();
}
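
/**
 * Call the configured OpenAI-compatible chat completions endpoint with a
 * single system + user message pair and return the assistant's text.
 * Throws LlmProviderError when the provider is unconfigured or unsupported
 * (503), when the request fails (upstream status), or when the response is
 * empty (502).
 */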
export async function generateText({
  systemPrompt = '',
  userPrompt = '',
  temperature = 0.2,
  maxTokens = 1200,
  responseFormat = null,
} = {}) {
  const cfg = getConfig();

  // Fail fast with a 503 when the provider is missing or unsupported, so
  // callers can distinguish local misconfiguration from upstream errors.
  if (!cfg.llmBaseUrl || !cfg.llmApiKey || !cfg.llmModel) {
    throw new LlmProviderError('LLM provider is not configured', 503);
  }
  if (cfg.llmProvider !== 'openai_compatible') {
    throw new LlmProviderError(`Unsupported LLM provider: ${cfg.llmProvider}`, 503);
  }

  const endpoint = joinUrl(cfg.llmBaseUrl, '/chat/completions');
  const body = {
    model: cfg.llmModel,
    temperature,
    max_tokens: maxTokens,
    messages: [
      {
        role: 'system',
        content: String(systemPrompt || 'You are a helpful assistant.'),
      },
      {
        role: 'user',
        content: String(userPrompt || ''),
      },
    ],
  };
  if (responseFormat) {
    body.response_format = responseFormat;
  }

  const response = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${cfg.llmApiKey}`,
    },
    body: JSON.stringify(body),
  });

  // Read the body as text first so a non-JSON error page is still preserved
  // in the thrown error's data instead of crashing the JSON parse.
  const rawText = await response.text();
  let payload;
  try {
    payload = JSON.parse(rawText);
  } catch {
    payload = { raw: rawText };
  }

  if (!response.ok) {
    const message =
      payload?.error?.message || payload?.message || `LLM request failed (${response.status})`;
    throw new LlmProviderError(message, response.status, payload);
  }

  const text = normalizeResponseText(payload);
  if (!text) {
    throw new LlmProviderError('LLM returned an empty response', 502, payload);
  }

  return {
    text,
    payload,
    model: cfg.llmModel,
    provider: cfg.llmProvider,
  };
}
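
// Usage sketch (illustrative, not part of this module). Assumes getConfig()
// has resolved llmBaseUrl/llmApiKey/llmModel; the prompts and the
// transcriptText variable below are placeholders.
//
//   import { generateText, LlmProviderError } from './llm-provider.js';
//
//   try {
//     const { text, model } = await generateText({
//       systemPrompt: 'You summarize video transcripts.',
//       userPrompt: transcriptText,
//       maxTokens: 800,
//     });
//     console.log(`[${model}] ${text}`);
//   } catch (err) {
//     if (err instanceof LlmProviderError) {
//       console.error(`LLM error ${err.status}: ${err.message}`);
//     } else {
//       throw err;
//     }
//   }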