// ai_engine.js — OpenRouter-backed AI engine (last updated by everydaycats, commit eec45b9)
import OpenAI from "openai";
// Initialize OpenRouter using the standard OpenAI SDK — OpenRouter exposes an
// OpenAI-compatible API surface, so the official client works with a custom baseURL.
const openRouterAI = new OpenAI({
// Required credential; no fallback, so requests fail with 401 if unset.
apiKey: process.env.OPENROUTER_API_KEY,
baseURL: "https://openrouter.ai/api/v1",
// Optional attribution headers (presumably for OpenRouter's app identification /
// rankings — see OpenRouter docs); env vars override the hard-coded defaults.
defaultHeaders: {
"HTTP-Referer": process.env.YOUR_SITE_URL || "https://everydaycats.web.app",
"X-Title": process.env.YOUR_SITE_NAME || "Aura Scanner",
}
});
// --- DYNAMIC MODEL ROUTER ---
// Resolves a friendly, case-insensitive model alias to a concrete OpenRouter
// model id. Unknown, empty, or missing aliases fall back to the Qwen
// "thinking" vision model (same behavior as the "qwen" alias).
const getModelId = (modelName) => {
  const routes = new Map([
    ["llama", "meta-llama/llama-4-maverick:floor"],
    ["maverick", "meta-llama/llama-4-maverick:floor"],
    ["llama-3.2-11b", "meta-llama/llama-3.2-11b-vision-instruct:free"],
    ["qwen-2-vl-7b", "qwen/qwen-2-vl-7b-instruct:free"],
    ["nemotron-12b", "nvidia/nemotron-nano-12b-v2-vl:free"],
    ["gpt", "openai/gpt-5.4-nano"],
    ["gpt-5.4-nano", "openai/gpt-5.4-nano"],
    ["qwen-non", "qwen/qwen3-vl-30b-a3b-instruct"],
    ["qwen", "qwen/qwen3-vl-30b-a3b-thinking:floor"],
  ]);
  return routes.get(modelName?.toLowerCase()) ?? "qwen/qwen3-vl-30b-a3b-thinking:floor";
};
// --- DEDICATED ERROR LOGGER ---
/**
 * Maps OpenRouter/HTTP failures to actionable console diagnostics.
 * Fixes the mojibake-encoded emoji (UTF-8 emoji previously misdecoded, e.g.
 * "๐Ÿ’ธ" for 💸) and normalizes spacing after the emoji tag.
 *
 * @param {Error & {status?: number, code?: string, response?: {status?: number}}} error
 *        Error thrown by the OpenAI SDK or the network layer.
 * @param {string} context - Name of the calling operation, e.g. "generateCompletion".
 */
const logOpenRouterError = (error, context) => {
  // The SDK surfaces the HTTP status either directly or on the response object.
  const status = error.status || error.response?.status;
  if (status === 402) {
    console.error(`💸 [OPENROUTER FUNDS - 402] ${context}: Insufficient credits! Top up your OpenRouter account.`);
  } else if (status === 429) {
    console.error(`🚨 [OPENROUTER THROTTLING - 429] ${context}: Rate limit exceeded. Provider is congested.`);
  } else if (status === 401) {
    console.error(`🛑 [OPENROUTER AUTH - 401] ${context}: Invalid API Key.`);
  } else if (status === 400) {
    console.error(`⚠️ [OPENROUTER BAD REQUEST - 400] ${context}: Invalid payload, image format, or context length exceeded.`);
  } else if (status >= 500) {
    console.error(`🔥 [OPENROUTER SERVER ERROR - ${status}] ${context}: The specific AI provider backend is down.`);
  } else if (error.code === 'ECONNRESET' || error.code === 'ETIMEDOUT') {
    console.error(`⏳ [NETWORK TIMEOUT] ${context}: Connection to OpenRouter dropped.`);
  } else {
    // Fallback for anything unrecognized (404s, parse errors, plain Errors, ...).
    console.error(`❌ [OPENROUTER ERROR] ${context}:`, error.message || error);
  }
};
/**
 * Runs a single (non-streaming) JSON-mode chat completion against OpenRouter.
 *
 * @param {Object} opts
 * @param {string} [opts.model] - Friendly model alias, resolved via getModelId().
 * @param {string} opts.prompt - User prompt text.
 * @param {string} [opts.system_prompt] - Optional system message; a generic one is used if omitted.
 * @param {string[]} [opts.images] - Optional images as raw base64 strings or data URIs.
 * @returns {Promise<{success: true, data: string, usage: Object, model_used: string}
 *                 | {success: false, error: string, status: number}>}
 *          Never rejects: failures are logged and returned as `{success: false, ...}`.
 */
export const generateCompletion = async ({ model, prompt, system_prompt, images }) => {
  try {
    const targetModel = getModelId(model);
    const messagesPayload = [
      { role: "system", content: system_prompt || "You are an elite AI assistant." }
    ];

    // VISION SUPPORT: OpenAI/OpenRouter requires the data URI prefix for base64 images.
    if (images && Array.isArray(images) && images.length > 0) {
      const userContent = [{ type: "text", text: prompt }];
      for (const imgStr of images) {
        // SAFETY CHECK: reject oversized payloads (~7MB of base64 text, roughly
        // 5MB of raw image bytes) to save bandwidth and avoid API errors.
        // NOTE(review): the user-facing message says 4MB — confirm the intended limit.
        if (imgStr.length > 7000000) {
          throw new Error("Image is too large. Please upload an image under 4MB.");
        }
        const formattedImg = imgStr.startsWith('data:image')
          ? imgStr
          : `data:image/jpeg;base64,${imgStr}`;
        userContent.push({ type: "image_url", image_url: { url: formattedImg } });
      }
      messagesPayload.push({ role: "user", content: userContent });
    } else {
      messagesPayload.push({ role: "user", content: prompt });
    }

    const response = await openRouterAI.chat.completions.create({
      model: targetModel,
      messages: messagesPayload,
      response_format: { type: "json_object" },
      max_tokens: 1200 // Caps reserved output tokens to stop 402 wallet errors on OpenRouter.
    });

    // Guard against empty/malformed responses so callers get a clear error
    // instead of "Cannot read properties of undefined".
    const content = response?.choices?.[0]?.message?.content;
    if (content == null) {
      throw new Error("OpenRouter returned no completion choices.");
    }

    return {
      success: true,
      data: content,
      usage: response.usage,
      model_used: targetModel
    };
  } catch (error) {
    logOpenRouterError(error, "generateCompletion");
    return {
      success: false,
      error: error.message,
      status: error.status || 500
    };
  }
};
/**
 * Streams a chat completion from OpenRouter straight into an HTTP response.
 * Wire protocol written to `res`: raw content chunks as they arrive,
 * `__THINK__<text>` for reasoning deltas, then a final `__USAGE__<json>`
 * trailer before the stream is closed.
 *
 * @param {Object} opts
 * @param {string} [opts.model] - Friendly model alias, resolved via getModelId().
 * @param {string} opts.prompt - User prompt text.
 * @param {string} [opts.system_prompt] - Optional system message override.
 * @param {string[]} [opts.images] - Optional images as raw base64 strings or data URIs.
 * @param {import('http').ServerResponse} opts.res - Response to stream into; always ended.
 * @returns {Promise<void>}
 */
export const streamCompletion = async ({ model, prompt, system_prompt, images, res }) => {
  try {
    const targetModel = getModelId(model);
    const messagesPayload = [
      { role: "system", content: system_prompt || "You are an elite AI assistant." }
    ];

    // VISION SUPPORT: OpenAI/OpenRouter requires the data URI prefix for base64 images.
    if (images && Array.isArray(images) && images.length > 0) {
      const userContent = [{ type: "text", text: prompt }];
      for (const imgStr of images) {
        // SAFETY CHECK: reject oversized payloads (~7MB of base64 text).
        if (imgStr.length > 7000000) {
          throw new Error("Image is too large. Please upload an image under 4MB.");
        }
        const formattedImg = imgStr.startsWith('data:image')
          ? imgStr
          : `data:image/jpeg;base64,${imgStr}`;
        userContent.push({ type: "image_url", image_url: { url: formattedImg } });
      }
      messagesPayload.push({ role: "user", content: userContent });
    } else {
      messagesPayload.push({ role: "user", content: prompt });
    }

    const stream = await openRouterAI.chat.completions.create({
      model: targetModel,
      messages: messagesPayload,
      stream: true,
      stream_options: { include_usage: true }, // ask for a final usage chunk
      max_tokens: 2000 // Caps reserved output tokens to stop 402 wallet errors.
    });

    let totalTokenCount = 0;
    for await (const chunk of stream) {
      // The usage-only final chunk can carry an empty (or absent) `choices`
      // array, so chain optionals the whole way instead of indexing blindly.
      const delta = chunk.choices?.[0]?.delta;
      if (delta?.reasoning_content) {
        // Qwen-style "thinking" blocks stream on a separate delta field.
        res.write(`__THINK__${delta.reasoning_content}`);
      } else if (delta?.content) {
        res.write(delta.content);
      }
      if (chunk.usage) {
        totalTokenCount = chunk.usage.total_tokens;
      }
    }
    res.write(`__USAGE__${JSON.stringify({ totalTokenCount })}`);
    res.end();
  } catch (error) {
    logOpenRouterError(error, "streamCompletion");
    // Surface the error on the stream so the frontend doesn't hang forever —
    // but only if the response hasn't already been finished.
    if (!res.writableEnded) {
      res.write(`\n\n[ERROR]: ${error.message}`);
      res.end();
    }
  }
};