import OpenAI from "openai";

// OpenRouter exposes an OpenAI-compatible API, so we drive it with the
// standard OpenAI SDK pointed at OpenRouter's base URL.
const openRouterAI = new OpenAI({
  apiKey: process.env.OPENROUTER_API_KEY,
  baseURL: "https://openrouter.ai/api/v1",
  defaultHeaders: {
    // Optional OpenRouter attribution headers (used for rankings/analytics).
    "HTTP-Referer": process.env.YOUR_SITE_URL || "https://everydaycats.web.app",
    "X-Title": process.env.YOUR_SITE_NAME || "Aura Scanner",
  }
});
// --- DYNAMIC MODEL ROUTER ---
// Maps a friendly model alias (case-insensitive) to a concrete OpenRouter
// model id. Unknown or missing names fall back to the default Qwen model.
const getModelId = (modelName) => {
  const DEFAULT_MODEL = "qwen/qwen3-vl-30b-a3b-thinking:floor";
  // Map (not an object literal) so prototype keys like "toString" can never
  // shadow the default fallback.
  const routes = new Map([
    ["llama", "meta-llama/llama-4-maverick:floor"],
    ["maverick", "meta-llama/llama-4-maverick:floor"],
    ["llama-3.2-11b", "meta-llama/llama-3.2-11b-vision-instruct:free"],
    ["qwen-2-vl-7b", "qwen/qwen-2-vl-7b-instruct:free"],
    ["nemotron-12b", "nvidia/nemotron-nano-12b-v2-vl:free"],
    ["gpt", "openai/gpt-5.4-nano"],
    ["gpt-5.4-nano", "openai/gpt-5.4-nano"],
    ["qwen-non", "qwen/qwen3-vl-30b-a3b-instruct"],
    ["qwen", DEFAULT_MODEL],
  ]);
  return routes.get(modelName?.toLowerCase()) ?? DEFAULT_MODEL;
};
// --- DEDICATED ERROR LOGGER ---
// Classifies an OpenRouter/SDK error by HTTP status (or network error code)
// and logs an operator-friendly message. Guard clauses keep the priority
// order explicit: known statuses first, then 5xx, then network drops.
const logOpenRouterError = (error, context) => {
  const status = error.status || error.response?.status;
  if (status === 402) {
    console.error(`💸 [OPENROUTER FUNDS - 402] ${context}: Insufficient credits! Top up your OpenRouter account.`);
    return;
  }
  if (status === 429) {
    console.error(`🚨[OPENROUTER THROTTLING - 429] ${context}: Rate limit exceeded. Provider is congested.`);
    return;
  }
  if (status === 401) {
    console.error(`🛑[OPENROUTER AUTH - 401] ${context}: Invalid API Key.`);
    return;
  }
  if (status === 400) {
    console.error(`⚠️[OPENROUTER BAD REQUEST - 400] ${context}: Invalid payload, image format, or context length exceeded.`);
    return;
  }
  if (status >= 500) {
    console.error(`🔥[OPENROUTER SERVER ERROR - ${status}] ${context}: The specific AI provider backend is down.`);
    return;
  }
  if (error.code === 'ECONNRESET' || error.code === 'ETIMEDOUT') {
    console.error(`⏳ [NETWORK TIMEOUT] ${context}: Connection to OpenRouter dropped.`);
    return;
  }
  // Unrecognized failure — log whatever detail we have.
  console.error(`❌[OPENROUTER ERROR] ${context}:`, error.message || error);
};
/**
 * Runs a single (non-streaming) chat completion against OpenRouter.
 *
 * @param {Object} opts
 * @param {string} [opts.model] - Friendly model alias, resolved via getModelId.
 * @param {string} opts.prompt - User prompt text.
 * @param {string} [opts.system_prompt] - System message; generic default if omitted.
 * @param {string[]} [opts.images] - Base64 image strings (with or without data-URI prefix).
 * @returns {Promise<{success: boolean, data?: string, usage?: Object, model_used?: string, error?: string, status?: number}>}
 *   Never rejects: failures are logged and returned as { success: false }.
 */
export const generateCompletion = async ({ model, prompt, system_prompt, images }) => {
  try {
    const targetModel = getModelId(model);

    const messagesPayload = [
      { role: "system", content: system_prompt || "You are an elite AI assistant." }
    ];

    // 🚨 VISION SUPPORT: OpenAI/OpenRouter requires the data URI prefix for base64
    if (Array.isArray(images) && images.length > 0) {
      const userContent = [{ type: "text", text: prompt }];
      for (const imgStr of images) {
        // SAFETY CHECK: reject massively oversized base64 strings (~7M chars
        // of base64 ≈ 4–5MB of image data) to save bandwidth & avoid API errors.
        if (imgStr.length > 7000000) {
          throw new Error("Image is too large. Please upload an image under 4MB.");
        }
        const formattedImg = imgStr.startsWith('data:image')
          ? imgStr
          : `data:image/jpeg;base64,${imgStr}`;
        userContent.push({ type: "image_url", image_url: { url: formattedImg } });
      }
      messagesPayload.push({ role: "user", content: userContent });
    } else {
      messagesPayload.push({ role: "user", content: prompt });
    }

    const response = await openRouterAI.chat.completions.create({
      model: targetModel,
      messages: messagesPayload,
      response_format: { type: "json_object" },
      // Cap reserved output tokens: OpenRouter pre-reserves max_tokens worth
      // of credits, so an uncapped request can trigger 402 wallet errors.
      max_tokens: 1200
    });

    return {
      success: true,
      data: response.choices[0].message.content,
      usage: response.usage,
      model_used: targetModel
    };
  } catch (error) {
    logOpenRouterError(error, "generateCompletion");
    return {
      success: false,
      error: error.message,
      status: error.status || 500
    };
  }
};
/**
 * Streams a chat completion from OpenRouter directly into an HTTP response.
 *
 * Protocol written to `res`: reasoning chunks are prefixed with "__THINK__",
 * plain content is written raw, and a final "__USAGE__{json}" line carries
 * the total token count before the stream is closed.
 *
 * @param {Object} opts
 * @param {string} [opts.model] - Friendly model alias, resolved via getModelId.
 * @param {string} opts.prompt - User prompt text.
 * @param {string} [opts.system_prompt] - System message; generic default if omitted.
 * @param {string[]} [opts.images] - Base64 image strings (with or without data-URI prefix).
 * @param {import('http').ServerResponse} opts.res - Response stream to write into.
 * @returns {Promise<void>} Never rejects: errors are logged and written to the stream.
 */
export const streamCompletion = async ({ model, prompt, system_prompt, images, res }) => {
  try {
    const targetModel = getModelId(model);

    const messagesPayload = [
      { role: "system", content: system_prompt || "You are an elite AI assistant." }
    ];

    // VISION SUPPORT: OpenAI/OpenRouter requires the data URI prefix for base64.
    if (Array.isArray(images) && images.length > 0) {
      const userContent = [{ type: "text", text: prompt }];
      for (const imgStr of images) {
        // SAFETY CHECK: reject massively oversized base64 strings (~7M chars
        // of base64 ≈ 4–5MB of image data) before hitting the API.
        if (imgStr.length > 7000000) {
          throw new Error("Image is too large. Please upload an image under 4MB.");
        }
        const formattedImg = imgStr.startsWith('data:image')
          ? imgStr
          : `data:image/jpeg;base64,${imgStr}`;
        userContent.push({ type: "image_url", image_url: { url: formattedImg } });
      }
      messagesPayload.push({ role: "user", content: userContent });
    } else {
      messagesPayload.push({ role: "user", content: prompt });
    }

    const stream = await openRouterAI.chat.completions.create({
      model: targetModel,
      messages: messagesPayload,
      stream: true,
      stream_options: { include_usage: true }, // final chunk carries token usage
      // Cap reserved output tokens to stop 402 wallet errors (OpenRouter
      // pre-reserves max_tokens worth of credits per request).
      max_tokens: 2000
    });

    let totalTokenCount = 0;
    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta;
      // Handle Qwen's specific "Thinking" blocks if they stream them.
      if (delta?.reasoning_content) {
        res.write(`__THINK__${delta.reasoning_content}`);
      } else if (delta?.content) {
        res.write(delta.content);
      }
      if (chunk.usage) {
        totalTokenCount = chunk.usage.total_tokens;
      }
    }

    res.write(`__USAGE__${JSON.stringify({ totalTokenCount })}`);
    res.end();
  } catch (error) {
    logOpenRouterError(error, "streamCompletion");
    // Surface the error on the stream so the frontend doesn't hang forever —
    // but only if the response hasn't already been finished, otherwise the
    // write itself would throw ERR_STREAM_WRITE_AFTER_END.
    if (!res.writableEnded) {
      res.write(`\n\n[ERROR]: ${error.message}`);
      res.end();
    }
  }
};