| |
| |
| |
| |
| |
| |
|
|
|
|
| import crypto from 'crypto';
|
| import { createContext, sendMessageStreaming, sendMultiChunkStreaming } from './chat.js';
|
| import { openaiToText, resolveModel, splitToChunks } from './message-convert.js';
|
| import { transformToOpenAISSE, collectFullResponse, transformToOpenAISSEWithTools, probeStream } from './stream-transform.js';
|
| import { parseToolCalls, toOpenAIToolCalls } from './tool-prompt.js';
|
| import config from './config.js';
|
|
|
// Maximum number of account-switch attempts per incoming request.
const MAX_RETRY = 3;
|
|
|
| |
| |
| |
|
|
/**
 * Decide whether a failed upstream request is worth retrying on a
 * different account. Message-length rejections are permanent (no account
 * can fix an oversized prompt); auth/rate-limit status codes and
 * quota-flavored error text are account-specific and may succeed elsewhere.
 *
 * @param {Error & {statusCode?: number}} e - error from the upstream call
 * @returns {boolean} true when switching accounts and retrying may help
 */
function isRetryable(e) {
  const message = (e.message || '').toLowerCase();

  // Prompt exceeded the upstream length cap: retrying cannot fix it.
  const permanentMarkers = ['maximumlengthmessage', 'message should be no more'];
  if (permanentMarkers.some((m) => message.includes(m))) return false;

  // Auth failures and rate limits are tied to the account, not the request.
  const status = e.statusCode || 0;
  if ([401, 403, 429].includes(status)) return true;

  // Quota/limit wording in the body also suggests an account-level cap.
  const quotaHints = [
    'limit', 'quota', 'exhaust', 'exceed',
    'too many', 'no available', 'insufficient', 'credits',
  ];
  return quotaHints.some((hint) => message.includes(hint));
}
|
|
|
| |
| |
|
|
/**
 * Hand an account back to the pool after a failed request, tagged so the
 * pool can decide what to do with it: a length rejection means the account
 * itself is healthy, 401/403 mean the session is dead, 429 means the
 * account's quota is spent, anything else is a plain failure.
 *
 * @param {{release: Function}} pool - account pool
 * @param {object} account - account previously acquired from the pool
 * @param {Error & {statusCode?: number}} e - the failure that occurred
 */
function releaseOnError(pool, account, e) {
  const message = (e.message || '').toLowerCase();
  const status = e.statusCode || 0;

  const isLengthReject =
    message.includes('maximumlengthmessage') ||
    message.includes('message should be no more');

  let outcome;
  if (isLengthReject) {
    // The account worked fine; the request itself was too long.
    outcome = { success: true };
  } else if (status === 401 || status === 403) {
    // Credentials no longer accepted upstream.
    outcome = { sessionExpired: true };
  } else if (status === 429) {
    outcome = { quotaExhausted: true };
  } else {
    outcome = { success: false };
  }

  pool.release(account, outcome);
}
|
|
|
| |
| |
|
|
// Error-text markers that indicate an account-level quota/limit problem.
// Unified here: previously this list was copy-pasted three times with
// inconsistent contents ('exceed' and 'too many' were missing from some
// copies), so the same upstream error was classified differently depending
// on where it surfaced.
const QUOTA_KEYWORDS = [
  'limit', 'quota', 'exhaust', 'exceed',
  'too many', 'insufficient', 'credits',
];

/**
 * True when an upstream error message reads like a quota/rate-limit
 * failure rather than a transient or request-specific error.
 *
 * @param {string|undefined} errMsg - raw upstream error text
 * @returns {boolean}
 */
function isQuotaMessage(errMsg) {
  const msg = (errMsg || '').toLowerCase();
  return QUOTA_KEYWORDS.some((kw) => msg.includes(kw));
}

/**
 * Release an account after a mid-stream error, flagging quota exhaustion
 * when the message looks quota-related and a plain failure otherwise.
 *
 * @param {{release: Function}} pool
 * @param {object} account
 * @param {string|undefined} errMsg
 */
function releaseForStreamError(pool, account, errMsg) {
  if (isQuotaMessage(errMsg)) {
    pool.release(account, { quotaExhausted: true });
  } else {
    pool.release(account, { success: false });
  }
}

/**
 * Handle an OpenAI-compatible chat-completions request.
 *
 * Validates the body, converts the OpenAI messages array into upstream
 * text chunks, then serves either a streaming (SSE) or a buffered JSON
 * response. Each attempt acquires an account from `pool`; on retryable
 * failures (see isRetryable) up to MAX_RETRY accounts are tried before a
 * 502 is returned. Accounts are always released back to the pool exactly
 * once, tagged with the outcome.
 *
 * @param {object} body - parsed OpenAI chat-completions request body
 * @param {import('http').ServerResponse} res - response to write to
 * @param {{acquire: Function, release: Function}} pool - account pool
 */
export async function handleChatCompletions(body, res, pool) {
  const requestId = 'chatcmpl-' + crypto.randomBytes(12).toString('hex');

  if (!body.messages || !Array.isArray(body.messages) || body.messages.length === 0) {
    sendError(res, 400, 'messages is required and must be a non-empty array');
    return;
  }

  const text = openaiToText(body.messages, body.tools, body.tool_choice);
  const chunks = splitToChunks(text);
  const model = resolveModel(body.model);
  const clientModel = body.model || 'gpt-4o';
  const stream = body.stream === true;
  const hasTools = body.tools && body.tools.length > 0;
  const isMultiChunk = chunks.length > 1;
  const cost = config.modelCost?.[model] ?? config.defaultModelCost ?? 2;

  if (!text) {
    sendError(res, 400, 'No valid message content found');
    return;
  }

  // Single retry loop for both modes; previously the streaming and
  // non-streaming paths were near-identical duplicated loops.
  let lastError;
  for (let attempt = 1; attempt <= MAX_RETRY; attempt++) {
    let account;
    try {
      account = await pool.acquire();
    } catch (e) {
      sendError(res, 503, 'No available account: ' + e.message);
      return;
    }

    try {
      // The upstream chat needs a title; the first 100 chars of the prompt suffice.
      const title = text.substring(0, 100);
      const ctx = await createContext(account.cookies, model, title);
      if (ctx.cookies) account.cookies = ctx.cookies;

      const result = isMultiChunk
        ? await sendMultiChunkStreaming(account.cookies, ctx.chatId, chunks, model)
        : await sendMessageStreaming(account.cookies, ctx.chatId, chunks[0], model);
      if (result.cookies) account.cookies = result.cookies;

      if (stream) {
        // Peek at the stream so early upstream errors are thrown here
        // (and retried on another account) instead of surfacing after the
        // SSE headers have already been written.
        const probed = await probeStream(result.stream);

        // Mid-transform error callback: classify, release, and null out
        // `account` so the end/error handlers below skip a double release.
        const onStreamError = (errMsg) => {
          releaseForStreamError(pool, account, errMsg);
          account = null;
        };

        if (hasTools) {
          transformToOpenAISSEWithTools(probed, res, clientModel, requestId, onStreamError);
        } else {
          transformToOpenAISSE(probed, res, clientModel, requestId, onStreamError);
        }

        probed.on('end', () => {
          if (account) pool.release(account, { success: true, cost });
        });
        probed.on('error', (err) => {
          if (!account) return;
          releaseForStreamError(pool, account, err?.message);
          account = null;
        });
        return;
      }

      // Non-streaming: buffer the full upstream response, then answer with
      // a single OpenAI chat.completion object.
      const full = await collectFullResponse(result.stream);
      pool.release(account, { success: true, cost });

      const { hasToolCalls, toolCalls, textContent } = hasTools
        ? parseToolCalls(full.text)
        : { hasToolCalls: false, toolCalls: [], textContent: full.text };

      const message = { role: 'assistant' };
      if (hasToolCalls) {
        message.content = textContent || null;
        message.tool_calls = toOpenAIToolCalls(toolCalls);
      } else {
        message.content = full.text;
      }

      res.writeHead(200, {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*',
      });
      res.end(JSON.stringify({
        id: requestId,
        object: 'chat.completion',
        created: Math.floor(Date.now() / 1000),
        model: clientModel,
        choices: [{
          index: 0,
          message,
          finish_reason: hasToolCalls ? 'tool_calls' : 'stop',
        }],
        usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
      }));
      return;
    } catch (e) {
      releaseOnError(pool, account, e);
      lastError = e;
      if (isRetryable(e) && attempt < MAX_RETRY) {
        console.log(`[OpenAI] 请求失败 (${e.statusCode || e.message}), 换号重试 ${attempt + 1}/${MAX_RETRY}`);
        continue;
      }
    }
  }

  // All attempts failed before anything was written to the client.
  if (!res.headersSent) {
    sendError(res, 502, `Upstream error after ${MAX_RETRY} retries: ${lastError?.message}`);
  }
}
|
|
|
/**
 * Write an OpenAI-style JSON error response and end the request.
 *
 * @param {import('http').ServerResponse} res - response to write to
 * @param {number} status - HTTP status code (also echoed as error.code)
 * @param {string} message - human-readable error description
 */
function sendError(res, status, message) {
  const headers = {
    'Content-Type': 'application/json',
    'Access-Control-Allow-Origin': '*',
  };
  const payload = {
    error: { message, type: 'invalid_request_error', code: status },
  };
  res.writeHead(status, headers);
  res.end(JSON.stringify(payload));
}
|
|
|