|
|
const express = require('express')
|
|
|
const axios = require('axios')
|
|
|
const FormData = require('form-data')
|
|
|
const { v4: uuidv4 } = require('uuid')
|
|
|
const { MODEL_MAPPING, MAMMOUTH_API_URL, AUTH_TOKEN, UNLIMITED_MODELS } = require('../config')
|
|
|
const accountManager = require('../lib/manager')
|
|
|
const imageUploader = require('../lib/uploader')
|
|
|
const logger = require('../lib/logger')
|
|
|
const ErrorHandler = require('../lib/errorHandler')
|
|
|
|
|
|
// Express sub-router; mounted by the app under the chat-completions path.
const router = express.Router()
|
|
|
|
|
|
|
|
|
/**
 * Express middleware that validates the client API key against AUTH_TOKEN.
 *
 * Accepts the key either as `Authorization: Bearer <key>` or as a bare
 * `x-api-key: <key>` header. Responds 401 with an OpenAI-style error body
 * when the key is missing or wrong; calls next() on success.
 *
 * Fixes vs. previous version:
 *  - removed `req.headers.Authorization` lookup: Node lowercases incoming
 *    header names, so that branch could never match;
 *  - a bare `x-api-key` value no longer fails the `Bearer ` prefix check.
 */
const authenticate = (req, res, next) => {
  // Only lowercase lookups can match — Node normalizes header names.
  const authHeader = req.headers.authorization || req.headers['x-api-key']

  if (!authHeader) {
    return res.status(401).json({
      error: {
        message: '缺少有效的API密钥',
        type: 'authentication_error',
        code: 'invalid_api_key'
      }
    })
  }

  // Strip an optional "Bearer " prefix so both header styles work.
  const apiKey = authHeader.startsWith('Bearer ')
    ? authHeader.substring(7)
    : authHeader

  if (apiKey !== AUTH_TOKEN) {
    return res.status(401).json({
      error: {
        message: 'API密钥无效',
        type: 'authentication_error',
        code: 'invalid_api_key'
      }
    })
  }

  next()
}
|
|
|
|
|
|
|
|
|
/**
 * Whether the given model is exempt from per-account usage limits.
 *
 * @param {string} model - public (OpenAI-style) model identifier
 * @returns {boolean} true when the model appears in UNLIMITED_MODELS
 */
function isUnlimitedModel(model) {
  return UNLIMITED_MODELS.some((unlimited) => unlimited === model)
}
|
|
|
|
|
|
|
|
|
/**
 * Converts an OpenAI-style chat completion request into the multipart
 * form-data payload expected by the Mammouth API.
 *
 * System messages are folded into a single 'preprompt' field; regular
 * messages become one or more JSON-encoded 'messages' entries. Image parts
 * are uploaded first (base64 or URL); an image that the uploader splits into
 * multiple URLs is treated as a "long image" and emitted as one message per
 * segment, while single-URL images are batched into one message, sorted by
 * their original position.
 *
 * @param {object} openaiRequest - OpenAI-compatible body (`model`, `messages`,
 *   optional `stream`); message content is a string or an array of
 *   `{type: 'text'|'image_url', ...}` parts.
 * @param {string|null} requestId - correlation id for logging; when null,
 *   some structured logging is skipped (console debug logs still run).
 * @returns {Promise<FormData>} the populated form, ready to POST.
 */
async function convertOpenAIToMammouth(openaiRequest, requestId = null) {
  const form = new FormData()

  // Map the public model name onto Mammouth's internal name; unknown models
  // pass through unchanged.
  const requestedModel = openaiRequest.model
  const mammouthModel = MODEL_MAPPING[requestedModel] || openaiRequest.model
  form.append('model', mammouthModel)

  // Both 'stream' and 'streaming' are sent — presumably the upstream API has
  // accepted either name at some point; TODO confirm which one it reads.
  if (openaiRequest.stream === true) {
    form.append('stream', 'true')
    form.append('streaming', 'true')

    if (requestId) {
      console.log(`[请求转换] 添加流式响应参数: stream=true, streaming=true`)
    }
  } else {
    if (requestId) {
      console.log(`[请求转换] 非流式请求,不添加流式参数`)
    }
  }

  // Split system messages (joined into 'preprompt') from regular messages.
  let systemMessages = []
  let regularMessages = []

  openaiRequest.messages.forEach(message => {
    if (message.role === 'system') {
      systemMessages.push(message.content)
    } else {
      regularMessages.push(message)
    }
  })

  let preprompt = systemMessages.join('\n\n')

  // "hasLongImages" is really "has any image" — long-image instructions are
  // added preemptively because segmentation is only known after upload.
  const hasLongImages = regularMessages.some(message =>
    Array.isArray(message.content) &&
    message.content.some(part => part.type === 'image_url')
  )

  if (hasLongImages) {
    const longImagePrompt = `

重要说明:本次对话可能包含长图片段。当你看到标记为"[长图片段 X/Y]"的图片时:
1. 这些是同一张长图的不同部分,按顺序排列
2. 请分析每个片段的内容,记住之前片段的信息
3. 在处理最后一个片段时,请提供基于所有片段的完整分析
4. 确保回答涵盖整张长图的所有重要内容,不要遗漏任何部分`

    preprompt = preprompt ? `${preprompt}${longImagePrompt}` : longImagePrompt.trim()
  }

  form.append('preprompt', preprompt)

  // First pass: count images purely for logging/diagnostics.
  let totalImageCount = 0

  regularMessages.forEach((message, index) => {
    console.log(`[调试] 消息${index}内容类型:`, typeof message.content, Array.isArray(message.content) ? '数组' : '非数组')
    if (Array.isArray(message.content)) {
      const imageCount = message.content.filter(part => part.type === 'image_url').length
      totalImageCount += imageCount
      console.log(`[调试] 消息${index}包含${imageCount}张图片`)
      message.content.forEach((part, partIndex) => {
        console.log(`[调试] 消息${index}部分${partIndex}类型:`, part.type)
      })
    }
  })

  console.log(`[调试] 总图片数量: ${totalImageCount}`)

  if (requestId && totalImageCount > 0) {
    logger.logImageProcessingStart(requestId, totalImageCount)
  }

  // Global running index across ALL messages — used for log correlation and
  // as the per-image index handed to the uploader.
  let currentImageIndex = 0

  // Second pass: build the outgoing 'messages' entries, message by message.
  // Images are uploaded sequentially on purpose to preserve ordering.
  for (const message of regularMessages) {
    let content = message.content
    let processedMessages = []

    if (Array.isArray(message.content)) {
      // Separate text parts from image parts; all text in a message is
      // concatenated into one string.
      const textParts = []
      const imageParts = []

      for (const part of message.content) {
        if (part.type === 'text') {
          textParts.push(part.text)
        } else if (part.type === 'image_url') {
          imageParts.push(part)
        }
      }

      const combinedText = textParts.join('\n')

      // longImageSegments is declared but never used — left as-is here
      // (doc-only change); candidate for removal in a code cleanup.
      const longImageSegments = []
      const normalImageResults = []
      // Set once the original user text has been attached to a long-image
      // segment, so it is not duplicated on the batched normal-image message.
      let hasProcessedText = false
      let normalImageCount = 0

      console.log(`[图片处理开始] 共${imageParts.length}张图片待处理,将严格按顺序处理`)

      for (let imagePartIndex = 0; imagePartIndex < imageParts.length; imagePartIndex++) {
        const imagePart = imageParts[imagePartIndex]
        console.log(`[图片处理] 开始处理第${imagePartIndex + 1}张图片 (消息位置: ${imagePartIndex}, 全局索引: ${currentImageIndex + 1})`)

        try {
          // image_url may be a bare string or the OpenAI {url: ...} object.
          let imageUrl = imagePart.image_url
          if (typeof imageUrl === 'object' && imageUrl.url) {
            imageUrl = imageUrl.url
          }

          console.log(`[图片处理] 图片${currentImageIndex + 1}类型: ${imageUrl.startsWith('data:image') ? 'Base64' : 'URL'}`)

          // The "Smart" uploaders may split a tall image into several hosted
          // URLs; a multi-URL result marks this as a long image.
          let uploadedUrls = []
          if (imageUrl.startsWith('data:image')) {
            uploadedUrls = await imageUploader.uploadFromBase64Smart(
              imageUrl,
              null,
              requestId,
              currentImageIndex,
              false
            )
          } else {
            uploadedUrls = await imageUploader.uploadFromUrlSmart(
              imageUrl,
              null,
              requestId,
              currentImageIndex,
              false
            )
          }

          console.log(`[图片处理] 图片${currentImageIndex + 1}上传完成,获得${uploadedUrls.length}个URL,位置索引: ${imagePartIndex}`)

          if (uploadedUrls.length > 1) {
            // Long image: one outgoing message per segment, each carrying a
            // positional marker so the model can stitch the parts together.
            console.log(`[长图处理] 图片${currentImageIndex + 1}被切割为${uploadedUrls.length}个片段,将按顺序发送`)

            if (requestId) {
              logger.logMessageSegmentation(requestId, currentImageIndex, uploadedUrls.length)
            }

            uploadedUrls.forEach((url, segmentIndex) => {
              let segmentText = ''

              if (segmentIndex === 0) {
                // First segment carries the user's original text (or a
                // default prompt) plus the segmentation notice.
                const originalText = combinedText || '请分析这张长图的内容'
                segmentText = `${originalText}

注意:这是一张长图,已被切割为${uploadedUrls.length}个片段。请分析每个片段的内容,并在最后一个片段时提供完整的总结。

[长图片段 ${segmentIndex + 1}/${uploadedUrls.length}] - 这是长图的开始部分`
                hasProcessedText = true
              } else if (segmentIndex === uploadedUrls.length - 1) {
                // Last segment asks for the consolidated analysis.
                segmentText = `[长图片段 ${segmentIndex + 1}/${uploadedUrls.length}] - 这是长图的结束部分

请基于所有${uploadedUrls.length}个片段的内容,提供这张长图的完整分析和总结。`
              } else {
                segmentText = `[长图片段 ${segmentIndex + 1}/${uploadedUrls.length}] - 这是长图的中间部分,请继续分析内容`
              }

              // imagesData/documentsData keys follow the Mammouth message
              // schema — presumably; verify against the upstream API docs.
              processedMessages.push({
                content: segmentText,
                imagesData: [url],
                documentsData: []
              })

              console.log(`[消息生成] 长图片段${segmentIndex + 1}: "${segmentText.substring(0, 80)}..."`)
            })
          } else {
            // Normal (single-URL) image: collect for the batched message,
            // remembering its position so ordering can be restored.
            const imageResult = {
              index: imagePartIndex,
              urls: uploadedUrls,
              originalIndex: currentImageIndex,
              processOrder: normalImageCount
            }

            normalImageResults.push(imageResult)
            normalImageCount++

            console.log(`[图片收集] 普通图片${currentImageIndex + 1}已收集 (消息位置: ${imagePartIndex}, 处理顺序: ${normalImageCount}),当前共${normalImageCount}张普通图片`)
          }

          currentImageIndex++
        } catch (error) {
          // Upload failures do not abort the request: an error placeholder is
          // kept so positions stay stable and the user is told which image
          // failed in the final message text.
          if (requestId) {
            logger.logError(requestId, 'IMAGE_PROCESSING_ERROR', error.message, {
              imageIndex: currentImageIndex,
              imagePartIndex: imagePartIndex,
              imageUrl: typeof imagePart.image_url === 'string' ? imagePart.image_url.substring(0, 100) : 'object'
            })
          }
          console.error(`图片处理错误 (位置${imagePartIndex}, 全局${currentImageIndex + 1}):`, error.message)

          const errorPlaceholder = {
            index: imagePartIndex,
            urls: [],
            originalIndex: currentImageIndex,
            processOrder: normalImageCount,
            error: true,
            errorMessage: error.message
          }

          normalImageResults.push(errorPlaceholder)
          normalImageCount++

          console.log(`[图片错误] 图片${currentImageIndex + 1}处理失败,已添加错误占位符`)
          currentImageIndex++
        }
      }

      console.log(`[图片处理完成] 共处理${imageParts.length}张图片,成功收集${normalImageResults.length}张普通图片`)

      if (normalImageResults.length > 0) {
        console.log(`[排序前验证] 收集到${normalImageResults.length}张普通图片`)
        normalImageResults.forEach((result, idx) => {
          console.log(` 图片${idx + 1}: 消息位置=${result.index}, 全局索引=${result.originalIndex}, 处理顺序=${result.processOrder}`)
        })

        // Restore the order the images appeared in within the message.
        // (Uploads run sequentially, so this sort is effectively a no-op
        // safety net.)
        console.log(`[开始排序] 严格按消息中的位置索引排序...`)
        normalImageResults.sort((a, b) => {
          const diff = a.index - b.index
          console.log(`[排序比较] 位置${a.index} vs 位置${b.index} = ${diff}`)
          return diff
        })

        console.log(`[排序后验证] 最终图片顺序:`)
        normalImageResults.forEach((result, idx) => {
          console.log(` 第${idx + 1}位: 消息位置=${result.index}, 全局索引=${result.originalIndex}, 处理顺序=${result.processOrder}`)
        })

        // Flatten successful uploads into one URL list; failed ones only
        // contribute a human-readable notice.
        const orderedImageUrls = []
        const errorMessages = []

        normalImageResults.forEach((result, idx) => {
          if (result.error) {
            console.log(`[URL提取] 第${idx + 1}个结果,位置${result.index},图片处理失败: ${result.errorMessage}`)
            errorMessages.push(`图片${result.originalIndex + 1}处理失败`)
          } else {
            console.log(`[URL提取] 第${idx + 1}个结果,位置${result.index},添加${result.urls.length}个URL`)
            orderedImageUrls.push(...result.urls)
          }
        })

        // The original text is only attached here if it was not already
        // consumed by a long-image first segment.
        const includeOriginalText = !hasProcessedText && combinedText
        let normalImageText = includeOriginalText ? combinedText : ''

        const successCount = orderedImageUrls.length
        const errorCount = errorMessages.length

        // Append a status line describing how many images made it through.
        if (successCount > 0 && errorCount > 0) {
          const statusText = `[包含 ${successCount} 张图片,${errorCount} 张图片处理失败]`
          normalImageText = normalImageText ? `${normalImageText}\n\n${statusText}` : statusText
        } else if (successCount > 0) {
          const statusText = `[包含 ${successCount} 张图片]`
          normalImageText = normalImageText ? `${normalImageText}\n\n${statusText}` : statusText
        } else if (errorCount > 0) {
          const statusText = `[${errorCount} 张图片处理失败]`
          normalImageText = normalImageText ? `${normalImageText}\n\n${statusText}` : statusText
        }

        // '.' placeholder: the upstream appears to reject empty content —
        // TODO confirm.
        processedMessages.push({
          content: normalImageText || '.',
          imagesData: orderedImageUrls,
          documentsData: []
        })

        console.log(`[消息生成] 普通图片批量消息: ${successCount}张成功,${errorCount}张失败,严格按顺序排列`)
        console.log(`[最终顺序验证] 图片顺序: ${normalImageResults.map(r => `位置${r.index}(图片${r.originalIndex + 1}${r.error ? '-失败' : ''})`).join(' -> ')}`)
      }

      // Pure-text multipart message (no images at all).
      if (imageParts.length === 0) {
        processedMessages.push({
          content: combinedText || '.',
          imagesData: [],
          documentsData: []
        })
      }
    } else {
      // Plain string content — forwarded as a single message.
      processedMessages.push({
        content: content || '.',
        imagesData: [],
        documentsData: []
      })
    }

    // Each processed message becomes its own 'messages' form field.
    processedMessages.forEach(msg => {
      form.append('messages', JSON.stringify(msg))
    })
  }

  // Heuristic message count: counts occurrences of the literal string
  // 'messages' in the serialized form body — approximate, not an exact field
  // count (any 'messages' substring in content inflates it).
  const totalMessages = form.getBuffer().toString().split('messages').length - 1
  if (requestId && totalMessages > 4) {
    console.warn(`[消息转换] 警告:消息数量较多(${totalMessages}个),可能影响流式响应性能`)
    logger.logError(requestId, 'HIGH_MESSAGE_COUNT', `消息数量过多: ${totalMessages}个`, {
      totalMessages,
      recommendation: '考虑减少长图片段数量或使用非流式模式'
    })
  }

  return form
}
|
|
|
|
|
|
|
|
|
/**
 * Bridges a Mammouth plain-text response stream onto an OpenAI-style SSE
 * stream: emits `chat.completion.chunk` events and terminates with
 * `data: [DONE]`.
 *
 * The caller must have already set the SSE response headers. Upstream chunks
 * are forwarded verbatim (trimmed); there is no SSE parsing because the
 * upstream streams raw text, not event frames.
 *
 * NOTE(review): the no-chunk / no-content fallback logic in the 'end' handler
 * is nested inside `if (logRequestId)`, so requests without a log id never
 * get any fallback content. Looks unintentional — confirm before relying on
 * it. (Doc-only change; behavior left as-is.)
 *
 * @param {object} axiosResponse - axios response with responseType 'stream'
 *   (`data` is a readable stream; the fallback path also probes it as JSON).
 * @param {object} res - Express response to write SSE frames to.
 * @param {string} requestedModel - model name echoed back in each chunk.
 * @param {string|null} logRequestId - correlation id; gates verbose logging
 *   (and, per the note above, the fallback paths).
 */
async function handleStreamResponse(axiosResponse, res, requestedModel, logRequestId = null) {
  const responseId = uuidv4()
  const timestamp = Math.floor(Date.now() / 1000)
  const decoder = new TextDecoder()

  if (logRequestId) {
    console.log(`[流式响应] 开始处理流式响应,请求ID: ${logRequestId}`)
    console.log(`[流式响应] 响应状态码: ${axiosResponse.status}`)
    console.log(`[流式响应] 响应头: ${JSON.stringify(axiosResponse.headers)}`)
    console.log(`[流式响应] 数据流类型: ${typeof axiosResponse.data}`)
  }

  // Initial chunk carries only the assistant role, per OpenAI convention.
  const initialData = {
    id: `chatcmpl-${responseId}`,
    object: "chat.completion.chunk",
    created: timestamp,
    model: requestedModel,
    choices: [{
      index: 0,
      delta: { role: "assistant", content: "" },
      finish_reason: null
    }]
  }

  const initialMessage = `data: ${JSON.stringify(initialData)}\n\n`
  res.write(initialMessage)

  if (logRequestId) {
    console.log(`[流式响应] 已发送初始角色数据`)
  }

  let totalChunks = 0
  let totalContentLength = 0

  // Forward each upstream data chunk as one OpenAI delta chunk.
  axiosResponse.data.on('data', (chunk) => {
    totalChunks++

    try {
      // stream:true keeps multi-byte UTF-8 sequences intact across chunks.
      const chunkStr = decoder.decode(chunk, { stream: true })

      if (logRequestId) {
        console.log(`[流式响应] 收到数据块 ${totalChunks},原始长度: ${chunk.length},解码后长度: ${chunkStr.length}`)
        console.log(`[流式响应] 数据块内容预览: "${chunkStr.substring(0, 200)}${chunkStr.length > 200 ? '...' : ''}"`)
      }

      // NOTE(review): trim() strips leading/trailing whitespace from every
      // chunk, which can eat significant spaces at chunk boundaries.
      const textToSend = chunkStr.trim()
      if (textToSend && textToSend.length > 0) {
        totalContentLength += textToSend.length

        const responseData = {
          id: `chatcmpl-${responseId}`,
          object: "chat.completion.chunk",
          created: timestamp,
          model: requestedModel,
          choices: [{
            index: 0,
            delta: { content: textToSend },
            finish_reason: null
          }]
        }

        const responseMessage = `data: ${JSON.stringify(responseData)}\n\n`
        res.write(responseMessage)

        if (logRequestId) {
          console.log(`[流式响应] 已发送内容块 ${totalChunks},内容长度: ${textToSend.length}`)
        }
      } else {
        if (logRequestId) {
          console.log(`[流式响应] 数据块 ${totalChunks} 为空或无效,跳过发送`)
        }
      }
    } catch (decodeError) {
      // Decode failures drop the chunk but keep the stream alive.
      if (logRequestId) {
        console.error(`[流式响应] 数据块 ${totalChunks} 解码失败: ${decodeError.message}`)
        logger.logError(logRequestId, 'STREAM_DECODE_ERROR', decodeError.message, {
          chunkLength: chunk.length,
          chunkNumber: totalChunks
        })
      }
    }
  })

  // End of upstream stream: run fallbacks (when logRequestId is set — see
  // header note), then always send the stop chunk and [DONE].
  axiosResponse.data.on('end', async () => {
    if (logRequestId) {
      console.log(`[流式响应] 数据流结束,总共处理 ${totalChunks} 个数据块,总内容长度: ${totalContentLength}`)

      // Fallback 1: zero chunks usually means the API answered with a plain
      // JSON body instead of a stream — re-emit it as simulated chunks.
      if (totalChunks === 0) {
        console.warn(`[流式响应] 警告:未收到任何数据块,可能API返回了非流式响应`)

        try {
          if (axiosResponse.data && typeof axiosResponse.data === 'object' && axiosResponse.data.choices) {
            console.log(`[流式回退] 检测到非流式JSON响应,尝试转换为流式格式`)
            const content = axiosResponse.data.choices[0]?.message?.content || ''

            if (content) {
              // Re-chunk the full text in 20-char slices, 50 ms apart, to
              // imitate a live stream for the client.
              const chunkSize = 20
              for (let i = 0; i < content.length; i += chunkSize) {
                const chunk = content.substring(i, i + chunkSize)

                const chunkData = {
                  id: `chatcmpl-${responseId}`,
                  object: "chat.completion.chunk",
                  created: timestamp,
                  model: requestedModel,
                  choices: [{
                    index: 0,
                    delta: { content: chunk },
                    finish_reason: null
                  }]
                }

                const chunkMessage = `data: ${JSON.stringify(chunkData)}\n\n`
                res.write(chunkMessage)

                await new Promise(resolve => setTimeout(resolve, 50))
              }

              console.log(`[流式回退] 成功转换非流式响应为流式格式,内容长度: ${content.length}`)
              totalContentLength = content.length
            }
          }
        } catch (parseError) {
          console.error(`[流式回退] 解析非流式响应失败: ${parseError.message}`)
        }
      }

      // Fallback 2: still no content at all — stream a canned apology so the
      // client does not receive an empty completion.
      if (totalContentLength === 0) {
        console.warn(`[流式响应] 警告:未收到任何内容,使用默认回退机制`)
        logger.logError(logRequestId, 'STREAM_NO_CONTENT', '流式响应未收到任何内容,尝试回退', {
          totalChunks,
          model: requestedModel
        })

        console.log(`[流式回退] 使用默认消息回退`)

        const fallbackMessage = "抱歉,图片处理完成但响应出现问题。\n\n这可能是由于长图切割导致的流式响应问题。建议:\n1. 重新发送请求\n2. 使用非流式模式\n3. 或尝试上传较短的图片"

        const chunkSize = 20
        for (let i = 0; i < fallbackMessage.length; i += chunkSize) {
          const chunk = fallbackMessage.substring(i, i + chunkSize)

          const chunkData = {
            id: `chatcmpl-${responseId}`,
            object: "chat.completion.chunk",
            created: timestamp,
            model: requestedModel,
            choices: [{
              index: 0,
              delta: { content: chunk },
              finish_reason: null
            }]
          }

          const chunkMessage = `data: ${JSON.stringify(chunkData)}\n\n`
          res.write(chunkMessage)

          await new Promise(resolve => setTimeout(resolve, 50))
        }

        console.log(`[流式回退] 默认消息发送完成`)
        totalContentLength = fallbackMessage.length
      }
    }

    // Final chunk: empty delta with finish_reason "stop", then the SSE
    // terminator expected by OpenAI clients.
    const endData = {
      id: `chatcmpl-${responseId}`,
      object: "chat.completion.chunk",
      created: timestamp,
      model: requestedModel,
      choices: [{
        index: 0,
        delta: {},
        finish_reason: "stop"
      }]
    }

    const endMessage = `data: ${JSON.stringify(endData)}\n\n`
    res.write(endMessage)
    res.write('data: [DONE]\n\n')
    res.end()

    if (logRequestId) {
      console.log(`[流式响应] 已发送完成信号和结束标记`)
    }
  })

  // Upstream stream error: log and terminate the response.
  // NOTE(review): res.status(500) after data has been written has no effect
  // on already-sent headers; this only ends the connection.
  axiosResponse.data.on('error', (err) => {
    if (logRequestId) {
      logger.logError(logRequestId, 'STREAM_ERROR', err.message, {
        model: requestedModel,
        totalChunks,
        totalContentLength
      })
      console.log(`[流式响应] 流处理错误: ${err.message},已处理 ${totalChunks} 个数据块`)
    }
    console.error('流数据处理错误:', err)
    res.status(500).end()
  })
}
|
|
|
|
|
|
|
|
|
/**
 * Sends a Mammouth reply back to the client as a single OpenAI-style
 * `chat.completion` JSON object.
 *
 * Token usage is not reported by the upstream, so usage counters are zero.
 *
 * @param {object} axiosResponse - axios response; text is read from
 *   `data.content`, with the raw `data` as a last-resort fallback.
 * @param {object} res - Express response.
 * @param {string} requestedModel - model name echoed back to the client.
 * @param {string|null} logRequestId - accepted for signature parity with the
 *   stream handler; unused here.
 */
function handleNonStreamResponse(axiosResponse, res, requestedModel, logRequestId = null) {
  let content = axiosResponse.data.content;

  // The upstream sometimes returns a JSON-encoded string ("...") —
  // unwrap exactly one layer of surrounding double quotes when present.
  const looksQuoted =
    typeof content === 'string' &&
    content.startsWith('"') &&
    content.endsWith('"');
  if (looksQuoted) {
    content = content.slice(1, -1);
  }

  const payload = {
    id: `chatcmpl-${uuidv4()}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1000),
    model: requestedModel,
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          // Fall back to the raw response body when no usable content field
          // exists (or it unwrapped to an empty string).
          content: content || axiosResponse.data,
        },
        finish_reason: "stop",
      },
    ],
    usage: {
      prompt_tokens: 0,
      completion_tokens: 0,
      total_tokens: 0,
    },
  };

  res.json(payload);
}
|
|
|
|
|
|
|
|
|
/**
 * Marks the failed account cookie as unavailable, swaps in the next
 * available cookie, and replays the identical upstream request once.
 *
 * Note: mutates `config.headers.Cookie` in place — the caller's config object
 * keeps the replacement session afterwards.
 *
 * @param {object} req - incoming Express request (unused; kept for parity).
 * @param {object} res - Express response the retry result is written to.
 * @param {object} config - axios request config to replay.
 * @param {string} currentCookie - cookie that just failed (gets quarantined).
 * @param {string} requestedModel - model name echoed back to the client.
 * @param {boolean} isStreamRequest - selects the stream vs JSON handler.
 * @returns {Promise<boolean>} true when the retry succeeded and a response
 *   was written; false when the retry failed (caller must respond itself).
 */
async function retryWithNewCookie(req, res, config, currentCookie, requestedModel, isStreamRequest) {
  try {
    // Quarantine the account that just hit its limit.
    accountManager.markAsUnavailable(currentCookie)

    const newCookie = accountManager.getNextAvailableCookie()

    // Replay the same request under the replacement session.
    config.headers.Cookie = `auth_session=${newCookie}`

    const response = await axios(config)

    if (isStreamRequest) {
      handleStreamResponse(response, res, requestedModel)
    } else {
      handleNonStreamResponse(response, res, requestedModel)
    }

    return true
  } catch (error) {
    // Previously swallowed silently; surface the failure for diagnosis while
    // keeping the boolean contract (caller falls back to its own error path).
    console.error(`Cookie retry failed: ${error.message}`)
    return false
  }
}
|
|
|
|
|
|
|
|
|
// POST /completions — OpenAI-compatible chat completions endpoint.
//
// Flow: authenticate -> convert the OpenAI body to a Mammouth multipart form
// -> pick an account cookie -> forward via axios (stream or JSON) -> relay
// the response in OpenAI format. 403 from upstream is treated as a per-
// account usage limit and triggers a one-shot retry on another account;
// other upstream errors are delegated to ErrorHandler.
router.post('/completions', authenticate, async (req, res) => {
  let requestId = null
  const startTime = Date.now()

  try {
    const openaiRequest = req.body
    const isStreamRequest = openaiRequest.stream === true
    const requestedModel = openaiRequest.model

    requestId = logger.logRequestStart(
      req.method,
      req.originalUrl,
      req.headers,
      openaiRequest
    )

    // SSE headers must go out before any body bytes are written.
    if (isStreamRequest) {
      res.setHeader('Content-Type', 'text/event-stream')
      res.setHeader('Cache-Control', 'no-cache')
      res.setHeader('Connection', 'keep-alive')
    }

    // Build the upstream multipart payload (uploads images as a side effect).
    const form = await convertOpenAIToMammouth(openaiRequest, requestId)

    // Unlimited models may reuse any account; limited models rotate through
    // accounts that still have quota.
    const cookieValue = isUnlimitedModel(requestedModel)
      ? accountManager.getAnyCookie()
      : accountManager.getNextAvailableCookie()

    const mammouthModel = MODEL_MAPPING[requestedModel] || requestedModel
    logger.logModelCallStart(requestId, requestedModel, mammouthModel)

    const config = {
      method: 'post',
      url: MAMMOUTH_API_URL,
      headers: {
        ...form.getHeaders(),
        'Cookie': `auth_session=${cookieValue}`,
        'origin': 'https://mammouth.ai'
      },
      data: form,
      responseType: isStreamRequest ? 'stream' : 'json',
      // Stream-only knobs: bounded wait and no redirect following (redirects
      // would break the stream).
      ...(isStreamRequest && {
        timeout: 60000,
        maxRedirects: 0
      })
    }

    if (requestId && isStreamRequest) {
      console.log(`[请求配置] 流式请求配置: responseType=stream, timeout=60s`)
    }

    try {
      const modelCallStartTime = Date.now()
      const response = await axios(config)
      const modelCallDuration = Date.now() - modelCallStartTime

      logger.logModelCallEnd(requestId, true, null, modelCallDuration)

      if (isStreamRequest) {
        handleStreamResponse(response, res, requestedModel, requestId)
      } else {
        handleNonStreamResponse(response, res, requestedModel, requestId)
      }

      // NOTE(review): for streams this logs "end" while chunks may still be
      // flowing — handleStreamResponse is not awaited.
      logger.logRequestEnd(requestId, 200, {
        responseType: isStreamRequest ? 'stream' : 'json',
        totalDuration: Date.now() - startTime
      })

    } catch (error) {
      // Upstream call failed. Duration here is measured from request start,
      // not model-call start (slight over-count; kept as-is).
      const modelCallDuration = Date.now() - startTime
      logger.logModelCallEnd(requestId, false, error.message, modelCallDuration)

      const errorStatus = error.response?.status || 'unknown'
      const errorMessage = error.response?.data?.message || error.message || 'Unknown error'

      logger.logError(requestId, 'MODEL_CALL_ERROR', errorMessage, {
        status: errorStatus,
        model: requestedModel,
        isStream: isStreamRequest,
        cookieUsed: cookieValue?.substring(0, 8) + '...'
      })

      console.error(`API转发错误: [${errorStatus}] ${errorMessage}`)

      // 403 is interpreted as "this account hit its usage limit".
      if (error.response && error.response.status === 403) {
        console.log(`账号 ${cookieValue.substring(0, 5)}... 使用模型 ${requestedModel} 已达到使用限制`)

        if (isUnlimitedModel(requestedModel)) {
          // Unlimited model: retry inline with any other account.
          accountManager.markAsUnavailable(cookieValue)

          const newCookie = accountManager.getAnyCookie()
          console.log(`尝试使用不受限模型的另一个账号: ${newCookie.substring(0, 5)}...`)

          config.headers.Cookie = `auth_session=${newCookie}`

          try {
            const response = await axios(config)

            if (isStreamRequest) {
              handleStreamResponse(response, res, requestedModel)
            } else {
              handleNonStreamResponse(response, res, requestedModel)
            }

            return
          } catch (retryError) {
            console.error(`无限制模型二次尝试也失败: ${retryError.message}`)
          }
        } else {
          // Limited model: delegate to the shared cookie-rotation retry.
          console.log(`尝试使用新账号...`)
          const cookieRetrySuccess = await retryWithNewCookie(
            req, res, config, cookieValue, requestedModel, isStreamRequest
          )

          if (cookieRetrySuccess) return
        }

        // All retries exhausted: answer with a well-formed completion whose
        // content is the limit message, so OpenAI clients do not choke.
        // (This errorMessage intentionally shadows the outer one.)
        const errorMessage =
          error.response.data?.message ||
          error.response.data?.statusMessage ||
          '使用限制:所有账号已临时达到使用限制。请稍后再试。'

        // Fresh id for the synthetic response — shadows the logger requestId.
        const requestId = uuidv4()
        const timestamp = Math.floor(Date.now() / 1000)

        if (isStreamRequest) {
          // Emit a minimal, valid SSE sequence: role chunk, content chunk,
          // stop chunk, [DONE].
          res.write(`data: ${JSON.stringify({
            id: `chatcmpl-${requestId}`,
            object: "chat.completion.chunk",
            created: timestamp,
            model: requestedModel,
            choices: [{
              index: 0,
              delta: { role: "assistant", content: "" },
              finish_reason: null
            }]
          })}\n\n`)

          res.write(`data: ${JSON.stringify({
            id: `chatcmpl-${requestId}`,
            object: "chat.completion.chunk",
            created: timestamp,
            model: requestedModel,
            choices: [{
              index: 0,
              delta: { content: errorMessage },
              finish_reason: null
            }]
          })}\n\n`)

          res.write(`data: ${JSON.stringify({
            id: `chatcmpl-${requestId}`,
            object: "chat.completion.chunk",
            created: timestamp,
            model: requestedModel,
            choices: [{
              index: 0,
              delta: {},
              finish_reason: "stop"
            }]
          })}\n\n`)

          res.write('data: [DONE]\n\n')
          res.end()
        } else {
          res.json({
            id: `chatcmpl-${requestId}`,
            object: "chat.completion",
            created: timestamp,
            model: requestedModel,
            choices: [{
              index: 0,
              message: {
                role: "assistant",
                content: errorMessage
              },
              finish_reason: "stop"
            }],
            usage: {
              prompt_tokens: 0,
              completion_tokens: 0,
              total_tokens: 0
            }
          })
        }
      } else {
        // Non-403 upstream failure: log and hand off to the shared handler.
        logger.logRequestEnd(requestId, 500, {
          error: error.message,
          totalDuration: Date.now() - startTime
        })

        ErrorHandler.handleModelError(res, error, requestId, requestedModel, isStreamRequest)
      }
    }
  } catch (error) {
    // Failures before/around the upstream call (body parsing, conversion,
    // cookie selection, ...).
    if (requestId) {
      logger.logRequestEnd(requestId, 500, {
        error: error.message,
        totalDuration: Date.now() - startTime
      })
    }

    ErrorHandler.handleApiError(res, error, requestId, {
      totalDuration: Date.now() - startTime,
      endpoint: '/v1/chat/completions'
    })
  }
})
|
|
|
|
|
|
|
|
|
// Expose the router for mounting by the application entry point.
module.exports = router