// Minimal OpenAI-compatible proxy server: clients authenticate with a single
// proxy key, and requests are forwarded to an upstream LLM gateway using a
// round-robin pool of upstream API keys.
import express from 'express';
import fetch from 'node-fetch';
import dotenv from 'dotenv';

dotenv.config();

// Configuration
const PORT = process.env.PORT || 3000;
const PROXY_AUTH_KEY = process.env.PROXY_AUTH_KEY;
const UPSTREAM_URL = process.env.UPSTREAM_URL || 'https://llm-gateway.assemblyai.com/v1/chat/completions';

// Upstream keys are supplied as a single semicolon-separated string.
const rawKeys = process.env.UPSTREAM_KEYS || '';
const UPSTREAM_KEYS = rawKeys.split(';').map(k => k.trim()).filter(k => k.length > 0);
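
// Example .env layout (illustrative values only, not real keys):
//   PROXY_AUTH_KEY=my-proxy-secret
//   UPSTREAM_KEYS=upstream-key-1;upstream-key-2;upstream-key-3
//   PORT=3000
//   UPSTREAM_URL=https://llm-gateway.assemblyai.com/v1/chat/completions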

// Fail fast if required configuration is missing.
if (!PROXY_AUTH_KEY) {
  console.error('Error: PROXY_AUTH_KEY must be set in the environment');
  process.exit(1);
}

if (UPSTREAM_KEYS.length === 0) {
  console.error('Error: UPSTREAM_KEYS must contain at least one key');
  process.exit(1);
}

// Round-robin selection over the configured upstream keys.
let keyIndex = 0;

const getNextKey = () => {
  const key = UPSTREAM_KEYS[keyIndex];
  keyIndex = (keyIndex + 1) % UPSTREAM_KEYS.length;
  return key;
};

const app = express();

// Accept large request bodies; long conversations can exceed the default limit.
app.use(express.json({ limit: '50mb' }));

// Reject any request that does not carry the proxy's own Bearer token.
const authMiddleware = (req, res, next) => {
  const authHeader = req.headers['authorization'];
  if (!authHeader) {
    return res.status(401).json({ error: 'Missing Authorization header' });
  }

  const token = authHeader.replace('Bearer ', '').trim();
  if (token !== PROXY_AUTH_KEY) {
    return res.status(403).json({ error: 'Invalid Proxy Authorization Key' });
  }

  next();
};

// Normalize incoming OpenAI-style messages for the upstream gateway:
// - remap the 'system' role to 'user'
// - flatten multi-part content arrays down to plain text
// - merge consecutive messages that share the same role
const processMessages = (messages) => {
  if (!Array.isArray(messages)) return [];

  const processedMessages = [];
  let currentMessage = null;

  for (const msg of messages) {
    // The upstream endpoint presumably does not accept the 'system' role.
    const role = msg.role === 'system' ? 'user' : msg.role;

    // Extract plain text, whether content is a string or an array of parts.
    let textContent = '';
    if (typeof msg.content === 'string') {
      textContent = msg.content;
    } else if (Array.isArray(msg.content)) {
      textContent = msg.content
        .filter(item => item.type === 'text')
        .map(item => item.text)
        .join('\n');
    }

    // Skip messages with no usable text (e.g. image-only parts).
    if (!textContent) continue;

    if (currentMessage && currentMessage.role === role) {
      // Same role as the previous message: append to its buffer.
      currentMessage.textBuffer += '\n' + textContent;
    } else {
      // Role changed: flush the buffered message and start a new one.
      if (currentMessage) {
        processedMessages.push({
          role: currentMessage.role,
          content: currentMessage.textBuffer
        });
      }
      currentMessage = {
        role: role,
        textBuffer: textContent
      };
    }
  }

  // Flush the final buffered message.
  if (currentMessage) {
    processedMessages.push({
      role: currentMessage.role,
      content: currentMessage.textBuffer
    });
  }

  return processedMessages;
};
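
// Illustrative example (hypothetical input): processMessages turns
//   [{ role: 'system', content: 'Be terse.' },
//    { role: 'user', content: [{ type: 'text', text: 'Hi' }] }]
// into a single merged message:
//   [{ role: 'user', content: 'Be terse.\nHi' }]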

// OpenAI-compatible chat completions endpoint: normalizes the messages,
// picks the next upstream key, forwards the request, and relays the
// response (streamed when stream=true).
app.post('/v1/chat/completions', authMiddleware, async (req, res) => {
  try {
    const { model, messages, stream = false, ...restBody } = req.body;

    const newMessages = processMessages(messages);

    // Record the index before getNextKey() advances it, for logging.
    const usedKeyIndex = keyIndex;
    const currentKey = getNextKey();
    console.log(`[Request] Model: ${model}, Using Key Index: ${usedKeyIndex}`);

    const upstreamBody = {
      model: model,
      messages: newMessages,
      stream: stream,
      ...restBody
    };

    const response = await fetch(UPSTREAM_URL, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${currentKey}`
      },
      body: JSON.stringify(upstreamBody)
    });

    if (!response.ok) {
      const errText = await response.text();
      console.error('[Upstream Error]', response.status, errText);
      return res.status(response.status).send(errText);
    }

    if (stream) {
      // Relay the upstream SSE stream to the client as it arrives.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      if (response.body) {
        for await (const chunk of response.body) {
          res.write(chunk);
        }
      }
      res.end();
    } else {
      const data = await response.json();
      res.json(data);
    }

  } catch (error) {
    console.error('[Server Error]', error);
    res.status(500).json({ error: 'Internal Server Error', details: error.message });
  }
});
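
// Example client call (illustrative values; adjust host, key, and model):
//   curl http://localhost:3000/v1/chat/completions \
//     -H "Authorization: Bearer $PROXY_AUTH_KEY" \
//     -H "Content-Type: application/json" \
//     -d '{"model": "gpt-5-mini", "messages": [{"role": "user", "content": "Hello"}]}'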

// Static model catalog served to clients. Every entry shares the same shape,
// so the list is built from a compact table of per-model limits.
const MODEL_SPECS = [
  { id: 'claude-3-haiku-20240307',    context_length: 200000,  max_completion_tokens: 4096 },
  { id: 'claude-3-5-haiku-20241022',  context_length: 200000,  max_completion_tokens: 8192 },
  { id: 'claude-sonnet-4-20250514',   context_length: 200000,  max_completion_tokens: 64000 },
  { id: 'claude-sonnet-4-5-20250929', context_length: 200000,  max_completion_tokens: 64000 },
  { id: 'claude-haiku-4-5-20251001',  context_length: 200000,  max_completion_tokens: 64000 },
  { id: 'claude-opus-4-20250514',     context_length: 200000,  max_completion_tokens: 32768 },
  { id: 'gpt-oss-20b',                context_length: 131072,  max_completion_tokens: 131072 },
  { id: 'gpt-oss-120b',               context_length: 131072,  max_completion_tokens: 131072 },
  { id: 'gpt-5',                      context_length: 400000,  max_completion_tokens: 128000 },
  { id: 'gpt-5-nano',                 context_length: 400000,  max_completion_tokens: 128000 },
  { id: 'gpt-5-mini',                 context_length: 400000,  max_completion_tokens: 128000 },
  { id: 'gpt-4.1',                    context_length: 1047576, max_completion_tokens: 32768 },
  { id: 'chatgpt-4o-latest',          context_length: 128000,  max_completion_tokens: 16384 },
  { id: 'gemini-2.5-flash-lite',      context_length: 1048576, max_completion_tokens: 65535 },
  { id: 'gemini-2.5-flash',           context_length: 1048576, max_completion_tokens: 65535 },
  { id: 'gemini-2.5-pro',             context_length: 200000,  max_completion_tokens: 65535 },
  { id: 'gemini-3-pro-preview',       context_length: 1048575, max_completion_tokens: 65535 }
];

app.get('/v1/models', authMiddleware, (req, res) => {
  const modelsData = {
    object: 'list',
    data: MODEL_SPECS.map(({ id, context_length, max_completion_tokens }) => ({
      id: id,
      name: id,
      description: '',
      top_provider: {
        is_moderated: false,
        context_length: context_length,
        max_completion_tokens: max_completion_tokens
      },
      context_length: context_length,
      supported_parameters: [],
      default_parameters: {
        temperature: null,
        top_p: null,
        frequency_penalty: null
      }
    }))
  };
  res.json(modelsData);
});

app.listen(PORT, () => {
  console.log(`Proxy Server running on port ${PORT}`);
  console.log(`Loaded ${UPSTREAM_KEYS.length} upstream keys.`);
});