Hyy890627 committed
Commit 14019cb · verified · 1 Parent(s): d33bac3

Upload dark-server-vps.js

Files changed (1)
  1. dark-server-vps.js +413 -0
dark-server-vps.js ADDED
@@ -0,0 +1,413 @@
+ const express = require("express");
+ const WebSocket = require("ws");
+ const http = require("http");
+
+ const SECRET_KEY = process.env.MY_SECRET_KEY || "123456";
+ const DEFAULT_STREAMING_MODE = "real";
+
+ const log = (level, msg) => console[level](`[${level.toUpperCase()}] ${new Date().toISOString()} - ${msg}`);
+ const genId = () => `${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
+
+ class Queue {
+   #msgs = [];
+   #waiters = [];
+   #closed = false;
+   push(msg) {
+     if (this.#closed) return;
+     this.#waiters.length ? this.#waiters.shift().resolve(msg) : this.#msgs.push(msg);
+   }
+   async pop() {
+     if (this.#closed) throw new Error("Queue closed");
+     if (this.#msgs.length) return this.#msgs.shift();
+     return new Promise((resolve, reject) => this.#waiters.push({ resolve, reject }));
+   }
+   close() {
+     this.#closed = true;
+     this.#waiters.forEach((w) => w.reject(new Error("Queue closed")));
+     this.#waiters = [];
+   }
+ }
+
+ class Connections {
+   #conn = null;
+   #queues = new Map();
+   #heartbeat = null;
+
+   add(ws, info) {
+     if (this.#conn) {
+       log("warn", "A client is already connected; closing the old connection and replacing it.");
+       this.#conn.close(1000, "Replaced by new connection");
+     }
+     this.#conn = ws;
+     ws.isAlive = true;
+     log("info", `Client connected: ${info.address}`);
+     ws.on("pong", () => { ws.isAlive = true; });
+     ws.on("message", (data) => this.#onMessage(data.toString()));
+     ws.on("close", () => this.#onClose(ws));
+     ws.on("error", (err) => log("error", `WebSocket error: ${err.message}`));
+     if (!this.#heartbeat) this.#startHeartbeat();
+   }
+
+   #startHeartbeat() {
+     log("info", "Heartbeat started");
+     this.#heartbeat = setInterval(() => {
+       if (!this.#conn) return;
+       if (!this.#conn.isAlive) return this.#conn.terminate();
+       this.#conn.isAlive = false;
+       this.#conn.ping();
+     }, 30000);
+   }
+
+   #onClose(ws) {
+     if (this.#conn !== ws) return;
+     this.#conn = null;
+     log("info", "Client disconnected");
+     this.#queues.forEach((q) => q.close());
+     this.#queues.clear();
+     this.#stopHeartbeat();
+   }
+
+   #stopHeartbeat() {
+     clearInterval(this.#heartbeat);
+     this.#heartbeat = null;
+     log("info", "Heartbeat stopped");
+   }
+
+   #onMessage(data) {
+     try {
+       const msg = JSON.parse(data);
+       const queue = this.#queues.get(msg.request_id);
+       if (!queue) return;
+       if (msg.event_type === "stream_close") queue.push({ type: "STREAM_END" });
+       else if (["response_headers", "chunk", "error"].includes(msg.event_type)) queue.push(msg);
+     } catch (err) {
+       log("error", `Failed to parse message: ${err.message}`);
+     }
+   }
+
+   hasConn = () => !!this.#conn;
+   getConn = () => (this.#conn?.isAlive ? this.#conn : null);
+   createQueue = (id) => this.#queues.set(id, new Queue()).get(id);
+   removeQueue = (id) => { this.#queues.get(id)?.close(); this.#queues.delete(id); };
+   forward = (proxyReq) => this.getConn()?.send(JSON.stringify(proxyReq));
+ }
+
+ class FormatConverter {
+   toOpenAIModels(geminiData) {
+     return {
+       object: "list",
+       data: (geminiData.models || []).map(model => ({
+         id: model.name.replace("models/", ""),
+         object: "model",
+         created: Math.floor(new Date(model.updateTime || Date.now()).getTime() / 1000),
+         owned_by: "google",
+       })),
+     };
+   }
+
+   fromOpenAIRequest(req, id, streamingMode) {
+     const { body: openaiBody } = req;
+     const geminiBody = this.#convertOpenAIToGemini(openaiBody);
+     return {
+       path: this.#convertOpenAIPath(openaiBody),
+       method: "POST",
+       headers: req.headers,
+       query_params: this.#convertOpenAIQuery(req),
+       body: JSON.stringify(geminiBody),
+       request_id: id,
+       streaming_mode: streamingMode,
+       is_openai: true,
+     };
+   }
+
+   toOpenAIResponse(geminiData, isStream) {
+     const { text, finishReason } = this.#parseGeminiCandidate(geminiData);
+     const finish = finishReason === "STOP" ? "stop" : null;
+     if (isStream) {
+       const chunk = {
+         id: `chatcmpl-${genId()}`, object: "chat.completion.chunk", created: Date.now() / 1000 | 0, model: "gpt-4",
+         choices: [{ index: 0, delta: text ? { content: text } : {}, finish_reason: finish }],
+       };
+       return `data: ${JSON.stringify(chunk)}\n\n`;
+     }
+     return JSON.stringify({
+       id: `chatcmpl-${genId()}`, object: "chat.completion", created: Date.now() / 1000 | 0, model: "gpt-4",
+       choices: [{ index: 0, message: { role: "assistant", content: text }, finish_reason: finish || "length" }],
+     });
+   }
+
+   toOpenAISSE(sseData) {
+     return sseData.split("\n")
+       .filter(line => line.startsWith("data: "))
+       .map(line => this.toOpenAIResponse(line.slice(6), true))
+       .join("");
+   }
+
+   #convertOpenAIPath = (body) => `/v1beta/models/${body?.model || "gemini-pro"}:${body?.stream ? "streamGenerateContent" : "generateContent"}`;
+
+   #convertOpenAIQuery = (req) => {
+     const query = { ...req.query };
+     delete query.key;
+     if (req.body?.stream) query.alt = "sse";
+     return query;
+   };
+
+   #convertOpenAIToGemini(body) {
+     const geminiBody = { contents: [] };
+     const systemParts = [];
+     (body.messages || []).forEach(msg => {
+       if (msg.role === "system") {
+         const text = typeof msg.content === "string" ? msg.content : (msg.content.find(c => c.type === "text")?.text || "");
+         systemParts.push({ text });
+       } else {
+         const parts = [];
+         if (typeof msg.content === "string") parts.push({ text: msg.content });
+         else if (Array.isArray(msg.content)) {
+           msg.content.forEach(item => {
+             if (item.type === "text") parts.push({ text: item.text });
+             else if (item.type === "image_url" && item.image_url.url.startsWith("data:")) {
+               const match = item.image_url.url.match(/^data:image\/(\w+);base64,(.+)$/);
+               if (match) parts.push({ inlineData: { mimeType: `image/${match[1]}`, data: match[2] } });
+             }
+           });
+         }
+         if (parts.length > 0) geminiBody.contents.push({ role: msg.role === "assistant" ? "model" : "user", parts });
+       }
+     });
+     if (systemParts.length > 0) geminiBody.systemInstruction = { parts: systemParts };
+     const genConfig = {};
+     if (body.temperature !== undefined) genConfig.temperature = body.temperature;
+     if (body.max_tokens !== undefined) genConfig.maxOutputTokens = body.max_tokens;
+     if (body.top_p !== undefined) genConfig.topP = body.top_p;
+     if (body.top_k !== undefined) genConfig.topK = body.top_k;
+     if (body.stop) genConfig.stopSequences = Array.isArray(body.stop) ? body.stop : [body.stop];
+     if (body.thinking_budget > 0) genConfig.thinkingConfig = { thoughtGenerationTokenBudget: Math.floor(body.thinking_budget) };
+     if (Object.keys(genConfig).length > 0) geminiBody.generationConfig = genConfig;
+     return geminiBody;
+   }
+
+   #parseGeminiCandidate(geminiData) {
+     try {
+       const gemini = typeof geminiData === "string" ? JSON.parse(geminiData) : geminiData;
+       const candidate = gemini.candidates?.[0];
+       if (!candidate) return { text: "", finishReason: null };
+       const text = candidate.content?.parts?.map(p => p.text || "").join("") || "";
+       return { text, finishReason: candidate.finishReason };
+     } catch {
+       return { text: "", finishReason: null };
+     }
+   }
+ }
+
+ class Handler {
+   #server;
+   #conns;
+   #converter = new FormatConverter();
+
+   constructor(server, conns) {
+     this.#server = server;
+     this.#conns = conns;
+   }
+
+   async handle(req, res, isOpenAI = false) {
+     if (!this.#auth(req, res) || !this.#checkConn(res, isOpenAI)) return;
+     const id = genId();
+     const queue = this.#conns.createQueue(id);
+     try {
+       const proxyReq = isOpenAI
+         ? this.#converter.fromOpenAIRequest(req, id, this.#server.mode)
+         : this.#buildNativeReq(req, id);
+       await this.#dispatch(req, res, proxyReq, queue, isOpenAI);
+     } catch (err) {
+       this.#error(err, res, isOpenAI);
+     } finally {
+       this.#conns.removeQueue(id);
+     }
+   }
+
+   async handleModels(req, res) {
+     log("info", "Model list requested");
+     if (!this.#checkConn(res, true)) return;
+     const id = genId();
+     const queue = this.#conns.createQueue(id);
+     try {
+       this.#conns.forward({ path: "/v1beta/models", method: "GET", request_id: id });
+       const header = await queue.pop();
+       if (header.event_type === "error") return this.#send(res, header.status, header.message, true);
+       const data = await queue.pop();
+       await queue.pop();
+       if (data.data) res.json(this.#converter.toOpenAIModels(JSON.parse(data.data)));
+       else this.#send(res, 500, "Unable to fetch model list", true);
+     } catch (err) {
+       this.#error(err, res, true);
+     } finally {
+       this.#conns.removeQueue(id);
+     }
+   }
+
+   #auth = (req, res) => {
+     // Let preflight requests through without authentication; otherwise check the key
+     if (req.method === "OPTIONS") return true;
+     if ((req.query.key || req.headers.authorization?.substring(7)) === SECRET_KEY) return true;
+     this.#send(res, 401, "Unauthorized", true);
+     return false;
+   }
+
+   #checkConn = (res, isOpenAI) => {
+     if (this.#conns.hasConn()) return true;
+     this.#send(res, 503, "No available connection", isOpenAI);
+     return false;
+   }
+
+   #buildNativeReq(req, id) {
+     const query = { ...req.query };
+     delete query.key;
+     const body = Buffer.isBuffer(req.body) ? req.body.toString("utf-8") : (typeof req.body === "object" ? JSON.stringify(req.body) : req.body);
+     return { path: req.path, method: req.method, headers: req.headers, query_params: query, body, request_id: id, streaming_mode: this.#server.mode };
+   }
+
+   async #dispatch(req, res, proxyReq, queue, isOpenAI) {
+     const isStream = isOpenAI ? req.body?.stream : req.path.includes("streamGenerateContent");
+     if (this.#server.mode === "fake") {
+       isStream ? await this.#fakeStream(req, res, proxyReq, queue, isOpenAI) : await this.#fakeNonStream(res, proxyReq, queue, isOpenAI);
+     } else {
+       await this.#realStream(res, proxyReq, queue, isStream, isOpenAI);
+     }
+   }
+
+   async #fakeNonStream(res, proxyReq, queue, isOpenAI) {
+     this.#conns.forward(proxyReq);
+     const header = await queue.pop();
+     if (header.event_type === "error") return this.#send(res, header.status, header.message, isOpenAI);
+     this.#setHeaders(res, header);
+     const data = await queue.pop();
+     await queue.pop();
+     if (data.data) isOpenAI ? res.json(JSON.parse(this.#converter.toOpenAIResponse(data.data, false))) : res.send(data.data);
+   }
+
+   async #fakeStream(req, res, proxyReq, queue, isOpenAI) {
+     this.#sseHeaders(res);
+     const timer = setInterval(() => res.write(this.#keepAlive(isOpenAI)), 1000);
+     try {
+       this.#conns.forward(proxyReq);
+       const header = await queue.pop();
+       if (header.event_type === "error") throw new Error(header.message);
+       const data = await queue.pop();
+       await queue.pop();
+       if (data.data) {
+         if (isOpenAI) {
+           res.write(this.#converter.toOpenAIResponse(data.data, true));
+           res.write("data: [DONE]\n\n");
+         } else res.write(`data: ${data.data}\n\n`);
+       }
+     } catch (err) {
+       this.#sseError(res, err.message);
+     } finally {
+       clearInterval(timer);
+       if (!res.writableEnded) res.end();
+     }
+   }
+
+   async #realStream(res, proxyReq, queue, isStream, isOpenAI) {
+     this.#conns.forward(proxyReq);
+     const header = await queue.pop();
+     if (header.event_type === "error") return this.#send(res, header.status, header.message, isOpenAI);
+     if (isStream && !header.headers?.["content-type"]) (header.headers ||= {})["content-type"] = "text/event-stream";
+     this.#setHeaders(res, header);
+     let fullResponse = "";
+     try {
+       while (true) {
+         const data = await queue.pop();
+         if (data.type === "STREAM_END") break;
+         if (!data.data) continue;
+         if (isOpenAI && isStream) res.write(this.#converter.toOpenAISSE(data.data));
+         else if (isOpenAI && !isStream) fullResponse += data.data;
+         else res.write(data.data);
+       }
+       if (isOpenAI && isStream) res.write("data: [DONE]\n\n");
+       else if (isOpenAI && !isStream) res.json(JSON.parse(this.#converter.toOpenAIResponse(fullResponse, false)));
+     } finally {
+       if (!res.writableEnded) res.end();
+     }
+   }
+
+   #setHeaders = (res, header) => {
+     res.status(header.status || 200);
+     Object.entries(header.headers || {}).forEach(([k, v]) => k.toLowerCase() !== "content-length" && res.set(k, v));
+   }
+   #sseHeaders = (res) => res.status(200).set({ "Content-Type": "text/event-stream", "Cache-Control": "no-cache", Connection: "keep-alive" });
+   #keepAlive = (isOpenAI) => `data: ${isOpenAI ? JSON.stringify({ id: `cmpl-${genId()}`, choices: [{ delta: {} }] }) : "{}"}\n\n`;
+   #sseError = (res, msg) => !res.writableEnded && res.write(`data: ${JSON.stringify({ error: { message: `[Proxy] ${msg}` } })}\n\n`);
+   #error(err, res, isOpenAI) {
+     log("error", `Error: ${err.message}`);
+     if (res.headersSent) {
+       if (this.#server.mode === "fake") this.#sseError(res, err.message);
+       if (!res.writableEnded) res.end();
+     } else this.#send(res, 500, `Proxy error: ${err.message}`, isOpenAI);
+   }
+   #send(res, status, msg, isOpenAI) {
+     if (res.headersSent) return;
+     isOpenAI && status >= 400 ? res.status(status).json({ error: { message: msg } }) : res.status(status).type("text/plain").send(msg);
+   }
+ }
+
+ class Server {
+   #handler;
+   mode = DEFAULT_STREAMING_MODE;
+
+   constructor() {
+     const conns = new Connections();
+     this.#handler = new Handler(this, conns);
+     this.start(conns);
+   }
+
+   async start(conns) {
+     const app = express();
+
+     // --- CORS fix: start ---
+     // Add the CORS headers manually; no extra dependency needed
+     app.use((req, res, next) => {
+       res.header("Access-Control-Allow-Origin", "*"); // allow all origins
+       res.header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS");
+       res.header("Access-Control-Allow-Headers", "*"); // allow all headers (Content-Type, Authorization, etc.)
+
+       // Answer browser preflight (OPTIONS) requests immediately
+       if (req.method === "OPTIONS") {
+         return res.sendStatus(200);
+       }
+       next();
+     });
+     // --- CORS fix: end ---
+
+     app.use(express.json({ limit: "100mb" }));
+     app.use(express.raw({ type: "*/*", limit: "100mb" }));
+
+     app.get("/", (req, res) => res.status(conns.hasConn() ? 200 : 404).send(conns.hasConn() ? "✅ Proxy ready" : "❌ No connection"));
+     app.get("/favicon.ico", (req, res) => res.status(204).send());
+
+     app.get("/admin/set-mode", (req, res) => {
+       if (["fake", "real"].includes(req.query.mode)) {
+         this.mode = req.query.mode;
+         log("info", `Mode switched: ${this.mode}`);
+         return res.send(`Mode switched to: ${this.mode}`);
+       }
+       res.status(400).send("Invalid mode");
+     });
+     app.get("/admin/get-mode", (req, res) => res.send(`Current mode: ${this.mode}`));
+
+     app.get("/v1/models", (req, res) => this.#handler.handleModels(req, res));
+     app.post("/v1/chat/completions", (req, res) => this.#handler.handle(req, res, true));
+     app.all(/(.*)/, (req, res) => this.#handler.handle(req, res, false));
+
+     const httpServer = http.createServer(app);
+     const wss = new WebSocket.Server({ server: httpServer });
+     wss.on("connection", (ws, req) => conns.add(ws, { address: req.socket.remoteAddress }));
+
+     httpServer.listen(process.env.PORT || 7860, "0.0.0.0", () => {
+       log("info", `Server listening at http://0.0.0.0:${process.env.PORT || 7860}`);
+       log("info", `Mode: ${this.mode}`);
+     });
+   }
+ }
+
+ new Server();
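
For reference (not part of this commit): the server above forwards each HTTP request over its single WebSocket connection as JSON ({ path, method, headers, query_params, body, request_id, streaming_mode }) and expects the connected client to reply with events keyed by the same request_id: "response_headers", then one or more "chunk" events, then "stream_close" (or "error"). A minimal sketch of such a client is shown below; the address and the canned Gemini-style reply are illustrative assumptions, and a real client would call the upstream API itself.

// Minimal protocol sketch of a WebSocket client for dark-server-vps.js (illustrative only).
// A real client would forward req.path / req.body to an upstream API instead of a canned reply.
const WebSocket = require("ws");

const ws = new WebSocket("ws://localhost:7860"); // assumed server address and port

ws.on("message", (raw) => {
  const req = JSON.parse(raw.toString()); // { path, method, headers, query_params, body, request_id, streaming_mode, is_openai? }
  const reply = (msg) => ws.send(JSON.stringify({ request_id: req.request_id, ...msg }));

  // The order the server's per-request queue expects: headers -> chunk(s) -> stream_close.
  reply({ event_type: "response_headers", status: 200, headers: { "content-type": "application/json" } });
  reply({
    event_type: "chunk",
    data: JSON.stringify({ candidates: [{ content: { parts: [{ text: "hello" }] }, finishReason: "STOP" }] }),
  });
  reply({ event_type: "stream_close" });
});

With a client connected, an OpenAI-style request can then be sent using the key from MY_SECRET_KEY (default 123456), e.g. curl -H "Authorization: Bearer 123456" -H "Content-Type: application/json" -d '{"model":"gemini-pro","messages":[{"role":"user","content":"hi"}]}' http://localhost:7860/v1/chat/completions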