hongshi-files commited on
Commit
cb8f7bb
·
verified ·
1 Parent(s): f91d99e

Update main.ts

Browse files
Files changed (1) hide show
  1. main.ts +357 -335
main.ts CHANGED
@@ -1,217 +1,209 @@
1
- import { serve } from "https://deno.land/std@0.220.1/http/server.ts";
 
2
 
3
- // 定义常量
4
- const UNLIMITED_AI_URL = "https://app.unlimitedai.chat/api/chat";
5
- const PORT = 7860;
6
- const MAX_RETRIES = 3;
7
 
8
- // 定义接口
9
- interface UnlimitedAIMessage {
10
- id: string;
11
- createdAt: string;
12
- role: string;
13
- content: string;
14
- parts: Array<{ type: string; text: string }>;
15
- }
 
 
 
 
 
16
 
17
- interface OpenAIMessage {
18
- role: string;
19
- content: string;
20
- }
21
 
22
- // 将OpenAI消息转换为UnlimitedAI消息
23
- function convertOpenAIToUnlimitedMessages(messages: OpenAIMessage[]): UnlimitedAIMessage[] {
24
- // 提取系统消息
25
- const systemMessages = messages.filter(msg => msg.role === "system");
26
- const nonSystemMessages = messages.filter(msg => msg.role !== "system");
27
-
28
- const result: UnlimitedAIMessage[] = [];
29
-
30
- // 如果有系统消息,将其转换为用户消息和助手回复
31
- if (systemMessages.length > 0) {
32
- // 合并所有系统消息内容
33
- const systemContent = systemMessages.map(msg => msg.content).join("\n\n");
34
-
35
- // 添加作为用户消息的系统提示
36
- result.push({
37
- id: crypto.randomUUID(),
38
- createdAt: new Date().toISOString(),
39
- role: "user",
40
- content: systemContent,
41
- parts: [{ type: "text", text: systemContent }],
42
- });
43
-
44
- // 添加助手确认回复
45
- result.push({
46
- id: crypto.randomUUID(),
47
- createdAt: new Date().toISOString(),
48
- role: "assistant",
49
- content: "Ok, I got it, I'll remember it and do it.",
50
- parts: [{ type: "text", text: "Ok, I got it, I'll remember it and do it." }],
51
- });
52
  }
53
-
54
- // 添加其余非系统消息
55
- nonSystemMessages.forEach(msg => {
56
- result.push({
57
- id: crypto.randomUUID(),
58
- createdAt: new Date().toISOString(),
59
- role: msg.role,
60
- content: msg.content,
61
- parts: [{ type: "text", text: msg.content }],
62
- });
63
- });
64
-
65
- return result;
66
  }
67
 
68
- // OpenAI请求体转换为UnlimitedAI请求体
69
- function convertOpenAIToUnlimitedBody(openaiBody: any): any {
70
  return {
71
- id: openaiBody.id || crypto.randomUUID(),
72
- messages: convertOpenAIToUnlimitedMessages(openaiBody.messages),
73
- selectedChatModel: openaiBody.model || "chat-model-reasoning",
 
 
 
 
 
 
 
74
  };
75
  }
76
 
77
- // 处理流式响应
78
- async function* transformStreamResponse(
79
- reader: ReadableStreamDefaultReader<Uint8Array>
80
- ): AsyncGenerator<string> {
81
- let buffer = "";
82
- const decoder = new TextDecoder();
83
- let messageId = "";
84
- let firstResult = true;
85
-
86
  try {
87
  while (true) {
88
  const { done, value } = await reader.read();
89
- if (done) {
90
- yield "data: [DONE]\n\n";
91
- break;
92
- }
93
 
94
- buffer += decoder.decode(value, { stream: true });
95
- let lines = buffer.split("\n");
96
- buffer = lines.pop() || "";
97
-
98
- for (const line of lines) {
99
- if (!line.trim()) continue;
100
- const idx = line.indexOf(":");
101
- if (idx === -1) continue;
102
-
103
- const key = line.slice(0, idx);
104
- let val = line.slice(idx + 1).trim();
105
- if (val.startsWith('"') && val.endsWith('"')) {
106
- val = val.slice(1, -1);
107
- }
108
-
109
- if (key === "f") {
110
- // 记录 messageId
111
- try {
112
- const obj = JSON.parse(val);
113
- messageId = obj.messageId || "";
114
- } catch (error) {
115
- console.error("Error parsing messageId:", error);
116
- }
117
- } else if (key === "g") {
118
- const delta = firstResult
119
- ? {
120
- role: "assistant",
121
- reasoning_content: val.replace(/\\n/g, "\n"),
122
- }
123
- : { reasoning_content: val.replace(/\\n/g, "\n") };
124
-
125
- // 思考过程
126
- const chunk = {
127
- id: messageId || crypto.randomUUID(),
128
- object: "chat.completion.chunk",
129
- created: Math.floor(Date.now() / 1000),
130
- model: "chat-model-reasoning",
131
- choices: [
132
- {
133
- delta,
134
- index: 0,
135
- finish_reason: null,
136
- },
137
- ],
138
- };
139
-
140
- yield `data: ${JSON.stringify(chunk)}\n\n`;
141
- } else if (key === "0") {
142
- // 最终结果
143
- const delta = { content: val.replace(/\\n/g, "\n") };
144
- const chunk = {
145
- id: messageId || crypto.randomUUID(),
146
- object: "chat.completion.chunk",
147
- created: Math.floor(Date.now() / 1000),
148
- model: "chat-model-reasoning",
149
- choices: [
150
- {
151
- delta,
152
- index: 0,
153
- finish_reason: null,
154
- },
155
- ],
156
- };
157
-
158
- yield `data: ${JSON.stringify(chunk)}\n\n`;
159
- firstResult = false;
160
- } else if (key === "e" || key === "d") {
161
- // 结束
162
- yield "data: [DONE]\n\n";
163
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  }
165
  }
 
 
 
 
 
 
 
 
166
  } catch (error) {
167
- console.error("Stream transformation error:", error);
168
- yield "data: [DONE]\n\n";
169
- } finally {
170
- reader.releaseLock();
 
 
 
171
  }
172
  }
173
 
174
- // 转换非流式响应
175
- async function transformNonStreamResponse(text: string): Promise<any> {
176
- const lines = text.split("\n");
177
- const data: Record<string, any> = {};
178
-
179
- for (const line of lines) {
180
- if (!line.trim()) continue;
181
- const idx = line.indexOf(":");
182
- if (idx === -1) continue;
 
 
 
 
 
 
 
 
 
 
183
 
184
- const key = line.slice(0, idx);
185
- let val = line.slice(idx + 1).trim();
186
- try {
187
- val = JSON.parse(val);
188
- } catch (error) {
189
- // 如果解析失败,保持原始字符串
190
  }
191
 
192
- data[key] = val;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
193
  }
194
-
195
- const content = data["0"];
196
- const reasoning_content = data.g;
197
-
198
  return {
199
- id: data.f?.messageId || crypto.randomUUID(),
200
  object: "chat.completion",
201
  created: Math.floor(Date.now() / 1000),
202
- model: "chat-model-reasoning",
203
  choices: [
204
  {
205
  index: 0,
206
  message: {
207
  role: "assistant",
208
- reasoning_content,
209
- content,
210
  },
211
- finish_reason: "stop",
212
  },
213
  ],
214
- usage: {
215
  prompt_tokens: 0,
216
  completion_tokens: 0,
217
  total_tokens: 0,
@@ -219,197 +211,227 @@ async function transformNonStreamResponse(text: string): Promise<any> {
219
  };
220
  }
221
 
222
- // 处理聊天完成请求
223
- async function handleChatCompletions(
224
- openaiBody: any,
225
- isStream: boolean,
226
- retryCount = 0
227
- ): Promise<Response> {
228
- try {
229
- // 转换为 UnlimitedAI.Chat 请求体
230
- const unlimitedBody = convertOpenAIToUnlimitedBody(openaiBody);
231
-
232
- // 只转发必要 headers
233
- const upstreamHeaders = {
234
- "content-type": "application/json",
235
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
236
- // 可以根据需要转发 Authorization 等
237
- "referer": "https://app.unlimitedai.chat/"
238
- };
239
 
240
- // 获取 X-API-TOKEN
241
- const XApiTokenRes = await fetch("https://app.unlimitedai.chat/api/token", {
242
- method: "GET",
243
- headers: upstreamHeaders
244
- });
245
- const XApiToken = await XApiTokenRes.json().then(data => data.token);
246
- upstreamHeaders["X-API-TOKEN"] = XApiToken;
247
-
248
- // 转发到 UnlimitedAI.Chat
249
- const upstreamRes = await fetch(UNLIMITED_AI_URL, {
250
- method: "POST",
251
- headers: upstreamHeaders,
252
- body: JSON.stringify(unlimitedBody),
253
- });
254
-
255
- if (!upstreamRes.ok) {
256
- throw new Error(`Chat completion failed: ${upstreamRes.status}`);
257
  }
258
-
259
- if (isStream) {
260
- // 流式响应处理
261
- const reader = upstreamRes.body?.getReader();
262
- if (!reader) {
263
- throw new Error("Failed to get response body reader");
264
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
 
266
- const transformedStream = new ReadableStream({
267
- async start(controller) {
268
- try {
269
- for await (const chunk of transformStreamResponse(reader)) {
270
- controller.enqueue(new TextEncoder().encode(chunk));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
  }
272
- controller.close();
273
- } catch (error) {
274
- console.error("Stream transformation error:", error);
275
- controller.error(error);
276
  }
277
- },
278
- });
279
-
280
- return new Response(transformedStream, {
281
- headers: {
282
- "Content-Type": "text/event-stream",
283
- "Cache-Control": "no-cache",
284
- "Connection": "keep-alive",
285
- "Access-Control-Allow-Origin": "*",
286
- },
287
- });
288
- } else {
289
- // 非流式响应处理
290
- const text = await upstreamRes.text();
291
- const transformedResponse = await transformNonStreamResponse(text);
292
-
293
- return new Response(JSON.stringify(transformedResponse), {
294
- status: 200,
295
- headers: {
296
- "Content-Type": "application/json",
297
- "Access-Control-Allow-Origin": "*",
298
- },
299
- });
300
  }
301
- } catch (error) {
302
- console.error("Request handling error:", error);
303
-
304
- return new Response(
305
- JSON.stringify({ error: "Internal server error", message: error.message }),
306
- {
307
- status: 500,
308
- headers: {
309
- "Content-Type": "application/json",
310
- "Access-Control-Allow-Origin": "*",
311
- },
312
- },
313
- );
314
- }
315
  }
316
 
317
- // 主处理函数
318
- async function handler(req: Request): Promise<Response> {
319
- const url = new URL(req.url);
320
  const path = url.pathname;
321
 
322
- // CORS预检请求处理
323
- if (req.method === "OPTIONS") {
324
  return new Response(null, {
325
- status: 204,
326
  headers: {
327
  "Access-Control-Allow-Origin": "*",
328
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
329
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
330
- "Access-Control-Max-Age": "86400",
331
  },
332
  });
333
  }
334
 
 
 
 
 
 
 
335
  try {
336
  // 模型列表接口
337
- if (path === "/v1/models" && req.method === "GET") {
338
- return new Response(
339
- JSON.stringify({
340
- object: "list",
341
- data: [
342
- {
343
- id: "chat-model-reasoning",
344
- object: "model",
345
- created: 0,
346
- owned_by: "unlimitedai",
347
- permission: [{
348
- id: "modelperm-chat-model-reasoning",
349
- object: "model_permission",
350
- created: 0,
351
- allow_create_engine: false,
352
- allow_sampling: true,
353
- allow_logprobs: false,
354
- allow_search_indices: false,
355
- allow_view: true,
356
- allow_fine_tuning: false,
357
- organization: "*",
358
- group: null,
359
- is_blocking: false,
360
- }],
361
- root: "chat-model-reasoning",
362
- parent: null,
363
- },
364
- ],
365
- }),
366
- {
367
- status: 200,
368
- headers: {
369
- "Content-Type": "application/json",
370
- "Access-Control-Allow-Origin": "*",
371
- },
372
- }
373
- );
374
  }
375
 
376
  // 聊天完成接口
377
- else if (path === "/v1/chat/completions" && req.method === "POST") {
378
- const openaiBody = await req.json();
379
- const isStream = openaiBody.stream === true;
 
 
380
 
381
- return await handleChatCompletions(openaiBody, isStream);
 
 
 
 
 
 
 
 
 
382
  }
383
 
384
- // 未找到路由
385
  else {
386
- return new Response(
387
- JSON.stringify({ error: "Not found", message: "Endpoint not supported" }),
388
- {
389
- status: 404,
390
- headers: {
391
- "Content-Type": "application/json",
392
- "Access-Control-Allow-Origin": "*",
393
- },
394
  }
395
- );
396
  }
397
  } catch (error) {
398
- console.error("Request handling error:", error);
399
-
400
- return new Response(
401
- JSON.stringify({ error: "Internal server error", message: error.message }),
402
- {
403
- status: 500,
404
- headers: {
405
- "Content-Type": "application/json",
406
- "Access-Control-Allow-Origin": "*",
407
- },
408
  }
409
- );
410
  }
411
  }
412
 
413
  // 启动服务器
414
- console.log(`Starting server on port ${PORT}...`);
415
- serve(handler, { port: PORT });
 
 
1
// freeai_proxy.ts
import { serve } from "https://deno.land/std@0.190.0/http/server.ts";

// Base URL of the upstream API this proxy forwards OpenAI-style requests to.
const FREEAI_API_BASE = "https://freeaichatplayground.com/api/v1";
// Model used when the client request omits `model`.
const DEFAULT_MODEL = "Deepseek R1";
 
 
6
 
7
+ // 获取可用模型列表
8
+ async function fetchModels() {
9
+ try {
10
+ const response = await fetch(`${FREEAI_API_BASE}/models`, {
11
+ method: "POST",
12
+ headers: {
13
+ "Content-Type": "application/json",
14
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
15
+ "Origin": "https://freeaichatplayground.com",
16
+ "Referer": "https://freeaichatplayground.com/chat",
17
+ },
18
+ body: JSON.stringify({ type: "text" }),
19
+ });
20
 
21
+ if (!response.ok) {
22
+ throw new Error(`Failed to fetch models: ${response.status}`);
23
+ }
 
24
 
25
+ const models = await response.json();
26
+ return models;
27
+ } catch (error) {
28
+ console.error("Error fetching models:", error);
29
+ return [];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  }
32
 
33
+ // 转换为 OpenAI 格式的模型列表
34
+ function transformModelsToOpenAIFormat(models) {
35
  return {
36
+ object: "list",
37
+ data: models.map(model => ({
38
+ id: model.name,
39
+ object: "model",
40
+ created: new Date(model.createdAt).getTime() / 1000,
41
+ owned_by: model.provider,
42
+ permission: [],
43
+ root: model.name,
44
+ parent: null,
45
+ })),
46
  };
47
  }
48
 
49
+ // 解析 SSE 格式的响应
50
+ async function parseSSEResponse(response) {
51
+ const reader = response.body.getReader();
52
+ let content = "";
53
+ let id = `chatcmpl-${Date.now()}`;
54
+ let finishReason = "stop";
55
+
 
 
56
  try {
57
  while (true) {
58
  const { done, value } = await reader.read();
59
+ if (done) break;
 
 
 
60
 
61
+ const chunk = new TextDecoder().decode(value);
62
+ content += chunk;
63
+ }
64
+
65
+ // 解析所有 SSE 消息
66
+ const messages = content.split('\n\n')
67
+ .filter(msg => msg.trim().startsWith('data:'))
68
+ .map(msg => {
69
+ const jsonStr = msg.replace('data:', '').trim();
70
+ try {
71
+ return JSON.parse(jsonStr);
72
+ } catch (e) {
73
+ console.warn("Failed to parse SSE message:", jsonStr);
74
+ return null;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  }
76
+ })
77
+ .filter(Boolean);
78
+
79
+ // 找到最后一条完整消息
80
+ const lastCompleteMessage = messages.findLast(msg =>
81
+ msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content
82
+ );
83
+
84
+ if (lastCompleteMessage) {
85
+ id = lastCompleteMessage.id || id;
86
+ if (lastCompleteMessage.choices &&
87
+ lastCompleteMessage.choices[0] &&
88
+ lastCompleteMessage.choices[0].finish_reason) {
89
+ finishReason = lastCompleteMessage.choices[0].finish_reason;
90
+ }
91
+
92
+ return {
93
+ id,
94
+ content: lastCompleteMessage.choices[0].message.content,
95
+ finish_reason: finishReason,
96
+ usage: lastCompleteMessage.usage || null
97
+ };
98
+ }
99
+
100
+ // 如果没有找到完整消息,尝试从所有消息中提取内容
101
+ let combinedContent = "";
102
+ for (const msg of messages) {
103
+ if (msg.choices && msg.choices[0] && msg.choices[0].delta && msg.choices[0].delta.content) {
104
+ combinedContent += msg.choices[0].delta.content;
105
+ } else if (msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content) {
106
+ combinedContent += msg.choices[0].message.content;
107
  }
108
  }
109
+
110
+ return {
111
+ id,
112
+ content: combinedContent || "No content found in response",
113
+ finish_reason: finishReason,
114
+ usage: null
115
+ };
116
+
117
  } catch (error) {
118
+ console.error("Error parsing SSE response:", error);
119
+ return {
120
+ id,
121
+ content: "Error parsing response: " + error.message,
122
+ finish_reason: "error",
123
+ usage: null
124
+ };
125
  }
126
  }
127
 
128
+ // 发送聊天请求到 freeaichatplayground
129
+ async function sendChatRequest(modelName, messages) {
130
+ try {
131
+ const formattedMessages = messages.map((msg, index) => ({
132
+ id: `${Date.now() + index}`,
133
+ role: msg.role,
134
+ content: msg.content,
135
+ model: {
136
+ id: "", // 这个ID会在下面被填充
137
+ name: modelName,
138
+ icon: "",
139
+ provider: "",
140
+ contextWindow: 63920
141
+ }
142
+ }));
143
+
144
+ // 获取模型列表以找到正确的ID
145
+ const models = await fetchModels();
146
+ const selectedModel = models.find(m => m.name === modelName);
147
 
148
+ if (!selectedModel) {
149
+ throw new Error(`Model "${modelName}" not found`);
 
 
 
 
150
  }
151
 
152
+ // 填充模型信息
153
+ formattedMessages.forEach(msg => {
154
+ if (msg.model) {
155
+ msg.model.id = selectedModel.id;
156
+ msg.model.icon = selectedModel.icon;
157
+ msg.model.provider = selectedModel.provider;
158
+ }
159
+ });
160
+
161
+ const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
162
+ method: "POST",
163
+ headers: {
164
+ "Content-Type": "application/json",
165
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
166
+ "Origin": "https://freeaichatplayground.com",
167
+ "Referer": "https://freeaichatplayground.com/chat",
168
+ },
169
+ body: JSON.stringify({
170
+ model: modelName,
171
+ messages: formattedMessages,
172
+ }),
173
+ });
174
+
175
+ if (!response.ok) {
176
+ const errorText = await response.text();
177
+ throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
178
+ }
179
+
180
+ // 处理 SSE 流式响应
181
+ const parsedResponse = await parseSSEResponse(response);
182
+ return parsedResponse;
183
+ } catch (error) {
184
+ console.error("Error in chat completion:", error);
185
+ throw error;
186
  }
187
+ }
188
+
189
+ // 转换为 OpenAI 格式的聊天响应
190
+ function transformChatResponseToOpenAIFormat(response, modelName) {
191
  return {
192
+ id: response.id || `chatcmpl-${Date.now()}`,
193
  object: "chat.completion",
194
  created: Math.floor(Date.now() / 1000),
195
+ model: modelName,
196
  choices: [
197
  {
198
  index: 0,
199
  message: {
200
  role: "assistant",
201
+ content: response.content,
 
202
  },
203
+ finish_reason: response.finish_reason || "stop",
204
  },
205
  ],
206
+ usage: response.usage || {
207
  prompt_tokens: 0,
208
  completion_tokens: 0,
209
  total_tokens: 0,
 
211
  };
212
  }
213
 
214
+ // 处理流式响应请求
215
+ async function handleStreamRequest(request, modelName, messages) {
216
+ const encoder = new TextEncoder();
217
+ const formattedMessages = messages.map((msg, index) => ({
218
+ id: `${Date.now() + index}`,
219
+ role: msg.role,
220
+ content: msg.content,
221
+ model: {
222
+ id: "", // 这个ID会在下面被填充
223
+ name: modelName,
224
+ icon: "",
225
+ provider: "",
226
+ contextWindow: 63920
227
+ }
228
+ }));
 
 
229
 
230
+ // 获取模型列表以找到正确的ID
231
+ const models = await fetchModels();
232
+ const selectedModel = models.find(m => m.name === modelName);
233
+
234
+ if (!selectedModel) {
235
+ throw new Error(`Model "${modelName}" not found`);
236
+ }
237
+
238
+ // 填充模型信息
239
+ formattedMessages.forEach(msg => {
240
+ if (msg.model) {
241
+ msg.model.id = selectedModel.id;
242
+ msg.model.icon = selectedModel.icon;
243
+ msg.model.provider = selectedModel.provider;
 
 
 
244
  }
245
+ });
246
+
247
+ const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
248
+ method: "POST",
249
+ headers: {
250
+ "Content-Type": "application/json",
251
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
252
+ "Origin": "https://freeaichatplayground.com",
253
+ "Referer": "https://freeaichatplayground.com/chat",
254
+ },
255
+ body: JSON.stringify({
256
+ model: modelName,
257
+ messages: formattedMessages,
258
+ stream: true,
259
+ }),
260
+ });
261
+
262
+ if (!response.ok) {
263
+ const errorText = await response.text();
264
+ throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
265
+ }
266
+
267
+ const stream = new ReadableStream({
268
+ async start(controller) {
269
+ const reader = response.body.getReader();
270
+ const chatId = `chatcmpl-${Date.now()}`;
271
 
272
+ // 发送初始消息
273
+ const initialChunk = {
274
+ id: chatId,
275
+ object: "chat.completion.chunk",
276
+ created: Math.floor(Date.now() / 1000),
277
+ model: modelName,
278
+ choices: [{
279
+ index: 0,
280
+ delta: { role: "assistant" },
281
+ finish_reason: null
282
+ }]
283
+ };
284
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(initialChunk)}\n\n`));
285
+
286
+ try {
287
+ let buffer = "";
288
+
289
+ while (true) {
290
+ const { done, value } = await reader.read();
291
+ if (done) break;
292
+
293
+ const chunk = new TextDecoder().decode(value);
294
+ buffer += chunk;
295
+
296
+ // 处理缓冲区中的所有完整 SSE 消息
297
+ const messages = buffer.split('\n\n');
298
+ buffer = messages.pop() || ""; // 保留最后一个可能不完整的消息
299
+
300
+ for (const msg of messages) {
301
+ if (!msg.trim().startsWith('data:')) continue;
302
+
303
+ try {
304
+ const jsonStr = msg.replace('data:', '').trim();
305
+ const data = JSON.parse(jsonStr);
306
+
307
+ if (data.choices && data.choices[0]) {
308
+ // 转换为 OpenAI 流式格式
309
+ const openAIChunk = {
310
+ id: chatId,
311
+ object: "chat.completion.chunk",
312
+ created: Math.floor(Date.now() / 1000),
313
+ model: modelName,
314
+ choices: [{
315
+ index: 0,
316
+ delta: {},
317
+ finish_reason: data.choices[0].finish_reason || null
318
+ }]
319
+ };
320
+
321
+ // 提取内容
322
+ if (data.choices[0].delta && data.choices[0].delta.content) {
323
+ openAIChunk.choices[0].delta.content = data.choices[0].delta.content;
324
+ } else if (data.choices[0].message && data.choices[0].message.content) {
325
+ openAIChunk.choices[0].delta.content = data.choices[0].message.content;
326
+ }
327
+
328
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(openAIChunk)}\n\n`));
329
+
330
+ // 如果是最后一条消息,发送 [DONE]
331
+ if (data.choices[0].finish_reason) {
332
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
333
+ }
334
+ }
335
+ } catch (e) {
336
+ console.warn("Failed to parse SSE message:", msg);
337
+ continue;
338
  }
 
 
 
 
339
  }
340
+ }
341
+
342
+ // 确保发送最终的 [DONE] 消息
343
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
344
+ controller.close();
345
+ } catch (error) {
346
+ console.error("Stream processing error:", error);
347
+ controller.error(error);
348
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
349
  }
350
+ });
351
+
352
+ return new Response(stream, {
353
+ headers: {
354
+ "Content-Type": "text/event-stream",
355
+ "Cache-Control": "no-cache",
356
+ "Connection": "keep-alive",
357
+ "Access-Control-Allow-Origin": "*",
358
+ }
359
+ });
 
 
 
 
360
  }
361
 
362
+ // 处理请求
363
+ async function handleRequest(request) {
364
+ const url = new URL(request.url);
365
  const path = url.pathname;
366
 
367
+ // CORS 预检请求处理
368
+ if (request.method === "OPTIONS") {
369
  return new Response(null, {
370
+ status: 200,
371
  headers: {
372
  "Access-Control-Allow-Origin": "*",
373
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
374
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
 
375
  },
376
  });
377
  }
378
 
379
+ // 设置通用响应头
380
+ const headers = {
381
+ "Content-Type": "application/json",
382
+ "Access-Control-Allow-Origin": "*",
383
+ };
384
+
385
  try {
386
  // 模型列表接口
387
+ if (path === "/v1/models" && request.method === "GET") {
388
+ const models = await fetchModels();
389
+ const openAIModels = transformModelsToOpenAIFormat(models);
390
+ return new Response(JSON.stringify(openAIModels), { headers });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
391
  }
392
 
393
  // 聊天完成接口
394
+ else if (path === "/v1/chat/completions" && request.method === "POST") {
395
+ const requestData = await request.json();
396
+ const modelName = requestData.model || DEFAULT_MODEL;
397
+ const messages = requestData.messages || [];
398
+ const stream = requestData.stream || false;
399
 
400
+ // 处理流式响应
401
+ if (stream) {
402
+ return handleStreamRequest(request, modelName, messages);
403
+ }
404
+
405
+ // 处理普通响应
406
+ const chatResponse = await sendChatRequest(modelName, messages);
407
+ const openAIResponse = transformChatResponseToOpenAIFormat(chatResponse, modelName);
408
+
409
+ return new Response(JSON.stringify(openAIResponse), { headers });
410
  }
411
 
412
+ // 未知路径
413
  else {
414
+ return new Response(JSON.stringify({
415
+ error: {
416
+ message: "Not found",
417
+ type: "invalid_request_error",
418
+ code: "path_not_found",
 
 
 
419
  }
420
+ }), { status: 404, headers });
421
  }
422
  } catch (error) {
423
+ console.error("Error handling request:", error);
424
+ return new Response(JSON.stringify({
425
+ error: {
426
+ message: error.message,
427
+ type: "server_error",
428
+ code: "internal_server_error",
 
 
 
 
429
  }
430
+ }), { status: 500, headers });
431
  }
432
  }
433
 
434
  // 启动服务器
435
+ const port = parseInt(Deno.env.get("PORT") || "7860");
436
+ console.log(`Starting server on port ${port}...`);
437
+ serve(handleRequest, { port });