hongshi-files committed on
Commit
ac689ba
·
verified ·
1 Parent(s): cb8f7bb

Update main.ts

Browse files
Files changed (1) hide show
  1. main.ts +254 -359
main.ts CHANGED
@@ -1,209 +1,202 @@
1
- // freeai_proxy.ts
2
- import { serve } from "https://deno.land/std@0.190.0/http/server.ts";
3
 
4
- const FREEAI_API_BASE = "https://freeaichatplayground.com/api/v1";
5
- const DEFAULT_MODEL = "Deepseek R1";
 
6
 
7
- // 获取可用模型列表
8
- async function fetchModels() {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  try {
10
- const response = await fetch(`${FREEAI_API_BASE}/models`, {
11
- method: "POST",
12
- headers: {
13
- "Content-Type": "application/json",
14
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
15
- "Origin": "https://freeaichatplayground.com",
16
- "Referer": "https://freeaichatplayground.com/chat",
 
17
  },
18
- body: JSON.stringify({ type: "text" }),
19
- });
20
 
21
  if (!response.ok) {
22
- throw new Error(`Failed to fetch models: ${response.status}`);
 
 
 
 
23
  }
24
 
25
- const models = await response.json();
26
- return models;
 
27
  } catch (error) {
28
- console.error("Error fetching models:", error);
29
- return [];
30
  }
31
  }
32
 
33
- // 转换为 OpenAI 格式的模型列表
34
- function transformModelsToOpenAIFormat(models) {
 
35
  return {
36
  object: "list",
37
- data: models.map(model => ({
38
- id: model.name,
 
39
  object: "model",
40
- created: new Date(model.createdAt).getTime() / 1000,
41
- owned_by: model.provider,
42
- permission: [],
43
- root: model.name,
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  parent: null,
45
  })),
46
  };
47
  }
48
 
49
- // 解析 SSE 格式的响应
50
- async function parseSSEResponse(response) {
51
- const reader = response.body.getReader();
52
- let content = "";
53
- let id = `chatcmpl-${Date.now()}`;
54
- let finishReason = "stop";
55
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  try {
57
  while (true) {
58
  const { done, value } = await reader.read();
59
  if (done) break;
60
-
61
- const chunk = new TextDecoder().decode(value);
62
- content += chunk;
63
- }
64
-
65
- // 解析所有 SSE 消息
66
- const messages = content.split('\n\n')
67
- .filter(msg => msg.trim().startsWith('data:'))
68
- .map(msg => {
69
- const jsonStr = msg.replace('data:', '').trim();
 
 
 
 
70
  try {
71
- return JSON.parse(jsonStr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  } catch (e) {
73
- console.warn("Failed to parse SSE message:", jsonStr);
74
- return null;
75
  }
76
- })
77
- .filter(Boolean);
78
-
79
- // 找到最后一条完整消息
80
- const lastCompleteMessage = messages.findLast(msg =>
81
- msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content
82
- );
83
-
84
- if (lastCompleteMessage) {
85
- id = lastCompleteMessage.id || id;
86
- if (lastCompleteMessage.choices &&
87
- lastCompleteMessage.choices[0] &&
88
- lastCompleteMessage.choices[0].finish_reason) {
89
- finishReason = lastCompleteMessage.choices[0].finish_reason;
90
  }
91
-
92
- return {
93
- id,
94
- content: lastCompleteMessage.choices[0].message.content,
95
- finish_reason: finishReason,
96
- usage: lastCompleteMessage.usage || null
97
- };
98
  }
99
-
100
- // 如果没有找到完整消息,尝试从所有消息中提取内容
101
- let combinedContent = "";
102
- for (const msg of messages) {
103
- if (msg.choices && msg.choices[0] && msg.choices[0].delta && msg.choices[0].delta.content) {
104
- combinedContent += msg.choices[0].delta.content;
105
- } else if (msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content) {
106
- combinedContent += msg.choices[0].message.content;
107
- }
108
- }
109
-
110
- return {
111
- id,
112
- content: combinedContent || "No content found in response",
113
- finish_reason: finishReason,
114
- usage: null
115
- };
116
-
117
- } catch (error) {
118
- console.error("Error parsing SSE response:", error);
119
- return {
120
- id,
121
- content: "Error parsing response: " + error.message,
122
- finish_reason: "error",
123
- usage: null
124
- };
125
  }
126
  }
127
 
128
- // 发送聊天请求到 freeaichatplayground
129
- async function sendChatRequest(modelName, messages) {
130
- try {
131
- const formattedMessages = messages.map((msg, index) => ({
132
- id: `${Date.now() + index}`,
133
- role: msg.role,
134
- content: msg.content,
135
- model: {
136
- id: "", // 这个ID会在下面被填充
137
- name: modelName,
138
- icon: "",
139
- provider: "",
140
- contextWindow: 63920
141
- }
142
- }));
143
-
144
- // 获取模型列表以找到正确的ID
145
- const models = await fetchModels();
146
- const selectedModel = models.find(m => m.name === modelName);
147
-
148
- if (!selectedModel) {
149
- throw new Error(`Model "${modelName}" not found`);
150
- }
151
-
152
- // 填充模型信息
153
- formattedMessages.forEach(msg => {
154
- if (msg.model) {
155
- msg.model.id = selectedModel.id;
156
- msg.model.icon = selectedModel.icon;
157
- msg.model.provider = selectedModel.provider;
158
- }
159
- });
160
 
161
- const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
162
- method: "POST",
163
- headers: {
164
- "Content-Type": "application/json",
165
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
166
- "Origin": "https://freeaichatplayground.com",
167
- "Referer": "https://freeaichatplayground.com/chat",
168
- },
169
- body: JSON.stringify({
170
- model: modelName,
171
- messages: formattedMessages,
172
- }),
173
- });
174
-
175
- if (!response.ok) {
176
- const errorText = await response.text();
177
- throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
178
- }
179
-
180
- // 处理 SSE 流式响应
181
- const parsedResponse = await parseSSEResponse(response);
182
- return parsedResponse;
183
- } catch (error) {
184
- console.error("Error in chat completion:", error);
185
- throw error;
186
- }
187
- }
188
-
189
- // 转换为 OpenAI 格式的聊天响应
190
- function transformChatResponseToOpenAIFormat(response, modelName) {
191
  return {
192
- id: response.id || `chatcmpl-${Date.now()}`,
193
  object: "chat.completion",
194
  created: Math.floor(Date.now() / 1000),
195
- model: modelName,
196
  choices: [
197
  {
198
  index: 0,
199
  message: {
200
  role: "assistant",
201
- content: response.content,
202
  },
203
- finish_reason: response.finish_reason || "stop",
204
  },
205
  ],
206
- usage: response.usage || {
207
  prompt_tokens: 0,
208
  completion_tokens: 0,
209
  total_tokens: 0,
@@ -211,227 +204,129 @@ function transformChatResponseToOpenAIFormat(response, modelName) {
211
  };
212
  }
213
 
214
- // 处理流式响应请求
215
- async function handleStreamRequest(request, modelName, messages) {
216
- const encoder = new TextEncoder();
217
- const formattedMessages = messages.map((msg, index) => ({
218
- id: `${Date.now() + index}`,
219
- role: msg.role,
220
- content: msg.content,
221
- model: {
222
- id: "", // 这个ID会在下面被填充
223
- name: modelName,
224
- icon: "",
225
- provider: "",
226
- contextWindow: 63920
227
- }
228
- }));
229
-
230
- // 获取模型列表以找到正确的ID
231
- const models = await fetchModels();
232
- const selectedModel = models.find(m => m.name === modelName);
233
-
234
- if (!selectedModel) {
235
- throw new Error(`Model "${modelName}" not found`);
236
- }
237
-
238
- // 填充模型信息
239
- formattedMessages.forEach(msg => {
240
- if (msg.model) {
241
- msg.model.id = selectedModel.id;
242
- msg.model.icon = selectedModel.icon;
243
- msg.model.provider = selectedModel.provider;
244
- }
245
- });
246
-
247
- const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
248
- method: "POST",
249
- headers: {
250
- "Content-Type": "application/json",
251
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
252
- "Origin": "https://freeaichatplayground.com",
253
- "Referer": "https://freeaichatplayground.com/chat",
254
- },
255
- body: JSON.stringify({
256
- model: modelName,
257
- messages: formattedMessages,
258
- stream: true,
259
- }),
260
- });
261
-
262
- if (!response.ok) {
263
- const errorText = await response.text();
264
- throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
265
- }
266
 
267
- const stream = new ReadableStream({
268
- async start(controller) {
269
- const reader = response.body.getReader();
270
- const chatId = `chatcmpl-${Date.now()}`;
271
-
272
- // 发送初始消息
273
- const initialChunk = {
274
- id: chatId,
275
- object: "chat.completion.chunk",
276
- created: Math.floor(Date.now() / 1000),
277
- model: modelName,
278
- choices: [{
279
- index: 0,
280
- delta: { role: "assistant" },
281
- finish_reason: null
282
- }]
283
- };
284
- controller.enqueue(encoder.encode(`data: ${JSON.stringify(initialChunk)}\n\n`));
285
-
286
- try {
287
- let buffer = "";
288
-
289
- while (true) {
290
- const { done, value } = await reader.read();
291
- if (done) break;
292
-
293
- const chunk = new TextDecoder().decode(value);
294
- buffer += chunk;
295
-
296
- // 处理缓冲区中的所有完整 SSE 消息
297
- const messages = buffer.split('\n\n');
298
- buffer = messages.pop() || ""; // 保留最后一个可能不完整的消息
299
-
300
- for (const msg of messages) {
301
- if (!msg.trim().startsWith('data:')) continue;
302
-
303
- try {
304
- const jsonStr = msg.replace('data:', '').trim();
305
- const data = JSON.parse(jsonStr);
306
-
307
- if (data.choices && data.choices[0]) {
308
- // 转换为 OpenAI 流式格式
309
- const openAIChunk = {
310
- id: chatId,
311
- object: "chat.completion.chunk",
312
- created: Math.floor(Date.now() / 1000),
313
- model: modelName,
314
- choices: [{
315
- index: 0,
316
- delta: {},
317
- finish_reason: data.choices[0].finish_reason || null
318
- }]
319
- };
320
-
321
- // 提取内容
322
- if (data.choices[0].delta && data.choices[0].delta.content) {
323
- openAIChunk.choices[0].delta.content = data.choices[0].delta.content;
324
- } else if (data.choices[0].message && data.choices[0].message.content) {
325
- openAIChunk.choices[0].delta.content = data.choices[0].message.content;
326
- }
327
-
328
- controller.enqueue(encoder.encode(`data: ${JSON.stringify(openAIChunk)}\n\n`));
329
-
330
- // 如果是最后一条消息,发送 [DONE]
331
- if (data.choices[0].finish_reason) {
332
- controller.enqueue(encoder.encode("data: [DONE]\n\n"));
333
- }
334
- }
335
- } catch (e) {
336
- console.warn("Failed to parse SSE message:", msg);
337
- continue;
338
- }
339
- }
340
- }
341
-
342
- // 确保发送最终的 [DONE] 消息
343
- controller.enqueue(encoder.encode("data: [DONE]\n\n"));
344
- controller.close();
345
- } catch (error) {
346
- console.error("Stream processing error:", error);
347
- controller.error(error);
348
  }
349
- }
350
- });
351
-
352
- return new Response(stream, {
353
- headers: {
354
- "Content-Type": "text/event-stream",
355
- "Cache-Control": "no-cache",
356
- "Connection": "keep-alive",
357
- "Access-Control-Allow-Origin": "*",
358
- }
359
- });
360
- }
361
-
362
- // 处理请求
363
- async function handleRequest(request) {
364
- const url = new URL(request.url);
365
- const path = url.pathname;
366
 
367
- // CORS 预检请求处理
368
- if (request.method === "OPTIONS") {
369
  return new Response(null, {
370
- status: 200,
371
  headers: {
372
  "Access-Control-Allow-Origin": "*",
373
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
374
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
 
375
  },
376
  });
377
  }
378
 
379
- // 设置通用响应头
380
- const headers = {
381
- "Content-Type": "application/json",
382
- "Access-Control-Allow-Origin": "*",
383
- };
384
 
385
  try {
386
  // 模型列表接口
387
- if (path === "/v1/models" && request.method === "GET") {
388
- const models = await fetchModels();
389
  const openAIModels = transformModelsToOpenAIFormat(models);
390
- return new Response(JSON.stringify(openAIModels), { headers });
 
 
 
 
 
 
 
 
391
  }
392
-
393
  // 聊天完成接口
394
- else if (path === "/v1/chat/completions" && request.method === "POST") {
395
- const requestData = await request.json();
396
- const modelName = requestData.model || DEFAULT_MODEL;
397
- const messages = requestData.messages || [];
398
- const stream = requestData.stream || false;
399
-
400
- // 处理流式响应
 
401
  if (stream) {
402
- return handleStreamRequest(request, modelName, messages);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
403
  }
404
-
405
- // 处理普通响应
406
- const chatResponse = await sendChatRequest(modelName, messages);
407
- const openAIResponse = transformChatResponseToOpenAIFormat(chatResponse, modelName);
408
-
409
- return new Response(JSON.stringify(openAIResponse), { headers });
410
  }
411
-
412
- // 未
413
  else {
414
- return new Response(JSON.stringify({
415
- error: {
416
- message: "Not found",
417
- type: "invalid_request_error",
418
- code: "path_not_found",
419
- }
420
- }), { status: 404, headers });
 
 
 
421
  }
422
  } catch (error) {
423
- console.error("Error handling request:", error);
424
- return new Response(JSON.stringify({
425
- error: {
426
- message: error.message,
427
- type: "server_error",
428
- code: "internal_server_error",
429
- }
430
- }), { status: 500, headers });
 
 
 
 
431
  }
432
  }
433
 
434
- // 启动服务器
435
- const port = parseInt(Deno.env.get("PORT") || "7860");
436
- console.log(`Starting server on port ${port}...`);
437
- serve(handleRequest, { port });
 
// HTTP server helper from the Deno standard library; the listener is
// started at the bottom of this file via serve(handler, ...).
import { serve } from "https://deno.land/std@0.220.1/http/server.ts";

// Constants
const BASE_URL = "https://api.codegpt.co/api"; // upstream CodeGPT API root
const PORT = 3000; // local listen port

// Shape of one agent entry returned by CodeGPT's `GET /v1/agent` endpoint.
// NOTE(review): only the fields this proxy actually reads are declared here;
// the upstream payload may carry more — confirm against the CodeGPT API docs.
interface CodegptModel {
  id: string; // agent id — exposed to clients as the OpenAI "model" id
  org_id: string;
  name: string;
  model: string; // underlying model name (e.g. shown as root/name below)
}
13
+
14
+ // 从请求头中获取API_KEY
15
+ function getApiKeyFromRequest(req: Request): string | null {
16
+ const authHeader = req.headers.get("authorization");
17
+ if (!authHeader) return null;
18
+
19
+ // 支持 "Bearer sk-xxx" 或直接 "sk-xxx" 格式
20
+ const match = authHeader.match(/^(Bearer\s+)?(sk-[a-zA-Z0-9-]+)$/);
21
+ return match ? match[2] : null;
22
+ }
23
+
24
+ // 获取模型列表
25
+ async function getModels(apiKey: string): Promise<CodegptModel[]> {
26
  try {
27
+ const response = await fetch(
28
+ `${BASE_URL}/v1/agent`,
29
+ {
30
+ method: "GET",
31
+ headers: {
32
+ "Accept": "application/json",
33
+ "Authorization": `Bearer ${apiKey}`,
34
+ },
35
  },
36
+ );
 
37
 
38
  if (!response.ok) {
39
+ const errorData = await response.json();
40
+ if (errorData.message === "Invalid credentials") {
41
+ throw new Error("🚀 ~ Your API key or access token is invalid.!");
42
+ }
43
+ throw new Error(`Failed to get models: ${response.status}`);
44
  }
45
 
46
+ const data = await response.json();
47
+ // console.log("🚀 ~ codegpt agents 响应:", data);
48
+ return data;
49
  } catch (error) {
50
+ console.error("Error getting models:", error);
51
+ throw error;
52
  }
53
  }
54
 
55
+
56
+ // codegpt agent 转换为 OpenAI model 格式
57
+ function transformModelsToOpenAIFormat(models: CodegptModel[]) {
58
  return {
59
  object: "list",
60
+ data: models.map((model) => ({
61
+ id: model.id,
62
+ name: model.model,
63
  object: "model",
64
+ created: Date.now(),
65
+ owned_by: "codegpt",
66
+ permission: [{
67
+ id: `modelperm-${model.model}`,
68
+ object: "model_permission",
69
+ created: Date.now(),
70
+ allow_create_engine: false,
71
+ allow_sampling: true,
72
+ allow_logprobs: false,
73
+ allow_search_indices: false,
74
+ allow_view: true,
75
+ allow_fine_tuning: false,
76
+ organization: "*",
77
+ group: null,
78
+ is_blocking: false,
79
+ }],
80
+ root: model.model,
81
  parent: null,
82
  })),
83
  };
84
  }
85
 
86
+ // 处理聊天完成请求
87
+ async function handleChatCompletions(
88
+ apiKey: string,
89
+ requestBody: any,
90
+ stream: boolean,
91
+ ) {
92
+
93
+ const CodegptBody = {
94
+ stream: stream,
95
+ agentId: requestBody.model,
96
+ messages: requestBody.messages,
97
+ format: "json",
98
+ };
99
+ // console.log("🚀 ~ 内部实际请求: ", CodegptBody);
100
+
101
+ const response = await fetch(
102
+ `${BASE_URL}/v1/chat/completions`,
103
+ {
104
+ method: "POST",
105
+ headers: {
106
+ "Content-Type": "application/json",
107
+ "Authorization": `Bearer ${apiKey}`,
108
+ "Accept": stream ? "text/event-stream" : "application/json",
109
+ },
110
+ body: JSON.stringify(CodegptBody),
111
+ },
112
+ );
113
+ // console.log("🚀 ~ 内部请求实际响应: \n", response);
114
+
115
+ if (!response.ok) {
116
+ throw new Error(`Chat completion failed: ${response.status}`);
117
+ }
118
+
119
+ return response;
120
+ }
121
+
122
+ // 转换流式响应
123
+ async function* transformStreamResponse(
124
+ readableStream: ReadableStream<Uint8Array>,
125
+ ) {
126
+ const reader = readableStream.getReader();
127
+ const decoder = new TextDecoder();
128
+ let buffer = "";
129
+
130
  try {
131
  while (true) {
132
  const { done, value } = await reader.read();
133
  if (done) break;
134
+
135
+ buffer += decoder.decode(value, { stream: true });
136
+ const lines = buffer.split("\n");
137
+ buffer = lines.pop() || "";
138
+
139
+ for (const line of lines) {
140
+ if (line.trim() === "" || !line.startsWith("data:")) continue;
141
+
142
+ const data = line.substring(5).trim();
143
+ if (data === "[DONE]") {
144
+ yield "data: [DONE]\n\n";
145
+ continue;
146
+ }
147
+
148
  try {
149
+ const CodegptEvent = JSON.parse(data);
150
+
151
+ // 转换为OpenAI格式的事件
152
+ const openAIEvent = {
153
+ id: CodegptEvent.agentName || `chatcmpl-${Date.now()}`,
154
+ object: "chat.completion.chunk",
155
+ created: Math.floor(Date.now() / 1000),
156
+ model: CodegptEvent.model || CodegptEvent.agentName,
157
+ choices: [
158
+ {
159
+ index: 0,
160
+ delta: {
161
+ reasoning_content: CodegptEvent.choices?.[0]?.delta?.contents?.[0]?.type === "think" ? CodegptEvent.choices[0].delta.contents[0].content || "" : "",
162
+ content: CodegptEvent.choices?.[0]?.delta?.content || "",
163
+ },
164
+ finish_reason: CodegptEvent.choices?.[0]?.finish_reason || null,
165
+ },
166
+ ],
167
+ };
168
+
169
+ yield `data: ${JSON.stringify(openAIEvent)}\n\n`;
170
  } catch (e) {
171
+ console.error("Error parsing event:", e, "Line:", line);
 
172
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  }
 
 
 
 
 
 
 
174
  }
175
+ } finally {
176
+ reader.releaseLock();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
  }
178
  }
179
 
180
+ // 转换非流式响应
181
+ async function transformNonStreamResponse(response: Response) {
182
+ const CodegptResponse = await response.json();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
183
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  return {
185
+ id: CodegptResponse.id || `chatcmpl-${Date.now()}`,
186
  object: "chat.completion",
187
  created: Math.floor(Date.now() / 1000),
188
+ model: CodegptResponse.model || CodegptResponse.agentName,
189
  choices: [
190
  {
191
  index: 0,
192
  message: {
193
  role: "assistant",
194
+ content: CodegptResponse.choices?.[0]?.message?.content || "",
195
  },
196
+ finish_reason: CodegptResponse.choices?.[0]?.finish_reason || "stop",
197
  },
198
  ],
199
+ usage: CodegptResponse.usage || {
200
  prompt_tokens: 0,
201
  completion_tokens: 0,
202
  total_tokens: 0,
 
204
  };
205
  }
206
 
207
+ // 处理函数
208
+ async function handler(req: Request): Promise<Response> {
209
+ const url = new URL(req.url);
210
+ const path = url.pathname;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
 
212
+ const apiKey = getApiKeyFromRequest(req);
213
+ if (!apiKey) {
214
+ return new Response(
215
+ // 没有填写 apikey 时返回的错误信息
216
+ JSON.stringify({ error: "Unauthorized", message: "Your API key or access token is invalid." }),
217
+ {
218
+ status: 401,
219
+ headers: {
220
+ "Content-Type": "application/json",
221
+ "Access-Control-Allow-Origin": "*",
222
+ },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
  }
224
+ );
225
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
+ // CORS预检请求处理
228
+ if (req.method === "OPTIONS") {
229
  return new Response(null, {
230
+ status: 204,
231
  headers: {
232
  "Access-Control-Allow-Origin": "*",
233
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
234
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
235
+ "Access-Control-Max-Age": "86400",
236
  },
237
  });
238
  }
239
 
 
 
 
 
 
240
 
241
  try {
242
  // 模型列表接口
243
+ if (path === "/v1/models" && req.method === "GET") {
244
+ const models = await getModels(apiKey);
245
  const openAIModels = transformModelsToOpenAIFormat(models);
246
+ // console.log("🚀 ~ openAI格式models: ", openAIModels);
247
+
248
+ return new Response(JSON.stringify(openAIModels), {
249
+ status: 200,
250
+ headers: {
251
+ "Content-Type": "application/json",
252
+ "Access-Control-Allow-Origin": "*",
253
+ },
254
+ });
255
  }
256
+
257
  // 聊天完成接口
258
+ else if (path === "/v1/chat/completions" && req.method === "POST") {
259
+ const requestBody = await req.json();
260
+ // console.log("🚀 ~ 外部客户端请求: ", requestBody);
261
+
262
+ const stream = requestBody.stream === true;
263
+
264
+ const codegptResponse = await handleChatCompletions(apiKey, requestBody, stream);
265
+
266
  if (stream) {
267
+ const transformedStream = new ReadableStream({
268
+ async start(controller) {
269
+ try {
270
+ for await (const chunk of transformStreamResponse(codegptResponse.body!)) {
271
+ controller.enqueue(new TextEncoder().encode(chunk));
272
+ }
273
+ controller.close();
274
+ } catch (error) {
275
+ console.error("Stream transformation error:", error);
276
+ controller.error(error);
277
+ }
278
+ },
279
+ });
280
+
281
+ return new Response(transformedStream, {
282
+ headers: {
283
+ "Content-Type": "text/event-stream",
284
+ "Cache-Control": "no-cache",
285
+ "Connection": "keep-alive",
286
+ "Access-Control-Allow-Origin": "*",
287
+ },
288
+ });
289
+ } else {
290
+ const transformedResponse = await transformNonStreamResponse(codegptResponse);
291
+
292
+ return new Response(JSON.stringify(transformedResponse), {
293
+ status: 200,
294
+ headers: {
295
+ "Content-Type": "application/json",
296
+ "Access-Control-Allow-Origin": "*",
297
+ },
298
+ });
299
  }
 
 
 
 
 
 
300
  }
301
+
302
+ // 未找到
303
  else {
304
+ return new Response(
305
+ JSON.stringify({ error: "Not found", message: "Endpoint not supported" }),
306
+ {
307
+ status: 404,
308
+ headers: {
309
+ "Content-Type": "application/json",
310
+ "Access-Control-Allow-Origin": "*",
311
+ },
312
+ },
313
+ );
314
  }
315
  } catch (error) {
316
+ console.error("Request handling error:", error);
317
+
318
+ return new Response(
319
+ JSON.stringify({ error: "Internal server error", message: error.message }),
320
+ {
321
+ status: 500,
322
+ headers: {
323
+ "Content-Type": "application/json",
324
+ "Access-Control-Allow-Origin": "*",
325
+ },
326
+ },
327
+ );
328
  }
329
  }
330
 
331
// Boot the HTTP server on the configured port.
console.log(`Starting server on port ${PORT}...`);
serve(handler, { port: PORT });