hongshi-files committed on
Commit
6ab460a
·
verified ·
1 Parent(s): e7476bb

Update main.ts

Browse files
Files changed (1) hide show
  1. main.ts +384 -243
main.ts CHANGED
@@ -1,274 +1,415 @@
1
- const lunaryApiBaseUrl = "https://api.lunary.ai/v1";
2
 
3
- // Using Deno.Kv for caching (requires --allow-env and --allow-read for .env or config)
4
- const kv = await Deno.openKv();
 
 
5
 
6
- async function getOrgId(apiKey: string): Promise<string | null> {
7
- // Direct storage of API key as the cache key
8
- const cachedOrgId = await kv.get(["orgIdCache", apiKey]);
9
-
10
- if (cachedOrgId.value) {
11
- console.log(`Cache hit for orgId: ${cachedOrgId.value}`);
12
- return cachedOrgId.value as string;
13
- }
14
-
15
- console.log("Cache miss for orgId, fetching from Lunary API...");
16
-
17
- const response = await fetch(`${lunaryApiBaseUrl}/users/me/org`, {
18
- method: "GET",
19
- headers: {
20
- Authorization: `Bearer ${apiKey}`,
21
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:138.0) Gecko/20100101 Firefox/138.0",
22
- "Accept": "application/json",
23
- "Accept-Language": "en-US,en;q=0.5",
24
- "Referer": "https://app.lunary.ai/",
25
- "Origin": "https://app.lunary.ai",
26
- "DNT": "1",
27
- "Sec-GPC": "1",
28
- "Connection": "keep-alive",
29
- "Sec-Fetch-Dest": "empty",
30
- "Sec-Fetch-Mode": "cors",
31
- "Sec-Fetch-Site": "same-site",
32
- "Priority": "u=4",
33
- "Pragma": "no-cache",
34
- "Cache-Control": "no-cache",
35
- "TE": "trailers",
36
- },
37
- });
38
-
39
- if (!response.ok) {
40
- console.error(`Failed to get orgId: ${response.status} - ${await response.text()}`);
41
- return null;
42
- }
43
 
44
- const data = await response.json();
45
- const orgId = data.id;
 
 
46
 
47
- if (orgId) {
48
- // Cache the orgId for 24 hours (adjust TTL as needed)
49
- await kv.set(["orgIdCache", apiKey], orgId, { expireIn: 24 * 60 * 60 * 1000 });
50
- console.log(`OrgId cached: ${orgId}`);
51
- return orgId;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- return null;
 
 
 
 
 
 
55
  }
56
 
57
- async function handler(req: Request): Promise<Response> {
58
- if (req.method !== "POST") {
59
- return new Response("Method Not Allowed", { status: 405 });
60
- }
 
 
 
 
61
 
62
- const authHeader = req.headers.get("Authorization");
63
- const apiKey = authHeader?.split(" ")[1];
64
- if (!apiKey) {
65
- return new Response("Unauthorized", { status: 401 });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  }
 
67
 
68
- const orgId = await getOrgId(apiKey);
69
- if (!orgId) {
70
- return new Response("Could not retrieve organization ID for the provided API key.", { status: 400 });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- let requestBody;
 
 
 
 
 
74
  try {
75
- requestBody = await req.json();
76
- } catch (error) {
77
- return new Response("Invalid JSON", { status: 400 });
78
- }
 
 
 
 
 
 
79
 
80
- const { messages, model, stream, ...extraParams } = requestBody;
81
-
82
- if (!messages || !Array.isArray(messages)) {
83
- return new Response("Missing or invalid 'messages' parameter", {
84
- status: 400,
85
  });
86
- }
87
-
88
- if (!model || typeof model !== "string") {
89
- return new Response("Missing or invalid 'model' parameter", {
90
- status: 400,
 
 
 
91
  });
92
- }
93
-
94
- const modelParts = model.split("/");
95
- if (modelParts.length !== 2) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  return new Response(
97
- "Invalid 'model' format. Expected 'provider/model_id'",
98
- { status: 400 }
 
 
 
 
 
 
99
  );
100
  }
 
101
 
102
- const [provider, modelId] = modelParts;
103
-
104
- const lunaryRequestBody = {
105
- content: messages,
106
- extra: {
107
- model: {
108
- id: modelId,
109
- name: modelId, // Lunary uses name and id as the same
110
- provider: provider,
111
- },
112
- temperature: extraParams.temperature ?? 1, // Default temperature
113
- stream: false, // This API doesn't support streaming directly
114
- tools: extraParams.tools,
115
- ...extraParams, // Include other extra parameters
116
- },
117
- variables: extraParams.variables ?? {}, // Include variables if provided
118
- };
119
 
120
- const lunaryResponse = await fetch(
121
- `${lunaryApiBaseUrl}/orgs/${orgId}/playground`,
122
- {
123
- method: "POST",
124
  headers: {
125
- "Content-Type": "application/json",
126
- Authorization: `Bearer ${apiKey}`, // Use the original API key for the playground request
127
- // Add realistic browser headers
128
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:138.0) Gecko/20100101 Firefox/138.0",
129
- "Accept": "*/*",
130
- "Accept-Language": "en-US,en;q=0.5",
131
- "Referer": "https://app.lunary.ai/",
132
- "Origin": "https://app.lunary.ai",
133
- "DNT": "1",
134
- "Sec-GPC": "1",
135
- "Connection": "keep-alive",
136
- "Sec-Fetch-Dest": "empty",
137
- "Sec-Fetch-Mode": "cors",
138
- "Sec-Fetch-Site": "same-site",
139
- "Priority": "u=4",
140
- "Pragma": "no-cache",
141
- "Cache-Control": "no-cache",
142
- "TE": "trailers",
143
  },
144
- body: JSON.stringify(lunaryRequestBody),
145
- }
146
- );
147
-
148
- if (!lunaryResponse.ok) {
149
- const errorText = await lunaryResponse.text();
150
- console.error(`Lunary Playground API error: ${lunaryResponse.status} - ${errorText}`);
151
- return new Response(`Lunary Playground API error: ${lunaryResponse.status} - ${errorText}`, {
152
- status: lunaryResponse.status,
153
  });
154
  }
155
 
156
- const lunaryData = await lunaryResponse.json();
157
-
158
- const openaiFormattedResponse = {
159
- id: lunaryData.id,
160
- object: "chat.completion",
161
- created: lunaryData.created,
162
- model: lunaryData.model, // Lunary returns the actual model name
163
- choices: lunaryData.choices.map((choice: any) => ({
164
- index: choice.index,
165
- message: {
166
- role: choice.message.role,
167
- content: choice.message.content,
168
- tool_calls: choice.message.tool_calls,
169
- },
170
- logprobs: choice.logprobs,
171
- finish_reason: choice.finish_reason,
172
- })),
173
- usage: lunaryData.usage,
174
- system_fingerprint: lunaryData.system_fingerprint,
175
- };
176
-
177
- if (stream) {
178
- const encoder = new TextEncoder();
179
- const stream = new ReadableStream({
180
- async start(controller) {
181
- // Simulate streaming by sending chunks of the content
182
- // In a real streaming scenario, you would process the upstream
183
- // stream and send chunks as they arrive. Since Lunary's playground
184
- // doesn't stream, we'll break down the final response.
185
-
186
- // Send the initial chunk with role and potential tool_calls if any
187
- if (openaiFormattedResponse.choices && openaiFormattedResponse.choices.length > 0) {
188
- const firstChoice = openaiFormattedResponse.choices[0];
189
- const initialChunk = {
190
- id: openaiFormattedResponse.id,
191
- object: "chat.completion.chunk",
192
- created: openaiFormattedResponse.created,
193
- model: openaiFormattedResponse.model,
194
- choices: [
195
- {
196
- index: 0,
197
- delta: {
198
- role: firstChoice.message.role,
199
- tool_calls: firstChoice.message.tool_calls // Include tool_calls if present
200
- },
201
- logprobs: firstChoice.logprobs, // Include logprobs if present
202
- finish_reason: null, // Indicate not finished yet
203
- }
204
- ],
205
- system_fingerprint: openaiFormattedResponse.system_fingerprint,
206
- };
207
- controller.enqueue(encoder.encode(`data: ${JSON.stringify(initialChunk)}\n\n`));
208
-
209
- // Break down the content into smaller chunks
210
- const content = firstChoice.message.content || "";
211
- const chunkSize = 10; // Adjust chunk size as needed for a more realistic stream feel
212
- for (let i = 0; i < content.length; i += chunkSize) {
213
- const chunkContent = content.substring(i, i + chunkSize);
214
- const contentChunk = {
215
- id: openaiFormattedResponse.id,
216
- object: "chat.completion.chunk",
217
- created: openaiFormattedResponse.created,
218
- model: openaiFormattedResponse.model,
219
- choices: [
220
- {
221
- index: 0,
222
- delta: {
223
- content: chunkContent
224
- },
225
- logprobs: null, // Logprobs are typically not sent in content chunks
226
- finish_reason: null,
227
- }
228
- ],
229
- system_fingerprint: openaiFormattedResponse.system_fingerprint,
230
- };
231
- controller.enqueue(encoder.encode(`data: ${JSON.stringify(contentChunk)}\n\n`));
232
- // Add a small delay to better simulate streaming
233
- await new Promise(resolve => setTimeout(resolve, 20)); // Adjust delay as needed
234
- }
235
-
236
- // Send the final chunk with the finish_reason
237
- const finalChunk = {
238
- id: openaiFormattedResponse.id,
239
- object: "chat.completion.chunk",
240
- created: openaiFormattedResponse.created,
241
- model: openaiFormattedResponse.model,
242
- choices: [
243
- {
244
- index: 0,
245
- delta: {}, // Delta is empty for the last chunk
246
- logprobs: null,
247
- finish_reason: firstChoice.finish_reason,
248
- }
249
- ],
250
- system_fingerprint: openaiFormattedResponse.system_fingerprint,
251
- usage: openaiFormattedResponse.usage, // Include usage in the final chunk
252
- };
253
- controller.enqueue(encoder.encode(`data: ${JSON.stringify(finalChunk)}\n\n`));
254
  }
255
-
256
-
257
- // Send the [DONE] marker
258
- controller.enqueue(encoder.encode("data: [DONE]\n\n"));
259
- controller.close();
260
- },
261
- });
262
-
263
- return new Response(stream, {
264
- headers: { "Content-Type": "text/event-stream" },
265
- });
266
- } else {
267
- // Non-streaming: return the JSON response directly
268
- return new Response(JSON.stringify(openaiFormattedResponse), {
269
- headers: { "Content-Type": "application/json" },
270
- });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
  }
272
  }
273
 
274
- Deno.serve(handler);
 
 
 
1
+ import { serve } from "https://deno.land/std@0.220.1/http/server.ts";
2
 
3
+ // 定义常量
4
+ const UNLIMITED_AI_URL = "https://app.unlimitedai.chat/api/chat";
5
+ const PORT = 3000;
6
+ const MAX_RETRIES = 3;
7
 
8
+ // 定义接口
9
+ interface UnlimitedAIMessage {
10
+ id: string;
11
+ createdAt: string;
12
+ role: string;
13
+ content: string;
14
+ parts: Array<{ type: string; text: string }>;
15
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
+ interface OpenAIMessage {
18
+ role: string;
19
+ content: string;
20
+ }
21
 
22
+ // 将OpenAI消息转换为UnlimitedAI消息
23
+ function convertOpenAIToUnlimitedMessages(messages: OpenAIMessage[]): UnlimitedAIMessage[] {
24
+ // 提取系统消息
25
+ const systemMessages = messages.filter(msg => msg.role === "system");
26
+ const nonSystemMessages = messages.filter(msg => msg.role !== "system");
27
+
28
+ const result: UnlimitedAIMessage[] = [];
29
+
30
+ // 如果有系统消息,将其转换为用户消息和助手回复
31
+ if (systemMessages.length > 0) {
32
+ // 合并所有系统消息内容
33
+ const systemContent = systemMessages.map(msg => msg.content).join("\n\n");
34
+
35
+ // 添加作为用户消息的系统提示
36
+ result.push({
37
+ id: crypto.randomUUID(),
38
+ createdAt: new Date().toISOString(),
39
+ role: "user",
40
+ content: systemContent,
41
+ parts: [{ type: "text", text: systemContent }],
42
+ });
43
+
44
+ // 添加助手确认回复
45
+ result.push({
46
+ id: crypto.randomUUID(),
47
+ createdAt: new Date().toISOString(),
48
+ role: "assistant",
49
+ content: "Ok, I got it, I'll remember it and do it.",
50
+ parts: [{ type: "text", text: "Ok, I got it, I'll remember it and do it." }],
51
+ });
52
  }
53
+
54
+ // 添加其余非系统消息
55
+ nonSystemMessages.forEach(msg => {
56
+ result.push({
57
+ id: crypto.randomUUID(),
58
+ createdAt: new Date().toISOString(),
59
+ role: msg.role,
60
+ content: msg.content,
61
+ parts: [{ type: "text", text: msg.content }],
62
+ });
63
+ });
64
+
65
+ return result;
66
+ }
67
 
68
+ // 将OpenAI请求体转换为UnlimitedAI请求体
69
+ function convertOpenAIToUnlimitedBody(openaiBody: any): any {
70
+ return {
71
+ id: openaiBody.id || crypto.randomUUID(),
72
+ messages: convertOpenAIToUnlimitedMessages(openaiBody.messages),
73
+ selectedChatModel: openaiBody.model || "chat-model-reasoning",
74
+ };
75
  }
76
 
77
+ // 处理流式响应
78
+ async function* transformStreamResponse(
79
+ reader: ReadableStreamDefaultReader<Uint8Array>
80
+ ): AsyncGenerator<string> {
81
+ let buffer = "";
82
+ const decoder = new TextDecoder();
83
+ let messageId = "";
84
+ let firstResult = true;
85
 
86
+ try {
87
+ while (true) {
88
+ const { done, value } = await reader.read();
89
+ if (done) {
90
+ yield "data: [DONE]\n\n";
91
+ break;
92
+ }
93
+
94
+ buffer += decoder.decode(value, { stream: true });
95
+ let lines = buffer.split("\n");
96
+ buffer = lines.pop() || "";
97
+
98
+ for (const line of lines) {
99
+ if (!line.trim()) continue;
100
+ const idx = line.indexOf(":");
101
+ if (idx === -1) continue;
102
+
103
+ const key = line.slice(0, idx);
104
+ let val = line.slice(idx + 1).trim();
105
+ if (val.startsWith('"') && val.endsWith('"')) {
106
+ val = val.slice(1, -1);
107
+ }
108
+
109
+ if (key === "f") {
110
+ // 记录 messageId
111
+ try {
112
+ const obj = JSON.parse(val);
113
+ messageId = obj.messageId || "";
114
+ } catch (error) {
115
+ console.error("Error parsing messageId:", error);
116
+ }
117
+ } else if (key === "g") {
118
+ const delta = firstResult
119
+ ? {
120
+ role: "assistant",
121
+ reasoning_content: val.replace(/\\n/g, "\n"),
122
+ }
123
+ : { reasoning_content: val.replace(/\\n/g, "\n") };
124
+
125
+ // 思考过程
126
+ const chunk = {
127
+ id: messageId || crypto.randomUUID(),
128
+ object: "chat.completion.chunk",
129
+ created: Math.floor(Date.now() / 1000),
130
+ model: "chat-model-reasoning",
131
+ choices: [
132
+ {
133
+ delta,
134
+ index: 0,
135
+ finish_reason: null,
136
+ },
137
+ ],
138
+ };
139
+
140
+ yield `data: ${JSON.stringify(chunk)}\n\n`;
141
+ } else if (key === "0") {
142
+ // 最终结果
143
+ const delta = { content: val.replace(/\\n/g, "\n") };
144
+ const chunk = {
145
+ id: messageId || crypto.randomUUID(),
146
+ object: "chat.completion.chunk",
147
+ created: Math.floor(Date.now() / 1000),
148
+ model: "chat-model-reasoning",
149
+ choices: [
150
+ {
151
+ delta,
152
+ index: 0,
153
+ finish_reason: null,
154
+ },
155
+ ],
156
+ };
157
+
158
+ yield `data: ${JSON.stringify(chunk)}\n\n`;
159
+ firstResult = false;
160
+ } else if (key === "e" || key === "d") {
161
+ // 结束
162
+ yield "data: [DONE]\n\n";
163
+ }
164
+ }
165
+ }
166
+ } catch (error) {
167
+ console.error("Stream transformation error:", error);
168
+ yield "data: [DONE]\n\n";
169
+ } finally {
170
+ reader.releaseLock();
171
  }
172
+ }
173
 
174
+ // 转换非流式响应
175
+ async function transformNonStreamResponse(text: string): Promise<any> {
176
+ const lines = text.split("\n");
177
+ const data: Record<string, any> = {};
178
+
179
+ for (const line of lines) {
180
+ if (!line.trim()) continue;
181
+ const idx = line.indexOf(":");
182
+ if (idx === -1) continue;
183
+
184
+ const key = line.slice(0, idx);
185
+ let val = line.slice(idx + 1).trim();
186
+ try {
187
+ val = JSON.parse(val);
188
+ } catch (error) {
189
+ // 如果解析失败,保持原始字符串
190
+ }
191
+
192
+ data[key] = val;
193
  }
194
+
195
+ const content = data["0"];
196
+ const reasoning_content = data.g;
197
+
198
+ return {
199
+ id: data.f?.messageId || crypto.randomUUID(),
200
+ object: "chat.completion",
201
+ created: Math.floor(Date.now() / 1000),
202
+ model: "chat-model-reasoning",
203
+ choices: [
204
+ {
205
+ index: 0,
206
+ message: {
207
+ role: "assistant",
208
+ reasoning_content,
209
+ content,
210
+ },
211
+ finish_reason: "stop",
212
+ },
213
+ ],
214
+ usage: {
215
+ prompt_tokens: 0,
216
+ completion_tokens: 0,
217
+ total_tokens: 0,
218
+ },
219
+ };
220
+ }
221
 
222
+ // 处理聊天完成请求
223
+ async function handleChatCompletions(
224
+ openaiBody: any,
225
+ isStream: boolean,
226
+ retryCount = 0
227
+ ): Promise<Response> {
228
  try {
229
+ // 转换为 UnlimitedAI.Chat 请求体
230
+ const unlimitedBody = convertOpenAIToUnlimitedBody(openaiBody);
231
+
232
+ // 只转发必要 headers
233
+ const upstreamHeaders = {
234
+ "content-type": "application/json",
235
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
236
+ // 可以根据需要转发 Authorization 等
237
+ "referer": "https://app.unlimitedai.chat/"
238
+ };
239
 
240
+ // 获取 X-API-TOKEN
241
+ const XApiTokenRes = await fetch("https://app.unlimitedai.chat/api/token", {
242
+ method: "GET",
243
+ headers: upstreamHeaders
 
244
  });
245
+ const XApiToken = await XApiTokenRes.json().then(data => data.token);
246
+ upstreamHeaders["X-API-TOKEN"] = XApiToken;
247
+
248
+ // 转发到 UnlimitedAI.Chat
249
+ const upstreamRes = await fetch(UNLIMITED_AI_URL, {
250
+ method: "POST",
251
+ headers: upstreamHeaders,
252
+ body: JSON.stringify(unlimitedBody),
253
  });
254
+
255
+ if (!upstreamRes.ok) {
256
+ throw new Error(`Chat completion failed: ${upstreamRes.status}`);
257
+ }
258
+
259
+ if (isStream) {
260
+ // 流式响应处理
261
+ const reader = upstreamRes.body?.getReader();
262
+ if (!reader) {
263
+ throw new Error("Failed to get response body reader");
264
+ }
265
+
266
+ const transformedStream = new ReadableStream({
267
+ async start(controller) {
268
+ try {
269
+ for await (const chunk of transformStreamResponse(reader)) {
270
+ controller.enqueue(new TextEncoder().encode(chunk));
271
+ }
272
+ controller.close();
273
+ } catch (error) {
274
+ console.error("Stream transformation error:", error);
275
+ controller.error(error);
276
+ }
277
+ },
278
+ });
279
+
280
+ return new Response(transformedStream, {
281
+ headers: {
282
+ "Content-Type": "text/event-stream",
283
+ "Cache-Control": "no-cache",
284
+ "Connection": "keep-alive",
285
+ "Access-Control-Allow-Origin": "*",
286
+ },
287
+ });
288
+ } else {
289
+ // 非流式响应处理
290
+ const text = await upstreamRes.text();
291
+ const transformedResponse = await transformNonStreamResponse(text);
292
+
293
+ return new Response(JSON.stringify(transformedResponse), {
294
+ status: 200,
295
+ headers: {
296
+ "Content-Type": "application/json",
297
+ "Access-Control-Allow-Origin": "*",
298
+ },
299
+ });
300
+ }
301
+ } catch (error) {
302
+ console.error("Request handling error:", error);
303
+
304
  return new Response(
305
+ JSON.stringify({ error: "Internal server error", message: error.message }),
306
+ {
307
+ status: 500,
308
+ headers: {
309
+ "Content-Type": "application/json",
310
+ "Access-Control-Allow-Origin": "*",
311
+ },
312
+ },
313
  );
314
  }
315
+ }
316
 
317
+ // 主处理函数
318
+ async function handler(req: Request): Promise<Response> {
319
+ const url = new URL(req.url);
320
+ const path = url.pathname;
 
 
 
 
 
 
 
 
 
 
 
 
 
321
 
322
+ // CORS预检请求处理
323
+ if (req.method === "OPTIONS") {
324
+ return new Response(null, {
325
+ status: 204,
326
  headers: {
327
+ "Access-Control-Allow-Origin": "*",
328
+ "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
329
+ "Access-Control-Allow-Headers": "Content-Type, Authorization",
330
+ "Access-Control-Max-Age": "86400",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331
  },
 
 
 
 
 
 
 
 
 
332
  });
333
  }
334
 
335
+ try {
336
+ // 模型列表接口
337
+ if (path === "/v1/models" && req.method === "GET") {
338
+ return new Response(
339
+ JSON.stringify({
340
+ object: "list",
341
+ data: [
342
+ {
343
+ id: "chat-model-reasoning",
344
+ object: "model",
345
+ created: 0,
346
+ owned_by: "unlimitedai",
347
+ permission: [{
348
+ id: "modelperm-chat-model-reasoning",
349
+ object: "model_permission",
350
+ created: 0,
351
+ allow_create_engine: false,
352
+ allow_sampling: true,
353
+ allow_logprobs: false,
354
+ allow_search_indices: false,
355
+ allow_view: true,
356
+ allow_fine_tuning: false,
357
+ organization: "*",
358
+ group: null,
359
+ is_blocking: false,
360
+ }],
361
+ root: "chat-model-reasoning",
362
+ parent: null,
363
+ },
364
+ ],
365
+ }),
366
+ {
367
+ status: 200,
368
+ headers: {
369
+ "Content-Type": "application/json",
370
+ "Access-Control-Allow-Origin": "*",
371
+ },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
372
  }
373
+ );
374
+ }
375
+
376
+ // 聊天完成接口
377
+ else if (path === "/v1/chat/completions" && req.method === "POST") {
378
+ const openaiBody = await req.json();
379
+ const isStream = openaiBody.stream === true;
380
+
381
+ return await handleChatCompletions(openaiBody, isStream);
382
+ }
383
+
384
+ // 未找到路由
385
+ else {
386
+ return new Response(
387
+ JSON.stringify({ error: "Not found", message: "Endpoint not supported" }),
388
+ {
389
+ status: 404,
390
+ headers: {
391
+ "Content-Type": "application/json",
392
+ "Access-Control-Allow-Origin": "*",
393
+ },
394
+ }
395
+ );
396
+ }
397
+ } catch (error) {
398
+ console.error("Request handling error:", error);
399
+
400
+ return new Response(
401
+ JSON.stringify({ error: "Internal server error", message: error.message }),
402
+ {
403
+ status: 500,
404
+ headers: {
405
+ "Content-Type": "application/json",
406
+ "Access-Control-Allow-Origin": "*",
407
+ },
408
+ }
409
+ );
410
  }
411
  }
412
 
413
+ // 启动服务器
414
+ console.log(`Starting server on port ${PORT}...`);
415
+ serve(handler, { port: PORT });