ropic committed on
Commit
fde3190
·
verified ·
1 Parent(s): 2ca2057

Create freeai_proxy.ts

Browse files
Files changed (1) hide show
  1. freeai_proxy.ts +437 -0
freeai_proxy.ts ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // freeai_proxy.ts
2
+ import { serve } from "https://deno.land/std@0.190.0/http/server.ts";
3
+
4
// Base URL of the upstream freeaichatplayground.com v1 REST API.
const FREEAI_API_BASE = "https://freeaichatplayground.com/api/v1";
// Model used when a client request omits the "model" field.
const DEFAULT_MODEL = "Deepseek R1";
6
+
7
+ // 获取可用模型列表
8
+ async function fetchModels() {
9
+ try {
10
+ const response = await fetch(`${FREEAI_API_BASE}/models`, {
11
+ method: "POST",
12
+ headers: {
13
+ "Content-Type": "application/json",
14
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
15
+ "Origin": "https://freeaichatplayground.com",
16
+ "Referer": "https://freeaichatplayground.com/chat",
17
+ },
18
+ body: JSON.stringify({ type: "text" }),
19
+ });
20
+
21
+ if (!response.ok) {
22
+ throw new Error(`Failed to fetch models: ${response.status}`);
23
+ }
24
+
25
+ const models = await response.json();
26
+ return models;
27
+ } catch (error) {
28
+ console.error("Error fetching models:", error);
29
+ return [];
30
+ }
31
+ }
32
+
33
+ // 转换为 OpenAI 格式的模型列表
34
+ function transformModelsToOpenAIFormat(models) {
35
+ return {
36
+ object: "list",
37
+ data: models.map(model => ({
38
+ id: model.name,
39
+ object: "model",
40
+ created: new Date(model.createdAt).getTime() / 1000,
41
+ owned_by: model.provider,
42
+ permission: [],
43
+ root: model.name,
44
+ parent: null,
45
+ })),
46
+ };
47
+ }
48
+
49
+ // 解析 SSE 格式的响应
50
+ async function parseSSEResponse(response) {
51
+ const reader = response.body.getReader();
52
+ let content = "";
53
+ let id = `chatcmpl-${Date.now()}`;
54
+ let finishReason = "stop";
55
+
56
+ try {
57
+ while (true) {
58
+ const { done, value } = await reader.read();
59
+ if (done) break;
60
+
61
+ const chunk = new TextDecoder().decode(value);
62
+ content += chunk;
63
+ }
64
+
65
+ // 解析所有 SSE 消息
66
+ const messages = content.split('\n\n')
67
+ .filter(msg => msg.trim().startsWith('data:'))
68
+ .map(msg => {
69
+ const jsonStr = msg.replace('data:', '').trim();
70
+ try {
71
+ return JSON.parse(jsonStr);
72
+ } catch (e) {
73
+ console.warn("Failed to parse SSE message:", jsonStr);
74
+ return null;
75
+ }
76
+ })
77
+ .filter(Boolean);
78
+
79
+ // 找到最后一条完整消息
80
+ const lastCompleteMessage = messages.findLast(msg =>
81
+ msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content
82
+ );
83
+
84
+ if (lastCompleteMessage) {
85
+ id = lastCompleteMessage.id || id;
86
+ if (lastCompleteMessage.choices &&
87
+ lastCompleteMessage.choices[0] &&
88
+ lastCompleteMessage.choices[0].finish_reason) {
89
+ finishReason = lastCompleteMessage.choices[0].finish_reason;
90
+ }
91
+
92
+ return {
93
+ id,
94
+ content: lastCompleteMessage.choices[0].message.content,
95
+ finish_reason: finishReason,
96
+ usage: lastCompleteMessage.usage || null
97
+ };
98
+ }
99
+
100
+ // 如果没有找到完整消息,尝试从所有消息中提取内容
101
+ let combinedContent = "";
102
+ for (const msg of messages) {
103
+ if (msg.choices && msg.choices[0] && msg.choices[0].delta && msg.choices[0].delta.content) {
104
+ combinedContent += msg.choices[0].delta.content;
105
+ } else if (msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content) {
106
+ combinedContent += msg.choices[0].message.content;
107
+ }
108
+ }
109
+
110
+ return {
111
+ id,
112
+ content: combinedContent || "No content found in response",
113
+ finish_reason: finishReason,
114
+ usage: null
115
+ };
116
+
117
+ } catch (error) {
118
+ console.error("Error parsing SSE response:", error);
119
+ return {
120
+ id,
121
+ content: "Error parsing response: " + error.message,
122
+ finish_reason: "error",
123
+ usage: null
124
+ };
125
+ }
126
+ }
127
+
128
+ // 发送聊天请求到 freeaichatplayground
129
+ async function sendChatRequest(modelName, messages) {
130
+ try {
131
+ const formattedMessages = messages.map((msg, index) => ({
132
+ id: `${Date.now() + index}`,
133
+ role: msg.role,
134
+ content: msg.content,
135
+ model: {
136
+ id: "", // 这个ID会在下面被填充
137
+ name: modelName,
138
+ icon: "",
139
+ provider: "",
140
+ contextWindow: 63920
141
+ }
142
+ }));
143
+
144
+ // 获取模型列表以找到正确的ID
145
+ const models = await fetchModels();
146
+ const selectedModel = models.find(m => m.name === modelName);
147
+
148
+ if (!selectedModel) {
149
+ throw new Error(`Model "${modelName}" not found`);
150
+ }
151
+
152
+ // 填充模型信息
153
+ formattedMessages.forEach(msg => {
154
+ if (msg.model) {
155
+ msg.model.id = selectedModel.id;
156
+ msg.model.icon = selectedModel.icon;
157
+ msg.model.provider = selectedModel.provider;
158
+ }
159
+ });
160
+
161
+ const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
162
+ method: "POST",
163
+ headers: {
164
+ "Content-Type": "application/json",
165
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
166
+ "Origin": "https://freeaichatplayground.com",
167
+ "Referer": "https://freeaichatplayground.com/chat",
168
+ },
169
+ body: JSON.stringify({
170
+ model: modelName,
171
+ messages: formattedMessages,
172
+ }),
173
+ });
174
+
175
+ if (!response.ok) {
176
+ const errorText = await response.text();
177
+ throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
178
+ }
179
+
180
+ // 处理 SSE 流式响应
181
+ const parsedResponse = await parseSSEResponse(response);
182
+ return parsedResponse;
183
+ } catch (error) {
184
+ console.error("Error in chat completion:", error);
185
+ throw error;
186
+ }
187
+ }
188
+
189
+ // 转换为 OpenAI 格式的聊天响应
190
+ function transformChatResponseToOpenAIFormat(response, modelName) {
191
+ return {
192
+ id: response.id || `chatcmpl-${Date.now()}`,
193
+ object: "chat.completion",
194
+ created: Math.floor(Date.now() / 1000),
195
+ model: modelName,
196
+ choices: [
197
+ {
198
+ index: 0,
199
+ message: {
200
+ role: "assistant",
201
+ content: response.content,
202
+ },
203
+ finish_reason: response.finish_reason || "stop",
204
+ },
205
+ ],
206
+ usage: response.usage || {
207
+ prompt_tokens: 0,
208
+ completion_tokens: 0,
209
+ total_tokens: 0,
210
+ },
211
+ };
212
+ }
213
+
214
+ // 处理流式响应请求
215
+ async function handleStreamRequest(request, modelName, messages) {
216
+ const encoder = new TextEncoder();
217
+ const formattedMessages = messages.map((msg, index) => ({
218
+ id: `${Date.now() + index}`,
219
+ role: msg.role,
220
+ content: msg.content,
221
+ model: {
222
+ id: "", // 这个ID会在下面被填充
223
+ name: modelName,
224
+ icon: "",
225
+ provider: "",
226
+ contextWindow: 63920
227
+ }
228
+ }));
229
+
230
+ // 获取模型列表以找到正确的ID
231
+ const models = await fetchModels();
232
+ const selectedModel = models.find(m => m.name === modelName);
233
+
234
+ if (!selectedModel) {
235
+ throw new Error(`Model "${modelName}" not found`);
236
+ }
237
+
238
+ // 填充模型信息
239
+ formattedMessages.forEach(msg => {
240
+ if (msg.model) {
241
+ msg.model.id = selectedModel.id;
242
+ msg.model.icon = selectedModel.icon;
243
+ msg.model.provider = selectedModel.provider;
244
+ }
245
+ });
246
+
247
+ const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
248
+ method: "POST",
249
+ headers: {
250
+ "Content-Type": "application/json",
251
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
252
+ "Origin": "https://freeaichatplayground.com",
253
+ "Referer": "https://freeaichatplayground.com/chat",
254
+ },
255
+ body: JSON.stringify({
256
+ model: modelName,
257
+ messages: formattedMessages,
258
+ stream: true,
259
+ }),
260
+ });
261
+
262
+ if (!response.ok) {
263
+ const errorText = await response.text();
264
+ throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
265
+ }
266
+
267
+ const stream = new ReadableStream({
268
+ async start(controller) {
269
+ const reader = response.body.getReader();
270
+ const chatId = `chatcmpl-${Date.now()}`;
271
+
272
+ // 发送初始消息
273
+ const initialChunk = {
274
+ id: chatId,
275
+ object: "chat.completion.chunk",
276
+ created: Math.floor(Date.now() / 1000),
277
+ model: modelName,
278
+ choices: [{
279
+ index: 0,
280
+ delta: { role: "assistant" },
281
+ finish_reason: null
282
+ }]
283
+ };
284
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(initialChunk)}\n\n`));
285
+
286
+ try {
287
+ let buffer = "";
288
+
289
+ while (true) {
290
+ const { done, value } = await reader.read();
291
+ if (done) break;
292
+
293
+ const chunk = new TextDecoder().decode(value);
294
+ buffer += chunk;
295
+
296
+ // 处理缓冲区中的所有完整 SSE 消息
297
+ const messages = buffer.split('\n\n');
298
+ buffer = messages.pop() || ""; // 保留最后一个可能不完整的消息
299
+
300
+ for (const msg of messages) {
301
+ if (!msg.trim().startsWith('data:')) continue;
302
+
303
+ try {
304
+ const jsonStr = msg.replace('data:', '').trim();
305
+ const data = JSON.parse(jsonStr);
306
+
307
+ if (data.choices && data.choices[0]) {
308
+ // 转换为 OpenAI 流式格式
309
+ const openAIChunk = {
310
+ id: chatId,
311
+ object: "chat.completion.chunk",
312
+ created: Math.floor(Date.now() / 1000),
313
+ model: modelName,
314
+ choices: [{
315
+ index: 0,
316
+ delta: {},
317
+ finish_reason: data.choices[0].finish_reason || null
318
+ }]
319
+ };
320
+
321
+ // 提取内容
322
+ if (data.choices[0].delta && data.choices[0].delta.content) {
323
+ openAIChunk.choices[0].delta.content = data.choices[0].delta.content;
324
+ } else if (data.choices[0].message && data.choices[0].message.content) {
325
+ openAIChunk.choices[0].delta.content = data.choices[0].message.content;
326
+ }
327
+
328
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(openAIChunk)}\n\n`));
329
+
330
+ // 如果是最后一条消息,发送 [DONE]
331
+ if (data.choices[0].finish_reason) {
332
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
333
+ }
334
+ }
335
+ } catch (e) {
336
+ console.warn("Failed to parse SSE message:", msg);
337
+ continue;
338
+ }
339
+ }
340
+ }
341
+
342
+ // 确保发送最终的 [DONE] 消息
343
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
344
+ controller.close();
345
+ } catch (error) {
346
+ console.error("Stream processing error:", error);
347
+ controller.error(error);
348
+ }
349
+ }
350
+ });
351
+
352
+ return new Response(stream, {
353
+ headers: {
354
+ "Content-Type": "text/event-stream",
355
+ "Cache-Control": "no-cache",
356
+ "Connection": "keep-alive",
357
+ "Access-Control-Allow-Origin": "*",
358
+ }
359
+ });
360
+ }
361
+
362
+ // 处理请求
363
+ async function handleRequest(request) {
364
+ const url = new URL(request.url);
365
+ const path = url.pathname;
366
+
367
+ // CORS 预检请求处理
368
+ if (request.method === "OPTIONS") {
369
+ return new Response(null, {
370
+ status: 200,
371
+ headers: {
372
+ "Access-Control-Allow-Origin": "*",
373
+ "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
374
+ "Access-Control-Allow-Headers": "Content-Type, Authorization",
375
+ },
376
+ });
377
+ }
378
+
379
+ // 设置通用响应头
380
+ const headers = {
381
+ "Content-Type": "application/json",
382
+ "Access-Control-Allow-Origin": "*",
383
+ };
384
+
385
+ try {
386
+ // 模型列表接口
387
+ if (path === "/v1/models" && request.method === "GET") {
388
+ const models = await fetchModels();
389
+ const openAIModels = transformModelsToOpenAIFormat(models);
390
+ return new Response(JSON.stringify(openAIModels), { headers });
391
+ }
392
+
393
+ // 聊天完成接口
394
+ else if (path === "/v1/chat/completions" && request.method === "POST") {
395
+ const requestData = await request.json();
396
+ const modelName = requestData.model || DEFAULT_MODEL;
397
+ const messages = requestData.messages || [];
398
+ const stream = requestData.stream || false;
399
+
400
+ // 处理流式响应
401
+ if (stream) {
402
+ return handleStreamRequest(request, modelName, messages);
403
+ }
404
+
405
+ // 处理普通响应
406
+ const chatResponse = await sendChatRequest(modelName, messages);
407
+ const openAIResponse = transformChatResponseToOpenAIFormat(chatResponse, modelName);
408
+
409
+ return new Response(JSON.stringify(openAIResponse), { headers });
410
+ }
411
+
412
+ // 未知路径
413
+ else {
414
+ return new Response(JSON.stringify({
415
+ error: {
416
+ message: "Not found",
417
+ type: "invalid_request_error",
418
+ code: "path_not_found",
419
+ }
420
+ }), { status: 404, headers });
421
+ }
422
+ } catch (error) {
423
+ console.error("Error handling request:", error);
424
+ return new Response(JSON.stringify({
425
+ error: {
426
+ message: error.message,
427
+ type: "server_error",
428
+ code: "internal_server_error",
429
+ }
430
+ }), { status: 500, headers });
431
+ }
432
+ }
433
+
434
+ // 启动服务器
435
+ const port = parseInt(Deno.env.get("PORT") || "8000");
436
+ console.log(`Starting server on port ${port}...`);
437
+ serve(handleRequest, { port });