github-actions[bot] committed on
Commit
e39a317
·
1 Parent(s): 0c1cc48

Update from GitHub Actions

Browse files
Files changed (1) hide show
  1. server.js +86 -41
server.js CHANGED
@@ -70,7 +70,7 @@ function convertMessagesToFalPrompt(messages) {
70
  let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
71
  switch (message.role) {
72
  case 'system':
73
- fixed_system_prompt_content += `System: ${content}\n\n`;
74
  break;
75
  case 'user':
76
  conversation_message_blocks.push(`Human: ${content}\n\n`);
@@ -101,9 +101,9 @@ function convertMessagesToFalPrompt(messages) {
101
  if (fixed_system_prompt_content.length > 0) {
102
  // 如果固定内容不为空,计算其长度 + 后面可能的分隔符的长度(如果需要)
103
  // 暂时只计算内容长度,分隔符在组合时再考虑
104
- space_occupied_by_fixed_system = fixed_system_prompt_content.length + 4; // 预留 \n\n...\n\n 的长度
105
  }
106
- const remaining_system_limit = Math.max(0, SYSTEM_PROMPT_LIMIT - space_occupied_by_fixed_system);
107
  console.log(`Trimmed fixed system prompt length: ${fixed_system_prompt_content.length}. Approx remaining system history limit: ${remaining_system_limit}`);
108
 
109
 
@@ -140,12 +140,12 @@ function convertMessagesToFalPrompt(messages) {
140
  // 如果 prompt 满了,尝试放入 system_prompt 的剩余空间
141
  if (!systemHistoryFull) {
142
  if (current_system_history_length + block_length <= remaining_system_limit) {
143
- system_prompt_history_blocks.unshift(message_block);
144
- current_system_history_length += block_length;
145
- continue;
146
  } else {
147
- systemHistoryFull = true;
148
- console.log(`System history limit (${remaining_system_limit}) reached.`);
149
  }
150
  }
151
  }
@@ -191,43 +191,88 @@ function convertMessagesToFalPrompt(messages) {
191
  }
192
  // === convertMessagesToFalPrompt 函数结束 ===
193
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
  // POST /v1/chat/completions endpoint (保持不变)
196
  app.post('/v1/chat/completions', async (req, res) => {
197
 
198
- let authKey = null;
199
- const authHeader = req.headers.authorization;
200
-
201
- if (authHeader) {
202
- const parts = authHeader.split(' ');
203
- if (parts.length === 2) {
204
- const scheme = parts[0];
205
- const credentials = parts[1];
206
-
207
- if (scheme === 'Bearer') {
208
- authKey = credentials; // JWT 或其他 token
209
- } else if (scheme === 'Basic') {
210
- // Basic 认证解码
211
- const decoded = Buffer.from(credentials, 'base64').toString('utf8');
212
- const [username, password] = decoded.split(':');
213
- req.auth = { username, password };
214
- authKey = decoded; // 或者只保存 username
215
- } else if (scheme === 'ApiKey' || scheme === 'Key') {
216
- authKey = credentials;
217
- }
 
218
  }
219
- }
220
-
221
- fal.config({
222
- credentials: authKey,
223
- });
224
-
225
  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
226
 
227
  console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);
228
 
229
  if (!FAL_SUPPORTED_MODELS.includes(model)) {
230
- console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
231
  }
232
  if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
233
  console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
@@ -236,7 +281,7 @@ app.post('/v1/chat/completions', async (req, res) => {
236
 
237
  try {
238
  // *** 使用更新后的转换函数 ***
239
- const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
240
 
241
  const falInput = {
242
  model: model,
@@ -244,7 +289,7 @@ app.post('/v1/chat/completions', async (req, res) => {
244
  ...(system_prompt && { system_prompt: system_prompt }),
245
  reasoning: !!reasoning,
246
  };
247
- console.log("Fal Input:", JSON.stringify(falInput, null, 2));
248
  console.log("Forwarding request to fal-ai with system-priority + separator + recency input:");
249
  console.log("System Prompt Length:", system_prompt?.length || 0);
250
  console.log("Prompt Length:", prompt?.length || 0);
@@ -321,8 +366,8 @@ app.post('/v1/chat/completions', async (req, res) => {
321
  console.log("Received non-stream result from fal-ai:", JSON.stringify(result, null, 2));
322
 
323
  if (result && result.error) {
324
- console.error("Fal-ai returned an error in non-stream mode:", result.error);
325
- return res.status(500).json({ object: "error", message: `Fal-ai error: ${JSON.stringify(result.error)}`, type: "fal_ai_error", param: null, code: null });
326
  }
327
 
328
  const openAIResponse = {
@@ -341,8 +386,8 @@ app.post('/v1/chat/completions', async (req, res) => {
341
  const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
342
  res.status(500).json({ error: 'Internal Server Error in Proxy', details: errorMessage });
343
  } else if (!res.writableEnded) {
344
- console.error("Headers already sent, ending response.");
345
- res.end();
346
  }
347
  }
348
  });
 
70
  let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
71
  switch (message.role) {
72
  case 'system':
73
+ fixed_system_prompt_content += `${content}\n\n`;
74
  break;
75
  case 'user':
76
  conversation_message_blocks.push(`Human: ${content}\n\n`);
 
101
  if (fixed_system_prompt_content.length > 0) {
102
  // 如果固定内容不为空,计算其长度 + 后面可能的分隔符的长度(如果需要)
103
  // 暂时只计算内容长度,分隔符在组合时再考虑
104
+ space_occupied_by_fixed_system = fixed_system_prompt_content.length + 4; // 预留 \n\n...\n\n 的长度
105
  }
106
+ const remaining_system_limit = Math.max(0, SYSTEM_PROMPT_LIMIT - space_occupied_by_fixed_system);
107
  console.log(`Trimmed fixed system prompt length: ${fixed_system_prompt_content.length}. Approx remaining system history limit: ${remaining_system_limit}`);
108
 
109
 
 
140
  // 如果 prompt 满了,尝试放入 system_prompt 的剩余空间
141
  if (!systemHistoryFull) {
142
  if (current_system_history_length + block_length <= remaining_system_limit) {
143
+ system_prompt_history_blocks.unshift(message_block);
144
+ current_system_history_length += block_length;
145
+ continue;
146
  } else {
147
+ systemHistoryFull = true;
148
+ console.log(`System history limit (${remaining_system_limit}) reached.`);
149
  }
150
  }
151
  }
 
191
  }
192
  // === convertMessagesToFalPrompt 函数结束 ===
193
 
194
+ // === 新的 convertMessagesToFal 函数 ===
195
+ function convertMessagesToFal(messages) {
196
+ let system_prompt = "";
197
+ let prompt = "";
198
+
199
+ // 遍历所有消息
200
+ for (const message of messages) {
201
+ let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
202
+
203
+ switch (message.role) {
204
+ case 'system':
205
+ // 系统消息添加到 system_prompt
206
+ system_prompt += content;
207
+ break;
208
+ case 'user':
209
+ // 用户消息添加到 prompt
210
+ prompt += `Human: ${content}\n\n`;
211
+ break;
212
+ case 'assistant':
213
+ // 助手消息添加到 prompt
214
+ prompt += `Assistant: ${content}\n\n`;
215
+ break;
216
+ default:
217
+ console.warn(`Unsupported role: ${message.role}`);
218
+ continue;
219
+ }
220
+ }
221
+
222
+ // 清理可能的多余空白
223
+ system_prompt = system_prompt.trim();
224
+ prompt = prompt.trim();
225
+
226
+ // 返回结果对象
227
+ const result = {
228
+ system_prompt: system_prompt,
229
+ prompt: prompt
230
+ };
231
+
232
+ console.log(`New function - system_prompt length: ${result.system_prompt.length}`);
233
+ console.log(`New function - prompt length: ${result.prompt.length}`);
234
+
235
+ return result;
236
+ }
237
+ // === convertMessagesToFal 函数结束 ===
238
+
239
 
240
  // POST /v1/chat/completions endpoint (保持不变)
241
  app.post('/v1/chat/completions', async (req, res) => {
242
 
243
+ let authKey = null;
244
+ const authHeader = req.headers.authorization;
245
+
246
+ if (authHeader) {
247
+ const parts = authHeader.split(' ');
248
+ if (parts.length === 2) {
249
+ const scheme = parts[0];
250
+ const credentials = parts[1];
251
+
252
+ if (scheme === 'Bearer') {
253
+ authKey = credentials; // JWT 或其他 token
254
+ } else if (scheme === 'Basic') {
255
+ // Basic 认证解码
256
+ const decoded = Buffer.from(credentials, 'base64').toString('utf8');
257
+ const [username, password] = decoded.split(':');
258
+ req.auth = { username, password };
259
+ authKey = decoded; // 或者只保存 username
260
+ } else if (scheme === 'ApiKey' || scheme === 'Key') {
261
+ authKey = credentials;
262
+ }
263
+ }
264
  }
265
+
266
+ fal.config({
267
+ credentials: authKey,
268
+ });
269
+
 
270
  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
271
 
272
  console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);
273
 
274
  if (!FAL_SUPPORTED_MODELS.includes(model)) {
275
+ console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
276
  }
277
  if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
278
  console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
 
281
 
282
  try {
283
  // *** 使用更新后的转换函数 ***
284
+ const { prompt, system_prompt } = convertMessagesToFal(messages);
285
 
286
  const falInput = {
287
  model: model,
 
289
  ...(system_prompt && { system_prompt: system_prompt }),
290
  reasoning: !!reasoning,
291
  };
292
+ console.log("Fal Input:", JSON.stringify(falInput, null, 2));
293
  console.log("Forwarding request to fal-ai with system-priority + separator + recency input:");
294
  console.log("System Prompt Length:", system_prompt?.length || 0);
295
  console.log("Prompt Length:", prompt?.length || 0);
 
366
  console.log("Received non-stream result from fal-ai:", JSON.stringify(result, null, 2));
367
 
368
  if (result && result.error) {
369
+ console.error("Fal-ai returned an error in non-stream mode:", result.error);
370
+ return res.status(500).json({ object: "error", message: `Fal-ai error: ${JSON.stringify(result.error)}`, type: "fal_ai_error", param: null, code: null });
371
  }
372
 
373
  const openAIResponse = {
 
386
  const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
387
  res.status(500).json({ error: 'Internal Server Error in Proxy', details: errorMessage });
388
  } else if (!res.writableEnded) {
389
+ console.error("Headers already sent, ending response.");
390
+ res.end();
391
  }
392
  }
393
  });