ropic committed · verified
Commit 8793159 · 1 Parent(s): 5eadde9

Upload 5 files

Calmlo OpenAI Proxy Dockerfile ADDED
@@ -0,0 +1,24 @@
+ # Use the official Node.js 18 LTS image as the base
+ FROM node:18-alpine
+
+ # Set the working directory
+ WORKDIR /usr/src/app
+
+ # Copy package.json and package-lock.json (if present)
+ COPY package*.json ./
+
+ # Install project dependencies
+ RUN npm install
+
+ # Copy the application source code
+ COPY . .
+
+ # Expose the port used by the application
+ EXPOSE 3000
+
+ # Define environment variables (can be overridden in docker-compose)
+ ENV PORT=3000
+ # FAL_KEY should be supplied at runtime via docker-compose, not hard-coded here
+
+ # Command to run the application
+ CMD [ "npm", "start" ]
Calmlo fal-openai-proxy README.md ADDED
@@ -0,0 +1,30 @@
+ ---
+ title: fal-openai-proxy
+ emoji: 🐳
+ colorFrom: red
+ colorTo: red
+ sdk: docker
+ sdk_version: "{{sdkVersion}}"
+ app_file: app.py
+ pinned: false
+ app_port: 3000
+ ---
+
+
+
+ ## Converting OpenAI request format to fal
+ Input limits: the system prompt and the prompt are each capped at 5,000 characters (not tokens).
+
+ Output length: in a quick test generating long-form fiction, it produced over 50k tokens.
+
+ Context: not supported.
+
+ So I used Gemini to hack together an openaiToFal service that simulates context: using 5,000 characters as the dividing line, the conversation is packed into the system prompt and the prompt respectively, which extends the effective input to 10,000 characters; the oldest chat history gets pushed out first. The GitHub repo is a Docker Compose package: put your key into docker-compose.yml and start it with a single `docker compose up -d`. The default port is 13000.
+
+ ## Deployment steps
+ 1. Edit docker-compose.yml and fill in your fal API key.
+
+ 2. Start with `docker compose up -d`.
+
+ ## Important
+ I use this together with newapi for management, so there is **no authentication**; add your own if you need it.
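
For reference, here is a minimal client-side sketch for calling the proxy once it is running. It assumes the default host port 13000 from docker-compose.yml, and it sends a Bearer token because the server.js in this commit additionally requires an API_KEY environment variable and rejects requests without a matching `Authorization` header (the process exits at startup if API_KEY is unset). The key value and the file name `client.mjs` are placeholders; run it with Node 18+ as an ES module.

```js
// client.mjs -- hypothetical example client for the proxy (not part of the commit).
// Assumes the proxy listens on localhost:13000 and API_KEY matches the server's value.
const API_KEY = "your-proxy-api-key"; // placeholder

const response = await fetch("http://localhost:13000/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${API_KEY}`,
  },
  body: JSON.stringify({
    model: "anthropic/claude-3.5-sonnet", // any entry from FAL_SUPPORTED_MODELS
    stream: false,
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Hello!" },
    ],
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);
```

Long histories are handled server-side: convertMessagesToFalPrompt packs the newest messages into the prompt slot, overflows into the system prompt slot (each capped at 4,800 characters in server.js), and drops anything older.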
fal-openai-proxy docker-compose.yml ADDED
@@ -0,0 +1,15 @@
+ services:
+   fal-openai-proxy:
+     build: . # Build from the Dockerfile in the current directory
+     container_name: fal_openai_proxy
+     ports:
+       - "13000:3000" # Map host port 13000 to container port 3000
+     environment:
+       # Set your Fal AI API key here.
+       # Or, for better security, create a .env file that defines FAL_KEY
+       # and uncomment the lines below:
+       # env_file:
+       #   - .env
+       FAL_KEY: "" # !! Important: replace with your real Fal AI key !!
+       PORT: 3000 # Keep the container port consistent with the Dockerfile and server.js
+     restart: unless-stopped # Restart automatically on failure unless stopped manually
fal-openai-proxy package.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "name": "fal-openai-proxy",
+   "version": "1.0.0",
+   "description": "Proxy server to convert OpenAI requests to fal-ai format",
+   "main": "server.js",
+   "type": "module",
+   "scripts": {
+     "start": "node server.js"
+   },
+   "dependencies": {
+     "@fal-ai/client": "latest",
+     "express": "^4.19.2"
+   },
+   "engines": {
+     "node": ">=18.0.0"
+   }
+ }
fal-openai-proxy server.js ADDED
@@ -0,0 +1,550 @@
+ import express from 'express';
+ import { fal } from '@fal-ai/client';
+
+ // --- Multi-Key Configuration ---
+ // *** Read comma-separated keys from the FAL_KEY environment variable ***
+ const rawFalKeys = process.env.FAL_KEY; // Expect comma-separated keys: key1,key2,key3 in FAL_KEY
+ const API_KEY = process.env.API_KEY; // Custom API Key for proxy auth remains the same
+
+ if (!rawFalKeys) {
+   // *** Error message updated to reference FAL_KEY ***
+   console.error("Error: FAL_KEY environment variable is not set (should be comma-separated).");
+   process.exit(1);
+ }
+
+ if (!API_KEY) {
+   console.error("Error: API_KEY environment variable is not set.");
+   process.exit(1);
+ }
+
+ // Parse and prepare the keys
+ let falKeys = rawFalKeys.split(',')
+   .map(key => key.trim())
+   .filter(key => key.length > 0)
+   .map(key => ({
+     key: key,
+     failed: false, // Track if the key is currently considered failed
+     failedTimestamp: 0 // Timestamp when the key was marked as failed
+   }));
+
+ if (falKeys.length === 0) {
+   // *** Error message updated to reference FAL_KEY ***
+   console.error("Error: No valid keys found in FAL_KEY after processing the environment variable.");
+   process.exit(1);
+ }
+
+ let currentKeyIndex = 0;
+ const failedKeyCooldown = 60 * 1000; // Cooldown period in milliseconds (e.g., 60 seconds) before retrying a failed key
+
+ // *** Log message updated to reference FAL_KEY ***
+ console.log(`Loaded ${falKeys.length} FAL API Key(s) from FAL_KEY environment variable.`);
+ console.log(`Failed key cooldown period: ${failedKeyCooldown / 1000} seconds.`);
+
+ // NOTE: We will configure fal client per request now, so initial global config is removed.
+
+ // --- Key Management Functions ---
+
+ /**
+  * Selects the next available FAL key using round-robin and skipping recently failed keys.
+  * @returns {object | null} Key info object { key, failed, failedTimestamp } or null if all keys are failed.
+  */
+ function getNextKey() {
+   const totalKeys = falKeys.length;
+   if (totalKeys === 0) return null;
+
+   let attempts = 0;
+   while (attempts < totalKeys) {
+     const keyIndex = currentKeyIndex % totalKeys;
+     const keyInfo = falKeys[keyIndex];
+     // Increment index for the *next* call, ensuring round-robin
+     currentKeyIndex = (currentKeyIndex + 1) % totalKeys;
+
+     // Check if key is marked as failed and if cooldown has passed
+     if (keyInfo.failed) {
+       const now = Date.now();
+       if (now - keyInfo.failedTimestamp < failedKeyCooldown) {
+         // console.log(`Key index ${keyIndex} is in cooldown. Skipping.`);
+         attempts++;
+         continue; // Skip this key, it's still in cooldown
+       } else {
+         console.log(`Cooldown finished for key index ${keyIndex}. Resetting failure status.`);
+         keyInfo.failed = false; // Cooldown expired, reset status
+         keyInfo.failedTimestamp = 0;
+       }
+     }
+     // console.log(`Selected key index: ${keyIndex}`);
+     return keyInfo; // Return the valid key info object
+   }
+
+   console.warn("All FAL keys are currently marked as failed and in cooldown.");
+   return null; // All keys are currently failed and within cooldown
+ }
+
+ /**
+  * Marks a specific key as failed.
+  * @param {object} keyInfo - The key info object to mark as failed.
+  */
+ function markKeyFailed(keyInfo) {
+   if (keyInfo && !keyInfo.failed) { // Only mark if not already marked
+     keyInfo.failed = true;
+     keyInfo.failedTimestamp = Date.now();
+     const keyIndex = falKeys.findIndex(k => k.key === keyInfo.key);
+     console.warn(`Marking key index ${keyIndex} (ending ...${keyInfo.key.slice(-4)}) as failed.`);
+   }
+ }
+
+ /**
+  * Determines if an error likely indicates an API key issue (auth, quota, etc.).
+  * This needs refinement based on actual errors from fal.ai.
+  * @param {Error} error - The error object caught from the fal client.
+  * @returns {boolean} - True if the error suggests a key failure, false otherwise.
+  */
+ function isKeyRelatedError(error) {
+   const errorMessage = error?.message?.toLowerCase() || '';
+   const errorStatus = error?.status; // Assuming the error object might have a status property
+
+   // Check for common indicators of key issues
+   if (errorStatus === 401 || errorStatus === 403 || // Unauthorized, Forbidden
+       errorMessage.includes('authentication failed') ||
+       errorMessage.includes('invalid api key') ||
+       errorMessage.includes('permission denied')) {
+     return true;
+   }
+   if (errorStatus === 429 || // Too Many Requests (Rate Limit / Quota)
+       errorMessage.includes('rate limit exceeded') ||
+       errorMessage.includes('quota exceeded')) {
+     return true;
+   }
+   // Add more specific error messages or codes from fal.ai if known
+   // console.log("Error does not appear to be key-related:", error); // Debugging
+   return false;
+ }
+
+ // --- Express App Setup ---
+ const app = express();
+ app.use(express.json({ limit: '50mb' }));
+ app.use(express.urlencoded({ extended: true, limit: '50mb' }));
+
+ const PORT = process.env.PORT || 3000;
+
+ // API key authentication middleware (Remains the same, checks custom API_KEY)
+ const apiKeyAuth = (req, res, next) => {
+   const authHeader = req.headers['authorization'];
+
+   if (!authHeader) {
+     console.warn('Unauthorized: No Authorization header provided');
+     return res.status(401).json({ error: 'Unauthorized: No API Key provided' });
+   }
+
+   const authParts = authHeader.split(' ');
+   if (authParts.length !== 2 || authParts[0].toLowerCase() !== 'bearer') {
+     console.warn('Unauthorized: Invalid Authorization header format');
+     return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' });
+   }
+
+   const providedKey = authParts[1];
+   if (providedKey !== API_KEY) {
+     console.warn('Unauthorized: Invalid API Key');
+     return res.status(401).json({ error: 'Unauthorized: Invalid API Key' });
+   }
+
+   next();
+ };
+
+ app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth);
+
+ // === Global limits === (Remains the same)
+ const PROMPT_LIMIT = 4800;
+ const SYSTEM_PROMPT_LIMIT = 4800;
+ // === End of limit definitions ===
+
+ // List of models supported by fal-ai/any-llm (Remains the same)
+ const FAL_SUPPORTED_MODELS = [
+   "anthropic/claude-3.7-sonnet",
+   "anthropic/claude-3.5-sonnet",
+   "anthropic/claude-3-5-haiku",
+   "anthropic/claude-3-haiku",
+   "google/gemini-pro-1.5",
+   "google/gemini-flash-1.5",
+   "google/gemini-flash-1.5-8b",
+   "google/gemini-2.0-flash-001",
+   "meta-llama/llama-3.2-1b-instruct",
+   "meta-llama/llama-3.2-3b-instruct",
+   "meta-llama/llama-3.1-8b-instruct",
+   "meta-llama/llama-3.1-70b-instruct",
+   "openai/gpt-4o-mini",
+   "openai/gpt-4o",
+   "deepseek/deepseek-r1",
+   "meta-llama/llama-4-maverick",
+   "meta-llama/llama-4-scout"
+ ];
+
+ // Helper function to get owner from model ID (Remains the same)
+ const getOwner = (modelId) => {
+   if (modelId && modelId.includes('/')) {
+     return modelId.split('/')[0];
+   }
+   return 'fal-ai';
+ };
+
+ // GET /v1/models endpoint (Remains the same)
+ app.get('/v1/models', (req, res) => {
+   console.log("Received request for GET /v1/models");
+   try {
+     const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
+       id: modelId, object: "model", created: 1700000000, owned_by: getOwner(modelId)
+     }));
+     res.json({ object: "list", data: modelsData });
+     console.log("Successfully returned model list.");
+   } catch (error) {
+     console.error("Error processing GET /v1/models:", error);
+     res.status(500).json({ error: "Failed to retrieve model list." });
+   }
+ });
+
+ // === convertMessagesToFalPrompt function (Remains the same) ===
+ function convertMessagesToFalPrompt(messages) {
+   let fixed_system_prompt_content = "";
+   const conversation_message_blocks = [];
+   // console.log(`Original messages count: ${messages.length}`); // Less verbose logging
+
+   // 1. Separate system messages and format user/assistant messages
+   for (const message of messages) {
+     let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
+     switch (message.role) {
+       case 'system':
+         fixed_system_prompt_content += `System: ${content}\n\n`;
+         break;
+       case 'user':
+         conversation_message_blocks.push(`Human: ${content}\n\n`);
+         break;
+       case 'assistant':
+         conversation_message_blocks.push(`Assistant: ${content}\n\n`);
+         break;
+       default:
+         console.warn(`Unsupported role: ${message.role}`);
+         continue;
+     }
+   }
+
+   // 2. Truncate the combined system message if it exceeds the limit
+   if (fixed_system_prompt_content.length > SYSTEM_PROMPT_LIMIT) {
+     const originalLength = fixed_system_prompt_content.length;
+     fixed_system_prompt_content = fixed_system_prompt_content.substring(0, SYSTEM_PROMPT_LIMIT);
+     console.warn(`Combined system messages truncated from ${originalLength} to ${SYSTEM_PROMPT_LIMIT}`);
+   }
+   fixed_system_prompt_content = fixed_system_prompt_content.trim();
+
+
+   // 3. Compute the space left in system_prompt for conversation history
+   let space_occupied_by_fixed_system = 0;
+   if (fixed_system_prompt_content.length > 0) {
+     space_occupied_by_fixed_system = fixed_system_prompt_content.length + 4; // reserve room for the \n\n...\n\n separator
+   }
+   const remaining_system_limit = Math.max(0, SYSTEM_PROMPT_LIMIT - space_occupied_by_fixed_system);
+   // console.log(`Trimmed fixed system prompt length: ${fixed_system_prompt_content.length}. Approx remaining system history limit: ${remaining_system_limit}`);
+
+
+   // 4. Fill user/assistant history from newest to oldest
+   const prompt_history_blocks = [];
+   const system_prompt_history_blocks = [];
+   let current_prompt_length = 0;
+   let current_system_history_length = 0;
+   let promptFull = false;
+   let systemHistoryFull = (remaining_system_limit <= 0);
+
+   // console.log(`Processing ${conversation_message_blocks.length} user/assistant messages for recency filling.`);
+   for (let i = conversation_message_blocks.length - 1; i >= 0; i--) {
+     const message_block = conversation_message_blocks[i];
+     const block_length = message_block.length;
+
+     if (promptFull && systemHistoryFull) {
+       // console.log(`Both prompt and system history slots full. Omitting older messages from index ${i}.`);
+       break;
+     }
+
+     // Prefer placing the block into the prompt
+     if (!promptFull) {
+       if (current_prompt_length + block_length <= PROMPT_LIMIT) {
+         prompt_history_blocks.unshift(message_block);
+         current_prompt_length += block_length;
+         continue;
+       } else {
+         promptFull = true;
+         // console.log(`Prompt limit (${PROMPT_LIMIT}) reached. Trying system history slot.`);
+       }
+     }
+
+     // If the prompt is full, try the remaining space in system_prompt
+     if (!systemHistoryFull) {
+       if (current_system_history_length + block_length <= remaining_system_limit) {
+         system_prompt_history_blocks.unshift(message_block);
+         current_system_history_length += block_length;
+         continue;
+       } else {
+         systemHistoryFull = true;
+         // console.log(`System history limit (${remaining_system_limit}) reached.`);
+       }
+     }
+   }
+
+   // 5. *** Assemble the final prompt and system_prompt (including separator logic) ***
+   const system_prompt_history_content = system_prompt_history_blocks.join('').trim();
+   const final_prompt = prompt_history_blocks.join('').trim();
+
+   // Define the separator (the Chinese text means "Below is earlier conversation content")
+   const SEPARATOR = "\n\n-------下面是比较早之前的对话内容-----\n\n";
+
+   let final_system_prompt = "";
+
+   const hasFixedSystem = fixed_system_prompt_content.length > 0;
+   const hasSystemHistory = system_prompt_history_content.length > 0;
+
+   if (hasFixedSystem && hasSystemHistory) {
+     final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content;
+     // console.log("Combining fixed system prompt and history with separator.");
+   } else if (hasFixedSystem) {
+     final_system_prompt = fixed_system_prompt_content;
+     // console.log("Using only fixed system prompt.");
+   } else if (hasSystemHistory) {
+     final_system_prompt = system_prompt_history_content;
+     // console.log("Using only history in system prompt slot.");
+   }
+
+   // 6. Return the result
+   const result = {
+     system_prompt: final_system_prompt,
+     prompt: final_prompt
+   };
+
+   console.log(`Final system_prompt length: ${result.system_prompt.length}, Final prompt length: ${result.prompt.length}`);
+
+   return result;
+ }
+ // === End of convertMessagesToFalPrompt function ===
+
+
+ /**
+  * Wraps the fal.ai API call with retry logic using available keys.
+  * @param {'stream' | 'subscribe'} operation - The fal operation to perform.
+  * @param {string} functionId - The fal function ID (e.g., "fal-ai/any-llm").
+  * @param {object} params - The parameters for the fal function call (input, logs, etc.).
+  * @returns {Promise<any>} - The result from the successful fal call (stream or subscription result).
+  * @throws {Error} - Throws an error if all keys fail or a non-key-related error occurs.
+  */
+ async function tryFalCallWithFailover(operation, functionId, params) {
+   const maxRetries = falKeys.length; // Try each key at most once per request cycle
+   let lastError = null;
+
+   for (let i = 0; i < maxRetries; i++) {
+     const keyInfo = getNextKey();
+     if (!keyInfo) {
+       throw new Error(lastError ? `All FAL keys failed. Last error: ${lastError.message}` : "All FAL keys are currently unavailable (failed or in cooldown).");
+     }
+
+     const currentFalKey = keyInfo.key;
+     console.log(`Attempt ${i + 1}/${maxRetries}: Using key ending in ...${currentFalKey.slice(-4)}`);
+
+     try {
+       // --- Configure fal client with the selected key for this attempt ---
+       // WARNING: This global config change might have concurrency issues in high-load scenarios
+       // if the fal client library doesn't isolate requests properly.
+       fal.config({ credentials: currentFalKey });
+
+       if (operation === 'stream') {
+         const streamResult = await fal.stream(functionId, params);
+         console.log(`Successfully initiated stream with key ending in ...${currentFalKey.slice(-4)}`);
+         return streamResult;
+       } else { // 'subscribe' (non-stream)
+         const result = await fal.subscribe(functionId, params);
+         console.log(`Successfully completed subscribe request with key ending in ...${currentFalKey.slice(-4)}`);
+
+         if (result && result.error) {
+           console.warn(`Fal-ai returned an application error (non-stream) with key ...${currentFalKey.slice(-4)}: ${JSON.stringify(result.error)}`);
+         }
+         return result;
+       }
+     } catch (error) {
+       console.error(`Error using key ending in ...${currentFalKey.slice(-4)}:`, error.message || error);
+       lastError = error;
+
+       if (isKeyRelatedError(error)) {
+         markKeyFailed(keyInfo);
+         console.log(`Key marked as failed. Trying next key if available...`);
+       } else {
+         console.error("Non-key related error occurred. Aborting retries.");
+         throw error;
+       }
+     }
+   }
+
+   console.error("All FAL keys failed after attempting each one.");
+   throw new Error(lastError ? `All FAL keys failed. Last error: ${lastError.message}` : "All FAL API keys failed.");
+ }
+
+
+ // POST /v1/chat/completions endpoint (Modified to use tryFalCallWithFailover)
+ app.post('/v1/chat/completions', async (req, res) => {
+   const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
+
+   console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);
+
+   if (!FAL_SUPPORTED_MODELS.includes(model)) {
+     console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
+   }
+   if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
+     console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
+     return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
+   }
+
+   try {
+     const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
+
+     const falInput = {
+       model: model,
+       prompt: prompt,
+       ...(system_prompt && { system_prompt: system_prompt }),
+       reasoning: !!reasoning,
+     };
+
+     console.log("Prepared Fal Input (lengths):", { system_prompt: system_prompt?.length, prompt: prompt?.length });
+
+     if (stream) {
+       res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
+       res.setHeader('Cache-Control', 'no-cache');
+       res.setHeader('Connection', 'keep-alive');
+       res.setHeader('Access-Control-Allow-Origin', '*');
+       res.flushHeaders();
+
+       let previousOutput = '';
+       let falStream;
+
+       try {
+         falStream = await tryFalCallWithFailover('stream', "fal-ai/any-llm", { input: falInput });
+
+         for await (const event of falStream) {
+           const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
+           const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
+           const errorInfo = (event && event.error) ? event.error : null;
+
+           if (errorInfo) {
+             console.error("Error received *during* fal stream:", errorInfo);
+             const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
+             res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
+             break;
+           }
+
+           let deltaContent = '';
+           if (currentOutput.startsWith(previousOutput)) {
+             deltaContent = currentOutput.substring(previousOutput.length);
+           } else if (currentOutput.length > 0) {
+             console.warn("Fal stream output mismatch detected. Sending full current output as delta.", { previousLength: previousOutput.length, currentLength: currentOutput.length });
+             deltaContent = currentOutput;
+             previousOutput = '';
+           }
+           previousOutput = currentOutput;
+
+           if (deltaContent || !isPartial) {
+             const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] };
+             res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
+           }
+         }
+         res.write(`data: [DONE]\n\n`);
+         res.end();
+         console.log("Stream finished successfully.");
+
+       } catch (streamError) {
+         console.error('Error during stream processing:', streamError);
+         if (!res.writableEnded) {
+           try {
+             const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
+             const finalErrorChunk = { error: { message: "Stream failed", type: "proxy_error", details: errorDetails } };
+             res.write(`data: ${JSON.stringify(finalErrorChunk)}\n\n`);
+             res.write(`data: [DONE]\n\n`);
+             res.end();
+           } catch (finalError) {
+             console.error('Error sending final stream error message to client:', finalError);
+             if (!res.writableEnded) { res.end(); }
+           }
+         }
+       }
+
+     } else { // Non-stream
+       console.log("Executing non-stream request with failover...");
+       const result = await tryFalCallWithFailover('subscribe', "fal-ai/any-llm", { input: falInput, logs: true });
+
+       console.log("Received non-stream result from fal-ai via failover wrapper.");
+
+       if (result && result.error) {
+         console.error("Fal-ai returned an application error in non-stream mode (after successful API call):", result.error);
+         return res.status(500).json({
+           object: "error",
+           message: `Fal-ai application error: ${JSON.stringify(result.error)}`,
+           type: "fal_ai_error",
+           param: null,
+           code: result.error.code || null
+         });
+       }
+
+       const openAIResponse = {
+         id: `chatcmpl-${result?.requestId || Date.now()}`,
+         object: "chat.completion",
+         created: Math.floor(Date.now() / 1000),
+         model: model,
+         choices: [{
+           index: 0,
+           message: {
+             role: "assistant",
+             content: result?.output || ""
+           },
+           finish_reason: "stop"
+         }],
+         usage: {
+           prompt_tokens: null,
+           completion_tokens: null,
+           total_tokens: null
+         },
+         system_fingerprint: null,
+         ...(result?.reasoning && { fal_reasoning: result.reasoning }),
+       };
+       res.json(openAIResponse);
+       console.log("Returned non-stream response successfully.");
+     }
+
+   } catch (error) {
+     console.error('Unhandled error in /v1/chat/completions:', error);
+     if (!res.headersSent) {
+       const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
+       const errorType = error.message?.includes("All FAL keys failed") ? "api_key_error" : "proxy_internal_error";
+       res.status(500).json({
+         error: {
+           message: `Internal Server Error in Proxy: ${errorMessage}`,
+           type: errorType,
+           details: error.stack // Optional: include stack in dev/debug mode
+         }
+       });
+     } else if (!res.writableEnded) {
+       console.error("Headers already sent, attempting to end response after error.");
+       res.end();
+     }
+   }
+ });
+
+ // --- Server Start ---
+ app.listen(PORT, () => {
+   console.log(`===========================================================`);
+   console.log(`  Fal OpenAI Proxy Server (Multi-Key Failover)`);
+   console.log(`  Listening on port: ${PORT}`);
+   // *** Log message updated to reference FAL_KEY ***
+   console.log(`  Loaded ${falKeys.length} FAL API Key(s) from FAL_KEY.`);
+   console.log(`  API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`);
+   console.log(`  Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
+   console.log(`  Chat Completions: POST http://localhost:${PORT}/v1/chat/completions`);
+   console.log(`  Models Endpoint: GET http://localhost:${PORT}/v1/models`);
+   console.log(`===========================================================`);
+ });
+
+ // Root path response
+ app.get('/', (req, res) => {
+   res.send('Fal OpenAI Proxy (Multi-Key Failover) is running.');
+ });
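
And a rough sketch of consuming the streaming mode: with `stream: true`, server.js writes OpenAI-style chunks as server-sent events (`data: {...}` frames, terminated by `data: [DONE]`). The sketch below assumes Node 18+, where the fetch response body can be iterated as an async byte stream; the host, port, key, and file name `stream-client.mjs` are placeholders, not part of the commit.

```js
// stream-client.mjs -- hypothetical example of reading the proxy's SSE output.
const res = await fetch("http://localhost:13000/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer your-proxy-api-key", // placeholder
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    stream: true,
    messages: [{ role: "user", content: "Tell me a short story." }],
  }),
});

const decoder = new TextDecoder();
let buffer = "";
outer: for await (const chunk of res.body) {
  buffer += decoder.decode(chunk, { stream: true });
  // SSE frames are separated by a blank line.
  let end;
  while ((end = buffer.indexOf("\n\n")) !== -1) {
    const frame = buffer.slice(0, end).trim();
    buffer = buffer.slice(end + 2);
    if (!frame.startsWith("data: ")) continue;
    const payload = frame.slice("data: ".length);
    if (payload === "[DONE]") break outer; // the server ends the stream right after this
    const parsed = JSON.parse(payload);
    const delta = parsed.choices?.[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
  }
}
process.stdout.write("\n");
```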