devme committed on
Commit
fac30f2
·
verified ·
1 Parent(s): 4050342

Delete routes.js

Browse files
Files changed (1) hide show
  1. routes.js +0 -608
routes.js DELETED
@@ -1,608 +0,0 @@
1
import express from 'express'
import fetch from 'node-fetch'
import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning } from './config.js'
// NOTE(review): `logInfo` added — handleDirectMessages/handleCountTokens call it
// but it was never imported, which crashes with a ReferenceError at request time.
// Confirm that logger.js actually exports logInfo.
import { logError, logInfo, logRequest, logResponse } from './logger.js'
import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js'
import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js'
import { transformToCommon, getCommonHeaders } from './transformers/request-common.js'
import { AnthropicResponseTransformer } from './transformers/response-anthropic.js'
import { OpenAIResponseTransformer } from './transformers/response-openai.js'
import { getNextProxyAgent } from './proxy-manager.js'

12
/**
 * Extract the auth credential from an incoming request.
 * Checks the `authorization`, `Authorization`, and `x-api-key` headers in
 * that order and returns the first truthy value (undefined when none set).
 */
function getAuthHeader(req) {
  const { headers } = req;
  return headers.authorization || headers.Authorization || headers['x-api-key'];
}
18
-
19
// Shared Express router; every proxy endpoint below is mounted on it and it
// is the module's default export.
const router = express.Router();
20
-
21
/**
 * Convert a /v1/responses API result into a /v1/chat/completions
 * compatible payload. Non-streaming responses only.
 *
 * @param {object} resp - Parsed JSON body returned by the /v1/responses endpoint.
 * @returns {object} A chat.completion-shaped object.
 * @throws {Error} When `resp` is not a non-null object.
 */
function convertResponseToChatCompletion(resp) {
  if (!resp || typeof resp !== 'object') {
    throw new Error('Invalid response object');
  }

  // The Responses API returns an `output` array; the assistant text lives in
  // the first item of type 'message', inside its 'output_text' content blocks.
  const outputMsg = (resp.output || []).find(o => o.type === 'message');
  const textBlocks = outputMsg?.content?.filter(c => c.type === 'output_text') || [];
  const content = textBlocks.map(c => c.text).join('');

  const promptTokens = resp.usage?.input_tokens ?? 0;
  const completionTokens = resp.usage?.output_tokens ?? 0;

  // Map Responses statuses onto chat.completion finish reasons:
  //   'completed'  -> 'stop'
  //   'incomplete' -> 'length' (bug fix: was 'unknown'; 'incomplete' is what
  //                   the Responses API reports when max_output_tokens is hit)
  //   anything else stays 'unknown'.
  let finishReason;
  if (resp.status === 'completed') {
    finishReason = 'stop';
  } else if (resp.status === 'incomplete') {
    finishReason = 'length';
  } else {
    finishReason = 'unknown';
  }

  return {
    id: resp.id ? resp.id.replace(/^resp_/, 'chatcmpl-') : `chatcmpl-${Date.now()}`,
    object: 'chat.completion',
    created: resp.created_at || Math.floor(Date.now() / 1000),
    model: resp.model || 'unknown-model',
    choices: [
      {
        index: 0,
        message: {
          role: outputMsg?.role || 'assistant',
          content: content || ''
        },
        finish_reason: finishReason
      }
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      // Bug fix: previously defaulted to 0 when the upstream omitted
      // `total_tokens`; now falls back to the sum of the components.
      total_tokens: resp.usage?.total_tokens ?? (promptTokens + completionTokens)
    }
  };
}
58
-
59
/**
 * GET /v1/models — list every configured model in OpenAI's list format.
 * Reads the catalogue from config.js; no upstream request is made.
 */
router.get('/v1/models', (req, res) => {
  try {
    const config = getConfig()
    const models = config.models.map(model => ({
      id: model.id,
      object: 'model',
      // Bug fix: OpenAI model objects report `created` as a Unix timestamp
      // in seconds; Date.now() returns milliseconds.
      created: Math.floor(Date.now() / 1000),
      owned_by: model.type,
      permission: [],
      root: model.id,
      parent: null
    }))

    res.json({
      object: 'list',
      data: models
    })
  } catch (error) {
    logError('GET /v1/models', error)
    res.status(500).json({ error: 'Internal server error' })
  }
})
81
-
82
// Standard OpenAI chat-completions handler (with format conversion).
// Translates the incoming OpenAI-format request into the target endpoint's
// native format (anthropic / openai / common), forwards it (optionally
// through a rotating proxy agent), and returns the response — transforming
// SSE streams back to OpenAI chunk format where needed.
async function handleChatCompletions(req, res) {
  try {
    const openaiRequest = req.body
    const modelId = openaiRequest.model

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' })
    }

    // Resolve the model and the endpoint configured for its type.
    const model = getModelById(modelId)
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` })
    }

    const endpoint = getEndpointByType(model.type)
    if (!endpoint) {
      return res.status(500).json({ error: `未找到端点类型 ${model.type}` })
    }

    // Client credential: Authorization header or x-api-key (see getAuthHeader).
    const authHeader = getAuthHeader(req)
    if (!authHeader) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      })
    }

    let transformedRequest
    let headers
    const clientHeaders = req.headers

    // Convert the request body and build upstream headers per endpoint type.
    if (model.type === 'anthropic') {
      transformedRequest = transformToAnthropic(openaiRequest)
      const isStreaming = openaiRequest.stream === true
      headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId)
    } else if (model.type === 'openai') {
      transformedRequest = transformToOpenAI(openaiRequest)
      headers = getOpenAIHeaders(authHeader, clientHeaders)
    } else if (model.type === 'common') {
      transformedRequest = transformToCommon(openaiRequest)
      headers = getCommonHeaders(authHeader, clientHeaders)
    } else {
      return res.status(500).json({ error: `未知的端点类型: ${model.type}` })
    }

    logRequest('POST', endpoint.base_url, headers, transformedRequest)

    // Optional outbound proxy (round-robin; see proxy-manager.js).
    const proxyAgentInfo = getNextProxyAgent(endpoint.base_url)
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(transformedRequest)
    }

    if (proxyAgentInfo?.agent) {
      fetchOptions.agent = proxyAgentInfo.agent
    }

    const response = await fetch(endpoint.base_url, fetchOptions)

    if (!response.ok) {
      const errorText = await response.text()
      logError(`端点错误: ${response.status}`, new Error(errorText))
      return res.status(response.status).json({
        error: `端点返回 ${response.status}`,
        details: errorText
      })
    }

    // Stream flag is read from the TRANSFORMED request — the transformer may
    // have changed it relative to the client's original body.
    const isStreaming = transformedRequest.stream === true

    if (isStreaming) {
      res.setHeader('Content-Type', 'text/event-stream')
      res.setHeader('Cache-Control', 'no-cache')
      res.setHeader('Connection', 'keep-alive')

      // 'common' type: raw pass-through, no transformer involved.
      if (model.type === 'common') {
        try {
          for await (const chunk of response.body) {
            res.write(chunk)
          }
          res.end()
        } catch (streamError) {
          // Best-effort: log and close the client stream; headers are
          // already sent so no error status can be returned here.
          logError('流错误', streamError)
          res.end()
        }
      } else {
        // 'anthropic' / 'openai': rewrite upstream SSE chunks into
        // OpenAI chat.completion.chunk events via the matching transformer.
        let transformer
        if (model.type === 'anthropic') {
          transformer = new AnthropicResponseTransformer(modelId, `chatcmpl-${Date.now()}`)
        } else if (model.type === 'openai') {
          transformer = new OpenAIResponseTransformer(modelId, `chatcmpl-${Date.now()}`)
        }

        try {
          for await (const chunk of transformer.transformStream(response.body)) {
            res.write(chunk)
          }
          res.end()
        } catch (streamError) {
          logError('流错误', streamError)
          res.end()
        }
      }
    } else {
      const data = await response.json()
      if (model.type === 'openai') {
        // The openai endpoint speaks the /v1/responses schema; convert it to
        // chat.completion shape, falling back to the raw body on failure.
        try {
          const converted = convertResponseToChatCompletion(data)
          logResponse(200, null, converted)
          res.json(converted)
        } catch (e) {
          logResponse(200, null, data)
          res.json(data)
        }
      } else {
        logResponse(200, null, data)
        res.json(data)
      }
    }

  } catch (error) {
    logError('/v1/chat/completions', error)
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    })
  }
}
216
-
217
// Forward an OpenAI /v1/responses request verbatim (no format conversion).
// Only models backed by an 'openai' endpoint are accepted; the configured
// system prompt and per-model reasoning level are injected before forwarding.
async function handleDirectResponses(req, res) {
  try {
    const requestBody = req.body;
    const modelId = requestBody.model;

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` });
    }

    // This route is a pass-through for the OpenAI Responses API only.
    if (model.type !== 'openai') {
      return res.status(400).json({
        error: '无效的端点类型',
        message: `/v1/responses 接口只支持 openai 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `未找到端点类型 ${model.type}` });
    }

    const credential = getAuthHeader(req);
    if (!credential) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      });
    }

    // Normalize an x-api-key style credential into a Bearer token.
    const bearerToken = credential.startsWith('Bearer ')
      ? credential
      : `Bearer ${credential}`;

    const headers = getOpenAIHeaders(bearerToken, req.headers);

    // Prepend the configured system prompt to the `instructions` field.
    const outbound = { ...requestBody };
    const systemPrompt = getSystemPrompt();
    if (systemPrompt) {
      outbound.instructions = outbound.instructions
        ? systemPrompt + outbound.instructions
        : systemPrompt;
    }

    // Apply the per-model reasoning configuration.
    const reasoningLevel = getModelReasoning(modelId);
    switch (reasoningLevel) {
      case 'auto':
        // Auto mode: leave whatever reasoning field the client sent untouched.
        break;
      case 'low':
      case 'medium':
      case 'high':
        outbound.reasoning = {
          effort: reasoningLevel,
          summary: 'auto'
        };
        break;
      default:
        // 'off' or any unrecognized value: strip the field entirely.
        delete outbound.reasoning;
    }

    logRequest('POST', endpoint.base_url, headers, outbound);

    const proxyInfo = getNextProxyAgent(endpoint.base_url);
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(outbound)
    };
    if (proxyInfo?.agent) {
      fetchOptions.agent = proxyInfo.agent;
    }

    const upstream = await fetch(endpoint.base_url, fetchOptions);

    if (!upstream.ok) {
      const errorText = await upstream.text();
      logError(`端点错误: ${upstream.status}`, new Error(errorText));
      return res.status(upstream.status).json({
        error: `端点返回 ${upstream.status}`,
        details: errorText
      });
    }

    if (requestBody.stream === true) {
      // Relay SSE chunks to the client as they arrive.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        for await (const chunk of upstream.body) {
          res.write(chunk);
        }
      } catch (streamError) {
        // Headers already sent: log and just close the connection.
        logError('流错误', streamError);
      } finally {
        res.end();
      }
    } else {
      const data = await upstream.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('/v1/responses', error);
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    });
  }
}
346
-
347
// Forward an Anthropic /v1/messages request directly (no format conversion).
// Rewrites the Claude Code identity line in `system`, prepends the configured
// system prompt, applies the per-model thinking budget, then proxies the call.
async function handleDirectMessages(req, res) {
  // NOTE(review): logInfo is called before the try block — if it throws (or
  // is missing from the imports) the error is NOT caught below.
  logInfo('POST /v1/messages');

  try {
    const anthropicRequest = req.body;
    const modelId = anthropicRequest.model;

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` });
    }

    // Only models backed by an 'anthropic' endpoint may use this route.
    if (model.type !== 'anthropic') {
      return res.status(400).json({
        error: '无效的端点类型',
        message: `/v1/messages 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `未找到端点类型 ${model.type}` });
    }

    logInfo(`直接转发到 ${model.type} 端点: ${endpoint.base_url}`);

    // Client credential: Authorization header or x-api-key.
    const authHeader = getAuthHeader(req);
    if (!authHeader) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      });
    }

    // Normalize an x-api-key style credential into a Bearer token.
    const finalAuthHeader = authHeader.startsWith('Bearer ')
      ? authHeader
      : `Bearer ${authHeader}`;

    const clientHeaders = req.headers;

    // Build upstream headers (streaming flag affects the header set).
    const isStreaming = anthropicRequest.stream === true;
    const headers = getAnthropicHeaders(finalAuthHeader, clientHeaders, isStreaming, modelId);

    // Inject the configured system prompt into the `system` field.
    const systemPrompt = getSystemPrompt();
    const modifiedRequest = { ...anthropicRequest };

    // Strip Claude Code's identity line from the client-sent system blocks.
    // NOTE(review): the spread above is a SHALLOW copy — these msg.text
    // mutations write through to the objects inside req.body.
    if (modifiedRequest.system && Array.isArray(modifiedRequest.system)) {
      for (const msg of modifiedRequest.system) {
        if (msg.type === 'text') {
          msg.text = msg.text.replace("You are Claude Code, Anthropic's official CLI for Claude.", 'you are bot.');
        }
      }
    }

    if (systemPrompt) {
      if (modifiedRequest.system && Array.isArray(modifiedRequest.system)) {
        // Existing system array: insert the configured prompt at the front.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt },
          ...modifiedRequest.system
        ];
      } else {
        // No system array yet: create one holding just the configured prompt.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt }
        ];
      }
    }

    // Apply the per-model thinking (extended reasoning) configuration.
    const reasoningLevel = getModelReasoning(modelId);
    if (reasoningLevel === 'auto') {
      // Auto mode: leave the client's own thinking field untouched —
      // keep it if present, don't add one otherwise.
    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
      // Thinking token budgets per configured level.
      const budgetTokens = {
        'low': 4096,
        'medium': 12288,
        'high': 24576
      };

      modifiedRequest.thinking = {
        type: 'enabled',
        budget_tokens: budgetTokens[reasoningLevel]
      };
    } else {
      // 'off' or any unrecognized value: strip the field entirely.
      delete modifiedRequest.thinking;
    }

    logRequest('POST', endpoint.base_url, headers, modifiedRequest);

    // Forward the modified request, optionally through a rotating proxy.
    const proxyAgentInfo = getNextProxyAgent(endpoint.base_url);
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(modifiedRequest)
    };

    if (proxyAgentInfo?.agent) {
      fetchOptions.agent = proxyAgentInfo.agent;
    }

    const response = await fetch(endpoint.base_url, fetchOptions);

    logInfo(`响应状态: ${response.status}`);

    if (!response.ok) {
      const errorText = await response.text();
      logError(`端点错误: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `端点返回 ${response.status}`,
        details: errorText
      });
    }

    if (isStreaming) {
      // Relay the raw SSE stream to the client without transformation.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        // Pipe upstream chunks straight through to the client.
        for await (const chunk of response.body) {
          res.write(chunk);
        }
        res.end();
        logInfo('流转发成功');
      } catch (streamError) {
        // Headers already sent: log and just close the connection.
        logError('流错误', streamError);
        res.end();
      }
    } else {
      // Relay the non-streaming JSON body without transformation.
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('/v1/messages 错误', error);
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    });
  }
}
508
-
509
// Forward an Anthropic token-counting request unchanged to the upstream
// /v1/messages/count_tokens endpoint (anthropic-type models only).
async function handleCountTokens(req, res) {
  logInfo('POST /v1/messages/count_tokens');

  try {
    const requestBody = req.body;
    const modelId = requestBody.model;

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` });
    }

    // Token counting is an Anthropic-only capability.
    if (model.type !== 'anthropic') {
      return res.status(400).json({
        error: '无效的端点类型',
        message: `/v1/messages/count_tokens 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType('anthropic');
    if (!endpoint) {
      return res.status(500).json({ error: '未找到端点类型 anthropic' });
    }

    const credential = getAuthHeader(req);
    if (!credential) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      });
    }

    // Normalize an x-api-key style credential into a Bearer token.
    const bearerToken = credential.startsWith('Bearer ')
      ? credential
      : `Bearer ${credential}`;

    // Non-streaming headers: count_tokens never streams.
    const headers = getAnthropicHeaders(bearerToken, req.headers, false, modelId);

    // Derive the count_tokens URL from the configured messages URL.
    const countTokensUrl = endpoint.base_url.replace('/v1/messages', '/v1/messages/count_tokens');

    // The request body is forwarded as-is.
    const outbound = { ...requestBody };

    logInfo(`转发到 count_tokens 端点: ${countTokensUrl}`);
    logRequest('POST', countTokensUrl, headers, outbound);

    const proxyInfo = getNextProxyAgent(countTokensUrl);
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(outbound)
    };
    if (proxyInfo?.agent) {
      fetchOptions.agent = proxyInfo.agent;
    }

    const upstream = await fetch(countTokensUrl, fetchOptions);

    logInfo(`响应状态: ${upstream.status}`);

    if (!upstream.ok) {
      const errorText = await upstream.text();
      logError(`计数令牌错误: ${upstream.status}`, new Error(errorText));
      return res.status(upstream.status).json({
        error: `端点返回 ${upstream.status}`,
        details: errorText
      });
    }

    const data = await upstream.json();
    logResponse(200, null, data);
    res.json(data);

  } catch (error) {
    logError('/v1/messages/count_tokens 错误', error);
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    });
  }
}
601
-
602
// Route registration: mount every handler on the shared router and export it
// for the main application to use.
router.post('/v1/chat/completions', handleChatCompletions);
router.post('/v1/responses', handleDirectResponses);
router.post('/v1/messages', handleDirectMessages);
router.post('/v1/messages/count_tokens', handleCountTokens);

export default router;